#include "CUFLU.h" #if ( MODEL == HYDRO ) // internal function prototypes // --> only necessary for GPU since they are included in Prototype.h for the CPU codes #ifdef __CUDACC__ GPU_DEVICE static real Hydro_Con2Pres( const real Dens, const real MomX, const real MomY, const real MomZ, const real Engy, const real Passive[], const bool CheckMinPres, const real MinPres, const real Emag, const EoS_DE2P_t EoS_DensEint2Pres, const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[], const real *const EoS_Table[EOS_NTABLE_MAX], real *EintOut ); GPU_DEVICE static real Hydro_Con2Eint( const real Dens, const real MomX, const real MomY, const real MomZ, const real Engy, const bool CheckMinEint, const real MinEint, const real Emag ); GPU_DEVICE static real Hydro_ConEint2Etot( const real Dens, const real MomX, const real MomY, const real MomZ, const real Eint, const real Emag ); GPU_DEVICE static real Hydro_CheckMinPres( const real InPres, const real MinPres ); GPU_DEVICE static real Hydro_CheckMinEint( const real InEint, const real MinEint ); GPU_DEVICE static real Hydro_CheckMinTemp( const real InTemp, const real MinTemp ); #endif //------------------------------------------------------------------------------------------------------- // Function : Hydro_Rotate3D // Description : Rotate the input fluid variables properly to simplify the 3D calculation // // Note : 1. x : (x,y,z) <--> (x,y,z) // y : (x,y,z) <--> (y,z,x) // z : (x,y,z) <--> (z,x,y) // 2. Work no matter InOut[] includes passive scalars or not since they are not modified at all // --> For MHD, specify the array offset of magnetic field by Mag_Offset // // Parameter : InOut : Array storing both the input and output data // XYZ : Target spatial direction : (0/1/2) --> (x/y/z) // Forward : (true/false) <--> (forward/backward) // Mag_Offset : Array offset of magnetic field (for MHD only) //------------------------------------------------------------------------------------------------------- GPU_DEVICE void Hydro_Rotate3D( real InOut[], const int XYZ, const bool Forward, const int Mag_Offset ) { if ( XYZ == 0 ) return; // check # ifdef GAMER_DEBUG # ifdef MHD if ( Mag_Offset < NCOMP_FLUID || Mag_Offset > NCOMP_TOTAL_PLUS_MAG - NCOMP_MAG ) printf( "ERROR : invalid Mag_Offset = %d !!\n", Mag_Offset ); # endif # endif real Temp_Flu[3]; for (int v=0; v<3; v++) Temp_Flu[v] = InOut[ v + 1 ]; # ifdef MHD real Temp_Mag[3]; for (int v=0; v<3; v++) Temp_Mag[v] = InOut[ v + Mag_Offset ]; # endif if ( Forward ) { switch ( XYZ ) { case 1 : InOut[ 1 ] = Temp_Flu[1]; InOut[ 2 ] = Temp_Flu[2]; InOut[ 3 ] = Temp_Flu[0]; # ifdef MHD InOut[ Mag_Offset + 0 ] = Temp_Mag[1]; InOut[ Mag_Offset + 1 ] = Temp_Mag[2]; InOut[ Mag_Offset + 2 ] = Temp_Mag[0]; # endif break; case 2 : InOut[ 1 ] = Temp_Flu[2]; InOut[ 2 ] = Temp_Flu[0]; InOut[ 3 ] = Temp_Flu[1]; # ifdef MHD InOut[ Mag_Offset + 0 ] = Temp_Mag[2]; InOut[ Mag_Offset + 1 ] = Temp_Mag[0]; InOut[ Mag_Offset + 2 ] = Temp_Mag[1]; # endif break; } } else // backward { switch ( XYZ ) { case 1 : InOut[ 1 ] = Temp_Flu[2]; InOut[ 2 ] = Temp_Flu[0]; InOut[ 3 ] = Temp_Flu[1]; # ifdef MHD InOut[ Mag_Offset + 0 ] = Temp_Mag[2]; InOut[ Mag_Offset + 1 ] = Temp_Mag[0]; InOut[ Mag_Offset + 2 ] = Temp_Mag[1]; # endif break; case 2 : InOut[ 1 ] = Temp_Flu[1]; InOut[ 2 ] = Temp_Flu[2]; InOut[ 3 ] = Temp_Flu[0]; # ifdef MHD InOut[ Mag_Offset + 0 ] = Temp_Mag[1]; InOut[ Mag_Offset + 1 ] = Temp_Mag[2]; InOut[ Mag_Offset + 2 ] = Temp_Mag[0]; # endif break; } } } // FUNCTION : Hydro_Rotate3D 
//------------------------------------------------------------------------------------------------------- // Function : Hydro_Con2Pri // Description : Conserved variables --> primitive variables // // Note : 1. Always apply pressure floor // 2. For passive scalars, we store their mass fraction as the primitive variables // when FracPassive is on // --> See the input parameters "FracPassive, NFrac, FracIdx" // --> But note that here we do NOT ensure "sum(mass fraction) == 1.0" // --> It is done by calling Hydro_NormalizePassive() in Hydro_Shared_FullStepUpdate() // 3. In[] and Out[] must NOT point to the same array // 4. In[] and Out[] should have the size of NCOMP_TOTAL_PLUS_MAG // // Parameter : In : Input conserved variables // Out : Output primitive variables // MinPres : Minimum allowed pressure // FracPassive : true --> convert passive scalars to mass fraction // NFrac : Number of passive scalars for the option "FracPassive" // FracIdx : Target variable indices for the option "FracPassive" // JeansMinPres : Apply minimum pressure estimated from the Jeans length // JeansMinPres_Coeff : Coefficient used by JeansMinPres = G*(Jeans_NCell*Jeans_dh)^2/(Gamma*pi); // EoS_DensEint2Pres : EoS routine to compute the gas pressure // EoS_DensPres2Eint : EoS routine to compute the gas internal energy // EoS_AuxArray_* : Auxiliary arrays for EoS_DensEint2Pres() // EoS_Table : EoS tables for EoS_DensEint2Pres() // EintOut : Pointer to store the output internal energy // --> Do nothing if it is NULL // --> Internal energy floor is not applied // // Return : Out[], EintOut (optional) //------------------------------------------------------------------------------------------------------- GPU_DEVICE void Hydro_Con2Pri( const real In[], real Out[], const real MinPres, const bool FracPassive, const int NFrac, const int FracIdx[], const bool JeansMinPres, const real JeansMinPres_Coeff, const EoS_DE2P_t EoS_DensEint2Pres, const EoS_DP2E_t EoS_DensPres2Eint, const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[], const real *const EoS_Table[EOS_NTABLE_MAX], real* const EintOut ) { const bool CheckMinPres_Yes = true; const real _Rho = (real)1.0/In[0]; # ifdef MHD const real Bx = In[ MAG_OFFSET + 0 ]; const real By = In[ MAG_OFFSET + 1 ]; const real Bz = In[ MAG_OFFSET + 2 ]; const real Emag = (real)0.5*( SQR(Bx) + SQR(By) + SQR(Bz) ); # else const real Emag = NULL_REAL; # endif // conserved --> primitive Out[0] = In[0]; Out[1] = In[1]*_Rho; Out[2] = In[2]*_Rho; Out[3] = In[3]*_Rho; Out[4] = Hydro_Con2Pres( In[0], In[1], In[2], In[3], In[4], In+NCOMP_FLUID, CheckMinPres_Yes, MinPres, Emag, EoS_DensEint2Pres, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table, EintOut ); // pressure floor required to resolve the Jeans length // --> note that currently we do not modify the dual-energy variable (e.g., entropy) accordingly if ( JeansMinPres ) { const real Pres0 = Out[4]; Out[4] = Hydro_CheckMinPres( Pres0, JeansMinPres_Coeff*SQR(Out[0]) ); // recompute internal energy to be consistent with the updated pressure if ( EintOut != NULL && Out[4] != Pres0 ) *EintOut = EoS_DensPres2Eint( Out[0], Out[4], In+NCOMP_FLUID, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table ); } // passive scalars # if ( NCOMP_PASSIVE > 0 ) // copy all passive scalars for (int v=NCOMP_FLUID; v<NCOMP_TOTAL; v++) Out[v] = In[v]; // convert the mass density of target passive scalars to mass fraction if ( FracPassive ) for (int v=0; v<NFrac; v++) Out[ NCOMP_FLUID + FracIdx[v] ] *= _Rho; # endif // B field # ifdef MHD for (int v=NCOMP_TOTAL; 
v<NCOMP_TOTAL_PLUS_MAG; v++) Out[v] = In[v]; # endif } // FUNCTION : Hydro_Con2Pri //------------------------------------------------------------------------------------------------------- // Function : Hydro_Pri2Con // Description : Primitive variables --> conserved variables // // Note : 1. Does NOT check if the input pressure is greater than the given minimum threshold // 2. For passive scalars, we store their mass fraction as the primitive variables // when FracPassive is on // --> See the input parameters "FracPassive, NFrac, FracIdx" // 3. In[] and Out[] must NOT point to the same array // 4. In[] and Out[] should have the size of NCOMP_TOTAL_PLUS_MAG // 5. Convert pressure to internal energy using the input EoS routine by default // --> But one can also specify internal energy directly through *EintIn*, // by which no EoS conversion is required and the input pressure will be useless // --> Mainly used by the option LR_EINT in data reconstruction // // Parameter : In : Array storing the input primitive variables // Out : Array to store the output conserved variables // FracPassive : true --> input passive scalars are mass fraction instead of density // NFrac : Number of passive scalars for the option "FracPassive" // FracIdx : Target variable indices for the option "FracPassive" // EoS_DensPres2Eint : EoS routine to compute the gas internal energy // EoS_AuxArray_* : Auxiliary arrays for EoS_DensPres2Eint() // EoS_Table : EoS tables for EoS_DensPres2Eint() // EintIn : Pointer storing the input internal energy (see the note above) // --> Do nothing if it is NULL // // Return : Out[] //------------------------------------------------------------------------------------------------------- GPU_DEVICE void Hydro_Pri2Con( const real In[], real Out[], const bool FracPassive, const int NFrac, const int FracIdx[], const EoS_DP2E_t EoS_DensPres2Eint, const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[], const real *const EoS_Table[EOS_NTABLE_MAX], const real* const EintIn ) { real Eint, Emag=NULL_REAL; // passive scalars // --> do it before invoking EoS_DensPres2Eint() since the latter requires the mass density // instead of mass fraction of passive scalars # if ( NCOMP_PASSIVE > 0 ) // copy all passive scalars for (int v=NCOMP_FLUID; v<NCOMP_TOTAL; v++) Out[v] = In[v]; // convert the mass fraction of target passive scalars back to mass density if ( FracPassive ) for (int v=0; v<NFrac; v++) Out[ NCOMP_FLUID + FracIdx[v] ] *= In[0]; # endif // primitive --> conserved Out[0] = In[0]; Out[1] = In[0]*In[1]; Out[2] = In[0]*In[2]; Out[3] = In[0]*In[3]; # ifdef MHD const real Bx = In[ MAG_OFFSET + 0 ]; const real By = In[ MAG_OFFSET + 1 ]; const real Bz = In[ MAG_OFFSET + 2 ]; Emag = (real)0.5*( SQR(Bx) + SQR(By) + SQR(Bz) ); # endif Eint = ( EintIn == NULL ) ? EoS_DensPres2Eint( In[0], In[4], Out+NCOMP_FLUID, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table ) : *EintIn; Out[4] = Hydro_ConEint2Etot( Out[0], Out[1], Out[2], Out[3], Eint, Emag ); // B field # ifdef MHD for (int v=NCOMP_TOTAL; v<NCOMP_TOTAL_PLUS_MAG; v++) Out[v] = In[v]; # endif } // FUNCTION : Hydro_Pri2Con //------------------------------------------------------------------------------------------------------- // Function : Hydro_Con2Flux // Description : Evaluate hydrodynamic/MHD fluxes from the input conserved variables // // Note : 1. Flux[] and In[] may point to the same array // 2. Flux[] and In[] should have the size of NCOMP_TOTAL_PLUS_MAG // 3. 
By default, it computes pressure using the input EoS routine // --> But one can also specify pressure directly through *PresIn*, // by which no EoS conversion is required // // Parameter : XYZ : Target spatial direction : (0/1/2) --> (x/y/z) // Flux : Array to store the output fluxes // In : Array storing the input conserved variables // MinPres : Minimum allowed pressure // EoS_DensEint2Pres : EoS routine to compute the gas pressure // EoS_AuxArray_* : Auxiliary arrays for EoS_DensEint2Pres() // EoS_Table : EoS tables for EoS_DensEint2Pres() // PresIn : Pointer storing the input pressure (see the note above) // --> Do nothing if it is NULL // // Return : Flux[] //------------------------------------------------------------------------------------------------------- GPU_DEVICE void Hydro_Con2Flux( const int XYZ, real Flux[], const real In[], const real MinPres, const EoS_DE2P_t EoS_DensEint2Pres, const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[], const real *const EoS_Table[EOS_NTABLE_MAX], const real* const PresIn ) { const bool CheckMinPres_Yes = true; real InRot[ NCOMP_FLUID + NCOMP_MAG ]; // no need to include passive scalars since they don't have to be rotated for (int v=0; v<NCOMP_FLUID; v++) InRot[v] = In[v]; # ifdef MHD for (int v=NCOMP_FLUID; v<NCOMP_FLUID+NCOMP_MAG; v++) InRot[v] = In[ v - NCOMP_FLUID + MAG_OFFSET ]; # endif Hydro_Rotate3D( InRot, XYZ, true, NCOMP_FLUID ); # ifdef MHD const real Bx = InRot[ NCOMP_FLUID + 0 ]; const real By = InRot[ NCOMP_FLUID + 1 ]; const real Bz = InRot[ NCOMP_FLUID + 2 ]; const real Emag = (real)0.5*( SQR(Bx) + SQR(By) + SQR(Bz) ); # else const real Emag = NULL_REAL; # endif const real Pres = ( PresIn == NULL ) ? Hydro_Con2Pres( InRot[0], InRot[1], InRot[2], InRot[3], InRot[4], In+NCOMP_FLUID, CheckMinPres_Yes, MinPres, Emag, EoS_DensEint2Pres, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table, NULL ) : *PresIn; const real _Rho = (real)1.0 / InRot[0]; const real Vx = _Rho*InRot[1]; Flux[0] = InRot[1]; Flux[1] = Vx*InRot[1] + Pres; Flux[2] = Vx*InRot[2]; Flux[3] = Vx*InRot[3]; Flux[4] = Vx*( InRot[4] + Pres ); // passive scalars # if ( NCOMP_PASSIVE > 0 ) for (int v=NCOMP_FLUID; v<NCOMP_TOTAL; v++) Flux[v] = In[v]*Vx; # endif // B field # ifdef MHD const real Vy = _Rho*InRot[2]; const real Vz = _Rho*InRot[3]; Flux[ 1 ] += Emag - SQR(Bx); Flux[ 2 ] -= Bx*By; Flux[ 3 ] -= Bx*Bz; Flux[ 4 ] += Vx*Emag - Bx*( Bx*Vx + By*Vy + Bz*Vz ); Flux[ MAG_OFFSET + 0 ] = (real)0.0; Flux[ MAG_OFFSET + 1 ] = By*Vx - Bx*Vy; Flux[ MAG_OFFSET + 2 ] = Bz*Vx - Bx*Vz; # endif Hydro_Rotate3D( Flux, XYZ, false, MAG_OFFSET ); } // FUNCTION : Hydro_Con2Flux //------------------------------------------------------------------------------------------------------- // Function : Hydro_CheckMinPres // Description : Check if the input pressure is greater than the minimum allowed threshold // // Note : 1. This function is used to correct unphysical (usually negative) pressure caused by // numerical errors // --> Usually happen in regions with high mach numbers // --> Currently it simply sets a minimum allowed value for pressure // --> Set MIN_PRES in the runtime parameter file "Input__Parameter" // 2. We should also support a minimum **temperature** instead of **pressure** // --> NOT supported yet // 3. 
If the input pressure is NaN, return NaN in order to trigger auto-correction such as // "OPT__1ST_FLUX_CORR" and "AUTO_REDUCE_DT" // // Parameter : InPres : Input pressure to be corrected // MinPres : Minimum allowed pressure // // Return : InPres != NaN --> max( InPres, MinPres ) // == NaN --> NaN //------------------------------------------------------------------------------------------------------- GPU_DEVICE real Hydro_CheckMinPres( const real InPres, const real MinPres ) { // call FMAX() only if InPres is not NaN if ( InPres == InPres ) return FMAX( InPres, MinPres ); else return InPres; } // FUNCTION : Hydro_CheckMinPres //------------------------------------------------------------------------------------------------------- // Function : Hydro_CheckMinEint // Description : Similar to Hydro_CheckMinPres() except that this function checks the internal energy // density (Eint) instead of pressure // // Note : 1. See Hydro_CheckMinPres() // // Parameter : InEint : Input Eint to be corrected // MinEint : Minimum allowed Eint // // Return : InEint != NaN --> max( InEint, MinEint ) // == NaN --> NaN //------------------------------------------------------------------------------------------------------- GPU_DEVICE real Hydro_CheckMinEint( const real InEint, const real MinEint ) { // call FMAX() only if InEint is not NaN if ( InEint == InEint ) return FMAX( InEint, MinEint ); else return InEint; } // FUNCTION : Hydro_CheckMinEint //------------------------------------------------------------------------------------------------------- // Function : Hydro_CheckMinTemp // Description : Similar to Hydro_CheckMinPres() except that this function checks the gas temperature // instead of pressure // // Note : 1. See Hydro_CheckMinPres() // // Parameter : InTemp : Input temperature to be corrected // MinTemp : Minimum allowed temperature // // Return : InTemp != NaN --> max( InTemp, MinTemp ) // == NaN --> NaN //------------------------------------------------------------------------------------------------------- GPU_DEVICE real Hydro_CheckMinTemp( const real InTemp, const real MinTemp ) { // call FMAX() only if InTemp is not NaN if ( InTemp == InTemp ) return FMAX( InTemp, MinTemp ); else return InTemp; } // FUNCTION : Hydro_CheckMinTemp //------------------------------------------------------------------------------------------------------- // Function : Hydro_CheckMinEintInEngy // Description : Ensure that the internal energy density in the input total energy density is greater than // a given threshold // // Note : 1. Invoke Hydro_CheckMinEint() // 2. Input conserved instead of primitive variables // 3. 
For MHD, one must provide the magnetic energy density Emag (i.e., 0.5*B^2) // // Parameter : Dens : Mass density // MomX/Y/Z : Momentum density // InEngy : Energy density // MinEint : Internal energy density floor // Emag : Magnetic energy density (0.5*B^2) --> For MHD only // // Return : Total energy density with internal energy density greater than a given threshold //------------------------------------------------------------------------------------------------------- GPU_DEVICE real Hydro_CheckMinEintInEngy( const real Dens, const real MomX, const real MomY, const real MomZ, const real InEngy, const real MinEint, const real Emag ) { const bool CheckMinEint_No = false; real InEint, OutEint, OutEngy; InEint = Hydro_Con2Eint( Dens, MomX, MomY, MomZ, InEngy, CheckMinEint_No, NULL_REAL, Emag ); OutEint = Hydro_CheckMinEint( InEint, MinEint ); // do not modify energy (even the round-off errors) if the input data pass the check if ( InEint == OutEint ) OutEngy = InEngy; else OutEngy = InEngy - InEint + OutEint; return OutEngy; } // FUNCTION : Hydro_CheckMinEintInEngy //------------------------------------------------------------------------------------------------------- // Function : Hydro_CheckNegative // Description : Check whether the input value is <= 0.0 (also check whether it's Inf or NAN) // // Note : Can be used to check whether the values of density and pressure are unphysical // // Parameter : Input : Input value // // Return : true --> Input <= 0.0 || >= __FLT_MAX__ || != itself (Nan) // false --> otherwise //------------------------------------------------------------------------------------------------------- GPU_DEVICE bool Hydro_CheckNegative( const real Input ) { if ( Input <= (real)0.0 || Input >= __FLT_MAX__ || Input != Input ) return true; else return false; } // FUNCTION : Hydro_CheckNegative //------------------------------------------------------------------------------------------------------- // Function : Hydro_Con2Pres // Description : Evaluate the fluid pressure // // Note : 1. Invoke the EoS routine EoS_DensEint2Pres() to support different EoS // 2. 
For MHD, Engy is the total energy density including the magnetic energy Emag=0.5*B^2 // and thus one must provide Emag to subtract it // // Parameter : Dens : Mass density // MomX/Y/Z : Momentum density // Engy : Energy density (including the magnetic energy density for MHD) // Passive : Passive scalars // CheckMinPres : Apply pressure floor by invoking Hydro_CheckMinPres() // --> In some cases we actually want to check if pressure becomes unphysical, // for which this option should be disabled // --> For example: Flu_FixUp(), Flu_Close(), Hydro_Aux_Check_Negative() // MinPres : Pressure floor // Emag : Magnetic energy density (0.5*B^2) --> For MHD only // EoS_DensEint2Pres : EoS routine to compute the gas pressure // EoS_AuxArray_* : Auxiliary arrays for EoS_DensEint2Pres() // EoS_Table : EoS tables for EoS_DensEint2Pres() // EintOut : Pointer to store the output internal energy // --> Do nothing if it is NULL // --> Internal energy floor is not applied // // Return : Gas pressure (Pres), EintOut (optional) //------------------------------------------------------------------------------------------------------- GPU_DEVICE real Hydro_Con2Pres( const real Dens, const real MomX, const real MomY, const real MomZ, const real Engy, const real Passive[], const bool CheckMinPres, const real MinPres, const real Emag, const EoS_DE2P_t EoS_DensEint2Pres, const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[], const real *const EoS_Table[EOS_NTABLE_MAX], real *EintOut ) { const bool CheckMinEint_No = false; real Eint, Pres; Eint = Hydro_Con2Eint( Dens, MomX, MomY, MomZ, Engy, CheckMinEint_No, NULL_REAL, Emag ); Pres = EoS_DensEint2Pres( Dens, Eint, Passive, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table ); if ( CheckMinPres ) Pres = Hydro_CheckMinPres( Pres, MinPres ); if ( EintOut != NULL ) *EintOut = Eint; return Pres; } // FUNCTION : Hydro_Con2Pres //------------------------------------------------------------------------------------------------------- // Function : Hydro_Con2Eint // Description : Evaluate the gas internal energy density // // Note : 1. For MHD, Engy is the total energy density including the magnetic energy Emag=0.5*B^2 // and thus one must provide Emag to subtract it // 2. 
Internal energy density is energy per volume instead of per mass // // Parameter : Dens : Mass density // MomX/Y/Z : Momentum density // Engy : Energy density (including the magnetic energy density for MHD) // CheckMinEint : Apply internal energy floor by invoking Hydro_CheckMinEint() // --> In some cases we actually want to check if internal energy becomes unphysical, // for which this option should be disabled // MinEint : Internal energy floor // Emag : Magnetic energy density (0.5*B^2) --> For MHD only // // Return : Gas internal energy density (Eint) //------------------------------------------------------------------------------------------------------- GPU_DEVICE real Hydro_Con2Eint( const real Dens, const real MomX, const real MomY, const real MomZ, const real Engy, const bool CheckMinEint, const real MinEint, const real Emag ) { //###NOTE: assuming Etot = Eint + Ekin + Emag real Eint; Eint = Engy - (real)0.5*( SQR(MomX) + SQR(MomY) + SQR(MomZ) ) / Dens; # ifdef MHD Eint -= Emag; # endif if ( CheckMinEint ) Eint = Hydro_CheckMinEint( Eint, MinEint ); return Eint; } // FUNCTION : Hydro_Con2Eint //------------------------------------------------------------------------------------------------------- // Function : Hydro_ConEint2Etot // Description : Evaluate total energy from the input conserved variables and internal energy // // Note : 1. For MHD, total energy density includes the magnetic energy Emag=0.5*B^2 // 2. Internal energy density is energy per volume instead of per mass // // Parameter : Dens : Mass density // MomX/Y/Z : Momentum density // Eint : Internal energy density // Emag : Magnetic energy density (0.5*B^2) --> For MHD only // // Return : Total energy density (including the magnetic energy density for MHD) //------------------------------------------------------------------------------------------------------- GPU_DEVICE real Hydro_ConEint2Etot( const real Dens, const real MomX, const real MomY, const real MomZ, const real Eint, const real Emag ) { //###NOTE: assuming Etot = Eint + Ekin + Emag real Etot; Etot = (real)0.5*( SQR(MomX) + SQR(MomY) + SQR(MomZ) ) / Dens; Etot += Eint; # ifdef MHD Etot += Emag; # endif return Etot; } // FUNCTION : Hydro_ConEint2Etot //------------------------------------------------------------------------------------------------------- // Function : Hydro_Con2Temp // Description : Evaluate the fluid temperature // // Note : 1. Invoke the EoS routine EoS_DensEint2Temp() to support different EoS // 2. 
Temperature is in kelvin // // Parameter : Dens : Mass density // MomX/Y/Z : Momentum density // Engy : Energy density // Passive : Passive scalars // CheckMinTemp : Apply temperature floor by calling Hydro_CheckMinTemp() // --> In some cases we actually want to check if temperature becomes unphysical, // for which we don't want to enable this option // MinTemp : Temperature floor // Emag : Magnetic energy density (0.5*B^2) --> For MHD only // EoS_DensEint2Temp : EoS routine to compute the gas temperature // EoS_AuxArray_* : Auxiliary arrays for EoS_DensEint2Temp() // EoS_Table : EoS tables for EoS_DensEint2Temp() // // Return : Gas temperature in kelvin //------------------------------------------------------------------------------------------------------- GPU_DEVICE real Hydro_Con2Temp( const real Dens, const real MomX, const real MomY, const real MomZ, const real Engy, const real Passive[], const bool CheckMinTemp, const real MinTemp, const real Emag, const EoS_DE2T_t EoS_DensEint2Temp, const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[], const real *const EoS_Table[EOS_NTABLE_MAX] ) { // check # ifdef GAMER_DEBUG if ( EoS_DensEint2Temp == NULL ) { # ifdef __CUDACC__ printf( "ERROR : EoS_DensEint2Temp == NULL at file <%s>, line <%d>, function <%s> !!\n", __FILE__, __LINE__, __FUNCTION__ ); # else Aux_Error( ERROR_INFO, "EoS_DensEint2Temp == NULL !!\n" ); # endif } # endif // #ifdef GAMER_DEBUG const bool CheckMinEint_No = false; real Eint, Temp; Eint = Hydro_Con2Eint( Dens, MomX, MomY, MomZ, Engy, CheckMinEint_No, NULL_REAL, Emag ); Temp = EoS_DensEint2Temp( Dens, Eint, Passive, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table ); if ( CheckMinTemp ) Temp = Hydro_CheckMinTemp( Temp, MinTemp ); return Temp; } // FUNCTION : Hydro_Con2Temp //------------------------------------------------------------------------------------------------------- // Function : Hydro_NormalizePassive // Description : Normalize the target passive scalars so that the sum of their mass density is equal to // the gas mass density // // Note : 1. Should be invoked AFTER applying the floor values to passive scalars // 2. 
Invoked by Hydro_Shared_FullStepUpdate(), Prepare_PatchData(), Refine(), LB_Refine_AllocateNewPatch(), // Flu_FixUp(), XXX_Init_ByFunction_AssignData(), Flu_Close() // // Parameter : GasDens : Gas mass density // Passive : Passive scalar array (with the size NCOMP_PASSIVE) // NNorm : Number of passive scalars to be normalized // --> Should be set to the global variable "PassiveNorm_NVar" // NormIdx : Target variable indices to be normalized // --> Should be set to the global variable "PassiveNorm_VarIdx" // // Return : Passive //------------------------------------------------------------------------------------------------------- GPU_DEVICE void Hydro_NormalizePassive( const real GasDens, real Passive[], const int NNorm, const int NormIdx[] ) { // validate the target variable indices # ifdef GAMER_DEBUG const int MinIdx = 0; const int MaxIdx = NCOMP_PASSIVE - 1; for (int v=0; v<NNorm; v++) { if ( NormIdx[v] < MinIdx || NormIdx[v] > MaxIdx ) printf( "ERROR : NormIdx[%d] = %d is not within the correct range ([%d <= idx <= %d]) !!\n", v, NormIdx[v], MinIdx, MaxIdx ); } # endif real Norm, PassiveDens_Sum=(real)0.0; for (int v=0; v<NNorm; v++) PassiveDens_Sum += Passive[ NormIdx[v] ]; Norm = GasDens / PassiveDens_Sum; for (int v=0; v<NNorm; v++) Passive[ NormIdx[v] ] *= Norm; } // FUNCTION : Hydro_NormalizePassive #ifdef MHD //------------------------------------------------------------------------------------------------------- // Function : MHD_GetCellCenteredBField // Description : Calculate the cell-centered magnetic field from the input face-centered magnetic field array // // Note : 1. Use the central average operator // 2. Return all three components of the B field // 3. Input arrays should have the following dimension: // Bx_FC[]: (Nx+1)*(Ny )*(Nz ) // By_FC[]: (Nx )*(Ny+1)*(Nz ) // Bz_FC[]: (Nx )*(Ny )*(Nz+1) // // Parameter : B_CC : Cell-centered B field to be returned // Bx/y/z_FC : Input face-centered B field array // Nx/y/z : Array dimension along different directions (see Note above) // i/j/k : Target cell indices // // Return : B_CC //------------------------------------------------------------------------------------------------------- GPU_DEVICE void MHD_GetCellCenteredBField( real B_CC[], const real Bx_FC[], const real By_FC[], const real Bz_FC[], const int Nx, const int Ny, const int Nz, const int i, const int j, const int k ) { const int idx_Bx = IDX321_BX( i, j, k, Nx, Ny ); const int idx_By = IDX321_BY( i, j, k, Nx, Ny ); const int idx_Bz = IDX321_BZ( i, j, k, Nx, Ny ); B_CC[0] = (real)0.5*( Bx_FC[idx_Bx] + Bx_FC[ idx_Bx + 1 ] ); B_CC[1] = (real)0.5*( By_FC[idx_By] + By_FC[ idx_By + Nx ] ); B_CC[2] = (real)0.5*( Bz_FC[idx_Bz] + Bz_FC[ idx_Bz + Nx*Ny ] ); } // FUNCTION : MHD_GetCellCenteredBField //------------------------------------------------------------------------------------------------------- // Function : MHD_GetCellCenteredBEnergy // Description : Calculate the cell-centered magnetic energy (i.e., 0.5*B^2) from the input face-centered // magnetic field array // // Note : 1. Invoke MHD_GetCellCenteredBField() // 2. 
Input arrays should have the following dimension: // Bx_FC[]: (Nx+1)*(Ny )*(Nz ) // By_FC[]: (Nx )*(Ny+1)*(Nz ) // Bz_FC[]: (Nx )*(Ny )*(Nz+1) // // Parameter : Bx/y/z_FC : Input face-centered B field array // Nx/y/z : Array dimension along different directions (see Note above) // i/j/k : Target cell indices // // Return : 0.5*B^2 at the center of the target cell //------------------------------------------------------------------------------------------------------- GPU_DEVICE real MHD_GetCellCenteredBEnergy( const real Bx_FC[], const real By_FC[], const real Bz_FC[], const int Nx, const int Ny, const int Nz, const int i, const int j, const int k ) { // CC = cell-centered real B_CC[3], BEngy; MHD_GetCellCenteredBField( B_CC, Bx_FC, By_FC, Bz_FC, Nx, Ny, Nz, i, j, k ); BEngy = (real)0.5*( SQR(B_CC[MAGX]) + SQR(B_CC[MAGY]) + SQR(B_CC[MAGZ]) ); return BEngy; } // FUNCTION : MHD_GetCellCenteredBEnergy #endif // #ifdef MHD #endif // #if ( MODEL == HYDRO ) #endif // #ifndef __CUFLU_FLUUTILITY__
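//-------------------------------------------------------------------------------------------------------
// [Editor's example] Sketch of the typical conserved <--> primitive round trip built from Hydro_Con2Pri()
//                    and Hydro_Pri2Con() above -- NOT part of the original source. The EoS function
//                    pointers and auxiliary arrays (EoS_DensEint2Pres_CPUPtr, EoS_DensPres2Eint_CPUPtr,
//                    EoS_AuxArray_Flt/Int, h_EoS_Table) and the floor MIN_PRES are assumed to be provided
//                    by the hosting code; Example_Con2Pri2Con() is a hypothetical helper for illustration.
//-------------------------------------------------------------------------------------------------------
void Example_Con2Pri2Con( const real Cons[NCOMP_TOTAL_PLUS_MAG], real Cons_Out[NCOMP_TOTAL_PLUS_MAG] )
{
   real Prim[NCOMP_TOTAL_PLUS_MAG];

// conserved --> primitive: no mass-fraction conversion, no Jeans pressure floor
   Hydro_Con2Pri( Cons, Prim, MIN_PRES, false, 0, NULL, false, NULL_REAL,
                  EoS_DensEint2Pres_CPUPtr, EoS_DensPres2Eint_CPUPtr,
                  EoS_AuxArray_Flt, EoS_AuxArray_Int, h_EoS_Table, NULL );

// primitive --> conserved: recovers the input up to the applied pressure floor
   Hydro_Pri2Con( Prim, Cons_Out, false, 0, NULL,
                  EoS_DensPres2Eint_CPUPtr, EoS_AuxArray_Flt, EoS_AuxArray_Int, h_EoS_Table, NULL );
}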
#include <nvfunctional> /////////////////////////////////////////////////////////////////////////////// // FUNCTION POINTERS // /////////////////////////////////////////////////////////////////////////////// template<typename T> __device__ __forceinline__ bool less( const T &a, const T &b ) { return ( a < b ); } template<typename T> __device__ __forceinline__ bool greater( const T &a, const T &b ) { return ( a > b ); } template<typename T> __device__ __forceinline__ bool less_equal( const T &a, const T &b ) { return ( a <= b ); } template<typename T> __device__ __forceinline__ bool greater_equal( const T &a, const T &b ) { return ( a >= b ); } template<typename T> __device__ __forceinline__ bool equal( const T &a, const T &b ) { return ( a == b ); } template<typename T> __device__ __forceinline__ bool not_equal( const T &a, const T &b ) { return ( a != b ); } template<typename T> using op_func = bool ( * )( const T &, const T & ); __device__ op_func<int> const func_i[6] = { less, greater, less_equal, greater_equal, equal, not_equal }; __device__ op_func<long int> const func_l[6] = { less, greater, less_equal, greater_equal, equal, not_equal }; __device__ op_func<float> const func_f[6] = { less, greater, less_equal, greater_equal, equal, not_equal }; __device__ op_func<double> const func_d[6] = { less, greater, less_equal, greater_equal, equal, not_equal }; /////////////////////////////////////////////////////////////////////////////// // HELPER FUNCTIONS // /////////////////////////////////////////////////////////////////////////////// __device__ __forceinline__ void clip_plus( const bool &clip, const int &n, int &plus ) { if ( clip ) { if ( plus >= n ) { plus = n - 1; } } else { if ( plus >= n ) { plus -= n; } } } __device__ __forceinline__ void clip_minus( const bool &clip, const int &n, int &minus ) { if ( clip ) { if ( minus < 0 ) { minus = 0; } } else { if ( minus < 0 ) { minus += n; } } } /////////////////////////////////////////////////////////////////////////////// // BOOLRELEXTREMA 1D // /////////////////////////////////////////////////////////////////////////////// template<typename T, class U> __device__ void _cupy_boolrelextrema_1D( const int n, const int order, const bool clip, const T *__restrict__ inp, bool *__restrict__ results, U func ) { const int tx { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) }; const int stride { static_cast<int>( blockDim.x * gridDim.x ) }; for ( int tid = tx; tid < n; tid += stride ) { const T data { inp[tid] }; bool temp { true }; for ( int o = 1; o < ( order + 1 ); o++ ) { int plus { tid + o }; int minus { tid - o }; clip_plus( clip, n, plus ); clip_minus( clip, n, minus ); temp &= func( data, inp[plus] ); temp &= func( data, inp[minus] ); } results[tid] = temp; } } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_boolrelextrema_1D_int32( const int n, const int order, const bool clip, const int comp, const int *__restrict__ inp, bool *__restrict__ results ) { _cupy_boolrelextrema_1D<int, op_func<int>>( n, order, clip, inp, results, func_i[comp] ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_boolrelextrema_1D_int64( const int n, const int order, const bool clip, const int comp, const long int *__restrict__ inp, bool *__restrict__ results ) { _cupy_boolrelextrema_1D<long int, op_func<long int>>( n, order, clip, inp, results, func_l[comp] ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_boolrelextrema_1D_float32( const int n, const int order, const bool clip, const int comp, const float 
*__restrict__ inp, bool *__restrict__ results ) { _cupy_boolrelextrema_1D<float, op_func<float>>( n, order, clip, inp, results, func_f[comp] ); } extern "C" __global__ void __launch_bounds__( 512 ) _cupy_boolrelextrema_1D_float64( const int n, const int order, const bool clip, const int comp, const double *__restrict__ inp, bool *__restrict__ results ) { _cupy_boolrelextrema_1D<double, op_func<double>>( n, order, clip, inp, results, func_d[comp] ); } /////////////////////////////////////////////////////////////////////////////// // BOOLRELEXTREMA 2D // /////////////////////////////////////////////////////////////////////////////// template<typename T, class U> __device__ void _cupy_boolrelextrema_2D( const int in_x, const int in_y, const int order, const bool clip, const int axis, const T *__restrict__ inp, bool *__restrict__ results, U func ) { const int ty { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) }; const int tx { static_cast<int>( blockIdx.y * blockDim.y + threadIdx.y ) }; if ( ( tx < in_y ) && ( ty < in_x ) ) { int tid { tx * in_x + ty }; const T data { inp[tid] }; bool temp { true }; for ( int o = 1; o < ( order + 1 ); o++ ) { int plus {}; int minus {}; if ( axis == 0 ) { plus = tx + o; minus = tx - o; clip_plus( clip, in_y, plus ); clip_minus( clip, in_y, minus ); plus = plus * in_x + ty; minus = minus * in_x + ty; } else { plus = ty + o; minus = ty - o; clip_plus( clip, in_x, plus ); clip_minus( clip, in_x, minus ); plus = tx * in_x + plus; minus = tx * in_x + minus; } temp &= func( data, inp[plus] ); temp &= func( data, inp[minus] ); } results[tid] = temp; } } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_boolrelextrema_2D_int32( const int in_x, const int in_y, const int order, const bool clip, const int comp, const int axis, const int *__restrict__ inp, bool *__restrict__ results ) { _cupy_boolrelextrema_2D<int, op_func<int>>( in_x, in_y, order, clip, axis, inp, results, func_i[comp] ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_boolrelextrema_2D_int64( const int in_x, const int in_y, const int order, const bool clip, const int comp, const int axis, const long int *__restrict__ inp, bool *__restrict__ results ) { _cupy_boolrelextrema_2D<long int, op_func<long int>>( in_x, in_y, order, clip, axis, inp, results, func_l[comp] ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_boolrelextrema_2D_float32( const int in_x, const int in_y, const int order, const bool clip, const int comp, const int axis, const float *__restrict__ inp, bool *__restrict__ results ) { _cupy_boolrelextrema_2D<float, op_func<float>>( in_x, in_y, order, clip, axis, inp, results, func_f[comp] ); } extern "C" __global__ void __launch_bounds__( 256 ) _cupy_boolrelextrema_2D_float64( const int in_x, const int in_y, const int order, const bool clip, const int comp, const int axis, const double *__restrict__ inp, bool *__restrict__ results ) { _cupy_boolrelextrema_2D<double, op_func<double>>( in_x, in_y, order, clip, axis, inp, results, func_d[comp] ); }
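///////////////////////////////////////////////////////////////////////////////
//                      EDITOR'S USAGE SKETCH (ASSUMED)                       //
///////////////////////////////////////////////////////////////////////////////
// The kernels above are normally launched from Python via CuPy; the host-side launcher below is only an
// illustrative sketch of the launch configuration and of the comparator indexing, not an API shipped with
// this file. comp selects the comparator in the same order as func_f[]:
// 0 = less, 1 = greater, 2 = less_equal, 3 = greater_equal, 4 = equal, 5 = not_equal
// (e.g. comp = 1 reproduces relative-maximum detection).
void boolrelextrema_1D_float32( const float *d_in, bool *d_out, const int n, const int order, const bool clip,
                                const int comp, cudaStream_t stream ) {
    const int threads { 512 };                              // matches __launch_bounds__( 512 ) above
    const int blocks { ( n + threads - 1 ) / threads };
    _cupy_boolrelextrema_1D_float32<<<blocks, threads, 0, stream>>>( n, order, clip, comp, d_in, d_out );
}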
#include <unordered_set> #include <unordered_map> #include "core/graph.h" #include "core/solver.h" #include "model/graph.h" #include "gpu/graph.cuh" /** * @page Node Embedding * * Node embedding is an instantiation of the system on normal graphs (e.g. social network, citation network) * * In node embedding, there are two embedding matrices, namely the vertex embeddings and the context embeddings. * During training, the samplers generate positive edges on multiple CPUs * and the workers generate negative context nodes on GPUs. * Then the workers pick vertex and context embeddings according to the samples, and update the embeddings with SGD. * * Currently, our graph solver supports DeepWalk, LINE and node2vec. * * Reference: * * 1) DeepWalk * https://arxiv.org/pdf/1403.6652.pdf * * 2) LINE * https://arxiv.org/pdf/1503.03578.pdf * * 3) node2vec * https://www.kdd.org/kdd2016/papers/files/rfp0218-groverA.pdf */ namespace graphvite { const int kExpectedDegree = 1600; /** * @brief Normal graphs without attributes * @tparam _Index integral type of node indexes */ template<class _Index = size_t> class Graph : public GraphMixin<_Index> { public: typedef GraphMixin<_Index> Base; USING_GRAPH_MIXIN(Base); typedef _Index Index; std::unordered_map<std::string, Index> name2id; std::vector<std::string> id2name; bool as_undirected, normalization; #define USING_GRAPH(type) \ USING_GRAPH_MIXIN(type); \ using type::name2id; \ using type::id2name; \ using type::as_undirected; \ using type::normalization; \ using type::normalize /** Clear the graph and free CPU memory */ void clear() override { Base::clear(); decltype(name2id)().swap(name2id); decltype(id2name)().swap(id2name); } inline std::string name() const override { std::stringstream ss; ss << "Graph<" << pretty::type2name<Index>() << ">"; return ss.str(); } inline std::string graph_info() const override { std::stringstream ss; ss << "#vertex: " << num_vertex << ", #edge: " << num_edge << std::endl; ss << "as undirected: " << pretty::yes_no(as_undirected) << ", normalization: " << pretty::yes_no(normalization); return ss.str(); } void normalize() { std::vector<float> context_weights(num_vertex); for (Index u = 0; u < num_vertex; u++) for (auto &&vertex_edge : vertex_edges[u]) { Index v = std::get<0>(vertex_edge); float w = std::get<1>(vertex_edge); context_weights[v] += w; } for (Index u = 0; u < num_vertex; u++) { float weight = 0; for (auto &&vertex_edge : vertex_edges[u]) { Index v = std::get<0>(vertex_edge); float &w = std::get<1>(vertex_edge); w /= sqrt(vertex_weights[u] * context_weights[v]); weight += w; } vertex_weights[u] = weight; } } /** Add an edge to the adjacency list */ void add_edge(const std::string &u_name, const std::string &v_name, float w) { Index u, v; auto u_iter = name2id.find(u_name); if (u_iter != name2id.end()) u = u_iter->second; else { u = num_vertex++; name2id[u_name] = u; id2name.push_back(u_name); vertex_edges.push_back(std::vector<VertexEdge>()); vertex_weights.push_back(0); } auto v_iter = name2id.find(v_name); if (v_iter != name2id.end()) v = v_iter->second; else { v = num_vertex++; name2id[v_name] = v; id2name.push_back(v_name); vertex_edges.push_back(std::vector<VertexEdge>()); vertex_weights.push_back(0); } vertex_edges[u].push_back(std::make_tuple(v, w)); vertex_weights[u] += w; if (as_undirected && u != v) { vertex_edges[v].push_back(std::make_tuple(u, w)); vertex_weights[v] += w; } num_edge++; } /** * @brief Load a graph from an edge-list file. Store the graph in an adjacency list. 
* @param file_name file name * @param _as_undirected symmetrize the graph or not * @param _normalization normalize the adjacency matrix or not * @param delimiters string of delimiter characters * @param comment prefix of comment strings */ void load_file(const char *file_name, bool _as_undirected = true, bool _normalization = false, const char *delimiters = " \t\r\n", const char *comment = "#") { LOG(INFO) << "loading graph from " << file_name; clear(); as_undirected = _as_undirected; normalization = _normalization; FILE *fin = fopen(file_name, "r"); CHECK(fin) << "File `" << file_name << "` doesn't exist"; fseek(fin, 0, SEEK_END); size_t fsize = ftell(fin); fseek(fin, 0, SEEK_SET); char line[kMaxLineLength]; for (size_t i = 1; fgets(line, kMaxLineLength, fin); i++) { LOG_EVERY_N(INFO, 1e7) << 100.0 * ftell(fin) / fsize << "%"; char *comment_str = strstr(line, comment); if (comment_str) *comment_str = 0; char *u_name = strtok(line, delimiters); if (!u_name) continue; char *v_name = strtok(nullptr, delimiters); char *w_str = strtok(nullptr, delimiters); char *more = strtok(nullptr, delimiters); CHECK(v_name && !more) << "Invalid format at line " << i; float w = w_str ? atof(w_str) : 1; add_edge(u_name, v_name, w); } fclose(fin); if (normalization) normalize(); LOG(WARNING) << pretty::block(info()) ; } /** * @brief Load a graph from an edge list. Store the graph in an adjacency list. * @param edge_list edge list * @param _as_undirected symmetrize the graph or not * @param _normalization normalize the adjacency matrix or not */ void load_edge_list(const std::vector<std::tuple<std::string, std::string>> &edge_list, bool _as_undirected = true, bool _normalization = false) { clear(); as_undirected = _as_undirected; normalization = _normalization; for (auto &&edge : edge_list) { auto &u_name = std::get<0>(edge); auto &v_name = std::get<1>(edge); add_edge(u_name, v_name, 1); } if (normalization) normalize(); LOG(WARNING) << pretty::block(info()); } /** * @brief Load a graph from an edge list. Store the graph in an adjacency list. 
* @param weighted_edge_list weighted edge list * @param _as_undirected symmetrize the graph or not * @param _normalization normalize the adjacency matrix or not */ void load_weighted_edge_list(const std::vector<std::tuple<std::string, std::string, float>> &weighted_edge_list, bool _as_undirected = true, bool _normalization = false) { clear(); as_undirected = _as_undirected; normalization = _normalization; for (auto &&edge : weighted_edge_list) { auto &u_name = std::get<0>(edge); auto &v_name = std::get<1>(edge); float w = std::get<2>(edge); add_edge(u_name, v_name, w); } if (normalization) normalize(); LOG(WARNING) << pretty::block(info()); } /** * @brief Save the graph in edge-list format * @param file_name file name * @param weighted save edge weights or not * @param anonymous save node names or not */ void save(const char *file_name, bool weighted = true, bool anonymous = false) { LOG(INFO) << "Saving weighted graph to " << file_name; FILE *fout = fopen(file_name, "w"); for (unsigned long long i = 0; i < num_vertex; i++) for (auto &&vertex_edge : vertex_edges[i]) { unsigned long long j = std::get<0>(vertex_edge); float w = std::get<1>(vertex_edge); if (anonymous) fprintf(fout, "%llu\t%llu", i, j); else fprintf(fout, "%s\t%s", id2name[i].c_str(), id2name[j].c_str()); if (weighted) fprintf(fout, "\t%f", w); fputc('\n', fout); } fclose(fout); } }; template<size_t _dim, class _Float, class _Index> class GraphSolver; /** Edge sampler for graphs */ template<class _Solver> class GraphSampler : public SamplerMixin<_Solver> { public: typedef SamplerMixin<_Solver> Base; USING_SAMPLER_MIXIN(Base); using Base::Base; typedef GraphSolver<Solver::dim, Float, Index> GraphSolver; /** Return no additional attributes */ inline Attributes get_attributes(const Edge &edge) const override { return Attributes(); } /** Sample edges from biased random walks. This function can be parallelized. */ void sample_biased_random_walk(int start, int end) { GraphSolver *solver = reinterpret_cast<GraphSolver *>(this->solver); CHECK(pool_size % solver->shuffle_base == 0) << "Can't perform pseudo shuffle on " << pool_size << " elements by a shuffle base of " << solver->shuffle_base << ". 
Try setting the episode size to a multiple of the shuffle base"; CUDA_CHECK(cudaSetDevice(device_id)); random.to_host(); CURAND_CHECK(curandGenerateUniformDouble(generator, random.device_ptr, kRandBatchSize)); auto &sample_pool = solver->sample_pools[solver->pool_id ^ 1]; std::vector<std::vector<int>> offsets(num_partition); for (auto &&partition_offsets : offsets) partition_offsets.resize(num_partition, start); std::vector<std::vector<std::pair<int, Index>>> head_chains(solver->random_walk_batch_size); std::vector<std::vector<std::pair<int, Index>>> tail_chains(solver->random_walk_batch_size); for (auto &&head_chain : head_chains) head_chain.resize(solver->random_walk_length + 1); for (auto &&tail_chain : tail_chains) tail_chain.resize(solver->random_walk_length + 1); std::vector<int> sample_lengths(solver->random_walk_batch_size); int num_complete = 0, rand_id = 0; while (num_complete < num_partition * num_partition) { for (int i = 0; i < solver->random_walk_batch_size; i++) { if (rand_id > kRandBatchSize - solver->random_walk_length * 2) { random.to_host(); CURAND_CHECK(curandGenerateUniformDouble(generator, random.device_ptr, kRandBatchSize)); rand_id = 0; } size_t edge_id = solver->edge_table.sample(random[rand_id++], random[rand_id++]); Index current = std::get<0>(solver->graph->edges[edge_id]); head_chains[i][0] = solver->head_locations[current]; tail_chains[i][0] = solver->tail_locations[current]; current = std::get<1>(solver->graph->edges[edge_id]); head_chains[i][1] = solver->head_locations[current]; tail_chains[i][1] = solver->tail_locations[current]; sample_lengths[i] = solver->random_walk_length; for (int j = 2; j <= solver->random_walk_length; j++) if (!solver->graph->vertex_edges[current].empty()) { Index neighbor_id = solver->edge_edge_tables[edge_id].sample( random[rand_id++], random[rand_id++]); edge_id = solver->graph->flat_offsets[current] + neighbor_id; current = std::get<0>(solver->graph->vertex_edges[current][neighbor_id]); head_chains[i][j] = solver->head_locations[current]; tail_chains[i][j] = solver->tail_locations[current]; } else { sample_lengths[i] = j - 1; break; } } for (int i = 0; i < solver->random_walk_batch_size; i++) { for (int j = 0; j < sample_lengths[i]; j++) { for (int k = 1; k <= solver->augmentation_step; k++) { if (j + k > sample_lengths[i]) break; int head_partition_id = head_chains[i][j].first; int tail_partition_id = tail_chains[i][j + k].first; int &offset = offsets[head_partition_id][tail_partition_id]; if (offset < end) { auto &pool = sample_pool[head_partition_id][tail_partition_id]; Index head_local_id = head_chains[i][j].second; Index tail_local_id = tail_chains[i][j + k].second; // pseudo shuffle int shuffled_offset = offset % solver->shuffle_base * (pool_size / solver->shuffle_base) + offset / solver->shuffle_base; pool[shuffled_offset] = std::make_tuple(head_local_id, tail_local_id); if (++offset == end) num_complete++; } } } } } } /** Sample edges from random walks. This function can be parallelized. */ void sample_random_walk(int start, int end) { GraphSolver *solver = reinterpret_cast<GraphSolver *>(this->solver); CHECK(pool_size % solver->shuffle_base == 0) << "Can't perform pseudo shuffle on " << pool_size << " elements by a shuffle base of " << solver->shuffle_base << ". 
Try setting the episode size to a multiple of the shuffle base"; CUDA_CHECK(cudaSetDevice(device_id)); random.to_host(); CURAND_CHECK(curandGenerateUniformDouble(generator, random.device_ptr, kRandBatchSize)); auto &sample_pool = solver->sample_pools[solver->pool_id ^ 1]; std::vector<std::vector<int>> offsets(num_partition); for (auto &&partition_offsets : offsets) partition_offsets.resize(num_partition, start); std::vector<std::vector<std::pair<int, Index>>> head_chains(solver->random_walk_batch_size); std::vector<std::vector<std::pair<int, Index>>> tail_chains(solver->random_walk_batch_size); for (auto &&head_chain : head_chains) head_chain.resize(solver->random_walk_length + 1); for (auto &&tail_chain : tail_chains) tail_chain.resize(solver->random_walk_length + 1); std::vector<int> sample_lengths(solver->random_walk_batch_size); int num_complete = 0, rand_id = 0; while (num_complete < num_partition * num_partition) { for (int i = 0; i < solver->random_walk_batch_size; i++) { if (rand_id > kRandBatchSize - solver->random_walk_length * 2) { random.to_host(); CURAND_CHECK(curandGenerateUniformDouble(generator, random.device_ptr, kRandBatchSize)); rand_id = 0; } size_t edge_id = solver->edge_table.sample(random[rand_id++], random[rand_id++]); Index current = std::get<0>(solver->graph->edges[edge_id]); head_chains[i][0] = solver->head_locations[current]; tail_chains[i][0] = solver->tail_locations[current]; current = std::get<1>(solver->graph->edges[edge_id]); head_chains[i][1] = solver->head_locations[current]; tail_chains[i][1] = solver->tail_locations[current]; sample_lengths[i] = solver->random_walk_length; for (int j = 2; j <= solver->random_walk_length; j++) if (!solver->graph->vertex_edges[current].empty()) { Index neighbor_id = solver->vertex_edge_tables[current].sample( random[rand_id++], random[rand_id++]); current = std::get<0>(solver->graph->vertex_edges[current][neighbor_id]); head_chains[i][j] = solver->head_locations[current]; tail_chains[i][j] = solver->tail_locations[current]; } else { sample_lengths[i] = j - 1; break; } } for (int i = 0; i < solver->random_walk_batch_size; i++) { for (int j = 0; j < sample_lengths[i]; j++) { for (int k = 1; k <= solver->augmentation_step; k++) { if (j + k > sample_lengths[i]) break; int head_partition_id = head_chains[i][j].first; int tail_partition_id = tail_chains[i][j + k].first; int &offset = offsets[head_partition_id][tail_partition_id]; if (offset < end) { auto &pool = sample_pool[head_partition_id][tail_partition_id]; Index head_local_id = head_chains[i][j].second; Index tail_local_id = tail_chains[i][j + k].second; // pseudo shuffle int shuffled_offset = offset % solver->shuffle_base * (pool_size / solver->shuffle_base) + offset / solver->shuffle_base; pool[shuffled_offset] = std::make_tuple(head_local_id, tail_local_id); if (++offset == end) num_complete++; } } } } } } }; /** Training worker for graphs */ template<class _Solver> class GraphWorker : public WorkerMixin<_Solver> { public: typedef WorkerMixin<_Solver> Base; USING_WORKER_MIXIN(Base); using Base::Base; typedef GraphSolver<Solver::dim, Float, Index> GraphSolver; /** * Call the corresponding GPU kernel for training * (DeepWalk, LINE, node2vec) * (SGD, Momentum, AdaGrad, RMSprop, Adam) */ bool train_dispatch() override { using namespace gpu; GraphSolver *solver = reinterpret_cast<GraphSolver *>(this->solver); switch (num_moment) { case 0: { decltype(&graph::train<Vector, Index, DeepWalk, kSGD>) train = nullptr; if (solver->model == "DeepWalk") { if (optimizer.type == "SGD") 
train = &graph::train<Vector, Index, DeepWalk, kSGD>; } if (solver->model == "LINE") { if (optimizer.type == "SGD") train = &graph::train<Vector, Index, LINE, kSGD>; } if (solver->model == "node2vec") { if (optimizer.type == "SGD") train = &graph::train<Vector, Index, Node2Vec, kSGD>; } if (train) { train<<<kBlockPerGrid, kThreadPerBlock, 0, work_stream>>>( *embeddings[0], *embeddings[1], batch, negative_batch, loss, optimizer, solver->negative_weight ); return true; } } case 1: { decltype(&graph::train_1_moment<Vector, Index, DeepWalk, kMomentum>) train = nullptr; if (solver->model == "DeepWalk") { if (optimizer.type == "Momentum") train = &graph::train_1_moment<Vector, Index, DeepWalk, kMomentum>; if (optimizer.type == "AdaGrad") train = &graph::train_1_moment<Vector, Index, DeepWalk, kAdaGrad>; if (optimizer.type == "RMSprop") train = &graph::train_1_moment<Vector, Index, DeepWalk, kRMSprop>; } if (solver->model == "LINE") { if (optimizer.type == "Momentum") train = &graph::train_1_moment<Vector, Index, LINE, kMomentum>; if (optimizer.type == "AdaGrad") train = &graph::train_1_moment<Vector, Index, LINE, kAdaGrad>; if (optimizer.type == "RMSprop") train = &graph::train_1_moment<Vector, Index, LINE, kRMSprop>; } if (solver->model == "node2vec") { if (optimizer.type == "Momentum") train = &graph::train_1_moment<Vector, Index, Node2Vec, kMomentum>; if (optimizer.type == "AdaGrad") train = &graph::train_1_moment<Vector, Index, Node2Vec, kAdaGrad>; if (optimizer.type == "RMSprop") train = &graph::train_1_moment<Vector, Index, Node2Vec, kRMSprop>; } if (train) { train<<<kBlockPerGrid, kThreadPerBlock, 0, work_stream>>>( *embeddings[0], *embeddings[1], (*moments[0])[0], (*moments[1])[0], batch, negative_batch, loss, optimizer, solver->negative_weight ); return true; } } case 2: { decltype(&graph::train_2_moment<Vector, Index, DeepWalk, kAdam>) train = nullptr; if (solver->model == "DeepWalk") { if (optimizer.type == "Adam") train = &graph::train_2_moment<Vector, Index, DeepWalk, kAdam>; } if (solver->model == "LINE") { if (optimizer.type == "Adam") train = &graph::train_2_moment<Vector, Index, LINE, kAdam>; } if (solver->model == "node2vec") { if (optimizer.type == "Adam") train = &graph::train_2_moment<Vector, Index, Node2Vec, kAdam>; } if (train) { train<<<kBlockPerGrid, kThreadPerBlock, 0, work_stream>>>( *embeddings[0], *embeddings[1], (*moments[0])[0], (*moments[1])[0], (*moments[0])[1], (*moments[1])[1], batch, negative_batch, loss, optimizer, solver->negative_weight ); return true; } } } return false; } /** * Call the corresponding GPU kernel for prediction * (DeepWalk, LINE, node2vec) */ bool predict_dispatch() override { using namespace gpu; GraphSolver *solver = reinterpret_cast<GraphSolver *>(this->solver); decltype(&graph::predict<Vector, Index, DeepWalk>) predict = nullptr; if (solver->model == "DeepWalk") predict = &graph::predict<Vector, Index, DeepWalk>; if (solver->model == "LINE") predict = &graph::predict<Vector, Index, LINE>; if (solver->model == "node2vec") predict = &graph::predict<Vector, Index, Node2Vec>; if (predict) { predict<<<kBlockPerGrid, kThreadPerBlock, 0, work_stream>>> (*embeddings[0], *embeddings[1], batch, logits); return true; } return false; } }; /** * @brief Node embedding solver * @tparam _dim dimension of embeddings * @tparam _Float floating type of parameters * @tparam _Index integral type of node indexes */ template<size_t _dim, class _Float = float, class _Index = size_t> class GraphSolver : public SolverMixin<_dim, _Float, _Index, Graph, 
GraphSampler, GraphWorker> { public: typedef SolverMixin<_dim, _Float, _Index, Graph, GraphSampler, GraphWorker> Base; USING_SOLVER_MIXIN(Base); using Base::Base; int augmentation_step, random_walk_length, random_walk_batch_size, shuffle_base; float p, q; std::shared_ptr<std::vector<Vector>> vertex_embeddings, context_embeddings; std::vector<AliasTable<Float, Index>> vertex_edge_tables; std::vector<AliasTable<Float, Index>> edge_edge_tables; GraphSolver(std::vector<int> device_ids = {}, int num_sampler_per_worker = kAuto, size_t gpu_memory_limit = kAuto): Base(device_ids, num_sampler_per_worker, gpu_memory_limit) {} /** * @brief Return the protocols of embeddings * * Vertex / context embeddings are binded to head / tail partitions respectively. * Both embeddings are updated in place. */ inline std::vector<Protocol> get_protocols() const override { return {kHeadPartition | kInPlace, kTailPartition | kInPlace}; }; /** Return the protocol of negative sampling */ inline Protocol get_sampler_protocol() const override { return kTailPartition; } /** * @brief Return the shapes of embeddings * * Shapes of both vertex and context embeddings can be inferred from the graph. */ inline std::vector<Index> get_shapes() const override { return {kAuto, kAuto}; } /** Return all available models of the solver */ inline std::set<std::string> get_available_models() const override { return {"DeepWalk", "LINE", "node2vec"}; } /** Return the default optimizer type and its hyperparameters */ inline Optimizer get_default_optimizer() const override { return SGD(0.025, 5e-3); } /** Build alias reference for embeddings */ inline void build_alias() override { vertex_embeddings = embeddings[0]; context_embeddings = embeddings[1]; } /** Build vertex edge tables. This function can be parallelized. */ void build_vertex_edge(Index start, Index end) { for (Index i = start; i < end; i++) { std::vector<Float> vertex_edge_weights; for (auto &&edge : graph->vertex_edges[i]) vertex_edge_weights.push_back(std::get<1>(edge)); if (!vertex_edge_weights.empty()) vertex_edge_tables[i].build(vertex_edge_weights); } } /** Build edge edge tables. This function can be parallelized. 
*/ void build_edge_edge(size_t start, size_t end, const std::vector<std::unordered_set<Index>> &neighbors) { for (size_t i = start; i < end; i++) { Index u = std::get<0>(graph->edges[i]); Index v = std::get<1>(graph->edges[i]); std::vector<Float> edge_edge_weights; for (auto &&edge : graph->vertex_edges[v]) { Index x = std::get<0>(edge); Float w = std::get<1>(edge); if (x == u) edge_edge_weights.push_back(w / p); else if (neighbors[x].find(u) == neighbors[x].end()) edge_edge_weights.push_back(w / q); else edge_edge_weights.push_back(w); } if (!edge_edge_weights.empty()) edge_edge_tables[i].build(edge_edge_weights); CHECK(edge_edge_tables[i].count == graph->vertex_edges[v].size()) << "alias table count = " << edge_edge_tables[i].count << ", vertex size = " << graph->vertex_edges[v].size(); } } /** Determine and prepare the sampling function */ SampleFunction get_sample_function() override { if (augmentation_step == 1) return Base::get_sample_function(); graph->flatten(); edge_table.build(graph->edge_weights); std::vector<std::thread> build_threads(num_thread); if (model == "DeepWalk" || model == "LINE") { for (Index i = 0; i < num_vertex; i++) vertex_edge_tables.push_back(AliasTable<Float, Index>(-1)); Index work_load = (num_vertex + num_thread - 1) / num_thread; for (int i = 0; i < num_thread; i++) build_threads[i] = std::thread(&GraphSolver::build_vertex_edge, this, work_load * i, std::min(work_load * (i + 1), num_vertex)); for (auto &&thread : build_threads) thread.join(); return &Base::Sampler::sample_random_walk; } if (model == "node2vec") { std::vector<std::unordered_set<Index>> neighbors; for (auto &&vertex_edge : graph->vertex_edges) { std::unordered_set<Index> neighbor; for (auto &&edge : vertex_edge) neighbor.insert(std::get<0>(edge)); neighbors.push_back(neighbor); } size_t num_directed_edge = graph->edges.size(); for (size_t i = 0; i < num_directed_edge; i++) edge_edge_tables.push_back(AliasTable<Float, Index>(-1)); size_t work_load = (num_directed_edge + num_thread - 1) / num_thread; for (int i = 0; i < num_thread; i++) build_threads[i] = std::thread(&GraphSolver::build_edge_edge, this, work_load * i, std::min(work_load * (i + 1), num_directed_edge), neighbors); for (auto &&thread : build_threads) thread.join(); return &Base::Sampler::sample_biased_random_walk; } return nullptr; } /** Initialize the embeddings */ void init_embeddings() override { std::uniform_real_distribution<Float> init(-0.5 / dim, 0.5 / dim); for (auto &&embedding : *vertex_embeddings) for (int i = 0; i < dim; i++) embedding[i] = init(seed); for (auto &&embedding : *context_embeddings) embedding = 0; } inline std::string name() const override { std::stringstream ss; ss << "GraphSolver<" << dim << ", " << pretty::type2name<Float>() << ", " << pretty::type2name<Index>() << ">"; return ss.str(); } inline std::string sampling_info() const override { std::stringstream ss; if (model == "LINE") ss << "augmentation step: " << augmentation_step << ", shuffle base: " << shuffle_base << std::endl; if (model == "DeepWalk") ss << "augmentation step: " << augmentation_step << std::endl; if (model == "node2vec") ss << "augmentation step: " << augmentation_step << ", p: " << p << ", q: " << q << std::endl; ss << "random walk length: " << random_walk_length << std::endl; ss << "random walk batch size: " << random_walk_batch_size << std::endl; ss << "#negative: " << num_negative << ", negative sample exponent: " << negative_sample_exponent; return ss.str(); } /** * @brief Train node embeddings * @param _model "DeepWalk", 
"LINE" or "node2vec" * @param _num_epoch number of epochs, i.e. #positive edges / |E| * @param _resume resume training from learned embeddings or not * @param _augmentation_step node pairs with distance <= augmentation_step are considered as positive samples * @param _random_walk_length length of each random walk * @param _random_walk_batch_size batch size of random walks in samplers * @param _shuffle_base base for pseudo shuffle * @param _p return parameter (for node2vec) * @param _q in-out parameter (for node2vec) * @param _positive_reuse times of reusing positive samples * @param _negative_sample_exponent exponent of degrees in negative sampling * @param _negative_weight weight for each negative sample * @param _log_frequency log every log_frequency batches */ void train(const std::string &_model = "LINE", int _num_epoch = 2000, bool _resume = false, int _augmentation_step = kAuto, int _random_walk_length = 40, int _random_walk_batch_size = 100, int _shuffle_base = kAuto, float _p = 1, float _q = 1, int _positive_reuse = 1, float _negative_sample_exponent = 0.75, float _negative_weight = 5, int _log_frequency = 1000) { augmentation_step = _augmentation_step; random_walk_length = _random_walk_length; random_walk_batch_size = _random_walk_batch_size; shuffle_base = _shuffle_base; p = _p; q = _q; if (augmentation_step == kAuto) augmentation_step = log(kExpectedDegree) / log(float(num_edge) / num_vertex); if (shuffle_base == kAuto) shuffle_base = augmentation_step; if (model == "DeepWalk" || model == "node2vec") shuffle_base = 1; CHECK(augmentation_step >= 1) << "`augmentation_step` should be a positive integer"; CHECK(augmentation_step <= random_walk_length) << "`random_walk_length` should be no less than `augmentation_step`"; Base::train(_model, _num_epoch, _resume, random_walk_length * random_walk_batch_size, _positive_reuse, _negative_sample_exponent, _negative_weight, _log_frequency); } /** Save vertex embeddings in word2vec format */ void save_embeddings(const char *file_name) const { FILE *fout = fopen(file_name, "w"); fprintf(fout, "%llu %llu\n", static_cast<unsigned long long>(num_vertex), static_cast<unsigned long long>(dim)); for (Index i = 0; i < num_vertex; i++) { fprintf(fout, "%s ", graph->id2name[i].c_str()); fwrite((*vertex_embeddings)[i].data, sizeof(Float), dim, fout); fprintf(fout, "\n"); } fclose(fout); } /** Free CPU and GPU memory, except the embeddings on CPU */ void clear() override { Base::clear(); decltype(vertex_edge_tables)().swap(vertex_edge_tables); decltype(edge_edge_tables)().swap(edge_edge_tables); } }; } // namespace graphvite
the_stack
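The node2vec branch of GraphSolver::build_edge_edge() above implements the usual second-order transition rule: for a walk that reached v via the directed edge (u, v), an outgoing edge (v, x) is reweighted by 1/p when x returns to u, by 1/q when x is not adjacent to u, and left unchanged otherwise, before the weights are handed to an alias table. A minimal host-side sketch of just that reweighting on a hypothetical toy graph (no alias tables; names and values are illustrative, not part of graphvite):

// Host-side sketch of the node2vec reweighting used in build_edge_edge().
// The toy graph and all names below are illustrative.
#include <cstdio>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

int main() {
    // Undirected toy graph stored as adjacency lists with unit weights.
    std::unordered_map<int, std::vector<std::pair<int, float>>> adj = {
        {0, {{1, 1.f}, {2, 1.f}}},
        {1, {{0, 1.f}, {2, 1.f}, {3, 1.f}}},
        {2, {{0, 1.f}, {1, 1.f}}},
        {3, {{1, 1.f}}},
    };
    std::unordered_map<int, std::unordered_set<int>> neighbors;
    for (auto &kv : adj)
        for (auto &e : kv.second) neighbors[kv.first].insert(e.first);

    const float p = 0.25f, q = 4.f;   // return / in-out parameters
    const int u = 0, v = 1;           // the walk reached v coming from u

    // Reweight v's outgoing edges the same way build_edge_edge() does.
    for (auto &e : adj[v]) {
        int   x = e.first;
        float w = e.second;
        if (x == u)                       w /= p;   // distance 0: return to u
        else if (!neighbors[x].count(u))  w /= q;   // distance 2: move away from u
        /* else: distance 1, weight unchanged */
        std::printf("edge (%d -> %d): biased weight %.3f\n", v, x, w);
    }
    return 0;
}

A small p makes returning to u likely, while a small q pushes the walk outward; this is the usual BFS-versus-DFS interpretation of the two parameters.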
#include "utils/utils.cuh" #include "utils/intrinsics.cuh" #include "kernel_libs/kernel_fusion.cuh" #include "data_structures/graph.cuh" #include "data_structures/active_set.cuh" #include "data_structures/functor.cuh" #include "abstraction/config.cuh" template<ASFmt fmt, QueueMode M, typename G, typename F> __global__ void __expand_VC_CM_fused(active_set_t as, G g, F f, config_t conf){ const int* __restrict__ strict_adj_list = g.dg_adj_list; __shared__ int tmp[3*THDNUM_EXPAND]; const int assize = ASProxy<fmt,M>::get_size_hard(as); const int STRIDE = blockDim.x*gridDim.x; const int gtid = threadIdx.x + blockIdx.x*blockDim.x; if(assize==0){if(gtid==0) as.halt_device();return;} const int cosize = blockDim.x; const int phase = threadIdx.x; const int blk_id = 0; const int OFFSET_blk = 3*cosize*blk_id; const int OFFSET_start_pos = OFFSET_blk + cosize; const int OFFSET_odegree = OFFSET_blk + 2*cosize; //const int assize_align = (assize&(cosize-1))?(((assize>>8)+1)<<8):assize; const int assize_align = alignment(assize, cosize); Status want = conf.want(); for(int idx=gtid; idx<assize_align; idx+=STRIDE){ __syncthreads(); // step 1: load vertexs into share memory; int v; if(idx<assize) v = ASProxy<fmt,M>::fetch(as, idx, want); else v = -1; if(v >= 0){ tmp[OFFSET_blk+phase] = v; tmp[OFFSET_start_pos+phase] = tex1Dfetch<int>(g.dt_start_pos, v); tmp[OFFSET_odegree+phase] = tex1Dfetch<int>(g.dt_odegree, v); }else{ tmp[OFFSET_blk+phase] = -1; tmp[OFFSET_odegree+phase] = 0; } __syncthreads(); //step 2: get sum of edges for these cosize vertexs and scan odegree; int nedges_blk=0; int offset=1; for(int d=cosize>>1; d>0; d>>=1){ __syncthreads(); if(phase<d){ int ai = offset*(2*phase+1)-1; int bi = offset*(2*phase+2)-1; tmp[OFFSET_odegree+bi] += tmp[OFFSET_odegree+ai]; } offset<<=1; } __syncthreads(); nedges_blk = tmp[OFFSET_odegree+cosize-1]; __syncthreads(); if(!phase) tmp[OFFSET_odegree+cosize-1]=0; __syncthreads(); for(int d=1; d<cosize; d<<=1){ offset >>=1; __syncthreads(); if(phase<d){ int ai = offset*(2*phase+1)-1; int bi = offset*(2*phase+2)-1; int t = tmp[OFFSET_odegree + ai]; tmp[OFFSET_odegree+ai] = tmp[OFFSET_odegree+bi]; tmp[OFFSET_odegree+bi] += t; } } __syncthreads(); int full_tier = assize_align-cosize; int width = idx<(full_tier)?cosize:(assize-full_tier); //step 3: process cosize edges in parallel for(int i=phase; i<nedges_blk; i+=cosize){ int id = __upper_bound(&tmp[OFFSET_odegree], width, i)-1; if(tmp[OFFSET_blk+id] < 0) continue; int ei = tmp[OFFSET_start_pos+id] + i-tmp[OFFSET_odegree+id]; int u = __ldg(strict_adj_list+ei); int v = tmp[OFFSET_blk+id]; auto vdata = f.emit(v, g.fetch_edata(ei), g); bool toprocess = true; // check 1: if idempotent, we can prune the redundant update (if has, that's also OK) if(toprocess && conf.pruning()) toprocess = as.bitmap.mark_duplicate_lite(u); // check 2: if not push TO ALL, the target vertex must be Inactive // cond is provided by users to indicate whether u should accept the update. if(toprocess && !conf.conf_toall) toprocess = f.cond(u, vdata, g); // if u pass all the checks, do the computation in the functor if(toprocess){ //f.filter(u, g);// useless toprocess = f.compAtomic(f.wa_of(u), vdata, g); } // check 3: enqueue the u only once. (if duplicate, wrong answer) if(toprocess && !conf.pruning()) toprocess = as.bitmap.mark_duplicate_atomic(u); // if u is updated successfully, write u to the queue directly // atomic mode. 
if(toprocess){ Qproxy<M>::push(as.queue, u); } }//for }//for } template<ASFmt fmt, QueueMode M, typename G, typename F> __global__ void __expand_VC_CM(active_set_t as, G g, F f, config_t conf){ const int* __restrict__ strict_adj_list = g.dg_adj_list; __shared__ int tmp[3*THDNUM_EXPAND]; const int assize = ASProxy<fmt,M>::get_size(as); // bitmap or queue? const int STRIDE = blockDim.x*gridDim.x; const int gtid = threadIdx.x + blockIdx.x*blockDim.x; //if(assize==0) { if(gtid==0) as.halt_device();return; } const int cosize = blockDim.x; const int phase = threadIdx.x; const int blk_id = 0; const int OFFSET_blk = 3*cosize*blk_id; const int OFFSET_start_pos = OFFSET_blk + cosize; const int OFFSET_odegree = OFFSET_blk + 2*cosize; //const int assize_align = (assize&(cosize-1))?(((assize>>10)+1)<<10):assize; const int assize_align = alignment(assize, cosize); Status want = conf.want(); for(int idx=gtid; idx<assize_align; idx+=STRIDE){ __syncthreads(); // step 1: load vertexs into share memory; int v; if(idx<assize) v = ASProxy<fmt,M>::fetch(as, idx, want); else v = -1; if(v >= 0){ tmp[OFFSET_blk+phase] = v; tmp[OFFSET_start_pos+phase] = tex1Dfetch<int>(g.dt_start_pos, v); tmp[OFFSET_odegree+phase] = tex1Dfetch<int>(g.dt_odegree, v); }else{ tmp[OFFSET_blk+phase] = -1; tmp[OFFSET_odegree+phase] = 0; } __syncthreads(); //step 2: get sum of edges for these cosize vertexs and scan odegree; int nedges_blk=0; int offset=1; for(int d=cosize>>1; d>0; d>>=1){ __syncthreads(); if(phase<d){ int ai = offset*(2*phase+1)-1; int bi = offset*(2*phase+2)-1; tmp[OFFSET_odegree+bi] += tmp[OFFSET_odegree+ai]; } offset<<=1; } __syncthreads(); nedges_blk = tmp[OFFSET_odegree+cosize-1]; __syncthreads(); if(!phase) tmp[OFFSET_odegree+cosize-1]=0; __syncthreads(); for(int d=1; d<cosize; d<<=1){ offset >>=1; __syncthreads(); if(phase<d){ int ai = offset*(2*phase+1)-1; int bi = offset*(2*phase+2)-1; int t = tmp[OFFSET_odegree + ai]; tmp[OFFSET_odegree+ai] = tmp[OFFSET_odegree+bi]; tmp[OFFSET_odegree+bi] += t; } } __syncthreads(); int full_tier = assize_align-cosize; int width = idx<(full_tier)?cosize:(assize-full_tier); //step 3: process cosize edges in parallel for(int i=phase; i<nedges_blk; i+=cosize){ int id = __upper_bound(&tmp[OFFSET_odegree], width, i)-1; if(tmp[OFFSET_blk+id] < 0) continue; int ei = tmp[OFFSET_start_pos+id] + i-tmp[OFFSET_odegree+id]; int u = __ldg(strict_adj_list+ei); bool toprocess = true; // check 1: if idempotent, we can prune the redundant update (if has, that's also OK) if(toprocess && conf.pruning()) toprocess = as.bitmap.mark_duplicate_lite(u); // check 2: if not push TO ALL, the target vertex must be Inactive if(toprocess && !conf.conf_toall) toprocess = as.bitmap.is_inactive(u); // if u pass all the checks, do the computation in the functor if(toprocess){ int v = tmp[OFFSET_blk+id]; auto vdata = f.emit(v, g.fetch_edata(ei), g); f.compAtomic(f.wa_of(u), vdata, g); } }//for }//for } template<ASFmt fmt, QueueMode M, typename G, typename F> __global__ void __rexpand_VC_CM(active_set_t as, G g, F f, config_t conf){ const int* __restrict__ strict_adj_list = g.dg_adj_list; __shared__ int tmp[3*THDNUM_EXPAND]; const int assize = ASProxy<fmt,M>::get_size(as); // bitmap or queue? 
const int STRIDE = blockDim.x*gridDim.x; const int gtid = threadIdx.x + blockIdx.x*blockDim.x; if(assize==0){if(gtid==0) as.halt_device();return;} const int cosize = blockDim.x; const int phase = threadIdx.x; const int blk_id = 0; const int OFFSET_blk = 3*cosize*blk_id; const int OFFSET_start_pos = OFFSET_blk + cosize; const int OFFSET_odegree = OFFSET_blk + 2*cosize; //const int assize_align = (assize&(cosize-1))?(((assize>>10)+1)<<10):assize; const int assize_align = alignment(assize, cosize); Status want = conf.want(); for(int idx=gtid; idx<assize_align; idx+=STRIDE){ __syncthreads(); // step 1: load vertexs into share memory; int v; if(idx<assize) v = ASProxy<fmt,M>::fetch(as, idx, want); else v = -1; if(v >= 0){ tmp[OFFSET_blk+phase] = v; tmp[OFFSET_start_pos+phase] = tex1Dfetch<int>(g.dt_start_pos, v); tmp[OFFSET_odegree+phase] = tex1Dfetch<int>(g.dt_odegree, v); }else{ tmp[OFFSET_blk+phase] = -1; tmp[OFFSET_odegree+phase] = 0; } __syncthreads(); //step 2: get sum of edges for these cosize vertexs and scan odegree; int nedges_blk=0; int offset=1; for(int d=cosize>>1; d>0; d>>=1){ __syncthreads(); if(phase<d){ int ai = offset*(2*phase+1)-1; int bi = offset*(2*phase+2)-1; tmp[OFFSET_odegree+bi] += tmp[OFFSET_odegree+ai]; } offset<<=1; } __syncthreads(); nedges_blk = tmp[OFFSET_odegree+cosize-1]; __syncthreads(); if(!phase) tmp[OFFSET_odegree+cosize-1]=0; __syncthreads(); for(int d=1; d<cosize; d<<=1){ offset >>=1; __syncthreads(); if(phase<d){ int ai = offset*(2*phase+1)-1; int bi = offset*(2*phase+2)-1; int t = tmp[OFFSET_odegree + ai]; tmp[OFFSET_odegree+ai] = tmp[OFFSET_odegree+bi]; tmp[OFFSET_odegree+bi] += t; } } __syncthreads(); int full_tier = assize_align-cosize; int width = idx<(full_tier)?cosize:(assize-full_tier); //step 3: process cosize edges in parallel for(int i=phase; i<nedges_blk; i+=cosize){ int id = __upper_bound(&tmp[OFFSET_odegree], width, i)-1; if(tmp[OFFSET_blk+id] < 0) continue; int ei = tmp[OFFSET_start_pos+id] + i-tmp[OFFSET_odegree+id]; int u = __ldg(strict_adj_list+ei); bool toprocess = true; // data source must from Active vertex or the conf_fromall is enabled if(toprocess && !conf.conf_fromall) toprocess = as.bitmap.is_active(u); // if u pass all the checks, do the computation in the functor if(toprocess){ int v = tmp[OFFSET_blk+id]; auto vdata = f.emit(u, g.fetch_edata(ei), g); f.compAtomic(f.wa_of(v), vdata, g); } }//for }//for } template<> struct ExpandProxy<VC,CM,Push>{ template<typename E, typename F> static void expand(active_set_t as, device_graph_t<CSR,E> g, F f, config_t conf){ if(conf.conf_fuse_inspect) Launch_Expand_VC(CM_fused, as, g, f, conf) else Launch_Expand_VC(CM, as, g, f, conf); } template<typename E, typename F> static void expand(active_set_t as, device_graph_t<COO,E> g, F f, config_t conf){CudaCheckError();} }; template<> struct ExpandProxy<VC,CM,Pull>{ template<typename E, typename F> static void expand(active_set_t as, device_graph_t<CSR,E> g, F f, config_t conf){ Launch_RExpand_VC(CM, as, g, f, conf); } template<typename E, typename F> static void expand(active_set_t as, device_graph_t<COO,E> g, F f, config_t conf){} }; #endif
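All three kernels above share the same preamble: load up to blockDim.x vertices, compute an exclusive prefix sum of their out-degrees in shared memory with an up-sweep/down-sweep pass, and then let the whole block walk the nedges_blk edge slots, using __upper_bound on the prefix array to map each slot back to its source vertex. A standalone sketch of just the shared-memory exclusive scan, for a single block whose width is a power of two (kernel, buffer, and size names are illustrative):

// Single-block exclusive prefix sum mirroring the up-sweep/down-sweep above.
// COSIZE must be a power of two and equals the block size here.
#include <cstdio>
#include <cuda_runtime.h>

constexpr int COSIZE = 256;

__global__ void exclusive_scan_block(const int *degree, int *prefix, int *total) {
    __shared__ int tmp[COSIZE];
    int tid = threadIdx.x;
    tmp[tid] = degree[tid];
    __syncthreads();

    int offset = 1;
    for (int d = COSIZE >> 1; d > 0; d >>= 1) {      // up-sweep (reduce)
        __syncthreads();
        if (tid < d) {
            int ai = offset * (2 * tid + 1) - 1;
            int bi = offset * (2 * tid + 2) - 1;
            tmp[bi] += tmp[ai];
        }
        offset <<= 1;
    }
    __syncthreads();
    if (tid == 0) { *total = tmp[COSIZE - 1]; tmp[COSIZE - 1] = 0; }
    for (int d = 1; d < COSIZE; d <<= 1) {           // down-sweep
        offset >>= 1;
        __syncthreads();
        if (tid < d) {
            int ai = offset * (2 * tid + 1) - 1;
            int bi = offset * (2 * tid + 2) - 1;
            int t  = tmp[ai];
            tmp[ai] = tmp[bi];
            tmp[bi] += t;
        }
    }
    __syncthreads();
    prefix[tid] = tmp[tid];
}

int main() {
    int h_deg[COSIZE], h_pre[COSIZE], h_total = 0;
    for (int i = 0; i < COSIZE; i++) h_deg[i] = i % 4;   // fake out-degrees
    int *d_deg, *d_pre, *d_total;
    cudaMalloc(&d_deg, sizeof(h_deg));
    cudaMalloc(&d_pre, sizeof(h_pre));
    cudaMalloc(&d_total, sizeof(int));
    cudaMemcpy(d_deg, h_deg, sizeof(h_deg), cudaMemcpyHostToDevice);
    exclusive_scan_block<<<1, COSIZE>>>(d_deg, d_pre, d_total);
    cudaMemcpy(h_pre, d_pre, sizeof(h_pre), cudaMemcpyDeviceToHost);
    cudaMemcpy(&h_total, d_total, sizeof(int), cudaMemcpyDeviceToHost);
    std::printf("prefix[5] = %d, edges in block = %d\n", h_pre[5], h_total);
    cudaFree(d_deg); cudaFree(d_pre); cudaFree(d_total);
    return 0;
}

The kernels above additionally cope with a partially filled last tile (the assize_align / width bookkeeping); the sketch assumes a full tile to keep the scan itself visible.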
the_stack
* Test of WarpScan utilities ******************************************************************************/ // Ensure printing of CUDA runtime errors to console #define CUB_STDERR #include <stdio.h> #include <typeinfo> #include <cub/warp/warp_scan.cuh> #include <cub/util_allocator.cuh> #include "test_util.h" using namespace cub; //--------------------------------------------------------------------- // Globals, constants and typedefs //--------------------------------------------------------------------- static const int NUM_WARPS = 2; bool g_verbose = false; int g_repeat = 0; CachingDeviceAllocator g_allocator(true); /** * Primitive variant to test */ enum TestMode { BASIC, AGGREGATE, }; /** * \brief WrapperFunctor (for precluding test-specialized dispatch to *Sum variants) */ template<typename OpT> struct WrapperFunctor { OpT op; WrapperFunctor(OpT op) : op(op) {} template <typename T> __host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const { return op(a, b); } }; //--------------------------------------------------------------------- // Test kernels //--------------------------------------------------------------------- /// Exclusive scan basic template <typename WarpScanT, typename T, typename ScanOpT, typename IsPrimitiveT> __device__ __forceinline__ void DeviceTest( WarpScanT &warp_scan, T &data, T &initial_value, ScanOpT &scan_op, T &aggregate, Int2Type<BASIC> test_mode, IsPrimitiveT is_primitive) { // Test basic warp scan warp_scan.ExclusiveScan(data, data, initial_value, scan_op); } /// Exclusive scan aggregate template < typename WarpScanT, typename T, typename ScanOpT, typename IsPrimitiveT> __device__ __forceinline__ void DeviceTest( WarpScanT &warp_scan, T &data, T &initial_value, ScanOpT &scan_op, T &aggregate, Int2Type<AGGREGATE> test_mode, IsPrimitiveT is_primitive) { // Test with cumulative aggregate warp_scan.ExclusiveScan(data, data, initial_value, scan_op, aggregate); } /// Exclusive sum basic template < typename WarpScanT, typename T> __device__ __forceinline__ void DeviceTest( WarpScanT &warp_scan, T &data, T &initial_value, Sum &scan_op, T &aggregate, Int2Type<BASIC> test_mode, Int2Type<true> is_primitive) { // Test basic warp scan warp_scan.ExclusiveSum(data, data); } /// Exclusive sum aggregate template < typename WarpScanT, typename T> __device__ __forceinline__ void DeviceTest( WarpScanT &warp_scan, T &data, T &initial_value, Sum &scan_op, T &aggregate, Int2Type<AGGREGATE> test_mode, Int2Type<true> is_primitive) { // Test with cumulative aggregate warp_scan.ExclusiveSum(data, data, aggregate); } /// Inclusive scan basic template < typename WarpScanT, typename T, typename ScanOpT, typename IsPrimitiveT> __device__ __forceinline__ void DeviceTest( WarpScanT &warp_scan, T &data, NullType &initial_value, ScanOpT &scan_op, T &aggregate, Int2Type<BASIC> test_mode, IsPrimitiveT is_primitive) { // Test basic warp scan warp_scan.InclusiveScan(data, data, scan_op); } /// Inclusive scan aggregate template < typename WarpScanT, typename T, typename ScanOpT, typename IsPrimitiveT> __device__ __forceinline__ void DeviceTest( WarpScanT &warp_scan, T &data, NullType &initial_value, ScanOpT &scan_op, T &aggregate, Int2Type<AGGREGATE> test_mode, IsPrimitiveT is_primitive) { // Test with cumulative aggregate warp_scan.InclusiveScan(data, data, scan_op, aggregate); } /// Inclusive sum basic template < typename WarpScanT, typename T, typename InitialValueT> __device__ __forceinline__ void DeviceTest( WarpScanT &warp_scan, T &data, NullType &initial_value, Sum 
&scan_op, T &aggregate, Int2Type<BASIC> test_mode, Int2Type<true> is_primitive) { // Test basic warp scan warp_scan.InclusiveSum(data, data); } /// Inclusive sum aggregate template < typename WarpScanT, typename T, typename InitialValueT> __device__ __forceinline__ void DeviceTest( WarpScanT &warp_scan, T &data, NullType &initial_value, Sum &scan_op, T &aggregate, Int2Type<AGGREGATE> test_mode, Int2Type<true> is_primitive) { // Test with cumulative aggregate warp_scan.InclusiveSum(data, data, aggregate); } /** * WarpScan test kernel */ template < int LOGICAL_WARP_THREADS, TestMode TEST_MODE, typename T, typename ScanOpT, typename InitialValueT> __global__ void WarpScanKernel( T *d_in, T *d_out, T *d_aggregate, ScanOpT scan_op, InitialValueT initial_value, clock_t *d_elapsed) { // Cooperative warp-scan utility type (1 warp) typedef WarpScan<T, LOGICAL_WARP_THREADS> WarpScanT; // Allocate temp storage in shared memory __shared__ typename WarpScanT::TempStorage temp_storage[NUM_WARPS]; // Get warp index int warp_id = threadIdx.x / LOGICAL_WARP_THREADS; // Per-thread tile data T data = d_in[threadIdx.x]; // Start cycle timer __threadfence_block(); // workaround to prevent clock hoisting clock_t start = clock(); __threadfence_block(); // workaround to prevent clock hoisting T aggregate; // Test scan WarpScanT warp_scan(temp_storage[warp_id]); DeviceTest( warp_scan, data, initial_value, scan_op, aggregate, Int2Type<TEST_MODE>(), Int2Type<Traits<T>::PRIMITIVE>()); // Stop cycle timer __threadfence_block(); // workaround to prevent clock hoisting clock_t stop = clock(); __threadfence_block(); // workaround to prevent clock hoisting // Store data d_out[threadIdx.x] = data; if (TEST_MODE != BASIC) { // Store aggregate d_aggregate[threadIdx.x] = aggregate; } // Store time if (threadIdx.x == 0) { *d_elapsed = (start > stop) ? 
start - stop : stop - start; } } //--------------------------------------------------------------------- // Host utility subroutines //--------------------------------------------------------------------- /** * Initialize exclusive-scan problem (and solution) */ template < typename T, typename ScanOpT> void Initialize( GenMode gen_mode, T *h_in, T *h_reference, int logical_warp_items, ScanOpT scan_op, T initial_value, T warp_aggregates[NUM_WARPS]) { for (int w = 0; w < NUM_WARPS; ++w) { int base_idx = (w * logical_warp_items); int i = base_idx; InitValue(gen_mode, h_in[i], i); T warp_aggregate = h_in[i]; h_reference[i] = initial_value; T inclusive = scan_op(initial_value, h_in[i]); for (i = i + 1; i < base_idx + logical_warp_items; ++i) { InitValue(gen_mode, h_in[i], i); h_reference[i] = inclusive; inclusive = scan_op(inclusive, h_in[i]); warp_aggregate = scan_op(warp_aggregate, h_in[i]); } warp_aggregates[w] = warp_aggregate; } } /** * Initialize inclusive-scan problem (and solution) */ template < typename T, typename ScanOpT> void Initialize( GenMode gen_mode, T *h_in, T *h_reference, int logical_warp_items, ScanOpT scan_op, NullType, T warp_aggregates[NUM_WARPS]) { for (int w = 0; w < NUM_WARPS; ++w) { int base_idx = (w * logical_warp_items); int i = base_idx; InitValue(gen_mode, h_in[i], i); T warp_aggregate = h_in[i]; T inclusive = h_in[i]; h_reference[i] = inclusive; for (i = i + 1; i < base_idx + logical_warp_items; ++i) { InitValue(gen_mode, h_in[i], i); inclusive = scan_op(inclusive, h_in[i]); warp_aggregate = scan_op(warp_aggregate, h_in[i]); h_reference[i] = inclusive; } warp_aggregates[w] = warp_aggregate; } } /** * Test warp scan */ template < int LOGICAL_WARP_THREADS, TestMode TEST_MODE, typename T, typename ScanOpT, typename InitialValueT> // NullType implies inclusive-scan, otherwise inclusive scan void Test( GenMode gen_mode, ScanOpT scan_op, InitialValueT initial_value) { enum { TOTAL_ITEMS = LOGICAL_WARP_THREADS * NUM_WARPS, }; // Allocate host arrays T *h_in = new T[TOTAL_ITEMS]; T *h_reference = new T[TOTAL_ITEMS]; T *h_aggregate = new T[TOTAL_ITEMS]; // Initialize problem T aggregates[NUM_WARPS]; Initialize( gen_mode, h_in, h_reference, LOGICAL_WARP_THREADS, scan_op, initial_value, aggregates); if (g_verbose) { printf("Input: \n"); DisplayResults(h_in, TOTAL_ITEMS); printf("\n"); } for (int w = 0; w < NUM_WARPS; ++w) { for (int i = 0; i < LOGICAL_WARP_THREADS; ++i) { h_aggregate[(w * LOGICAL_WARP_THREADS) + i] = aggregates[w]; } } // Initialize/clear device arrays T *d_in = NULL; T *d_out = NULL; T *d_aggregate = NULL; clock_t *d_elapsed = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * TOTAL_ITEMS)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * (TOTAL_ITEMS + 1))); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_aggregate, sizeof(T) * TOTAL_ITEMS)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(clock_t))); CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * TOTAL_ITEMS, cudaMemcpyHostToDevice)); CubDebugExit(cudaMemset(d_out, 0, sizeof(T) * (TOTAL_ITEMS + 1))); CubDebugExit(cudaMemset(d_aggregate, 0, sizeof(T) * TOTAL_ITEMS)); // Run kernel printf("Test-mode %d (%s), gen-mode %d (%s), %s warpscan, %d warp threads, %s (%d bytes) elements:\n", TEST_MODE, typeid(TEST_MODE).name(), gen_mode, typeid(gen_mode).name(), (Equals<InitialValueT, NullType>::VALUE) ? 
"Inclusive" : "Exclusive", LOGICAL_WARP_THREADS, typeid(T).name(), (int) sizeof(T)); fflush(stdout); // Run aggregate/prefix kernel WarpScanKernel<LOGICAL_WARP_THREADS, TEST_MODE><<<1, TOTAL_ITEMS>>>( d_in, d_out, d_aggregate, scan_op, initial_value, d_elapsed); printf("\tElapsed clocks: "); DisplayDeviceResults(d_elapsed, 1); CubDebugExit(cudaPeekAtLastError()); CubDebugExit(cudaDeviceSynchronize()); // Copy out and display results printf("\tScan results: "); int compare = CompareDeviceResults(h_reference, d_out, TOTAL_ITEMS, g_verbose, g_verbose); printf("%s\n", compare ? "FAIL" : "PASS"); AssertEquals(0, compare); // Copy out and display aggregate if (TEST_MODE == AGGREGATE) { printf("\tScan aggregate: "); compare = CompareDeviceResults(h_aggregate, d_aggregate, TOTAL_ITEMS, g_verbose, g_verbose); printf("%s\n", compare ? "FAIL" : "PASS"); AssertEquals(0, compare); } // Cleanup if (h_in) delete[] h_in; if (h_reference) delete[] h_reference; if (h_aggregate) delete[] h_aggregate; if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); if (d_aggregate) CubDebugExit(g_allocator.DeviceFree(d_aggregate)); if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed)); } /** * Run battery of tests for different primitive variants */ template < int LOGICAL_WARP_THREADS, typename ScanOpT, typename T> void Test( GenMode gen_mode, ScanOpT scan_op, T initial_value) { // Exclusive Test<LOGICAL_WARP_THREADS, BASIC, T>(gen_mode, scan_op, T()); Test<LOGICAL_WARP_THREADS, AGGREGATE, T>(gen_mode, scan_op, T()); // Exclusive (non-specialized, so we can use initial-value) Test<LOGICAL_WARP_THREADS, BASIC, T>(gen_mode, WrapperFunctor<ScanOpT>(scan_op), initial_value); Test<LOGICAL_WARP_THREADS, AGGREGATE, T>(gen_mode, WrapperFunctor<ScanOpT>(scan_op), initial_value); // Inclusive Test<LOGICAL_WARP_THREADS, BASIC, T>(gen_mode, scan_op, NullType()); Test<LOGICAL_WARP_THREADS, AGGREGATE, T>(gen_mode, scan_op, NullType()); } /** * Run battery of tests for different data types and scan ops */ template <int LOGICAL_WARP_THREADS> void Test(GenMode gen_mode) { // Get device ordinal int device_ordinal; CubDebugExit(cudaGetDevice(&device_ordinal)); // Get ptx version int ptx_version; CubDebugExit(PtxVersion(ptx_version)); // primitive Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), (char) 99); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), (short) 99); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), (int) 99); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), (long) 99); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), (long long) 99); if (gen_mode != RANDOM) { // Only test numerically stable inputs Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), (float) 99); if (ptx_version > 100) Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), (double) 99); } // primitive (alternative scan op) Test<LOGICAL_WARP_THREADS>(gen_mode, Max(), (unsigned char) 99); Test<LOGICAL_WARP_THREADS>(gen_mode, Max(), (unsigned short) 99); Test<LOGICAL_WARP_THREADS>(gen_mode, Max(), (unsigned int) 99); Test<LOGICAL_WARP_THREADS>(gen_mode, Max(), (unsigned long long) 99); // vec-2 Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_uchar2(17, 21)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_ushort2(17, 21)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_uint2(17, 21)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_ulong2(17, 21)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_ulonglong2(17, 21)); if (gen_mode != RANDOM) { // Only test numerically stable inputs Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), 
make_float2(17, 21)); if (ptx_version > 100) Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_double2(17, 21)); } // vec-4 Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_char4(17, 21, 32, 85)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_short4(17, 21, 32, 85)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_int4(17, 21, 32, 85)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_long4(17, 21, 32, 85)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_longlong4(17, 21, 32, 85)); if (gen_mode != RANDOM) { // Only test numerically stable inputs Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_float4(17, 21, 32, 85)); if (ptx_version > 100) Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_double4(17, 21, 32, 85)); } // complex Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), TestFoo::MakeTestFoo(17, 21, 32, 85)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), TestBar(17, 21)); } /** * Run battery of tests for different problem generation options */ template <int LOGICAL_WARP_THREADS> void Test() { Test<LOGICAL_WARP_THREADS>(UNIFORM); Test<LOGICAL_WARP_THREADS>(INTEGER_SEED); Test<LOGICAL_WARP_THREADS>(RANDOM); } /** * Main */ int main(int argc, char** argv) { // Initialize command line CommandLineArgs args(argc, argv); g_verbose = args.CheckCmdLineFlag("v"); args.GetCmdLineArgument("repeat", g_repeat); // Print usage if (args.CheckCmdLineFlag("help")) { printf("%s " "[--device=<device-id>] " "[--repeat=<repetitions of entire test suite>]" "[--v] " "\n", argv[0]); exit(0); } // Initialize device CubDebugExit(args.DeviceInit()); #ifdef QUICK_TEST // Compile/run quick tests Test<32, AGGREGATE, int>(UNIFORM, Sum(), (int) 0); Test<32, AGGREGATE, float>(UNIFORM, Sum(), (float) 0); Test<32, AGGREGATE, long long>(UNIFORM, Sum(), (long long) 0); Test<32, AGGREGATE, double>(UNIFORM, Sum(), (double) 0); typedef KeyValuePair<int, float> T; cub::Sum sum_op; Test<32, AGGREGATE, T>(UNIFORM, ReduceBySegmentOp<cub::Sum>(sum_op), T()); #else // Compile/run thorough tests for (int i = 0; i <= g_repeat; ++i) { // Test logical warp sizes Test<32>(); Test<16>(); Test<9>(); Test<2>(); } #endif return 0; }
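Outside of this exhaustive test harness, the cub::WarpScan primitive being exercised boils down to a few lines: construct it over per-warp temporary storage and call one of the scan members. A minimal sketch for a single 32-thread warp (the kernel name, launch shape, and input values are illustrative):

// Minimal standalone use of cub::WarpScan for one 32-thread warp.
#include <cstdio>
#include <cub/warp/warp_scan.cuh>
#include <cuda_runtime.h>

__global__ void warp_inclusive_sum(int *d_out, int *d_aggregate) {
    typedef cub::WarpScan<int> WarpScanT;
    __shared__ typename WarpScanT::TempStorage temp_storage;

    int thread_data = (int)threadIdx.x;   // lane id as input
    int aggregate;
    WarpScanT(temp_storage).InclusiveSum(thread_data, thread_data, aggregate);

    d_out[threadIdx.x] = thread_data;                // running sums 0, 1, 3, 6, ...
    if (threadIdx.x == 0) *d_aggregate = aggregate;  // 496 for lanes 0..31
}

int main() {
    int h_out[32], h_agg = 0;
    int *d_out, *d_agg;
    cudaMalloc(&d_out, sizeof(h_out));
    cudaMalloc(&d_agg, sizeof(int));
    warp_inclusive_sum<<<1, 32>>>(d_out, d_agg);
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    cudaMemcpy(&h_agg, d_agg, sizeof(int), cudaMemcpyDeviceToHost);
    std::printf("last inclusive sum = %d, warp aggregate = %d\n", h_out[31], h_agg);
    cudaFree(d_out); cudaFree(d_agg);
    return 0;
}

The same TempStorage pattern scales to several logical warps per block by indexing a TempStorage array with threadIdx.x / LOGICAL_WARP_THREADS, exactly as WarpScanKernel does above.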
the_stack
namespace xlib { namespace device_reduce { const unsigned BLOCK_SIZE = 32; namespace kernel { template<unsigned UNROLL_STEPS = 1, typename R, typename T, typename ThreadOp, typename WarpOp, typename SeletOp> __global__ void reduce_arg(T* __restrict__ d_in, int num_items, const ThreadOp& thread_op, const WarpOp& warp_op, const SeletOp& select_op, long long unsigned* __restrict__ d_out, R init_value) { const unsigned RATIO = sizeof(int4) / sizeof(T); const unsigned THREAD_ITEMS = UNROLL_STEPS * 2 * RATIO; int idx = blockIdx.x * blockDim.x + threadIdx.x; int approx_size = xlib::lower_approx<WARP_SIZE>(num_items / THREAD_ITEMS); int stride = gridDim.x * blockDim.x; auto d_tmp = d_in + idx * RATIO; for (int i = idx; i < approx_size; i += stride * THREAD_ITEMS) { T storage[THREAD_ITEMS]; int indices[THREAD_ITEMS]; #pragma unroll for (int J = 0; J < UNROLL_STEPS; J++) { #pragma unroll for (int K = 0; K < RATIO; K++) { indices[RATIO * (J * 2) + K] = RATIO * (i + stride * J * 2) + K; indices[RATIO * (J * 2 + 1) + K] = RATIO * (i + stride * (J * 2 + 1)) + K; } reinterpret_cast<int4*>(storage)[J * 2] = reinterpret_cast<int4*>(d_tmp)[stride * J * 2]; reinterpret_cast<int4*>(storage)[J * 2 + 1] = __ldg(&reinterpret_cast<int4*>(d_tmp)[stride * (J * 2 + 1)]); } R array[THREAD_ITEMS]; #pragma unroll for (int J = 0; J < THREAD_ITEMS; J++) array[J] = select_op(storage[J]); #pragma unroll for (int STRIDE = 1; STRIDE < THREAD_ITEMS; STRIDE *= 2) { #pragma unroll for (int INDEX = 0; INDEX < THREAD_ITEMS; INDEX += STRIDE * 2) { thread_op(array[INDEX], indices[INDEX], array[INDEX + STRIDE], indices[INDEX + STRIDE]); } } warp_op(array[0], indices[0], d_out); d_tmp += stride * THREAD_ITEMS; } R reduction = init_value; int index = approx_size * THREAD_ITEMS + idx; stride = blockDim.x * gridDim.x; if (xlib::lower_approx<WARP_SIZE>(index) >= num_items) return; for (int i = index; i < num_items; i += stride) thread_op(reduction, index, select_op(d_in[i]), i); warp_op(reduction, index, d_out); } } // namespace kernel template<unsigned UNROLL_STEPS = 1, typename R, typename T, typename SeletOp, typename ThreadOp, typename AtomicOp> typename std::pair<R, int> reduce_arg(const T* __restrict__ d_in, int num_items, const SeletOp& select_op, const ThreadOp& thread_op, const AtomicOp& atomic_op, long long unsigned init_value) { using ULL = long long unsigned; const unsigned RATIO = sizeof(int4) / sizeof(T); const unsigned THREAD_ITEMS = UNROLL_STEPS * 2 * RATIO; const auto& warp_op = [=] __device__ (R& value, int index, ULL* __restrict__ d_out) { #pragma unroll for (int i = WARP_SIZE / 2; i >= 1; i /= 2) { auto tmp_value = xlib::shfl_xor(value, i); int tmp_index = __shfl_xor(index, i); thread_op(value, index, tmp_value, tmp_index); } //printf("-->%d %d\t %d \t %d\n", blockIdx.x, threadIdx.x, value, index); int value_int = reinterpret_cast<int&>(value); auto value2 = make_int2(value_int, index); auto value_ull = reinterpret_cast<long long unsigned&>(value2); atomic_op(d_out, value_ull); }; ULL *d_out; int2 h_out; cuMalloc(d_out,1); cuMemcpyToDevice(&init_value, 1, d_out); device_reduce::kernel::reduce_arg<UNROLL_STEPS, R> <<< xlib::ceil_div<BLOCK_SIZE * THREAD_ITEMS>(num_items), BLOCK_SIZE >>> (const_cast<T*>(d_in), num_items, thread_op, warp_op, select_op, d_out, reinterpret_cast<const R&>(init_value)); cuMemcpyToHost(d_out, 1, static_cast<R*>(&h_out)); cuFree(d_out); return std::pair<R, int>(reinterpret_cast<R&>(h_out.x), h_out.y); } template<unsigned UNROLL_STEPS = 1, typename R, typename T, typename SeletOp> typename 
std::pair<R, int> reduce_argmax(const T* __restrict__ d_in, int num_items, const SeletOp& select_op) { const auto init_ull = std::numeric_limits<long long unsigned>::lowest(); const auto& thread_op = [] __device__ (R& valueA, int& indexA, const R& valueB, int indexB) { if (valueB > valueA) { valueA = valueB; indexA = indexB; } }; const auto& atomic_op = [] __device__ (long long unsigned* d_out, long long unsigned value_ull) { atomicMax(d_out, value_ull); }; return reduce_arg<UNROLL_STEPS, R>(d_in, num_items, select_op, thread_op, atomic_op, init_ull); } template<unsigned UNROLL_STEPS = 1, typename T> typename std::pair<T, int> argMax(const T* __restrict__ d_in, int num_items) { const auto& select_op = [] __device__ (const T& value) { return value; }; return device_reduce::reduce_argmax<UNROLL_STEPS, T> (d_in, num_items, select_op); } //============================================================================== //============================================================================== namespace kernel { template<unsigned UNROLL_STEPS = 1, typename T, typename R, typename ThreadOp, typename WarpOp> __device__ __forceinline__ void reduce(T* __restrict__ d_in, int num_items, const ThreadOp& thread_op, const WarpOp& warp_op, R* __restrict__ d_out, const T& zero_value) { const unsigned RATIO = sizeof(int4) / sizeof(T); //const unsigned THREAD_ITEMS = UNROLL_STEPS * 2 * RATIO; const unsigned THREAD_ITEMS = UNROLL_STEPS * RATIO; int idx = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int approx_size = xlib::lower_approx<WARP_SIZE>(num_items / THREAD_ITEMS); d_in += idx * RATIO; for (int i = idx; i < approx_size; i += stride * THREAD_ITEMS) { T array[THREAD_ITEMS]; #pragma unroll for (int J = 0; J < UNROLL_STEPS; J++) { reinterpret_cast<int4*>(array)[J] = reinterpret_cast<int4*>(d_in)[stride * J]; //reinterpret_cast<int4*>(array)[J * 2] = // reinterpret_cast<int4*>(d_in)[stride * J * 2]; //reinterpret_cast<int4*>(array)[J * 2 + 1] = // __ldg(&reinterpret_cast<int4*>(d_in)[stride * (J * 2 + 1)]); } #pragma unroll for (int STRIDE = 1; STRIDE < THREAD_ITEMS; STRIDE *= 2) { #pragma unroll for (int INDEX = 0; INDEX < THREAD_ITEMS; INDEX += STRIDE * 2) array[INDEX] = thread_op(array[INDEX], array[INDEX + STRIDE]); } warp_op(array[0], d_out); d_in += stride * THREAD_ITEMS; } T reduction = zero_value; for(int i = approx_size * THREAD_ITEMS + idx; i < num_items; i += stride) { reduction = thread_op(reduction, *d_in); d_in += stride; } warp_op(reduction, d_out); } template<unsigned UNROLL_STEPS = 1, typename T, typename R, typename ThreadOp, typename WarpOp> __global__ void reduceGlobal(T* __restrict__ d_in, int num_items, ThreadOp thread_op, WarpOp warp_op, R* __restrict__ d_out, T zero_value) { reduce<UNROLL_STEPS> (d_in, num_items, thread_op, warp_op, d_out, zero_value); } //------------------------------------------------------------------------------ template<unsigned UNROLL_STEPS, typename T, typename R> __global__ void add(T* __restrict__ d_in, int num_items, R* __restrict__ d_out, T zero_value) { const auto& thread_op = [] (const T& a, const T& b) { return a + b; }; const auto& warp_op = [] (const T& value, R* __restrict__ d_out) { WarpReduce<>::atomicAdd(value, d_out); }; reduce<UNROLL_STEPS> (d_in, num_items, thread_op, warp_op, d_out, zero_value); } template<unsigned UNROLL_STEPS, typename T, typename R> __global__ void min(T* __restrict__ d_in, int num_items, R* __restrict__ d_out, T zero_value) { const auto& thread_op = [] (const T& a, const T& b) { return 
::min(a, b); }; const auto& warp_op = [] (const T& value, R* __restrict__ d_out) { WarpReduce<>::atomicMin(value, d_out); }; reduce<UNROLL_STEPS> (d_in, num_items, thread_op, warp_op, d_out, zero_value); } template<unsigned UNROLL_STEPS, typename T, typename R> __global__ void max(T* __restrict__ d_in, int num_items, R* __restrict__ d_out, T zero_value) { const auto& thread_op = [] (const T& a, const T& b) { return ::max(a, b); }; const auto& warp_op = [] (const T& value, R* __restrict__ d_out) { WarpReduce<>::atomicMax(value, d_out); }; reduce<UNROLL_STEPS> (d_in, num_items, thread_op, warp_op, d_out, zero_value); } } // namespace kernel //============================================================================== template<unsigned UNROLL_STEPS = 1, typename T, typename R, typename ThreadOp, typename WarpOp> R apply(const T* __restrict__ d_in, int num_items, const ThreadOp& thread_op, const WarpOp& warp_op, const R& init_value, const T& zero_value = T()) { const unsigned RATIO = sizeof(int4) / sizeof(T); const unsigned THREAD_ITEMS = UNROLL_STEPS * 2 * RATIO; R h_out, *d_out; cuMalloc(d_out, 1); cuMemcpyToDevice(&init_value, 1, d_out); device_reduce::kernel::reduceGlobal<UNROLL_STEPS> <<< xlib::ceil_div<BLOCK_SIZE * THREAD_ITEMS>(num_items), BLOCK_SIZE >>> (const_cast<T*>(d_in), num_items, thread_op, warp_op, d_out, zero_value); cuMemcpyToHost(d_out, 1, &h_out); cuFree(d_out); return h_out; } //------------------------------------------------------------------------------ template<unsigned UNROLL_STEPS = 1, typename T, typename R = T> T max(const T* __restrict__ d_in, int num_items, const R& init_value = std::numeric_limits<R>::lowest(), const T& zero_value = T()) { const unsigned RATIO = sizeof(int4) / sizeof(T); const unsigned THREAD_ITEMS = UNROLL_STEPS * 2 * RATIO; R h_out, *d_out; cuMalloc(d_out, 1) cuMemcpyToDevice(&init_value, 1, d_out); device_reduce::kernel::max<UNROLL_STEPS> <<< xlib::ceil_div<BLOCK_SIZE * THREAD_ITEMS>(num_items), BLOCK_SIZE >>> (const_cast<T*>(d_in), num_items, d_out, zero_value); cuMemcpyToHost(d_out, 1, &h_out); cuFree(d_out); return h_out; } template<unsigned UNROLL_STEPS = 1, typename T, typename R> void max(const T* __restrict__ d_in, int num_items, R* __restrict__ d_out, const T& zero_value = T()) { const unsigned RATIO = sizeof(int4) / sizeof(T); const unsigned THREAD_ITEMS = UNROLL_STEPS * 2 * RATIO; device_reduce::kernel::max<UNROLL_STEPS> <<< xlib::ceil_div<BLOCK_SIZE * THREAD_ITEMS>(num_items), BLOCK_SIZE >>> (const_cast<T*>(d_in), num_items, d_out, zero_value); } //------------------------------------------------------------------------------ template<unsigned UNROLL_STEPS = 1, typename T, typename R = T> T add(const T* __restrict__ d_in, int num_items, const T& zero_value = T()) { const unsigned RATIO = sizeof(int4) / sizeof(T); //const unsigned THREAD_ITEMS = UNROLL_STEPS * 2 * RATIO; const unsigned THREAD_ITEMS = UNROLL_STEPS * RATIO; R h_out, *d_out; cuMalloc(d_out, 1); const auto value = R(0); cuMemcpyToDevice(&value, 1, d_out); device_reduce::kernel::add<UNROLL_STEPS> <<< xlib::ceil_div<BLOCK_SIZE * THREAD_ITEMS>(num_items), BLOCK_SIZE >>> (const_cast<T*>(d_in), num_items, d_out, zero_value); CHECK_CUDA_ERROR cuMemcpyToHost(d_out, 1, &h_out); cuFree(d_out); return h_out; } template<unsigned UNROLL_STEPS = 1, typename T, typename R> void add(const T* __restrict__ d_in, int num_items, R* __restrict__ d_out, const T& zero_value = T()) { const unsigned RATIO = sizeof(int4) / sizeof(T); const unsigned THREAD_ITEMS = UNROLL_STEPS * RATIO; 
    device_reduce::kernel::add<UNROLL_STEPS>
        <<< xlib::ceil_div<BLOCK_SIZE * THREAD_ITEMS>(num_items), BLOCK_SIZE >>>
        (const_cast<T*>(d_in), num_items, d_out, zero_value);
}

} // namespace device_reduce
} // namespace xlib
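reduce_arg() above resolves the argmax across threads by packing the running (value, index) pair into a single 64-bit word and letting one atomic on that word pick the winner. A minimal sketch of that packing idea for non-negative int values, with the value in the high 32 bits so that a single atomicMax orders by value first; this shows the generic trick, not necessarily the exact layout xlib uses, all names are illustrative, and ties resolve toward the larger index. It also assumes a GPU with 64-bit atomicMax support, as the code above already does.

// Packed-word argmax sketch: value in the high 32 bits, index in the low 32.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void argmax_packed(const int *d_in, int num_items,
                              unsigned long long *d_packed) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= num_items) return;
    unsigned long long packed =
        ((unsigned long long)(unsigned)d_in[i] << 32) | (unsigned)i;
    atomicMax(d_packed, packed);   // ordered by value first, then index
}

int main() {
    const int N = 1000;
    int h_in[N];
    for (int i = 0; i < N; i++) h_in[i] = (i * 37) % 911;   // toy data
    int *d_in; unsigned long long *d_packed, h_packed = 0;
    cudaMalloc(&d_in, sizeof(h_in));
    cudaMalloc(&d_packed, sizeof(unsigned long long));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
    cudaMemcpy(d_packed, &h_packed, sizeof(h_packed), cudaMemcpyHostToDevice);
    argmax_packed<<<(N + 255) / 256, 256>>>(d_in, N, d_packed);
    cudaMemcpy(&h_packed, d_packed, sizeof(h_packed), cudaMemcpyDeviceToHost);
    std::printf("max value = %d at index %d\n",
                (int)(h_packed >> 32), (int)(h_packed & 0xffffffffu));
    cudaFree(d_in); cudaFree(d_packed);
    return 0;
}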
the_stack
#define PI 3.14159265 // 2** 31 __device__ const uint32_t REPLUSIVE_INIT = 2147483648; __device__ const int direction[8][2]={1,0, 1,1, 0,1, -1,1, -1,0, -1,-1, 0,-1, 1,-1}; #define CUDA_1D_KERNEL_LOOP(index, nthreads) \ for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; \ index += blockDim.x * gridDim.x) __global__ void find_parents( const int nthreads, const int height, const int width, const float theta_a, const torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> input_angles, torch::PackedTensorAccessor32<int,3,torch::RestrictPtrTraits> parents, torch::PackedTensorAccessor32<int,2,torch::RestrictPtrTraits> roots) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int curr_w = index % width; int curr_h = index / width; float curr_angle = input_angles[curr_h][curr_w]; int pos=(curr_angle + PI/8)/(PI/4); if(pos >= 8) pos-=8; int next_h = curr_h + direction[pos][0]; int next_w = curr_w + direction[pos][1]; if (next_h >= height || next_h < 0 || next_w >= width || next_w < 0) { parents[0][curr_h][curr_w] = curr_h; parents[1][curr_h][curr_w] = curr_w; roots[curr_h][curr_w] = 1; return; } float next_angle = input_angles[next_h][next_w]; float angle_diff = abs(curr_angle - next_angle); angle_diff = min(angle_diff, 2*PI - angle_diff); if (angle_diff > theta_a * PI / 180) { parents[0][curr_h][curr_w] = curr_h; parents[1][curr_h][curr_w] = curr_w; roots[curr_h][curr_w] = 1; return; } parents[0][curr_h][curr_w] = next_h; parents[1][curr_h][curr_w] = next_w; } } __global__ void get_super_BPDs_step1( const int nthreads, const int height, const int width, torch::PackedTensorAccessor32<int,3,torch::RestrictPtrTraits> parents, int* super_BPDs) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int curr_w = index % width; int curr_h = index / width; int next_h = parents[0][curr_h][curr_w]; int next_w = parents[1][curr_h][curr_w]; int next_index = next_h*width + next_w; UNION(super_BPDs, index, next_index); } } __global__ void get_super_BPDs_step2( const int nthreads, int* super_BPDs) { CUDA_1D_KERNEL_LOOP(index, nthreads) { super_BPDs[index] = FIND(super_BPDs, index) + 1; } } __global__ void merge_nearby_root_pixels( const int nthreads, const int height, const int width, torch::PackedTensorAccessor32<int,2,torch::RestrictPtrTraits> roots, int* super_BPDs) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int curr_w = index % width; int curr_h = index / width; if (!roots[curr_h][curr_w]) return; for (int delta_h=0; delta_h<=min(3, height-1-curr_h); delta_h++) { for (int delta_w=-min(3, curr_w); delta_w<=min(3, width-1-curr_w); delta_w++) { int next_h = curr_h + delta_h; int next_w = curr_w + delta_w; if (roots[next_h][next_w]) { int next_index = next_h*width + next_w; UNION(super_BPDs, index, next_index); } } } } } __global__ void find_bnd_angle_diff( const int nthreads, const int height, const int width, const int num_superpixels, torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> input_angles, int* super_BPDs, torch::PackedTensorAccessor32<int,3,torch::RestrictPtrTraits> parents, torch::PackedTensorAccessor32<int,2,torch::RestrictPtrTraits> unique_super_BPDs_inverse, float* bnd_angle_diff, int* bnd_pair_nums) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int curr_w = index % width; int curr_h = index / width; int curr_index = curr_h*width + curr_w; // right and bottom point int delta_h[2] = {0,1}; int delta_w[2] = {1,0}; for (int i=0; i<2; i++) { int next_h = curr_h + delta_h[i]; int next_w = curr_w + delta_w[i]; if (next_w >= width || next_h >= height) continue; int next_index = 
next_h*width + next_w; if (super_BPDs[curr_index] != super_BPDs[next_index]) { int curr_position = unique_super_BPDs_inverse[curr_h][curr_w]; int next_position = unique_super_BPDs_inverse[next_h][next_w]; int min_position = min(curr_position, next_position); int max_position = max(curr_position, next_position); atomicAdd(bnd_pair_nums + min_position*num_superpixels + max_position, 1); // forward 3 steps respectively, then calculate angle diff int steps = 3; while (steps--) { int curr_parent_h = parents[0][curr_h][curr_w]; int curr_parent_w = parents[1][curr_h][curr_w]; curr_h = curr_parent_h; curr_w = curr_parent_w; int next_parent_h = parents[0][next_h][next_w]; int next_parent_w = parents[1][next_h][next_w]; next_h = next_parent_h; next_w = next_parent_w; } float curr_angle = input_angles[curr_h][curr_w]; float next_angle = input_angles[next_h][next_w]; float angle_diff = abs(curr_angle - next_angle); angle_diff = min(angle_diff, 2*PI - angle_diff); atomicAdd(bnd_angle_diff + min_position*num_superpixels + max_position, angle_diff); } } } } __global__ void classify_edges( const int nthreads, const int num_superpixels, const int nums, const float S_o, torch::PackedTensorAccessor32<float,2,torch::RestrictPtrTraits> bnd_angle_diff, torch::PackedTensorAccessor32<int,2,torch::RestrictPtrTraits> bnd_pair_nums, torch::PackedTensorAccessor32<bool,2,torch::RestrictPtrTraits> select_matrix, torch::PackedTensorAccessor32<int,2,torch::RestrictPtrTraits> edge_h, torch::PackedTensorAccessor32<int,2,torch::RestrictPtrTraits> edge_w, int* replusive_matrix) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int curr_w = index % num_superpixels; int curr_h = index / num_superpixels; if (bnd_pair_nums[curr_h][curr_w] == 0) return; float avg_angle_diff = bnd_angle_diff[curr_h][curr_w] / bnd_pair_nums[curr_h][curr_w]; bnd_angle_diff[curr_h][curr_w] = avg_angle_diff; if (avg_angle_diff > PI - S_o * PI / 180) { int inter_h = curr_w / 32; int inter_w = curr_w % 32; atomicOr(replusive_matrix + curr_h*nums + inter_h, REPLUSIVE_INIT >> inter_w); return; } select_matrix[curr_h][curr_w] = 1; edge_h[curr_h][curr_w] = curr_h; edge_w[curr_h][curr_w] = curr_w; } } __global__ void final_step( const int nthreads, torch::PackedTensorAccessor32<uint8_t,1,torch::RestrictPtrTraits> connect_marks, torch::PackedTensorAccessor32<int,1,torch::RestrictPtrTraits> edge_h, torch::PackedTensorAccessor32<int,1,torch::RestrictPtrTraits> edge_w, torch::PackedTensorAccessor32<int,1,torch::RestrictPtrTraits> unique_super_BPDs, int* super_BPDs) { CUDA_1D_KERNEL_LOOP(index, nthreads) { if (connect_marks[index]) { int index_h = unique_super_BPDs[edge_h[index]] - 1; int index_w = unique_super_BPDs[edge_w[index]] - 1; UNION(super_BPDs, index_h, index_w); } } } std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, \ torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor> \ bpd_cuda(const torch::Tensor input_angles, const int height, const int width, const float theta_a, const float S_o) { const int kThreadsPerBlock = 1024; const int blocks = (height*width + kThreadsPerBlock - 1) / kThreadsPerBlock; torch::Tensor parents = torch::zeros({2, height, width}, torch::CUDA(torch::kInt32)); torch::Tensor roots = torch::zeros({height, width}, torch::CUDA(torch::kInt32)); // get parents and roots find_parents<<<blocks, kThreadsPerBlock>>>( height*width, height, width, theta_a, input_angles.packed_accessor32<float,2,torch::RestrictPtrTraits>(), parents.packed_accessor32<int,3,torch::RestrictPtrTraits>(), 
roots.packed_accessor32<int,2,torch::RestrictPtrTraits>() ); // get super-BPDs, index from 0 ~ height*width - 1, init label from 1 ~ height*width torch::Tensor super_BPDs = torch::arange(1, height*width + 1, torch::CUDA(torch::kInt32)); get_super_BPDs_step1<<<blocks, kThreadsPerBlock>>>( height*width, height, width, parents.packed_accessor32<int,3,torch::RestrictPtrTraits>(), super_BPDs.contiguous().data_ptr<int>() ); get_super_BPDs_step2<<<blocks, kThreadsPerBlock>>>( height*width, super_BPDs.contiguous().data_ptr<int>() ); auto super_BPDs_before_dilation = super_BPDs.clone(); super_BPDs_before_dilation = super_BPDs_before_dilation.reshape({height, width}); // merge nearby root pixels merge_nearby_root_pixels<<<blocks, kThreadsPerBlock>>>( height*width, height, width, roots.packed_accessor32<int,2,torch::RestrictPtrTraits>(), super_BPDs.contiguous().data_ptr<int>() ); get_super_BPDs_step2<<<blocks, kThreadsPerBlock>>>( height*width, super_BPDs.contiguous().data_ptr<int>() ); auto super_BPDs_after_dilation = super_BPDs.clone(); super_BPDs_after_dilation = super_BPDs_after_dilation.reshape({height, width}); // construct RAG auto unique_results = torch::_unique2(super_BPDs, true, true, true); auto unique_super_BPDs = std::get<0>(unique_results); auto unique_super_BPDs_inverse = std::get<1>(unique_results); unique_super_BPDs_inverse = unique_super_BPDs_inverse.to(torch::kInt32); unique_super_BPDs_inverse = unique_super_BPDs_inverse.reshape({height, width}); auto unique_super_BPDs_counts = std::get<2>(unique_results); unique_super_BPDs_counts = unique_super_BPDs_counts.to(torch::kInt32); int num_superpixels = unique_super_BPDs.numel(); torch::Tensor bnd_angle_diff = torch::zeros({num_superpixels, num_superpixels}, torch::CUDA(torch::kFloat32)); torch::Tensor bnd_pair_nums = torch::zeros({num_superpixels, num_superpixels}, torch::CUDA(torch::kInt32)); find_bnd_angle_diff<<<blocks, kThreadsPerBlock>>>( height*width, height, width, num_superpixels, input_angles.packed_accessor32<float,2,torch::RestrictPtrTraits>(), super_BPDs.contiguous().data_ptr<int>(), parents.packed_accessor32<int,3,torch::RestrictPtrTraits>(), unique_super_BPDs_inverse.packed_accessor32<int,2,torch::RestrictPtrTraits>(), bnd_angle_diff.contiguous().data_ptr<float>(), bnd_pair_nums.contiguous().data_ptr<int>() ); // classify edges (replusive, large, small, tiny) torch::Tensor select_matrix = torch::zeros({num_superpixels, num_superpixels}, torch::CUDA(torch::kBool)); torch::Tensor edge_h = torch::zeros({num_superpixels, num_superpixels}, torch::CUDA(torch::kInt32)); torch::Tensor edge_w = torch::zeros({num_superpixels, num_superpixels}, torch::CUDA(torch::kInt32)); const int nums = (num_superpixels + 32 -1) / 32; torch::Tensor replusive_matrix = torch::zeros({num_superpixels, nums}, torch::CUDA(torch::kInt32)); const int blocks2 = (num_superpixels*num_superpixels + kThreadsPerBlock - 1) / kThreadsPerBlock; classify_edges<<<blocks2, kThreadsPerBlock>>>( num_superpixels*num_superpixels, num_superpixels, nums, S_o, bnd_angle_diff.packed_accessor32<float,2,torch::RestrictPtrTraits>(), bnd_pair_nums.packed_accessor32<int,2,torch::RestrictPtrTraits>(), select_matrix.packed_accessor32<bool,2,torch::RestrictPtrTraits>(), edge_h.packed_accessor32<int,2,torch::RestrictPtrTraits>(), edge_w.packed_accessor32<int,2,torch::RestrictPtrTraits>(), replusive_matrix.contiguous().data_ptr<int>() ); bnd_angle_diff = bnd_angle_diff.masked_select(select_matrix); edge_h = edge_h.masked_select(select_matrix); edge_w = 
edge_w.masked_select(select_matrix); // diff small to large, sim large to small auto sort_index = bnd_angle_diff.argsort(); auto sorted_bnd_angle_diff = bnd_angle_diff.index({sort_index}); auto sorted_edge_h = edge_h.index({sort_index}); auto sorted_edge_w = edge_w.index({sort_index}); // connect edges sorted_bnd_angle_diff = sorted_bnd_angle_diff.to(torch::kCPU); sorted_edge_h = sorted_edge_h.to(torch::kCPU); sorted_edge_w = sorted_edge_w.to(torch::kCPU); replusive_matrix = replusive_matrix.to(torch::kCPU); unique_super_BPDs_counts = unique_super_BPDs_counts.to(torch::kCPU); unique_super_BPDs = unique_super_BPDs.to(torch::kCPU); return std::make_tuple(unique_super_BPDs_counts, sorted_edge_h, \ sorted_edge_w, sorted_bnd_angle_diff, replusive_matrix, unique_super_BPDs, \ roots, super_BPDs_before_dilation, super_BPDs_after_dilation, super_BPDs); } torch::Tensor bpd_cuda_final_step(const int height, const int width, torch::Tensor connect_marks, torch::Tensor edge_h, \ torch::Tensor edge_w, torch::Tensor unique_super_BPDs, torch::Tensor super_BPDs) { connect_marks = connect_marks.to(torch::kCUDA); edge_h = edge_h.to(torch::kCUDA); edge_w = edge_w.to(torch::kCUDA); unique_super_BPDs = unique_super_BPDs.to(torch::kCUDA); super_BPDs = super_BPDs.to(torch::kCUDA); const int num_edges = edge_h.numel(); const int kThreadsPerBlock = 1024; const int blocks = (num_edges + kThreadsPerBlock - 1) / kThreadsPerBlock; final_step<<<blocks, kThreadsPerBlock>>>( num_edges, connect_marks.packed_accessor32<uint8_t,1,torch::RestrictPtrTraits>(), edge_h.packed_accessor32<int,1,torch::RestrictPtrTraits>(), edge_w.packed_accessor32<int,1,torch::RestrictPtrTraits>(), unique_super_BPDs.packed_accessor32<int,1,torch::RestrictPtrTraits>(), super_BPDs.contiguous().data_ptr<int>() ); const int blocks2 = (height*width + kThreadsPerBlock - 1) / kThreadsPerBlock; get_super_BPDs_step2<<<blocks2, kThreadsPerBlock>>>( height*width, super_BPDs.contiguous().data_ptr<int>() ); super_BPDs = super_BPDs.reshape({height, width}); return super_BPDs; }
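The kernels above lean on FIND and UNION helpers that are defined elsewhere in the project (the labels stay 1-based by adding 1 after each find). A minimal lock-free union-find in the same spirit, with path halving in the find and CAS-based linking in the union; this is one common GPU formulation, not necessarily the exact helpers this file compiles against, and all names are illustrative:

// Lock-free union-find sketch: path halving + atomicCAS linking.
#include <cstdio>
#include <cuda_runtime.h>

__device__ int uf_find(int *parent, int i) {
    while (parent[i] != i) {
        parent[i] = parent[parent[i]];            // path halving
        i = parent[i];
    }
    return i;
}

__device__ void uf_union(int *parent, int a, int b) {
    while (true) {
        a = uf_find(parent, a);
        b = uf_find(parent, b);
        if (a == b) return;                        // already in one set
        if (a < b) { int t = a; a = b; b = t; }    // hang larger root under smaller
        // Link only if a is still its own root; otherwise re-find and retry.
        if (atomicCAS(&parent[a], a, b) == a) return;
    }
}

__global__ void init_parent(int *parent, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) parent[i] = i;
}

// Merge each element with its right neighbour, collapsing the row into one set.
__global__ void union_row(int *parent, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i + 1 < n) uf_union(parent, i, i + 1);
}

__global__ void flatten(int *parent, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) parent[i] = uf_find(parent, i);
}

int main() {
    const int n = 1 << 16;
    int *d_parent, h_last = -1;
    cudaMalloc(&d_parent, n * sizeof(int));
    const int threads = 256, blocks = (n + threads - 1) / threads;
    init_parent<<<blocks, threads>>>(d_parent, n);
    union_row  <<<blocks, threads>>>(d_parent, n);
    flatten    <<<blocks, threads>>>(d_parent, n);
    cudaMemcpy(&h_last, d_parent + n - 1, sizeof(int), cudaMemcpyDeviceToHost);
    std::printf("root of last element: %d (expected 0)\n", h_last);
    cudaFree(d_parent);
    return 0;
}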
the_stack
#include <gunrock/gunrock.h> // Utilities and correctness-checking #include <gunrock/util/test_utils.cuh> // Graph definations #include <gunrock/graphio/graphio.cuh> #include <gunrock/app/app_base.cuh> #include <gunrock/app/test_base.cuh> // single-source shortest path includes #include <gunrock/app/sage/sage_test.cuh> #include <gunrock/app/sage/sage_enactor.cuh> namespace gunrock { namespace app { namespace sage { cudaError_t UseParameters(util::Parameters &parameters) { cudaError_t retval = cudaSuccess; GUARD_CU(UseParameters_app(parameters)); GUARD_CU(UseParameters_problem(parameters)); GUARD_CU(UseParameters_enactor(parameters)); GUARD_CU(parameters.Use<std::string>( "Wf1", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, "", "<weight matrix for W^1 matrix in algorithm 2, feature part>\n" "\t dimension 64 by 128 for pokec;\n" "\t It should be child feature length by a value you want for W2 layer", __FILE__, __LINE__)); // GUARD_CU(parameters.Use<int>( // "Wf1-dim0", // util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | // util::OPTIONAL_PARAMETER, 64, "Wf1 matrix row dimension", // __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "Wf1-dim1", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, 128, "Wf1 matrix column dimension", __FILE__, __LINE__)); GUARD_CU(parameters.Use<std::string>( "Wa1", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, "", "<weight matrix for W^1 matrix in algorithm 2, aggregation part>\n" "\t dimension 64 by 128 for pokec;\n" "\t It should be leaf feature length by a value you want for W2 layer", __FILE__, __LINE__)); // GUARD_CU(parameters.Use<int>( // "Wa1-dim0", // util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, // 64, // "Wa1 matrix row dimension", // __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "Wa1-dim1", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, 128, "Wa1 matrix column dimension", __FILE__, __LINE__)); GUARD_CU(parameters.Use<std::string>( "Wf2", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, "", "<weight matrix for W^2 matrix in algorithm 2, feature part>\n" "\t dimension 256 by 128 for pokec;\n" "\t It should be source_temp length by output length", __FILE__, __LINE__)); // GUARD_CU(parameters.Use<int>( // "Wf2-dim0", // util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | // util::OPTIONAL_PARAMETER, 256, "Wf2 matrix row dimension", // __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "Wf2-dim1", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, 128, "Wf2 matrix column dimension", __FILE__, __LINE__)); GUARD_CU(parameters.Use<std::string>( "Wa2", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, "", "<weight matrix for W^2 matrix in algorithm 2, aggregation part>\n" "\t dimension 256 by 128 for pokec;\n" "\t It should be child_temp length by output length", __FILE__, __LINE__)); // GUARD_CU(parameters.Use<int>( // "Wa2-dim0", // util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, // 256, // "Wa2 matrix row dimension", // __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "Wa2-dim1", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, 128, "Wa2 matrix column dimension", __FILE__, __LINE__)); GUARD_CU(parameters.Use<std::string>( "features", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, "", "<features matrix>\n" "\t dimension |V| by 64 for pokec;\n", __FILE__, __LINE__)); 
GUARD_CU(parameters.Use<int>( "feature-column", util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER, 64, "feature column dimension", __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "num-children-per-source", // num_neigh1 util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER, 10, "number of sampled children per source", __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "num-leafs-per-child", // num_neight2 util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER, util::PreDefinedValues<int>::InvalidValue, "number of sampled leafs per child; default is the same as " "num-children-per-source", __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "batch-size", util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER, 65536, "number of source vertex to process in one iteration", __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "rand-seed", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, util::PreDefinedValues<int>::InvalidValue, "seed for random number generator; default will use time(NULL)", __FILE__, __LINE__)); GUARD_CU(parameters.Use<bool>( "custom-kernels", util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER, true, "whether to use custom CUDA kernels", __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "omp-threads", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, 32, "number of threads to run CPU reference", __FILE__, __LINE__)); return retval; } /** * @brief Run Sage tests * @tparam GraphT Type of the graph * @tparam ValueT Type of the distances * @param[in] parameters Excution parameters * @param[in] graph Input graph * @param[in] ref_distances Reference distances * @param[in] target Whether to perform the Sage * \return cudaError_t error message(s), if any */ template <typename GraphT, typename ValueT = typename GraphT::ValueT> cudaError_t RunTests(util::Parameters &parameters, GraphT &graph, // ValueT **ref_distances = NULL, util::Location target = util::DEVICE) { cudaError_t retval = cudaSuccess; typedef typename GraphT::VertexT VertexT; typedef typename GraphT::SizeT SizeT; // typedef typename GraphT::ValueT ValueT; typedef Problem<GraphT> ProblemT; typedef Enactor<ProblemT> EnactorT; util::CpuTimer cpu_timer, total_timer; cpu_timer.Start(); total_timer.Start(); // parse configurations from parameters bool quiet_mode = parameters.Get<bool>("quiet"); int num_runs = parameters.Get<int>("num-runs"); std::string validation = parameters.Get<std::string>("validation"); util::Info info("Sage", parameters, graph); // initialize Info structure // Allocate host-side array (for both reference and GPU-computed results) // Allocate problem and enactor on GPU, and initialize them ProblemT problem(parameters); EnactorT enactor; // util::PrintMsg("Before init"); GUARD_CU(problem.Init(graph, target)); GUARD_CU(enactor.Init(problem, target)); ValueT *h_source_result = new ValueT[((uint64_t)graph.nodes) * problem.data_slices[0][0].result_column]; // util::PrintMsg("After init"); cpu_timer.Stop(); parameters.Set("preprocess-time", cpu_timer.ElapsedMillis()); // info.preprocess_time = cpu_timer.ElapsedMillis(); // perform SAGE // VertexT src; for (int run_num = 0; run_num < num_runs; ++run_num) { // src = srcs[run_num % num_srcs]; GUARD_CU(problem.Reset(target)); GUARD_CU(enactor.Reset(target)); util::PrintMsg("__________________________", !quiet_mode); cpu_timer.Start(); GUARD_CU(enactor.Enact()); cpu_timer.Stop(); 
info.CollectSingleRun(cpu_timer.ElapsedMillis()); util::PrintMsg( "--------------------------\nRun " + std::to_string(run_num) + " elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) //+ " ms, src = "+ std::to_string(src) + " ms, #iterations = " + std::to_string(enactor.enactor_slices[0].enactor_stats.iteration), !quiet_mode); if (validation == "each") { GUARD_CU(problem.Extract(h_source_result)); SizeT num_errors = app::sage::Validate_Results( parameters, graph, h_source_result, problem.data_slices[0][0].result_column, false); } } cpu_timer.Start(); // Copy out results GUARD_CU(problem.Extract(h_source_result)); if (validation == "last") { SizeT num_errors = app::sage::Validate_Results( parameters, graph, h_source_result, problem.data_slices[0][0].result_column, true); } // compute running statistics info.ComputeTraversalStats(enactor, (VertexT *)NULL); // Display_Memory_Usage(problem); #ifdef ENABLE_PERFORMANCE_PROFILING // Display_Performance_Profiling(&enactor); #endif // Clean up GUARD_CU(enactor.Release(target)); GUARD_CU(problem.Release(target)); delete[] h_source_result; h_source_result = NULL; cpu_timer.Stop(); total_timer.Stop(); info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis()); return retval; } } // namespace sage } // namespace app } // namespace gunrock /* * @brief Entry of gunrock_sage function * @tparam GraphT Type of the graph * @tparam ValueT Type of the distances * @param[in] parameters Excution parameters * @param[in] graph Input graph * @param[out] distances Return shortest distance to source per vertex * @param[out] preds Return predecessors of each vertex * \return double Return accumulated elapsed times for all runs */ template <typename GraphT, typename ValueT = typename GraphT::ValueT> double gunrock_sage(gunrock::util::Parameters &parameters, GraphT &graph // ValueT **distances, // typename GraphT::VertexT **preds = NULL ) { typedef typename GraphT::VertexT VertexT; typedef gunrock::app::sage::Problem<GraphT> ProblemT; typedef gunrock::app::sage::Enactor<ProblemT> EnactorT; gunrock::util::CpuTimer cpu_timer; gunrock::util::Location target = gunrock::util::DEVICE; double total_time = 0; if (parameters.UseDefault("quiet")) parameters.Set("quiet", true); // Allocate problem and enactor on GPU, and initialize them ProblemT problem(parameters); EnactorT enactor; problem.Init(graph, target); enactor.Init(problem, target); // std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs"); int num_runs = parameters.Get<int>("num-runs"); // int num_srcs = srcs.size(); for (int run_num = 0; run_num < num_runs; ++run_num) { // int src_num = run_num % num_srcs; // VertexT src = srcs[src_num]; problem.Reset(target); enactor.Reset(target); cpu_timer.Start(); enactor.Enact(); cpu_timer.Stop(); total_time += cpu_timer.ElapsedMillis(); problem.Extract( /*distances[src_num], preds == NULL ? 
NULL : preds[src_num]*/);
  }

  enactor.Release(target);
  problem.Release(target);
  // srcs.clear();
  return total_time;
}

/*
 * @brief Simple interface that takes in a graph in CSR format
 * @param[in]  num_nodes   Number of vertices in the input graph
 * @param[in]  num_edges   Number of edges in the input graph
 * @param[in]  row_offsets CSR-formatted graph input row offsets
 * @param[in]  col_indices CSR-formatted graph input column indices
 * @param[in]  edge_values CSR-formatted graph input edge weights
 * @param[in]  num_runs    Number of runs to perform SAGE
 * @param[in]  sources     Sources to begin traversal from, one for each run
 * @param[in]  mark_preds  Whether to output predecessor info
 * @param[out] distances   Return shortest distance to source per vertex
 * @param[out] preds       Return predecessors of each vertex
 * \return     double      Return accumulated elapsed times for all runs
 */
template <typename VertexT = int, typename SizeT = int,
          typename GValueT = unsigned int, typename SAGEValueT = GValueT>
double sage(const SizeT num_nodes, const SizeT num_edges,
            const SizeT *row_offsets, const VertexT *col_indices,
            const GValueT *edge_values, const int num_runs
            // VertexT *sources,
            // const bool mark_pred,
            // SSSPValueT **distances,
            // VertexT **preds = NULL
) {
  typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
                                           gunrock::graph::HAS_EDGE_VALUES |
                                               gunrock::graph::HAS_CSR>
      GraphT;
  typedef typename GraphT::CsrT CsrT;

  // Setup parameters
  gunrock::util::Parameters parameters("sage");
  gunrock::graphio::UseParameters(parameters);
  gunrock::app::sage::UseParameters(parameters);
  gunrock::app::UseParameters_test(parameters);
  parameters.Parse_CommandLine(0, NULL);
  parameters.Set("graph-type", "by-pass");
  // parameters.Set("mark-pred", mark_pred);
  parameters.Set("num-runs", num_runs);
  // std::vector<VertexT> srcs;
  // for (int i = 0; i < num_runs; i ++)
  //   srcs.push_back(sources[i]);
  // parameters.Set("srcs", srcs);
  bool quiet = parameters.Get<bool>("quiet");

  GraphT graph;
  // Assign pointers into gunrock graph format
  graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
  graph.CsrT::row_offsets.SetPointer(row_offsets, num_nodes + 1,
                                     gunrock::util::HOST);
  graph.CsrT::column_indices.SetPointer(col_indices, num_edges,
                                        gunrock::util::HOST);
  graph.CsrT::edge_values.SetPointer(edge_values, num_edges,
                                     gunrock::util::HOST);
  // graph.FromCsr(graph.csr(), true, quiet);
  gunrock::graphio::LoadGraph(parameters, graph);

  // Run SAGE
  double elapsed_time = gunrock_sage(parameters, graph /*, distances, preds*/);

  // Cleanup
  graph.Release();
  // srcs.clear();

  return elapsed_time;
}

// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
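// ---------------------------------------------------------------------------
// Added usage sketch (hedged; not part of the original source): a minimal
// driver for the CSR-based sage() wrapper defined above. The 4-vertex toy
// graph, the explicit template arguments, and main() itself are illustrative
// assumptions; weight/feature matrices are left at the parameter defaults.
// ---------------------------------------------------------------------------
#include <cstdio>

int main() {
  // toy directed graph 0->1, 0->2, 1->2, 2->3, 3->0 in CSR form
  int row_offsets[5] = {0, 2, 3, 4, 5};           // num_nodes + 1 entries
  int col_indices[5] = {1, 2, 2, 3, 0};
  unsigned int edge_values[5] = {1, 1, 1, 1, 1};

  double elapsed = sage<int, int, unsigned int>(
      4 /*num_nodes*/, 5 /*num_edges*/,
      row_offsets, col_indices, edge_values, 1 /*num_runs*/);

  printf("SAGE elapsed: %f ms\n", elapsed);
  return 0;
}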
#include <cublas.h> #include <helper_cuda.h> #include <helper_timer.h> // uncomment if you do not use the viewer. //#define NOVIEWER #include "3dregistration.h" using namespace std; __global__ static void updateM(int rowsM, int colsM, int pitchM, float* d_Xx, float* d_Xy, float* d_Xz, float* d_Yx, float* d_Yy, float* d_Yz, float* d_R, float* d_t, float* d_M, float T_cur, float alpha){ int r = blockIdx.x * blockDim.x + threadIdx.x; int c = blockIdx.y * blockDim.y + threadIdx.y; // Shared memory __shared__ float XxShare[BLOCK_SIZE]; __shared__ float XyShare[BLOCK_SIZE]; __shared__ float XzShare[BLOCK_SIZE]; __shared__ float YxShare[BLOCK_SIZE]; __shared__ float YyShare[BLOCK_SIZE]; __shared__ float YzShare[BLOCK_SIZE]; __shared__ float RShare[9]; // BLOCK_SIZE >= 9 is assumed __shared__ float tShare[3]; // BLOCK_SIZE >= 3 is assumed if(threadIdx.y == 0) if(threadIdx.x < 9){ RShare[threadIdx.x] = d_R[threadIdx.x]; if(threadIdx.x < 3) tShare[threadIdx.x] = d_t[threadIdx.x]; } if(r < rowsM && c < colsM){ // check for only inside the matrix M if(threadIdx.y == 0){ XxShare[threadIdx.x] = d_Xx[r]; XyShare[threadIdx.x] = d_Xy[r]; XzShare[threadIdx.x] = d_Xz[r]; } if(threadIdx.x == 0){ YxShare[threadIdx.y] = d_Yx[c]; YyShare[threadIdx.y] = d_Yy[c]; YzShare[threadIdx.y] = d_Yz[c]; } __syncthreads(); #define Xx XxShare[threadIdx.x] #define Xy XyShare[threadIdx.x] #define Xz XzShare[threadIdx.x] #define Yx YxShare[threadIdx.y] #define Yy YyShare[threadIdx.y] #define Yz YzShare[threadIdx.y] #define R(i) RShare[i] #define t(i) tShare[i] // #define Euclid(a,b,c) ((a)*(a)+(b)*(b)+(c)*(c)) // float tmp = // Euclid(Xx - (R(0)*Yx + R(1)*Yy + R(2)*Yz + t(0)), // Xy - (R(3)*Yx + R(4)*Yy + R(5)*Yz + t(1)), // Xz - (R(6)*Yx + R(7)*Yy + R(8)*Yz + t(2)) ) - alpha; // tmp = expf(-tmp/T_cur) / sqrtf(T_cur); float tmpX = Xx - (R(0)*Yx + R(1)*Yy + R(2)*Yz + t(0)); float tmpY = Xy - (R(3)*Yx + R(4)*Yy + R(5)*Yz + t(1)); float tmpZ = Xz - (R(6)*Yx + R(7)*Yy + R(8)*Yz + t(2)); #undef Xx #undef Xy #undef Xz #undef Yx #undef Yy #undef Yz #undef R #undef t __syncthreads(); tmpX *= tmpX; tmpY *= tmpY; tmpZ *= tmpZ; tmpX += tmpY; tmpX += tmpZ; tmpX -= alpha; tmpX /= T_cur; tmpX = expf(-tmpX); tmpX /= sqrtf(T_cur); d_M[c * pitchM + r] = tmpX; } } __global__ static void normalizeMbySinkhorn_row(int rowsM, int colsM, int pitchM, float *d_M, const float *d_sumOfRow, float *d_m_outliers_row ){ int r = blockIdx.x * blockDim.x + threadIdx.x; int c = blockIdx.y * blockDim.y + threadIdx.y; // Shared memory __shared__ float sumOfRowShare[BLOCK_SIZE]; if(r < rowsM && c < colsM){ // check for only inside the matrix M if(threadIdx.y == 0) sumOfRowShare[threadIdx.x] = d_sumOfRow[r]; __syncthreads(); d_M[c * pitchM + r] /= sumOfRowShare[threadIdx.x]; if(c == 0) d_m_outliers_row[r] /= sumOfRowShare[threadIdx.x]; __syncthreads(); } } __global__ static void normalizeMbySinkhorn_col(int rowsM, int colsM, int pitchM, float *d_M, const float *d_sumOfCol, float *d_m_outliers_col ){ int r = blockIdx.x * blockDim.x + threadIdx.x; int c = blockIdx.y * blockDim.y + threadIdx.y; // Shared memory __shared__ float sumOfColShare[BLOCK_SIZE]; if(r < rowsM && c < colsM){ // check for only inside the matrix M if(threadIdx.x == 0) sumOfColShare[threadIdx.y] = d_sumOfCol[c]; __syncthreads(); d_M[c * pitchM + r] /= sumOfColShare[threadIdx.y]; if(r == 0) d_m_outliers_col[c] /= sumOfColShare[threadIdx.y]; __syncthreads(); } } __global__ static void elementwiseMultiplicationCopy(int rowsM, const float* d_Xx, const float* d_Xy, const float* d_Xz, const float* 
d_sumOfMRow, float* d_Xx_result, float* d_Xy_result, float* d_Xz_result){ int r = blockIdx.x * blockDim.x + threadIdx.x; float l_sumOfRow = d_sumOfMRow[r]; if(r < rowsM){ // check for only inside the matrix M d_Xx_result[r] = l_sumOfRow * d_Xx[r]; d_Xy_result[r] = l_sumOfRow * d_Xy[r]; d_Xz_result[r] = l_sumOfRow * d_Xz[r]; } } __global__ static void centeringXorY(int rowsM, const float* d_Xc, float sum, float* d_Xx_result, float* d_Xy_result, float* d_Xz_result){ // can be work for both row and column int r = blockIdx.x * blockDim.x + threadIdx.x; // Shared memory __shared__ float Xc[3]; if(threadIdx.x < 3) Xc[threadIdx.x] = d_Xc[threadIdx.x]; if(r < rowsM){ // check for only inside the matrix M __syncthreads(); d_Xx_result[r] -= Xc[0]; d_Xy_result[r] -= Xc[1]; d_Xz_result[r] -= Xc[2]; __syncthreads(); } } void softassign(const pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_target, const pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_source, float* h_R, float* h_t, const registrationParameters &param) { int Xsize, Ysize; float *h_X, *h_Y; cloud2data(cloud_target, &h_X, Xsize); cloud2data(cloud_source, &h_Y, Ysize); // // initialize paramters // int JMAX = param.JMAX; int I0 = param.I0; int I1 = param.I1; float T_cur = param.T_0; // current temprature float alpha = param.alpha; float TFACTR = param.TFACTOR; float moutlier = param.moutlier; // // initialize CUDA // findCudaDevice(param.argc, (const char**)param.argv); // // memory allocation // // example: memCUDA(Xx, Xsize); // declare d_Xx. no copy. #define memCUDA(var,num) \ float* d_ ## var; CUDA_SAFE_CALL(cudaMalloc((void**) &(d_ ## var), sizeof(float)*num)); // example: memHostToCUDA(Xx, Xsize); // declera d_Xx, then copy h_Xx to d_Xx. #define memHostToCUDA(var,num) \ float* d_ ## var; CUDA_SAFE_CALL(cudaMalloc((void**) &(d_ ## var), sizeof(float)*num)); \ CUDA_SAFE_CALL(cudaMemcpy(d_ ## var, h_ ## var, sizeof(float)*num, cudaMemcpyHostToDevice)); memHostToCUDA(X, Xsize*3); float* d_Xx = &d_X[Xsize*0]; float* d_Xy = &d_X[Xsize*1]; float* d_Xz = &d_X[Xsize*2]; memHostToCUDA(Y, Ysize*3); float* d_Yx = &d_Y[Ysize*0]; float* d_Yy = &d_Y[Ysize*1]; float* d_Yz = &d_Y[Ysize*2]; memCUDA(X_result, Xsize*3); float *d_Xx_result = &d_X_result[Xsize*0]; float *d_Xy_result = &d_X_result[Xsize*1]; float *d_Xz_result = &d_X_result[Xsize*2]; memCUDA(Y_result, Ysize*3); float *d_Yx_result = &d_Y_result[Ysize*0]; float *d_Yy_result = &d_Y_result[Ysize*1]; float *d_Yz_result = &d_Y_result[Ysize*2]; // center of X, Y float h_Xc[3], h_Yc[3]; memCUDA(Xc, 3); memCUDA(Yc, 3); // R, t memHostToCUDA(R, 3*3); memHostToCUDA(t, 3); CUDA_SAFE_CALL(cudaMemcpy(d_R, h_R, sizeof(float)*3*3, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_t, h_t, sizeof(float)*3, cudaMemcpyHostToDevice)); // S for finding R, t float h_S[9]; memCUDA(S, 9); // NOTE on matrix M // number of rows: Xsize, or rowsM // number of columns : Ysize, or colsM // // [0th in Y] [1st] ... [(Ysize-1)] // [0th point in X] [ M(0,0) M(0,1) ... M(0,Ysize-1) ] // [1st ] [ M(1,0) M(1,1) ... ] // ... [ ... ] // [(Xsize-1) ] [ M(Xsize-1, 0) ... M(Xsize-1,Ysize-1)] // // // CAUTION on matrix M // M is allcoated as a column-maijor format for the use of cublas. // This means that you must acces an element at row r and column c as: // M(r,c) = M[c * pitchM + r] int rowsM = Xsize; int colsM = Ysize; // pitchM: leading dimension of M, which is ideally equal to rowsM, // but actually larger than that. 
int pitchM = (rowsM / 4 + 1) * 4; memCUDA(M, pitchM*colsM); // fprintf(stderr, "rowsM, rowsM*sizeof(float), colsM : %d %d %d\n", // rowsM, rowsM * sizeof(float), colsM) memCUDA(D, 3*rowsM); // temporary vector // a vector with all elements of 1.0f float* h_one = new float [max(Xsize,Ysize)]; for(int t = 0; t < max(Xsize,Ysize); t++) h_one[t] = 1.0f; memHostToCUDA(one, max(Xsize,Ysize)); // vector with all elements of 1 memCUDA(sumOfMRow, rowsM); memCUDA(sumOfMCol, colsM); float* h_m_outliers_row = new float [rowsM]; float* h_m_outliers_col = new float [colsM]; for(int i = 0; i < rowsM; i++) h_m_outliers_row[i] = moutlier; for(int i = 0; i < colsM; i++) h_m_outliers_col[i] = moutlier; memHostToCUDA(m_outliers_row, rowsM); memHostToCUDA(m_outliers_col, colsM); // // threads // // for 2D block dim3 dimBlockForM(BLOCK_SIZE, BLOCK_SIZE); // a block is (BLOCK_SIZE*BLOCK_SIZE) threads dim3 dimGridForM( (pitchM + dimBlockForM.x - 1) / dimBlockForM.x, (colsM + dimBlockForM.y - 1) / dimBlockForM.y); // for 1D block int threadsPerBlockForYsize = 512; // a block is 512 threads int blocksPerGridForYsize = (Ysize + threadsPerBlockForYsize - 1 ) / threadsPerBlockForYsize; int threadsPerBlockForXsize = 512; // a block is 512 threads int blocksPerGridForXsize = (Xsize + threadsPerBlockForXsize - 1 ) / threadsPerBlockForYsize; // // timer // #define START_TIMER(timer) \ if(!param.notimer){ \ CUDA_SAFE_CALL( cudaThreadSynchronize() );\ CUT_SAFE_CALL(sdkStartTimer(&timer)); \ } #define STOP_TIMER(timer) \ if(!param.notimer){ \ CUDA_SAFE_CALL( cudaThreadSynchronize() );\ CUT_SAFE_CALL(sdkStopTimer(&timer)); \ } // timers StopWatchInterface *timerTotal, *timerUpdateM, *timerShinkhorn, *timerSumM, *timerGetWeightedXY, *timerGetXcYc, *timerCenteringXY, *timerFindS, *timerAfterSVD, *timerRT, *timerShinkhorn1, *timerShinkhorn2, *timerShinkhorn3; if(!param.notimer){ CUT_SAFE_CALL(sdkCreateTimer(&timerUpdateM)); CUT_SAFE_CALL(sdkCreateTimer(&timerShinkhorn)); CUT_SAFE_CALL(sdkCreateTimer(&timerShinkhorn1)); CUT_SAFE_CALL(sdkCreateTimer(&timerShinkhorn2)); CUT_SAFE_CALL(sdkCreateTimer(&timerShinkhorn3)); CUT_SAFE_CALL(sdkCreateTimer(&timerSumM)); CUT_SAFE_CALL(sdkCreateTimer(&timerGetWeightedXY)); CUT_SAFE_CALL(sdkCreateTimer(&timerGetXcYc)); CUT_SAFE_CALL(sdkCreateTimer(&timerCenteringXY)); CUT_SAFE_CALL(sdkCreateTimer(&timerFindS)); CUT_SAFE_CALL(sdkCreateTimer(&timerAfterSVD)); CUT_SAFE_CALL(sdkCreateTimer(&timerRT)); } CUT_SAFE_CALL(sdkCreateTimer(&timerTotal)); CUDA_SAFE_CALL( cudaThreadSynchronize() ); CUT_SAFE_CALL(sdkStartTimer(&timerTotal)); // // initializing cublas // cublasInit(); // // softassign main loop // for(int Titer = 1; Titer <= JMAX; Titer++){ fprintf(stderr, "%d iter. temp. 
%f ", Titer, T_cur); fprintf(stderr, "time %.10f [s]\n", sdkGetTimerValue(&timerTotal) / 1000.0f); // inner loop with the same temperature for(int iter0 = 0; iter0 < I0; iter0++){ // // UpdateM // START_TIMER(timerUpdateM); updateM <<< dimGridForM, dimBlockForM >>> (rowsM, colsM, pitchM, d_Xx, d_Xy, d_Xz, d_Yx, d_Yy, d_Yz, d_R, d_t, d_M, T_cur, alpha); STOP_TIMER(timerUpdateM); // // Normalization of M by Shinkhorn // START_TIMER(timerShinkhorn); // shinkhorn loop until M converges for (int Sinkh_iter = 0; Sinkh_iter < I1; Sinkh_iter++){ // // row normalization // START_TIMER(timerShinkhorn1); // cublasSgemv (char trans, int m, int n, float alpha, const float *A, int lda, // const float *x, int incx, float beta, float *y, int incy) // y = alpha * op(A) * x + beta * y, // M * one vector = vector with elements of row-wise sum // d_M * d_one => d_sumOfMRow //(rowsM*colsM) * (colsM*1) = (rowsM*1) cublasSgemv('n', // char trans rowsM, colsM, // int m (rows of A), n (cols of A) ; not op(A) 1.0f, // float alpha d_M, pitchM, // const float *A, int lda d_one, 1, // const float *x, int incx 0.0f, // float beta d_sumOfMRow, 1); // float *y, int incy STOP_TIMER(timerShinkhorn1); START_TIMER(timerShinkhorn2); // void cublasSaxpy (int n, float alpha, const float *x, int incx, float *y, int incy) // alpha * x + y => y // m_outliers_row + d_sumOfMRow => d_sumOfMRow cublasSaxpy(rowsM, 1.0f, d_m_outliers_row, 1, d_sumOfMRow, 1); STOP_TIMER(timerShinkhorn2); START_TIMER(timerShinkhorn3); normalizeMbySinkhorn_row <<< dimGridForM, dimBlockForM >>> (rowsM, colsM, pitchM, d_M, d_sumOfMRow, d_m_outliers_row); STOP_TIMER(timerShinkhorn3); // // column normalization // // cublasSgemv (char trans, int m, int n, float alpha, const float *A, int lda, // const float *x, int incx, float beta, float *y, int incy) // y = alpha * op(A) * x + beta * y, // M * one vector = vector with elements of column-wise sum // d_M^T * d_one => d_sumOfMCol //(coslM*rowsM) * (rowsM*1) = (colsM*1) cublasSgemv('t', // char trans rowsM, colsM, // int m (rows of A), n (cols of A) ; not op(A) 1.0f, // float alpha d_M, pitchM, // const float *A, int lda d_one, 1, // const float *x, int incx 0.0f, // float beta d_sumOfMCol, 1); // float *y, int incy // void cublasSaxpy (int n, float alpha, const float *x, int incx, float *y, int incy) // alpha * x + y => y // m_outliers_col + d_sumOfMCol => d_sumOfMCol cublasSaxpy(colsM, 1.0f, d_m_outliers_col, 1, d_sumOfMCol, 1); normalizeMbySinkhorn_col <<< dimGridForM, dimBlockForM >>> (rowsM, colsM, pitchM, d_M, d_sumOfMCol, d_m_outliers_col); } STOP_TIMER(timerShinkhorn); // // update R,T // ///////////////////////////////////////////////////////////////////////////////////// // compute sum of all elements in M START_TIMER(timerSumM); // cublasSgemv (char trans, int m, int n, float alpha, const float *A, int lda, // const float *x, int incx, float beta, float *y, int incy) // y = alpha * op(A) * x + beta * y, // M * one vector = vector with elements of row-wise sum // d_M * d_one => d_sumOfMRow //(rowsM*colsM) * (colsM*1) = (rowsM*1) cublasSgemv('n', // char trans rowsM, colsM, // int m (rows of A), n (cols of A) ; not op(A) 1.0f, // float alpha d_M, pitchM, // const float *A, int lda d_one, 1, // const float *x, int incx 0.0f, // float beta d_sumOfMRow, 1); // float *y, int incy //sum of M // float cublasSasum (int n, const float *x, int incx) // computes the sum of the absolute values of the elements float sumM = cublasSasum (rowsM, d_sumOfMRow, 1); // sum of all elements in M, assuming that all are 
positive. STOP_TIMER(timerSumM); ///////////////////////////////////////////////////////////////////////////////////// // compute weighted X and Y START_TIMER(timerGetWeightedXY); // X .* sumOfRow => X_result elementwiseMultiplicationCopy <<< blocksPerGridForXsize, threadsPerBlockForXsize>>> (rowsM, d_Xx, d_Xy, d_Xz, d_sumOfMRow, d_Xx_result, d_Xy_result, d_Xz_result); // Y .* sumOfCol => Y_result elementwiseMultiplicationCopy <<< blocksPerGridForYsize, threadsPerBlockForYsize>>> (colsM, d_Yx, d_Yy, d_Yz, d_sumOfMCol, d_Yx_result, d_Yy_result, d_Yz_result); STOP_TIMER(timerGetWeightedXY); ///////////////////////////////////////////////////////////////////////////////////// // find weighted center of X' and Y START_TIMER(timerGetXcYc); // cublasSasum can not be used for summing up a vector // because it is ABS sum, not just sum. // cublasSgemv (char trans, int m, int n, float alpha, const float *A, int lda, // const float *x, int incx, float beta, float *y, int incy) // y = alpha * op(A) * x + beta * y, // d_X_result^T * d_one => h_Xc // (3 * rowsM) (rowsM * 1) = (3 * 1) cublasSgemv('t', // char trans rowsM, 3, // int m (rows of A), n (cols of A) ; not op(A) 1.0f, // float alpha d_X_result, rowsM, // const float *A, int lda d_one, 1, // const float *x, int incx 0.0f, // float beta d_Xc, 1); // float *y, int incy // d_Y_result^T * d_one => h_Yc // (3 * colsM) (colM * 1) = (3 * 1) cublasSgemv('t', // char trans colsM, 3, // int m (rows of A), n (cols of A) ; not op(A) 1.0f, // float alpha d_Y_result, colsM, // const float *A, int lda d_one, 1, // const float *x, int incx 0.0f, // float beta d_Yc, 1); // float *y, int incy // void cublasSscal (int n, float alpha, float *x, int incx) // it replaces x[ix + i * incx] with alpha * x[ix + i * incx] cublasSscal (3, 1/sumM, d_Xc, 1); cublasSscal (3, 1/sumM, d_Yc, 1); CUDA_SAFE_CALL(cudaMemcpy(h_Xc, d_Xc, sizeof(float)*3, cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaMemcpy(h_Yc, d_Yc, sizeof(float)*3, cudaMemcpyDeviceToHost)); STOP_TIMER(timerGetXcYc); ///////////////////////////////////////////////////////////////////////////////////// // centering X and Y START_TIMER(timerCenteringXY); centeringXorY <<< blocksPerGridForXsize, threadsPerBlockForXsize>>> (rowsM, d_Xc, sumM, d_Xx_result, d_Xy_result, d_Xz_result); centeringXorY <<< blocksPerGridForYsize, threadsPerBlockForYsize>>> (colsM, d_Yc, sumM, d_Yx_result, d_Yy_result, d_Yz_result); STOP_TIMER(timerCenteringXY); ///////////////////////////////////////////////////////////////////////////////////// // compute S START_TIMER(timerFindS) // S = d_X_result^T * d_M * d_Y_result // cublasSgemm (char transa, char transb, int m, int n, int k, float alpha, // const float *A, int lda, const float *B, int ldb, float beta, // float *C, int ldc) // C = alpha * op(A) * op(B) + beta * C, // // m number of rows of matrix op(A) and rows of matrix C // n number of columns of matrix op(B) and number of columns of C // k number of columns of matrix op(A) and number of rows of op(B) // d_M * d_Y_result => d_D //(rowsM*colsM) * (colsM*3) = (rowsM*3) // m * k k * n m * n cublasSgemm('n', 'n', rowsM, 3, colsM, 1.0f, d_M, pitchM, d_Y_result, colsM, 0.0f, d_D, rowsM); // d_X_result^T * d_D => d_S // (3*rowsM) * (rowsM*3) = (3*3) // m * k k * n m * n cublasSgemm('t', 'n', 3, 3, rowsM, 1.0f, d_X_result, rowsM, d_D, rowsM, 0.0f, d_S, 3); CUDA_SAFE_CALL(cudaMemcpy(h_S, d_S, sizeof(float)*9, cudaMemcpyDeviceToHost)); STOP_TIMER(timerFindS); ///////////////////////////////////////////////////////////////////////////////////// // 
find RT from S START_TIMER(timerAfterSVD); findRTfromS(h_Xc, h_Yc, h_S, h_R, h_t); STOP_TIMER(timerAfterSVD); ///////////////////////////////////////////////////////////////////////////////////// // copy R,t to device START_TIMER(timerRT); CUDA_SAFE_CALL(cudaMemcpy(d_R, h_R, sizeof(float)*3*3, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_t, h_t, sizeof(float)*3, cudaMemcpyHostToDevice)); STOP_TIMER(timerRT); ///////////////////////////////////////////////////////////////////////////////////// #ifndef NOVIEWER if(!param.noviewer){ Eigen::Matrix4f transformation; transformation << h_R[0], h_R[1], h_R[2], h_t[0], h_R[3], h_R[4], h_R[5], h_t[1], h_R[6], h_R[7], h_R[8], h_t[2], 0, 0, 0, 1; pcl::transformPointCloud ( *param.cloud_source, *param.cloud_source_trans, transformation ); param.viewer->updatePointCloud ( param.cloud_source_trans, *param.source_trans_color, "source trans" ); param.viewer->spinOnce(); } #endif } T_cur = T_cur*TFACTR; } CUDA_SAFE_CALL( cudaThreadSynchronize() ); CUT_SAFE_CALL(sdkStopTimer(&timerTotal)); fprintf(stderr, "comping time: %.10f [s]\n", sdkGetTimerValue(&timerTotal) / 1000.0f); if(!param.notimer){ fprintf(stderr, "Average %.10f [s] for %s\n", sdkGetAverageTimerValue(&timerUpdateM) / 1000.0f, "updateM"); fprintf(stderr, "Average %.10f [s] for %s\n", sdkGetAverageTimerValue(&timerShinkhorn)/ 1000.0f, "shinkhorn"); fprintf(stderr, "Average %.10f [s] for %s\n", sdkGetAverageTimerValue(&timerShinkhorn1)/ 1000.0f, "shinkhorn1"); fprintf(stderr, "Average %.10f [s] for %s\n", sdkGetAverageTimerValue(&timerShinkhorn2)/ 1000.0f, "shinkhorn2"); fprintf(stderr, "Average %.10f [s] for %s\n", sdkGetAverageTimerValue(&timerShinkhorn3)/ 1000.0f, "shinkhorn3"); fprintf(stderr, "Average %.10f [s] for %s\n", sdkGetAverageTimerValue(&timerSumM) / 1000.0f, "SumM"); fprintf(stderr, "Average %.10f [s] for %s\n", sdkGetAverageTimerValue(&timerGetWeightedXY) / 1000.0f, "getMXY"); fprintf(stderr, "Average %.10f [s] for %s\n", sdkGetAverageTimerValue(&timerGetXcYc) / 1000.0f, "getXcYc"); fprintf(stderr, "Average %.10f [s] for %s\n", sdkGetAverageTimerValue(&timerCenteringXY) / 1000.0f, "getNewXY"); fprintf(stderr, "Average %.10f [s] for %s\n", sdkGetAverageTimerValue(&timerFindS) / 1000.0f, "findS"); fprintf(stderr, "Average %.10f [s] for %s\n", sdkGetAverageTimerValue(&timerAfterSVD) / 1000.0f, "afterSVD"); fprintf(stderr, "Average %.10f [s] for %s\n", sdkGetAverageTimerValue(&timerRT) / 1000.0f, "RT"); CUT_SAFE_CALL(sdkDeleteTimer(&timerTotal)); CUT_SAFE_CALL(sdkDeleteTimer(&timerUpdateM)); CUT_SAFE_CALL(sdkDeleteTimer(&timerShinkhorn)); CUT_SAFE_CALL(sdkDeleteTimer(&timerShinkhorn1)); CUT_SAFE_CALL(sdkDeleteTimer(&timerShinkhorn2)); CUT_SAFE_CALL(sdkDeleteTimer(&timerShinkhorn3)); CUT_SAFE_CALL(sdkDeleteTimer(&timerSumM)); CUT_SAFE_CALL(sdkDeleteTimer(&timerGetWeightedXY)); CUT_SAFE_CALL(sdkDeleteTimer(&timerGetXcYc)); CUT_SAFE_CALL(sdkDeleteTimer(&timerCenteringXY)); CUT_SAFE_CALL(sdkDeleteTimer(&timerFindS)); CUT_SAFE_CALL(sdkDeleteTimer(&timerAfterSVD)); } cublasShutdown(); CUDA_SAFE_CALL(cudaFree(d_Xx)); CUDA_SAFE_CALL(cudaFree(d_Yx)); CUDA_SAFE_CALL(cudaFree(d_R)); CUDA_SAFE_CALL(cudaFree(d_t)); CUDA_SAFE_CALL(cudaFree(d_M)); CUDA_SAFE_CALL(cudaFree(d_D)); CUDA_SAFE_CALL(cudaFree(d_S)); CUDA_SAFE_CALL(cudaFree(d_one)); CUDA_SAFE_CALL(cudaFree(d_sumOfMRow)); CUDA_SAFE_CALL(cudaFree(d_sumOfMCol)); CUDA_SAFE_CALL(cudaFree(d_X_result)); CUDA_SAFE_CALL(cudaFree(d_Y_result)); CUDA_SAFE_CALL(cudaFree(d_m_outliers_row)); CUDA_SAFE_CALL(cudaFree(d_m_outliers_col)); delete [] 
h_m_outliers_row; delete [] h_m_outliers_col; delete [] h_one; CUDA_SAFE_CALL( cudaThreadExit() ); }
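// ---------------------------------------------------------------------------
// Added illustration (hedged; not part of the original source): a host-side
// reference of one Sinkhorn sweep, mirroring the cublasSgemv/cublasSaxpy and
// normalizeMbySinkhorn_row/_col sequence used above. M is stored column-major
// with leading dimension pitchM, i.e. M(r,c) = M[c*pitchM + r], and the
// outlier row/column entries participate in the normalization. The function
// name and the std::vector storage are assumptions made for this sketch only.
// ---------------------------------------------------------------------------
#include <vector>

static void sinkhornSweepHost(std::vector<float> &M, int rowsM, int colsM,
                              int pitchM,
                              std::vector<float> &m_outliers_row,  // size rowsM
                              std::vector<float> &m_outliers_col)  // size colsM
{
  // row normalization: divide each row (and its outlier entry) by the row sum
  for (int r = 0; r < rowsM; r++) {
    float sum = m_outliers_row[r];
    for (int c = 0; c < colsM; c++) sum += M[c*pitchM + r];
    for (int c = 0; c < colsM; c++) M[c*pitchM + r] /= sum;
    m_outliers_row[r] /= sum;
  }

  // column normalization: divide each column (and its outlier entry) by the
  // column sum
  for (int c = 0; c < colsM; c++) {
    float sum = m_outliers_col[c];
    for (int r = 0; r < rowsM; r++) sum += M[c*pitchM + r];
    for (int r = 0; r < rowsM; r++) M[c*pitchM + r] /= sum;
    m_outliers_col[c] /= sum;
  }
}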
#include "FillCurve.h" #include <iostream> using namespace std; // 宏:DEBUG_IMG // 定义是否输出中间图像调试信息 // #define DEBUG_IMG // 宏:DEBUG_TIME // 定义是否输出时间调试信息 // #define DEBUG_TIME // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块的尺寸。 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // 宏:CUDA_PIXEL_GLO(x,y) // 获取全局内存中内核函数中图像中(x,y)像素的位置 #define CUDA_PIXEL_GLO(x,y) imgcud.imgMeta.imgData[(y)*imgcud.pitchBytes+(x)] // 宏:CUDA_VALID_GLO(x,y) // 判断全局内存中内核函数中(x,y)像素的位置是否合法 #define CUDA_VALID_GLO(x,y) (x>=0 && x<imgcud.imgMeta.width && y>=0 && y<imgcud.imgMeta.height) // 宏:CUDA_PIXEL_SHR(x,y) // 获取共享内存中内核函数中图像中(x,y)像素的位置 //#define CUDA_PIXEL_SHR(x,y) imgcud.imgMeta.imgData[(y)*imgcud.pitchBytes+(x)] #define CUDA_PIXEL_SHR(x,y) shareImg[(y)*w+(x)] // 宏:CUDA_VALID_SHR(x,y) // 判断共享内存内核函数中(x,y)像素的位置是否合法 #define CUDA_VALID_SHR(x,y) (x>=0 && x<w && y>=0 && y<h) // 宏:CUDA_STACK_SIZE // 自定义的cuda栈最大容量,根据测试,不太复杂的图像,最大深度为4,因此最大值定义12足够 #define CUDA_STACK_SIZE 12 //--------------------------内核方法声明------------------------------------ // Kernel 函数:_seedScanLineOutConGlobalKer(并行的种子扫描线算法,种子在轮廓外部) // 全局内存版 static __global__ void _seedScanLineOutConGlobalKer( ImageCuda imgcud, // 要填充的轮廓图像 int arrayxlen ); // Kernel 函数:_seedScanLineOutConShareKer(并行的种子扫描线算法,种子在轮廓外部) // 共享内存版 static __global__ void _seedScanLineOutConShareKer( ImageCuda imgcud, // 要填充的轮廓图像 int threadsize ); // Kernel 函数:_intersectionKer(求两幅图像交,结果放入outbordercud中) static __global__ void _intersectionKer( ImageCuda outborderCud, // 外轮廓被填充过后的图像 ImageCuda inborderCud // 内轮廓被填充过后的图像 ); //--------------------------内核方法实现------------------------------------ // Kernel 函数:_seedScanLineOutConGlobalKer(并行的种子扫描线算法,种子在轮廓外部) static __global__ void _seedScanLineOutConGlobalKer( ImageCuda imgcud, // 要填充的轮廓图像 int len ){ // 计算线程对应的输出点的位置的 x 和 y 分量 int x = blockIdx.x * blockDim.x + threadIdx.x; int seedx,seedy; // 线程号转换成顺时针种子点编号,根据编号,计算该点坐标 int w=imgcud.imgMeta.width; int h=imgcud.imgMeta.height; // 超过线程范围的线程退出 if(x>=len) return; if (x<w) { seedy=0; seedx=x; } else if(x< w+h-1){ seedx=w-1; seedy=x-(w-1); } else if(x< w*2+h-2){ seedx=x-(w+h-2); seedy=h-1; } else { seedx=0; seedy=x-(2*w+h-2); } // 如果得到的种子超过图像范围,或者不是背景点(可能是轮廓点或者已经被其他线程 // 填充,则直接退出) if(seedx>=imgcud.imgMeta.width || seedy >= imgcud.imgMeta.height || CUDA_PIXEL_GLO(seedx,seedy) != BK_COLOR) return; // 填充工作 // 输入:轮廓线workimg,种子seed; // 输出:填充过的workimg int cudastack[CUDA_STACK_SIZE]; int stackptr=0; int xtemp,xright,xleft; int spanfill; // 种子入栈 cudastack[stackptr++]=seedx; cudastack[stackptr++]=seedy; // stackptr==0表示栈为空,>0说明栈不空,每个像素点占用2个位置 while(stackptr>0){ int curx,cury; // 统计堆栈最大深度 //if(stackptr>stackmaxsize[0]) //stackmaxsize[0]=stackptr; // 入栈顺序x、y,出栈顺序应y、x。 cury=cudastack[--stackptr]; curx=cudastack[--stackptr]; // 填充当前点 CUDA_PIXEL_GLO(curx,cury)=BORDER_COLOR; // 向右填充,填充过程中检测当前点坐标 for(xtemp=curx+1;CUDA_VALID_GLO(xtemp,cury)&&CUDA_PIXEL_GLO(xtemp,cury)!=BORDER_COLOR;xtemp++){ CUDA_PIXEL_GLO(xtemp,cury)=BORDER_COLOR;} //纪录当前线段最右位置 xright=xtemp-1; // 向左填充 for(xtemp=curx-1;CUDA_VALID_GLO(xtemp,cury)&&CUDA_PIXEL_GLO(xtemp,cury)!=BORDER_COLOR;xtemp--){ CUDA_PIXEL_GLO(xtemp,cury)=BORDER_COLOR; } // 纪录当前线段最左位置 xleft=xtemp+1; //cout<<"hang:"<<cury<<"["<<xleft<<","<<xright<<"]"<<endl; // 下方相邻扫描线,从左向右扫描 xtemp=xleft; cury++; // 每次循环把一个线段种子放入堆栈(一条扫描线中可能多个线段) while(xtemp<=xright && cury>=0 && cury<imgcud.imgMeta.height){ spanfill=0; // 找到一个线段的最右点 while(CUDA_PIXEL_GLO(xtemp,cury)!=BORDER_COLOR && xtemp<=xright){ spanfill=1; xtemp++; } // 最右点(xtemp-1,cury)入栈 if(spanfill==1){ cudastack[stackptr++]=xtemp-1; cudastack[stackptr++]=cury; } // 
继续向右走,跳过边界和已经填充部分,找到下一段未填充线段 while( xtemp<=xright && CUDA_PIXEL_GLO(xtemp,cury)==BORDER_COLOR) xtemp++; } // 下方扫描线结束 //上方相邻扫描线 xtemp=xleft; cury-=2; // 循环一次,把一个线段种子放入堆栈(一条扫描线中可能多个线段) while(xtemp<=xright && cury>=0 && cury<imgcud.imgMeta.height){ spanfill=0; // 找到一个线段的最右点 while( xtemp<=xright && CUDA_PIXEL_GLO(xtemp,cury)!=BORDER_COLOR ){ spanfill=1; xtemp++; } // 最右点入栈 if(spanfill==1){ cudastack[stackptr++]=xtemp-1; cudastack[stackptr++]=cury; } // 继续向右走,跳过边界和已经填充部分,找到下一段未填充线段 while(CUDA_PIXEL_GLO(xtemp,cury)==BORDER_COLOR && xtemp<=xright) xtemp++; } // 上方扫描线结束 }// 填充结束 return ; } // Kernel 函数:_seedScanLineOutConShareKer(并行的种子扫描线算法,种子在轮廓外部) static __global__ void _seedScanLineOutConShareKer( ImageCuda imgcud, // 要填充的轮廓图像 int threadsize ){ // 计算线程对应的输出点的位置的 x 和 y 分量 int x = threadIdx.x; int seedx,seedy; // 图像放入共享内存,动态申请,大小由参数决定,和在全局内存中的大小一致 // 超过线程范围的线程退出 // if(x>=threadsize) return; extern __shared__ unsigned char shareImg[]; int w=imgcud.imgMeta.width; int h=imgcud.imgMeta.height; // 图像拷贝到共享内存 int imgarraysize=w*h; // 计算每个线程需要负责多少个像素点的复制 register int stride=imgarraysize/threadsize+1; // 本线程负责的像素点在数组中的开始位置下标 register int beginIdx=x*stride; register int ny,nx; if (beginIdx<imgarraysize){ // 本线程负责的像素点开始坐标(nx,ny) ny=beginIdx / w; nx=beginIdx % w; // 从(nx,ny)开始的 stride 个像素点从全局内存中复制到共享内存中。 // 注意:全局内存总的pitchBytes for(int i=0;i<stride;i++){ // 末尾可能越界的像素点跳过 if(ny*w+nx+i>=imgarraysize) break; shareImg[ny*w+nx+i]=imgcud.imgMeta.imgData[ny*imgcud.pitchBytes+nx+i]; } } __syncthreads(); // 线程号转换成顺时针种子点编号,根据编号,计算该点坐标 if (x<w) { seedy=0; seedx=x; } else if(x< w+h-1){ seedx=w-1; seedy=x-(w-1); } else if(x< w*2+h-2){ seedx=x-(w+h-2); seedy=h-1; } else { seedx=0; seedy=x-(2*w+h-2); } // 如果得到的种子超过图像范围,或者不是背景点(可能是轮廓点或者已经被其他线程 // 填充,则直接退出) if(seedx>=imgcud.imgMeta.width || seedy >= imgcud.imgMeta.height || CUDA_PIXEL_SHR(seedx,seedy) != BK_COLOR) return; // 填充工作 // 输入:轮廓线workimg,种子seed; // 输出:填充过的workimg int cudastack[CUDA_STACK_SIZE]; int stackptr=0; int xtemp,xright,xleft; int spanfill; // 种子入栈 cudastack[stackptr++]=seedx; cudastack[stackptr++]=seedy; // stackptr==0表示栈为空,>0说明栈不空,每个像素点占用2个位置 while(stackptr>0){ int curx,cury; // 统计堆栈最大深度 //if(stackptr>stackmaxsize[0]) //stackmaxsize[0]=stackptr; // 入栈顺序x、y,出栈顺序应y、x。 cury=cudastack[--stackptr]; curx=cudastack[--stackptr]; // 填充当前点 CUDA_PIXEL_SHR(curx,cury)=BORDER_COLOR; // 向右填充,填充过程中检测当前点坐标 for(xtemp=curx+1;CUDA_VALID_SHR(xtemp,cury)&&CUDA_PIXEL_SHR(xtemp,cury)!=BORDER_COLOR;xtemp++){ CUDA_PIXEL_SHR(xtemp,cury)=BORDER_COLOR;} //纪录当前线段最右位置 xright=xtemp-1; // 向左填充 for(xtemp=curx-1;CUDA_VALID_SHR(xtemp,cury)&&CUDA_PIXEL_SHR(xtemp,cury)!=BORDER_COLOR;xtemp--){ CUDA_PIXEL_SHR(xtemp,cury)=BORDER_COLOR; } // 纪录当前线段最左位置 xleft=xtemp+1; //cout<<"hang:"<<cury<<"["<<xleft<<","<<xright<<"]"<<endl; // 下方相邻扫描线,从左向右扫描 xtemp=xleft; cury++; // 每次循环把一个线段种子放入堆栈(一条扫描线中可能多个线段) while(xtemp<=xright && cury>=0 && cury<imgcud.imgMeta.height){ spanfill=0; // 找到一个线段的最右点 while(CUDA_PIXEL_SHR(xtemp,cury)!=BORDER_COLOR && xtemp<=xright){ spanfill=1; xtemp++; } // 最右点(xtemp-1,cury)入栈 if(spanfill==1){ cudastack[stackptr++]=xtemp-1; cudastack[stackptr++]=cury; } // 继续向右走,跳过边界和已经填充部分,找到下一段未填充线段 while( xtemp<=xright && CUDA_PIXEL_SHR(xtemp,cury)==BORDER_COLOR) xtemp++; } // 下方扫描线结束 //上方相邻扫描线 xtemp=xleft; cury-=2; // 循环一次,把一个线段种子放入堆栈(一条扫描线中可能多个线段) while(xtemp<=xright && cury>=0 && cury<imgcud.imgMeta.height){ spanfill=0; // 找到一个线段的最右点 while( xtemp<=xright && CUDA_PIXEL_SHR(xtemp,cury)!=BORDER_COLOR ){ spanfill=1; xtemp++; } // 最右点入栈 if(spanfill==1){ cudastack[stackptr++]=xtemp-1; 
cudastack[stackptr++]=cury; } // 继续向右走,跳过边界和已经填充部分,找到下一段未填充线段 while(CUDA_PIXEL_SHR(xtemp,cury)==BORDER_COLOR && xtemp<=xright) xtemp++; } // 上方扫描线结束 }// 填充结束 // 全部线程填充结束后,图像拷贝回全局内存。 __syncthreads(); // 计算填充主题时间间隔 // 从(nx,ny)开始的 stride 个像素点从全局内存中复制到共享内存中。 // 注意:全局内存总的pitchBytes if (beginIdx<imgarraysize) for(int i=0;i<stride;i++){ // 末尾可能越界的像素点跳过 if(ny*w+nx+i>=imgarraysize) break; imgcud.imgMeta.imgData[ny*imgcud.pitchBytes+nx+i]=shareImg[ny*w+nx+i]; } return ; } // Kernel 函数:_intersectionKer(求两幅图像交,结果放入outbordercud中) static __global__ void _intersectionKer( ImageCuda outborderCud, // 外轮廓被填充过后的图像 ImageCuda inborderCud // 内轮廓被填充过后的图像 ){ // 此版本中,填充色就是轮廓色,因此,逻辑判定简单 // 计算线程对应的输出点的位置的 x 和 y 分量 int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int index=y*outborderCud.pitchBytes+x; // 检查像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if(x>=outborderCud.imgMeta.width || y >= outborderCud.imgMeta.height ) return; // 内边界填充图填充部分 且 外边界填充图未填充部分 是要求的结果,其余部分 // 认为是背景 if(outborderCud.imgMeta.imgData[index] != BORDER_COLOR && inborderCud.imgMeta.imgData[index] == BORDER_COLOR) outborderCud.imgMeta.imgData[index]=BORDER_COLOR; else outborderCud.imgMeta.imgData[index]=BK_COLOR; return; } // Kernel 函数:_negateKer(对输入图像求反 BORDER_COLOR<-->BK_COLOR) static __global__ void _negateKer( ImageCuda outborderCud // 外轮廓被填充过的图形,反转后是轮廓内部填充 ){ // 计算线程对应的输出点的位置的 x 和 y 分量 int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int index=y*outborderCud.pitchBytes+x; // 检查像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if(x>=outborderCud.imgMeta.width || y >= outborderCud.imgMeta.height ) return; // BORDER_COLOR 变成 BK_COLOR ,或者相反 if(outborderCud.imgMeta.imgData[index] == BORDER_COLOR) outborderCud.imgMeta.imgData[index]=BK_COLOR; else outborderCud.imgMeta.imgData[index]=BORDER_COLOR; return; } //--------------------------全局方法声明------------------------------------ // 函数:_findMinMaxCoordinates(根据输入点集的坐标,找到最上、最下、最左、最右 // 的点,从而确定图像的宽和高) static __host__ int _findMinMaxCoordinates(CoordiSet *guidingset, int *xmin, int *ymin, int *xmax, int *ymax); //--------------------------全局方法实现------------------------------------ // 函数:_findMinMaxCoordinates(根据输入点集的坐标,找到最上、最下、最左、最右 // 的点,从而确定图像的宽和高) static __host__ int _findMinMaxCoordinates(CoordiSet *guidingset, int *xmin, int *ymin, int *xmax, int *ymax) { // 声明局部变量。 int errcode; // 在 host 端申请一个新的 CoordiSet 变量。 CoordiSet *tmpcoordiset; errcode = CoordiSetBasicOp::newCoordiSet(&tmpcoordiset); if (errcode != NO_ERROR) return errcode; errcode = CoordiSetBasicOp::makeAtHost(tmpcoordiset, guidingset->count); if (errcode != NO_ERROR) return errcode; // 将坐标集拷贝到 Host 端。 errcode = CoordiSetBasicOp::copyToHost(guidingset, tmpcoordiset); if (errcode != NO_ERROR) return errcode; // 初始化 x 和 y 方向上的最小最大值。 xmin[0] = xmax[0] = tmpcoordiset->tplData[0]; ymin[0] = ymax[0] = tmpcoordiset->tplData[1]; // 循环寻找坐标集最左、最右、最上、最下的坐标。 for (int i = 1;i < tmpcoordiset->count;i++) { // 寻找 x 方向上的最小值。 if (xmin[0] > tmpcoordiset->tplData[2 * i]) xmin[0] = tmpcoordiset->tplData[2 * i]; // 寻找 x 方向上的最大值 if (xmax[0] < tmpcoordiset->tplData[2 * i]) xmax[0] = tmpcoordiset->tplData[2 * i]; // 寻找 y 方向上的最小值。 if (ymin[0] > tmpcoordiset->tplData[2 * i + 1]) ymin[0] = tmpcoordiset->tplData[2 * i + 1]; // 寻找 y 方向上的最大值 if (ymax[0] < tmpcoordiset->tplData[2 * i + 1]) ymax[0] = tmpcoordiset->tplData[2 * i + 1]; } // 释放临时坐标集变量。 CoordiSetBasicOp::deleteCoordiSet(tmpcoordiset); return errcode; } 
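// ---------------------------------------------------------------------------
// Added illustration (hedged; not part of the original source): a host-side
// helper that mirrors the seed enumeration used by _seedScanLineOutConGlobalKer
// and _seedScanLineOutConShareKer above. A 1D index x in [0, 2*(w+h-2)) is
// mapped clockwise onto the image border: top row, right column, bottom row,
// then left column. The helper name is hypothetical.
// ---------------------------------------------------------------------------
static inline void borderIndexToSeed(int x, int w, int h,
                                     int *seedx, int *seedy)
{
    if (x < w) {                          // top row, left to right
        *seedx = x;                *seedy = 0;
    } else if (x < w + h - 1) {           // right column, top to bottom
        *seedx = w - 1;            *seedy = x - (w - 1);
    } else if (x < 2*w + h - 2) {         // bottom row, left to right
        *seedx = x - (w + h - 2);  *seedy = h - 1;
    } else {                              // left column, top to bottom
        *seedx = 0;                *seedy = x - (2*w + h - 2);
    }
}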
//--------------------------成员方法实现------------------------------------ // 成员方法:seedScanLineCoorGlo(并行种子扫描线算法填充 coordiset 集合围起的区域) __host__ int // 返回值:函数是否正确执行,若函数正确执 // 行,返回 NO_ERROR。 FillCurve::seedScanLineCoorGlo( CoordiSet *outbordercoor, // 输入的 coordiset ,内容为封闭区域 // 外轮廓闭合曲线 CoordiSet *inbordercoor, // 输入的 coordiset ,内容为封闭区域 // 内轮廓闭合曲线。如果没有内轮廓,设为NULL CoordiSet *fillcoor // 输出填充过的的 coordiset ){ // 获取坐标集中点的分布范围,即包围盒坐标 int minx,maxx,miny,maxy; // ----------------------输入coor参数转化成img---------------------------- Image *outborderimg=NULL; ImageBasicOp::newImage(&outborderimg); Image *inborderimg=NULL; ImageBasicOp::newImage(&inborderimg); ImgConvert imgcvt(BORDER_COLOR,BK_COLOR); #ifdef DEBUG_TIME cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float runTime; cudaEventRecord(start, 0); #endif // --------------------------轮廓坐标集转换成img------------------------------- if(outbordercoor!=NULL){ // 预处理,得到外轮廓大小 int errorcode=_findMinMaxCoordinates(outbordercoor,&minx,&miny,&maxx,&maxy); if(errorcode!=NO_ERROR) return 0; // 处理外轮廓 // 创建工作图像 //给工作图像分配空间,宽度是最大坐标值+1,因为坐标从0开始计数,再+1,保证轮廓外连通 ImageBasicOp::makeAtHost(outborderimg,maxx+2 ,maxy+2); // 把坐标集绘制到图像上,前景255,背景0 imgcvt.cstConvertToImg(outbordercoor,outborderimg); if(inbordercoor!=NULL){ //给工作图像分配空间,宽度是最大坐标值+1,因为坐标从0开始计数,再+1,保证轮廓外连通 ImageBasicOp::makeAtHost(inborderimg,maxx+2 ,maxy+2); // 把坐标集绘制到图像上,前景255,背景0 imgcvt.cstConvertToImg(inbordercoor,inborderimg); } #ifdef DEBUG_IMG // 把填充前的图像保存到文件 ImageBasicOp::copyToHost(outborderimg); ImageBasicOp::writeToFile("outborder_notFilled.bmp",outborderimg); ImageBasicOp::copyToHost(inborderimg); ImageBasicOp::writeToFile("inborder_notFilled.bmp",inborderimg); #endif #ifdef DEBUG_TIME cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&runTime, start, stop); cout << "[coor] coor->img time" << runTime << " ms" << endl; cudaEventRecord(start, 0); #endif // --------------------------调用图像填充算法------------------------------- seedScanLineImgGlo(outborderimg,inborderimg); }// end of out border #ifdef DEBUG_TIME cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&runTime, start, stop); cout << "[coor] fill time" << runTime << " ms" << endl; cudaEventRecord(start, 0); #endif //------------------------串行图像转化成coor,返回------------------------- ImageBasicOp::copyToHost(outborderimg); #ifdef DEBUG_IMG // 最终图像输出到文件 ImageBasicOp::writeToFile("[coor]intersection.bmp",outborderimg); #endif // 此时imgcvt的设置是前景255,背景0,灰色部分会忽略,故自定义串行转化方法 //imgcvt.imgConvertToCst(outborderimg,fillcoor); int w,h; w=outborderimg->width; h=outborderimg->height; int imgsize=w*h; // 每个点(x,y)占用两个整数存放 int *coorarray=(int *)malloc(2*imgsize*sizeof(int)); int coorcount=0; for(int i=0;i<w;i++) for(int j=0;j<h;j++){ // 图像中的点(i,j) int curpix=outborderimg->imgData[j*w+i]; if(curpix==BORDER_COLOR ){ coorarray[coorcount*2]=i; coorarray[coorcount*2+1]=j; coorcount++; } } // 创建coor,给count、和数据数组赋值 CoordiSetBasicOp::makeAtHost(fillcoor,coorcount); memcpy(fillcoor->tplData,coorarray,coorcount*2*sizeof(int)); free(coorarray); #ifdef DEBUG_TIME cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&runTime, start, stop); cout << "[coor] img to coor " << runTime << " ms" << endl; #endif /* //------------------------并行图像转化成coor,返回------------------------- // 经过测试,效率不如串行,故不采用 imgcvt.imgConvertToCst(outborderimg,fillcoor); #ifdef DEBUG_TIME cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&runTime, start, stop); cout << "con img to coor " << runTime << " ms" << 
endl; #endif */ //------------------------内存回收------------------------------------- ImageBasicOp::deleteImage(outborderimg); ImageBasicOp::deleteImage(inborderimg); return NO_ERROR; } // 成员方法:seedScanLineImgGlo(并行种子扫描线算法填充 coordiset 集合围起的区域) __host__ int // 返回值:函数是否正确执行,若函数正确执 // 行,返回 NO_ERROR。 FillCurve::seedScanLineImgGlo( Image *outborderimg, // 外轮廓闭合曲线图像,同时也是输出结果 Image *inborderimg // 内轮廓闭合曲线图像,没有内轮廓设为空 ){ ImageCuda outborderCud; ImageCuda inborderCud; #ifdef DEBUG_TIME cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float runTime; cudaEventRecord(start, 0); #endif // --------------------------处理外轮廓------------------------------- if(outborderimg!=NULL){ int errcode; // 将输入图像拷贝入 Device 内存。 errcode = ImageBasicOp::copyToCurrentDevice(outborderimg); if (errcode != NO_ERROR) { return errcode; } // 提取输入图像的 ROI 子图像。 errcode = ImageBasicOp::roiSubImage(outborderimg, &outborderCud); if (errcode != NO_ERROR) { return errcode; } // 计算边缘点个数,即最大线程个数 int outmaxthreadsize=(outborderimg->width+outborderimg->height-2)<<1; dim3 grid,block; block.x=DEF_BLOCK_X; block.y=1; block.z=1; grid.x=(outmaxthreadsize+DEF_BLOCK_X-1)/DEF_BLOCK_X; grid.y=1; grid.z=1; //--------------------------------- _seedScanLineOutConGlobalKer<<<grid,block>>> (outborderCud,outmaxthreadsize); //--------------------------------------------- #ifdef DEBUG_IMG ImageBasicOp::copyToHost(outborderimg); ImageBasicOp::writeToFile("outborder_Filled.bmp",outborderimg); // 交操作还要在 device 端使用图像,将输入图像拷贝入 Device 内存。 errcode = ImageBasicOp::copyToCurrentDevice(outborderimg); // 经过一次主存显存传输,ROI子图像需要重新提取 errcode = ImageBasicOp::roiSubImage(outborderimg, &outborderCud); #endif }// end of out border #ifdef DEBUG_TIME cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&runTime, start, stop); cout << "[img] out border fill time" << runTime << " ms" << endl; cudaEventRecord(start, 0); #endif // --------------------------处理内轮廓------------------------------- if(outborderimg!=NULL && inborderimg!=NULL){ int errcode; // 将输入图像拷贝入 Device 内存。 errcode = ImageBasicOp::copyToCurrentDevice(inborderimg); if (errcode != NO_ERROR) { return errcode; } // 提取输入图像的 ROI 子图像。 errcode = ImageBasicOp::roiSubImage(inborderimg, &inborderCud); if (errcode != NO_ERROR) { return errcode; } // 计算边缘点个数,即最大线程个数 int inmaxthreadsize=(inborderimg->width+inborderimg->height-2)<<1; dim3 grid,block; block.x=DEF_BLOCK_X; block.y=1; block.z=1; grid.x=(inmaxthreadsize+DEF_BLOCK_X-1)/DEF_BLOCK_X; grid.y=1; grid.z=1; _seedScanLineOutConGlobalKer<<<grid,block>>>(inborderCud,inmaxthreadsize); #ifdef DEBUG_IMG ImageBasicOp::copyToHost(inborderimg); ImageBasicOp::writeToFile("inborderFilled.bmp",inborderimg); // 交操作还要在 device 端使用图像,将输入图像拷贝入 Device 内存。 errcode = ImageBasicOp::copyToCurrentDevice(inborderimg); // 经过一次主存显存传输,ROI子图像需要重新提取 errcode = ImageBasicOp::roiSubImage(inborderimg, &inborderCud); #endif }// end of in border & process #ifdef DEBUG_TIME cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&runTime, start, stop); cout << "[img] in border fill time " << runTime << " ms" << endl; cudaEventRecord(start, 0); #endif //--------------如果有内轮廓,则内外轮廓填充图像求交---------------------- if(outborderimg!=NULL && inborderimg!=NULL){ dim3 gridsize,blocksize; // 计算调用计算局部最大值的 kernel 函数的线程块的尺寸和线程块的数量。 blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outborderimg->width + blocksize.x - 1) / blocksize.x; gridsize.y = (outborderimg->height + blocksize.y - 1) / blocksize.y; // 调用 kernel 
函数求交,结果放入outbordercud中,此时outborderCud和 // inborderCud都在divice中,不用再次copytodevice _intersectionKer<<<gridsize, blocksize>>>( outborderCud, inborderCud ); if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; } //--------------如果没有内轮廓,则仅仅对外轮廓填充结果求反--------- else{ dim3 gridsize,blocksize; // 计算调用计算局部最大值的 kernel 函数的线程块的尺寸和线程块的数量。 blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outborderimg->width + blocksize.x - 1) / blocksize.x; gridsize.y = (outborderimg->height + blocksize.y - 1) / blocksize.y; // 调用 kernel 函数求反 _negateKer<<<gridsize, blocksize>>>( outborderCud ); if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; } #ifdef DEBUG_TIME cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&runTime, start, stop); cout << "[img ]inter or negate " << runTime << " ms" << endl; cudaEventRecord(start, 0); #endif #ifdef DEBUG_IMG ImageBasicOp::copyToHost(outborderimg); ImageBasicOp::writeToFile("[img]fill_result.bmp",outborderimg); #endif ImageBasicOp::copyToHost(outborderimg); return NO_ERROR; } // 成员方法:seedScanLineCurveGlo(并行种子扫描线算法填充 Curve 集合围起的区域) // 使用本并行算法时,内外轮廓要放入不同的 Curve 中。 __host__ int // 返回值:函数是否正确执行,若函数正确执 // 行,返回 NO_ERROR。 FillCurve::seedScanLineCurveGlo( Curve *outbordercurve, // 输入的 Curve ,内容为封闭区域 // 外轮廓闭合曲线 Curve *inbordercurve, // 输入的 Curve ,内容为封闭区域 // 内轮廓闭合曲线。如果没有内轮廓,设为NULL Curve *fillcurve // 输出填充过的的 Curve ){ // ----------------------输入Curve参数转化成img---------------------------- Image *outborderimg=NULL; Image *inborderimg=NULL; CurveConverter curcvt(BORDER_COLOR,BK_COLOR); #ifdef DEBUG_TIME cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float runTime; cudaEventRecord(start, 0); #endif // --------------------------轮廓坐标集转换成img------------------------------- if(outbordercurve==NULL) return INVALID_DATA; ImageBasicOp::newImage(&outborderimg); // 创建工作图像 //给工作图像分配空间,宽度是最大坐标值+1,因为坐标从0开始计数,再+1,保证轮廓外连通 ImageBasicOp::makeAtHost(outborderimg,outbordercurve->maxCordiX+2 ,outbordercurve->maxCordiY+2); // 把坐标集绘制到图像上,前景255,背景0 curcvt.curve2Img(outbordercurve,outborderimg); if(inbordercurve!=NULL){ ImageBasicOp::newImage(&inborderimg); //给工作图像分配空间,按照外轮廓大小分配 ImageBasicOp::makeAtHost(inborderimg,outbordercurve->maxCordiX+2 ,outbordercurve->maxCordiY+2); // 把坐标集绘制到图像上,前景255,背景0 curcvt.curve2Img(inbordercurve,inborderimg); } #ifdef DEBUG_IMG // 把填充前的图像保存到文件 ImageBasicOp::copyToHost(outborderimg); ImageBasicOp::writeToFile("outborder_notFilled.bmp",outborderimg); ImageBasicOp::copyToHost(inborderimg); ImageBasicOp::writeToFile("inborder_notFilled.bmp",inborderimg); #endif #ifdef DEBUG_TIME cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&runTime, start, stop); cout << "[Curve] coor->img time" << runTime << " ms" << endl; cudaEventRecord(start, 0); #endif // --------------------------调用图像填充算法------------------------------- seedScanLineImgGlo(outborderimg,inborderimg); #ifdef DEBUG_TIME cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&runTime, start, stop); cout << "[Curve] fill time" << runTime << " ms" << endl; cudaEventRecord(start, 0); #endif //------------------------图像转化成curve,返回------------------------- cudaThreadSynchronize(); curcvt.img2Curve(outborderimg,fillcurve); #ifdef DEBUG_TIME cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&runTime, start, stop); cout << "[Curve] img to Curve " << runTime << " ms" << endl; #endif //------------------------内存回收------------------------------------- ImageBasicOp::deleteImage(outborderimg); 
ImageBasicOp::deleteImage(inborderimg); return NO_ERROR; } // 成员方法:seedScanLineCoorShr(并行种子扫描线算法填充 coordiset 集合围起的区域) __host__ int // 返回值:函数是否正确执行,若函数正确执 // 行,返回 NO_ERROR。 FillCurve::seedScanLineCoorShr( CoordiSet *outbordercoor, // 输入的 coordiset ,内容为封闭区域 // 外轮廓闭合曲线 CoordiSet *inbordercoor, // 输入的 coordiset ,内容为封闭区域 // 内轮廓闭合曲线。如果没有内轮廓,设为NULL CoordiSet *fillcoor // 输出填充过的的 coordiset ){ // 获取坐标集中点的分布范围,即包围盒坐标 int minx,maxx,miny,maxy; // ----------------------输入coor参数转化成img---------------------------- Image *outborderimg=NULL; ImageBasicOp::newImage(&outborderimg); Image *inborderimg=NULL; ImageBasicOp::newImage(&inborderimg); ImgConvert imgcvt(BORDER_COLOR,BK_COLOR); #ifdef DEBUG_TIME cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float runTime; cudaEventRecord(start, 0); #endif // --------------------------轮廓坐标集转换成img------------------------------- if(outbordercoor!=NULL){ // 预处理,得到外轮廓大小 int errorcode=_findMinMaxCoordinates(outbordercoor,&minx,&miny,&maxx,&maxy); if(errorcode!=NO_ERROR) return 0; // 处理外轮廓 // 创建工作图像 //给工作图像分配空间,宽度是最大坐标值+1,因为坐标从0开始计数,再+1,保证轮廓外连通 ImageBasicOp::makeAtHost(outborderimg,maxx+2 ,maxy+2); // 把坐标集绘制到图像上,前景255,背景0 imgcvt.cstConvertToImg(outbordercoor,outborderimg); if(inbordercoor!=NULL){ //给工作图像分配空间,宽度是最大坐标值+1,因为坐标从0开始计数,再+1,保证轮廓外连通 ImageBasicOp::makeAtHost(inborderimg,maxx+2 ,maxy+2); // 把坐标集绘制到图像上,前景255,背景0 imgcvt.cstConvertToImg(inbordercoor,inborderimg); } #ifdef DEBUG_IMG // 把填充前的图像保存到文件 ImageBasicOp::copyToHost(outborderimg); ImageBasicOp::writeToFile("outborder_notFilled.bmp",outborderimg); ImageBasicOp::copyToHost(inborderimg); ImageBasicOp::writeToFile("inborder_notFilled.bmp",inborderimg); #endif #ifdef DEBUG_TIME cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&runTime, start, stop); cout << "[coor] coor->img time" << runTime << " ms" << endl; cudaEventRecord(start, 0); #endif // --------------------------调用图像填充算法------------------------------- seedScanLineImgShr(outborderimg,inborderimg); }// end of out border #ifdef DEBUG_TIME cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&runTime, start, stop); cout << "[coor] fill time" << runTime << " ms" << endl; cudaEventRecord(start, 0); #endif //------------------------串行图像转化成coor,返回------------------------- ImageBasicOp::copyToHost(outborderimg); #ifdef DEBUG_IMG // 最终图像输出到文件 ImageBasicOp::writeToFile("[coor]intersection.bmp",outborderimg); #endif // 此时imgcvt的设置是前景255,背景0,灰色部分会忽略,故自定义串行转化方法 //imgcvt.imgConvertToCst(outborderimg,fillcoor); int w,h; w=outborderimg->width; h=outborderimg->height; int imgsize=w*h; // 每个点(x,y)占用两个整数存放 int *coorarray=(int *)malloc(2*imgsize*sizeof(int)); int coorcount=0; for(int i=0;i<w;i++) for(int j=0;j<h;j++){ // 图像中的点(i,j) int curpix=outborderimg->imgData[j*w+i]; if(curpix==BORDER_COLOR ){ coorarray[coorcount*2]=i; coorarray[coorcount*2+1]=j; coorcount++; } } // 创建coor,给count、和数据数组赋值 CoordiSetBasicOp::makeAtHost(fillcoor,coorcount); memcpy(fillcoor->tplData,coorarray,coorcount*2*sizeof(int)); free(coorarray); #ifdef DEBUG_TIME cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&runTime, start, stop); cout << "[coor] img to coor " << runTime << " ms" << endl; #endif /* //------------------------并行图像转化成coor,返回------------------------- // 经过测试,效率不如串行,故不采用 imgcvt.imgConvertToCst(outborderimg,fillcoor); #ifdef DEBUG_TIME cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&runTime, start, stop); cout << "con img to coor " << runTime << " ms" << endl; #endif 
*/ //------------------------内存回收------------------------------------- ImageBasicOp::deleteImage(outborderimg); ImageBasicOp::deleteImage(inborderimg); return NO_ERROR; } // 成员方法:seedScanLineImgShr(并行种子扫描线算法填充 coordiset 集合围起的区域) __host__ int // 返回值:函数是否正确执行,若函数正确执 // 行,返回 NO_ERROR。 FillCurve::seedScanLineImgShr( Image *outborderimg, // 外轮廓闭合曲线图像,同时也是输出结果 Image *inborderimg // 内轮廓闭合曲线图像,没有内轮廓设为空 ){ ImageCuda outborderCud; ImageCuda inborderCud; #ifdef DEBUG_TIME cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float runTime; cudaEventRecord(start, 0); #endif // --------------------------处理外轮廓------------------------------- if(outborderimg!=NULL){ int errcode; // 将输入图像拷贝入 Device 内存。 errcode = ImageBasicOp::copyToCurrentDevice(outborderimg); if (errcode != NO_ERROR) { return errcode; } // 提取输入图像的 ROI 子图像。 errcode = ImageBasicOp::roiSubImage(outborderimg, &outborderCud); if (errcode != NO_ERROR) { return errcode; } // 计算边缘点个数,即最大线程个数 int outmaxthreadsize=(outborderimg->width+outborderimg->height-2)<<1; if(outmaxthreadsize>maxThreadsPerBlock) outmaxthreadsize=maxThreadsPerBlock; dim3 grid,block; block.x=outmaxthreadsize; block.y=1; block.z=1; grid.x=1; grid.y=1; grid.z=1; //--------------------------------- int sharedmemsize=outborderCud.imgMeta.height* outborderCud.imgMeta.width* sizeof (unsigned char); _seedScanLineOutConShareKer<<<grid,block,sharedmemsize>>> (outborderCud, outmaxthreadsize ); //--------------------------------------------- #ifdef DEBUG_IMG ImageBasicOp::copyToHost(outborderimg); ImageBasicOp::writeToFile("outborder_Filled.bmp",outborderimg); // 交操作还要在 device 端使用图像,将输入图像拷贝入 Device 内存。 errcode = ImageBasicOp::copyToCurrentDevice(outborderimg); // 经过一次主存显存传输,ROI子图像需要重新提取 errcode = ImageBasicOp::roiSubImage(outborderimg, &outborderCud); #endif }// end of out border #ifdef DEBUG_TIME cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&runTime, start, stop); cout << "[img] out border fill time" << runTime << " ms" << endl; cudaEventRecord(start, 0); #endif // --------------------------处理内轮廓------------------------------- if(outborderimg!=NULL && inborderimg!=NULL){ int errcode; // 将输入图像拷贝入 Device 内存。 errcode = ImageBasicOp::copyToCurrentDevice(inborderimg); if (errcode != NO_ERROR) { return errcode; } // 提取输入图像的 ROI 子图像。 errcode = ImageBasicOp::roiSubImage(inborderimg, &inborderCud); if (errcode != NO_ERROR) { return errcode; } // 计算边缘点个数,即最大线程个数 int inmaxthreadsize=(inborderimg->width+inborderimg->height-2)<<1; if(inmaxthreadsize>maxThreadsPerBlock) inmaxthreadsize=maxThreadsPerBlock; dim3 grid,block; block.x=inmaxthreadsize; block.y=1; block.z=1; grid.x=1; grid.y=1; grid.z=1; int insharedmemsize=inborderCud.imgMeta.width* inborderCud.imgMeta.height* sizeof (unsigned char); _seedScanLineOutConShareKer<<<grid,block,insharedmemsize>>>(inborderCud,inmaxthreadsize); #ifdef DEBUG_IMG ImageBasicOp::copyToHost(inborderimg); ImageBasicOp::writeToFile("inborderFilled.bmp",inborderimg); // 交操作还要在 device 端使用图像,将输入图像拷贝入 Device 内存。 errcode = ImageBasicOp::copyToCurrentDevice(inborderimg); // 经过一次主存显存传输,ROI子图像需要重新提取 errcode = ImageBasicOp::roiSubImage(inborderimg, &inborderCud); #endif }// end of in border & process #ifdef DEBUG_TIME cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&runTime, start, stop); cout << "[img] in border fill time " << runTime << " ms" << endl; cudaEventRecord(start, 0); #endif //--------------如果有内轮廓,则内外轮廓填充图像求交---------------------- if(outborderimg!=NULL && inborderimg!=NULL){ dim3 
gridsize,blocksize;

        // Compute the block dimensions and the number of blocks for the kernel launch.
        blocksize.x = DEF_BLOCK_X;
        blocksize.y = DEF_BLOCK_Y;
        gridsize.x = (outborderimg->width + blocksize.x - 1) / blocksize.x;
        gridsize.y = (outborderimg->height + blocksize.y - 1) / blocksize.y;

        // Launch the intersection kernel; the result is written into outborderCud.
        // Both outborderCud and inborderCud already reside on the device, so no
        // additional copy to the device is needed.
        _intersectionKer<<<gridsize, blocksize>>>( outborderCud, inborderCud );
        if (cudaGetLastError() != cudaSuccess)
            return CUDA_ERROR;
    }
    // -------- If there is no inner contour, simply negate the fill result of the outer contour --------
    else {
        dim3 gridsize, blocksize;

        // Compute the block dimensions and the number of blocks for the kernel launch.
        blocksize.x = DEF_BLOCK_X;
        blocksize.y = DEF_BLOCK_Y;
        gridsize.x = (outborderimg->width + blocksize.x - 1) / blocksize.x;
        gridsize.y = (outborderimg->height + blocksize.y - 1) / blocksize.y;

        // Launch the negation kernel.
        _negateKer<<<gridsize, blocksize>>>( outborderCud );
        if (cudaGetLastError() != cudaSuccess)
            return CUDA_ERROR;
    }

#ifdef DEBUG_TIME
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&runTime, start, stop);
    cout << "[img ]inter or negate " << runTime << " ms" << endl;
    cudaEventRecord(start, 0);
#endif

#ifdef DEBUG_IMG
    ImageBasicOp::copyToHost(outborderimg);
    ImageBasicOp::writeToFile("[img]fill_result.bmp", outborderimg);
#endif

    ImageBasicOp::copyToHost(outborderimg);
    return NO_ERROR;
}

// Member method: seedScanLineCurveShr (parallel seed scan-line algorithm that fills
// the region enclosed by a set of Curves).
// When using this parallel algorithm, the outer and inner contours must be stored
// in separate Curve objects.
__host__ int                           // Return value: NO_ERROR if the function
                                       // executed correctly.
FillCurve::seedScanLineCurveShr(
        Curve *outbordercurve,         // Input Curve holding the closed outer
                                       // contour of the region.
        Curve *inbordercurve,          // Input Curve holding the closed inner
                                       // contour of the region; set to NULL if
                                       // there is no inner contour.
        Curve *fillcurve               // Output Curve holding the filled result.
){
    // ---------------------- Convert the input Curve parameters to images ----------------------
    Image *outborderimg = NULL;
    Image *inborderimg = NULL;
    CurveConverter curcvt(BORDER_COLOR, BK_COLOR);

#ifdef DEBUG_TIME
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float runTime;
    cudaEventRecord(start, 0);
#endif

    // -------------------- Convert the contour coordinate sets to images --------------------
    if (outbordercurve == NULL)
        return INVALID_DATA;

    // Create the working image.
    ImageBasicOp::newImage(&outborderimg);
    // Allocate space for the working image. The width is the maximum coordinate + 1
    // (coordinates start at 0), plus another 1 so the region outside the contour
    // stays connected.
    ImageBasicOp::makeAtHost(outborderimg, outbordercurve->maxCordiX + 2,
                             outbordercurve->maxCordiY + 2);
    // Draw the coordinate set onto the image: foreground 255, background 0.
    curcvt.curve2Img(outbordercurve, outborderimg);

    if (inbordercurve != NULL) {
        ImageBasicOp::newImage(&inborderimg);
        // Allocate space for the working image, using the size of the outer contour.
        ImageBasicOp::makeAtHost(inborderimg, outbordercurve->maxCordiX + 2,
                                 outbordercurve->maxCordiY + 2);
        // Draw the coordinate set onto the image: foreground 255, background 0.
        curcvt.curve2Img(inbordercurve, inborderimg);
    }

#ifdef DEBUG_IMG
    // Save the images to files before filling.
    ImageBasicOp::copyToHost(outborderimg);
    ImageBasicOp::writeToFile("outborder_notFilled.bmp", outborderimg);
    if (inborderimg != NULL) {
        // Guard against the case with no inner contour.
        ImageBasicOp::copyToHost(inborderimg);
        ImageBasicOp::writeToFile("inborder_notFilled.bmp", inborderimg);
    }
#endif

#ifdef DEBUG_TIME
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&runTime, start, stop);
    cout << "[Curve] coor->img time" << runTime << " ms" << endl;
    cudaEventRecord(start, 0);
#endif

    // -------------------- Invoke the image filling algorithm --------------------
    seedScanLineImgGlo(outborderimg, inborderimg);

#ifdef DEBUG_TIME
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&runTime, start, stop);
    cout << "[Curve] fill time" << runTime << " ms" << endl;
    cudaEventRecord(start, 0);
#endif

    // -------------------- Convert the image back to a Curve and return --------------------
    // Wait for the fill kernels to finish before reading the result back.
    cudaDeviceSynchronize();
    curcvt.img2Curve(outborderimg, fillcurve);

#ifdef DEBUG_TIME
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&runTime, start, stop);
    cout << "[Curve] img to Curve " << runTime << " ms" << endl;
#endif

    // -------------------- Free the working images --------------------
    ImageBasicOp::deleteImage(outborderimg);
    if (inborderimg != NULL)
        ImageBasicOp::deleteImage(inborderimg);

    return NO_ERROR;
}
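// Illustrative usage sketch (not part of the original file): how the fill entry
// point above could be driven from host code. It assumes the project's
// FillCurve/Curve headers are available, that FillCurve is default-constructible,
// and that the caller already owns constructed Curve objects; the helper name
// fillClosedRegion() is hypothetical.
static int fillClosedRegion(Curve *outer, Curve *inner, Curve *result)
{
    if (outer == NULL || result == NULL)
        return INVALID_DATA;

    FillCurve filler;

    // Outer and inner contours go into separate Curve objects; pass NULL for
    // 'inner' when the region has no hole.
    int err = filler.seedScanLineCurveShr(outer, inner, result);
    if (err != NO_ERROR)
        return err;

    return NO_ERROR;
}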
namespace MegBA { namespace geo { namespace { template <typename T> __device__ void AngleAxisRotatePoint( const T angle_axis0, const T angle_axis1, const T angle_axis2, const T point0, const T point1, const T point2, T &re_projection0, T &re_projection1, T &re_projection2, T *dprojection0_dangleaxis, T *dprojection1_dangleaxis, T *dprojection2_dangleaxis, T *dprojection0_dpoint, T *dprojection1_dpoint, T *dprojection2_dpoint) { const T theta2 = angle_axis0 * angle_axis0 + angle_axis1 * angle_axis1 + angle_axis2 * angle_axis2; if (theta2 > std::numeric_limits<T>::epsilon()) { const T theta = Wrapper::sqrtG<T>::call(theta2); T sintheta, costheta; Wrapper::sincosG<T>::call(theta, &sintheta, &costheta); const T theta_inverse = T(1.0) / theta; const T negative_theta_inverse_pow2 = -theta_inverse * theta_inverse; const T one_minus_costheta = T(1.0) - costheta; const T w[3] = {angle_axis0 * theta_inverse, angle_axis1 * theta_inverse, angle_axis2 * theta_inverse}; const T w_cross_pt[3] = {w[1] * point2 - w[2] * point1, w[2] * point0 - w[0] * point2, w[0] * point1 - w[1] * point0}; const T tmp = (w[0] * point0 + w[1] * point1 + w[2] * point2) * one_minus_costheta; re_projection0 = point0 * costheta + w_cross_pt[0] * sintheta + w[0] * tmp; re_projection1 = point1 * costheta + w_cross_pt[1] * sintheta + w[1] * tmp; re_projection2 = point2 * costheta + w_cross_pt[2] * sintheta + w[2] * tmp; const T dtheta_daa[3] = {theta_inverse * angle_axis0, theta_inverse * angle_axis1, theta_inverse * angle_axis2}; const T dw_daa[9] = { theta_inverse + negative_theta_inverse_pow2 * angle_axis0 * dtheta_daa[0], negative_theta_inverse_pow2 * angle_axis0 * dtheta_daa[1], negative_theta_inverse_pow2 * angle_axis0 * dtheta_daa[2], negative_theta_inverse_pow2 * angle_axis1 * dtheta_daa[0], theta_inverse + negative_theta_inverse_pow2 * angle_axis1 * dtheta_daa[1], negative_theta_inverse_pow2 * angle_axis1 * dtheta_daa[2], negative_theta_inverse_pow2 * angle_axis2 * dtheta_daa[0], negative_theta_inverse_pow2 * angle_axis2 * dtheta_daa[1], theta_inverse + negative_theta_inverse_pow2 * angle_axis2 * dtheta_daa[2]}; const T dwcrosspt_dx[9] = {0, -w[2], w[1], w[2], 0, -w[0], -w[1], w[0], 0}; const T dwcrosspt_daa[9] = {point2 * dw_daa[3] - point1 * dw_daa[6], point2 * dw_daa[4] - point1 * dw_daa[7], point2 * dw_daa[5] - point1 * dw_daa[8], -point2 * dw_daa[0] + point0 * dw_daa[6], -point2 * dw_daa[1] + point0 * dw_daa[7], -point2 * dw_daa[2] + point0 * dw_daa[8], point1 * dw_daa[0] - point0 * dw_daa[3], point1 * dw_daa[1] - point0 * dw_daa[4], point1 * dw_daa[2] - point0 * dw_daa[5]}; const T dtmp_dx[3] = {one_minus_costheta * w[0], one_minus_costheta * w[1], one_minus_costheta * w[2]}; const T dtmp_daa[3] = { sintheta * (w[0] * point0 + w[1] * point1 + w[2] * point2) * dtheta_daa[0] + one_minus_costheta * (point0 * dw_daa[0] + point1 * dw_daa[3] + point2 * dw_daa[6]), sintheta * (w[0] * point0 + w[1] * point1 + w[2] * point2) * dtheta_daa[1] + one_minus_costheta * (point0 * dw_daa[1] + point1 * dw_daa[4] + point2 * dw_daa[7]), sintheta * (w[0] * point0 + w[1] * point1 + w[2] * point2) * dtheta_daa[2] + one_minus_costheta * (point0 * dw_daa[2] + point1 * dw_daa[5] + point2 * dw_daa[8])}; const T dcostheta_daa[3] = {-sintheta * dtheta_daa[0], -sintheta * dtheta_daa[1], -sintheta * dtheta_daa[2]}; const T dsintheta_daa[3] = {costheta * dtheta_daa[0], costheta * dtheta_daa[1], costheta * dtheta_daa[2]}; dprojection0_dangleaxis[0] = point0 * dcostheta_daa[0] + w_cross_pt[0] * dsintheta_daa[0] + sintheta * dwcrosspt_daa[0] + w[0] * 
dtmp_daa[0] + tmp * dw_daa[0]; dprojection0_dangleaxis[1] = point0 * dcostheta_daa[1] + w_cross_pt[0] * dsintheta_daa[1] + sintheta * dwcrosspt_daa[1] + w[0] * dtmp_daa[1] + tmp * dw_daa[1]; dprojection0_dangleaxis[2] = point0 * dcostheta_daa[2] + w_cross_pt[0] * dsintheta_daa[2] + sintheta * dwcrosspt_daa[2] + w[0] * dtmp_daa[2] + tmp * dw_daa[2]; dprojection1_dangleaxis[0] = point1 * dcostheta_daa[0] + w_cross_pt[1] * dsintheta_daa[0] + sintheta * dwcrosspt_daa[3] + w[1] * dtmp_daa[0] + tmp * dw_daa[3]; dprojection1_dangleaxis[1] = point1 * dcostheta_daa[1] + w_cross_pt[1] * dsintheta_daa[1] + sintheta * dwcrosspt_daa[4] + w[1] * dtmp_daa[1] + tmp * dw_daa[4]; dprojection1_dangleaxis[2] = point1 * dcostheta_daa[2] + w_cross_pt[1] * dsintheta_daa[2] + sintheta * dwcrosspt_daa[5] + w[1] * dtmp_daa[2] + tmp * dw_daa[5]; dprojection2_dangleaxis[0] = point2 * dcostheta_daa[0] + w_cross_pt[2] * dsintheta_daa[0] + sintheta * dwcrosspt_daa[6] + w[2] * dtmp_daa[0] + tmp * dw_daa[6]; dprojection2_dangleaxis[1] = point2 * dcostheta_daa[1] + w_cross_pt[2] * dsintheta_daa[1] + sintheta * dwcrosspt_daa[7] + w[2] * dtmp_daa[1] + tmp * dw_daa[7]; dprojection2_dangleaxis[2] = point2 * dcostheta_daa[2] + w_cross_pt[2] * dsintheta_daa[2] + sintheta * dwcrosspt_daa[8] + w[2] * dtmp_daa[2] + tmp * dw_daa[8]; dprojection0_dpoint[0] = costheta + sintheta * dwcrosspt_dx[0] + w[0] * dtmp_dx[0]; dprojection0_dpoint[1] = sintheta * dwcrosspt_dx[1] + w[0] * dtmp_dx[1]; dprojection0_dpoint[2] = sintheta * dwcrosspt_dx[2] + w[0] * dtmp_dx[2]; dprojection1_dpoint[0] = sintheta * dwcrosspt_dx[3] + w[1] * dtmp_dx[0]; dprojection1_dpoint[1] = costheta + sintheta * dwcrosspt_dx[4] + w[1] * dtmp_dx[1]; dprojection1_dpoint[2] = sintheta * dwcrosspt_dx[5] + w[1] * dtmp_dx[2]; dprojection2_dpoint[0] = sintheta * dwcrosspt_dx[6] + w[2] * dtmp_dx[0]; dprojection2_dpoint[1] = sintheta * dwcrosspt_dx[7] + w[2] * dtmp_dx[1]; dprojection2_dpoint[2] = costheta + sintheta * dwcrosspt_dx[8] + w[2] * dtmp_dx[2]; } else { const T w_cross_pt[3] = {angle_axis1 * point2 - angle_axis2 * point1, angle_axis2 * point0 - angle_axis0 * point2, angle_axis0 * point1 - angle_axis1 * point0}; re_projection0 = point0 + w_cross_pt[0]; re_projection1 = point1 + w_cross_pt[1]; re_projection2 = point2 + w_cross_pt[2]; dprojection0_dangleaxis[0] = 0; dprojection0_dangleaxis[1] = point2; dprojection0_dangleaxis[2] = -point1; dprojection1_dangleaxis[0] = -point2; dprojection1_dangleaxis[1] = 0; dprojection1_dangleaxis[2] = point0; dprojection2_dangleaxis[0] = point1; dprojection2_dangleaxis[1] = -point0; dprojection2_dangleaxis[2] = 0; dprojection0_dpoint[0] = 1; dprojection0_dpoint[1] = -angle_axis2; dprojection0_dpoint[2] = angle_axis1; dprojection1_dpoint[0] = angle_axis2; dprojection1_dpoint[1] = 1; dprojection1_dpoint[2] = -angle_axis0; dprojection2_dpoint[0] = -angle_axis1; dprojection2_dpoint[1] = angle_axis0; dprojection2_dpoint[2] = 1; } } template <typename T> __global__ void AnalyticalDerivativesKernelGradKernel( const int nItem, const int N, const T *angle_axis0_ptr, const T *angle_axis1_ptr, const T *angle_axis2_ptr, const T *t0_ptr, const T *t1_ptr, const T *t2_ptr, const T *f_ptr, const T *k1_ptr, const T *k2_ptr, const T *point0_ptr, const T *point1_ptr, const T *point2_ptr, const T *obs0_ptr, const T *obs1_ptr, T *error0_valueDevicePtr, T *error1_valueDevicePtr, T *gradDevicePtrError0, T *gradDevicePtrError1) { unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= nItem) return; T dprojection0_dangleaxis[3]; T 
dprojection1_dangleaxis[3]; T dprojection2_dangleaxis[3]; T dprojection0_dpoint[3]; T dprojection1_dpoint[3]; T dprojection2_dpoint[3]; T re_projection0, re_projection1, re_projection2; AngleAxisRotatePoint( angle_axis0_ptr[idx], angle_axis1_ptr[idx], angle_axis2_ptr[idx], point0_ptr[idx], point1_ptr[idx], point2_ptr[idx], re_projection0, re_projection1, re_projection2, dprojection0_dangleaxis, dprojection1_dangleaxis, dprojection2_dangleaxis, dprojection0_dpoint, dprojection1_dpoint, dprojection2_dpoint); re_projection0 += t0_ptr[idx]; re_projection1 += t1_ptr[idx]; re_projection2 += t2_ptr[idx]; const T B0 = -re_projection0 / re_projection2; const T B1 = -re_projection1 / re_projection2; const T l2_pow2 = B0 * B0 + B1 * B1; const T rp = T(1.) + k1_ptr[idx] * l2_pow2 + k2_ptr[idx] * l2_pow2 * l2_pow2; const T fr = f_ptr[idx] * rp; error0_valueDevicePtr[idx] = fr * B0 - obs0_ptr[idx]; error1_valueDevicePtr[idx] = fr * B1 - obs1_ptr[idx]; const T tmp0 = f_ptr[idx] * B0 * (k1_ptr[idx] + 2 * k2_ptr[idx] * l2_pow2); const T tmp1 = f_ptr[idx] * B1 * (k1_ptr[idx] + 2 * k2_ptr[idx] * l2_pow2); const T negative_reciprocal_p2 = -1 / re_projection2; const T p0_dividedby_p2pow2 = re_projection0 / (re_projection2 * re_projection2); const T p1_dividedby_p2pow2 = re_projection1 / (re_projection2 * re_projection2); T error_factor0 = negative_reciprocal_p2 * (fr + 2 * tmp0 * B0); T error_factor1 = negative_reciprocal_p2 * 2 * tmp0 * B1; T error_factor2 = p0_dividedby_p2pow2 * (fr + 2 * tmp0 * B0) + p1_dividedby_p2pow2 * 2 * tmp0 * B1; // derror0 / dangle_axis gradDevicePtrError0[idx] = error_factor0 * dprojection0_dangleaxis[0] + error_factor1 * dprojection1_dangleaxis[0] + error_factor2 * dprojection2_dangleaxis[0]; gradDevicePtrError0[idx + 1 * nItem] = error_factor0 * dprojection0_dangleaxis[1] + error_factor1 * dprojection1_dangleaxis[1] + error_factor2 * dprojection2_dangleaxis[1]; gradDevicePtrError0[idx + 2 * nItem] = error_factor0 * dprojection0_dangleaxis[2] + error_factor1 * dprojection1_dangleaxis[2] + error_factor2 * dprojection2_dangleaxis[2]; // derror0 / dt gradDevicePtrError0[idx + 3 * nItem] = error_factor0; gradDevicePtrError0[idx + 4 * nItem] = error_factor1; gradDevicePtrError0[idx + 5 * nItem] = error_factor2; // derror0 / df gradDevicePtrError0[idx + 6 * nItem] = rp * B0; // derror0 / dk1 gradDevicePtrError0[idx + 7 * nItem] = f_ptr[idx] * l2_pow2 * B0; // derror0 / dk2 gradDevicePtrError0[idx + 8 * nItem] = f_ptr[idx] * l2_pow2 * l2_pow2 * B0; // derror0 / dpoint_xyz gradDevicePtrError0[idx + 9 * nItem] = error_factor0 * dprojection0_dpoint[0] + error_factor1 * dprojection1_dpoint[0] + error_factor2 * dprojection2_dpoint[0]; gradDevicePtrError0[idx + 10 * nItem] = error_factor0 * dprojection0_dpoint[1] + error_factor1 * dprojection1_dpoint[1] + error_factor2 * dprojection2_dpoint[1]; gradDevicePtrError0[idx + 11 * nItem] = error_factor0 * dprojection0_dpoint[2] + error_factor1 * dprojection1_dpoint[2] + error_factor2 * dprojection2_dpoint[2]; // ------------------------------------------------------------------------------------- error_factor0 = negative_reciprocal_p2 * 2 * tmp1 * B0; error_factor1 = negative_reciprocal_p2 * (fr + 2 * tmp1 * B1); error_factor2 = p0_dividedby_p2pow2 * 2 * tmp1 * B0 + p1_dividedby_p2pow2 * (fr + 2 * tmp1 * B1); // derror0 / dangle_axis gradDevicePtrError1[idx] = error_factor0 * dprojection0_dangleaxis[0] + error_factor1 * dprojection1_dangleaxis[0] + error_factor2 * dprojection2_dangleaxis[0]; gradDevicePtrError1[idx + 1 * nItem] = error_factor0 * 
dprojection0_dangleaxis[1] + error_factor1 * dprojection1_dangleaxis[1] + error_factor2 * dprojection2_dangleaxis[1]; gradDevicePtrError1[idx + 2 * nItem] = error_factor0 * dprojection0_dangleaxis[2] + error_factor1 * dprojection1_dangleaxis[2] + error_factor2 * dprojection2_dangleaxis[2]; // derror1 / dt gradDevicePtrError1[idx + 3 * nItem] = error_factor0; gradDevicePtrError1[idx + 4 * nItem] = error_factor1; gradDevicePtrError1[idx + 5 * nItem] = error_factor2; // derror1 / df gradDevicePtrError1[idx + 6 * nItem] = rp * B1; // derror1 / dk1 gradDevicePtrError1[idx + 7 * nItem] = f_ptr[idx] * l2_pow2 * B1; // derror1 / dk2 gradDevicePtrError1[idx + 8 * nItem] = f_ptr[idx] * l2_pow2 * l2_pow2 * B1; // derror1 / dpoint_xyz gradDevicePtrError1[idx + 9 * nItem] = error_factor0 * dprojection0_dpoint[0] + error_factor1 * dprojection1_dpoint[0] + error_factor2 * dprojection2_dpoint[0]; gradDevicePtrError1[idx + 10 * nItem] = error_factor0 * dprojection0_dpoint[1] + error_factor1 * dprojection1_dpoint[1] + error_factor2 * dprojection2_dpoint[1]; gradDevicePtrError1[idx + 11 * nItem] = error_factor0 * dprojection0_dpoint[2] + error_factor1 * dprojection1_dpoint[2] + error_factor2 * dprojection2_dpoint[2]; } } // namespace template <typename T> MegBA::geo::JVD<T> AnalyticalDerivativesKernelMatrix( const Eigen::Map<const JVD<T>> &AxisAngle, const Eigen::Map<const JVD<T>> &t, const Eigen::Map<const JVD<T>> &intrinsics, const JVD<T> &point_xyz, const JVD<T> &obs_uv) { const MegBA::JetVector<T> &JV_Template = AxisAngle(0, 0); MegBA::geo::JVD<T> error{}; error.resize(2, 1); for (int i = 0; i < 2; ++i) { error(i).initAs(JV_Template); } const auto N = JV_Template.getGradShape(); for (int i = 0; i < MemoryPool::getWorldSize(); ++i) { cudaSetDevice(i); const auto nItem = JV_Template.getItemNum(i); // 512 instead of 1024 for the limitation of registers dim3 block_dim(std::min(decltype(nItem)(512), nItem)); dim3 grid_dim((nItem - 1) / block_dim.x + 1); AnalyticalDerivativesKernelGradKernel<T><<<grid_dim, block_dim>>>( nItem, N, AxisAngle(0).getCUDAResPtr()[i], AxisAngle(1).getCUDAResPtr()[i], AxisAngle(2).getCUDAResPtr()[i], t(0).getCUDAResPtr()[i], t(1).getCUDAResPtr()[i], t(2).getCUDAResPtr()[i], intrinsics(0).getCUDAResPtr()[i], intrinsics(1).getCUDAResPtr()[i], intrinsics(2).getCUDAResPtr()[i], point_xyz(0).getCUDAResPtr()[i], point_xyz(1).getCUDAResPtr()[i], point_xyz(2).getCUDAResPtr()[i], obs_uv(0).getCUDAResPtr()[i], obs_uv(1).getCUDAResPtr()[i], error(0).getCUDAResPtr()[i], error(1).getCUDAResPtr()[i], error(0).getCUDAGradPtr()[i], error(1).getCUDAGradPtr()[i]); ASSERT_CUDA_NO_ERROR(); } return error; } template MegBA::geo::JVD<float> AnalyticalDerivativesKernelMatrix( const Eigen::Map<const JVD<float>> &AxisAngle, const Eigen::Map<const JVD<float>> &t, const Eigen::Map<const JVD<float>> &intrinsics, const JVD<float> &point_xyz, const JVD<float> &obs_uv); template MegBA::geo::JVD<double> AnalyticalDerivativesKernelMatrix( const Eigen::Map<const JVD<double>> &AxisAngle, const Eigen::Map<const JVD<double>> &t, const Eigen::Map<const JVD<double>> &intrinsics, const JVD<double> &point_xyz, const JVD<double> &obs_uv); } // namespace geo } // namespace MegBA
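// Reference sketch (not part of MegBA): a plain host-side version of the value
// computation in AngleAxisRotatePoint above, i.e. the Rodrigues formula
//   p' = p*cos(theta) + (w x p)*sin(theta) + w*(w . p)*(1 - cos(theta)),
// with w = angle_axis / theta. It can be used to spot-check the kernel's
// projected values; the analytic Jacobians are omitted here.
#include <cmath>
#include <limits>

static void AngleAxisRotatePointHost(const double aa[3], const double p[3],
                                     double out[3]) {
  const double theta2 = aa[0] * aa[0] + aa[1] * aa[1] + aa[2] * aa[2];
  if (theta2 > std::numeric_limits<double>::epsilon()) {
    const double theta = std::sqrt(theta2);
    const double c = std::cos(theta);
    const double s = std::sin(theta);
    const double w[3] = {aa[0] / theta, aa[1] / theta, aa[2] / theta};
    const double wxp[3] = {w[1] * p[2] - w[2] * p[1],
                           w[2] * p[0] - w[0] * p[2],
                           w[0] * p[1] - w[1] * p[0]};
    const double tmp = (w[0] * p[0] + w[1] * p[1] + w[2] * p[2]) * (1.0 - c);
    for (int i = 0; i < 3; ++i) out[i] = p[i] * c + wxp[i] * s + w[i] * tmp;
  } else {
    // Small-angle branch: first-order approximation p' = p + aa x p,
    // matching the kernel's else-branch above.
    out[0] = p[0] + aa[1] * p[2] - aa[2] * p[1];
    out[1] = p[1] + aa[2] * p[0] - aa[0] * p[2];
    out[2] = p[2] + aa[0] * p[1] - aa[1] * p[0];
  }
}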
SgemmEx: f16.f16->f16 f16.f16->f32 i8.i8->f32 f32.f32->f32 *CgemmEx: ci8.ci8->cf32 [>= sm_50] cf32.cf32->cf32 [>= sm_50] DgemmEx: f64.f64->f64 ZgemmEx: cf64.cf64->cf64 *Cgemm3m: cf32.cf32->cf32 (Gauss) [>= sm_50] Zgemm3m: cf64.cf64->cf64 (Gauss) [>= sm_50] Cherk: cf32->cf32 CherkEx: ci8->cf32 [>= sm_50] cf32->cf32 [>= sm_50] *Cherk3mEx: ci8->cf32 (Gauss) [>= sm_50] cf32->cf32 (Gauss) [>= sm_50] # TODO: Start with: Cgemm (+preconvert to fp32) CgemmEx (8bit, cuda >= 8.0, >=sm_50) Cgemm3m (fp32, cuda >= 8.0, >=sm_50) Cherk (+preconvert to fp32) Cherk3mEx (8bit or fp32, cuda >= 8.0, >=sm_50) The preconvert paths should support ci4, ci8, ci16, fp16 The other paths should only be used if the dtype already matches Eventually it will probably be worth integrating the xGPU kernel, given the lack of cublasHerkEx (particularly the small-N problem). */ #include <bifrost/linalg.h> #include "linalg_kernels.h" #include "assert.hpp" #include "utils.hpp" #include "cuda.hpp" #include "cuda/stream.hpp" #include "ShapeIndexer.cuh" #include "trace.hpp" #include "Complex.hpp" class BFlinalg_impl { cublasHandle_t _cublas; // No copy-assign BFlinalg_impl(BFlinalg_impl const& ); BFlinalg_impl& operator=(BFlinalg_impl const& ); public: BFlinalg_impl() { BF_CHECK_CUBLAS_EXCEPTION(cublasCreate(&_cublas)); } ~BFlinalg_impl() { if( _cublas ) { cublasDestroy(_cublas); } } cublasHandle_t cublas() const { return _cublas; } }; BFstatus bfMatMul_aa_exec_nobatch(BFlinalg handle, cudaStream_t stream, cublasOperation_t trans, long n, long k, double alpha, void const* a_data, BFdtype a_type, long a_stride, double beta, void* c_data, BFdtype c_type, long c_stride) { BF_TRACE_STREAM(stream); BF_CHECK_CUBLAS(cublasSetStream(handle->cublas(), stream)); // Note: UPPER here means lower for row-major ordering cublasFillMode_t uplo = CUBLAS_FILL_MODE_UPPER; BF_CHECK_CUBLAS(cublasSetPointerMode(handle->cublas(), CUBLAS_POINTER_MODE_HOST)); BF_ASSERT(a_data, BF_STATUS_INVALID_POINTER); BF_ASSERT(c_data, BF_STATUS_INVALID_POINTER); switch( a_type ) { case BF_DTYPE_F32: { BF_ASSERT(c_type == BF_DTYPE_F32, BF_STATUS_UNSUPPORTED_DTYPE); float alpha_f = (float)alpha; float beta_f = (float)beta; BF_CHECK_CUBLAS(cublasSsyrk(handle->cublas(), uplo, trans, n, k, &alpha_f, (float*)a_data, a_stride, &beta_f, (float*)c_data, c_stride)); break; } case BF_DTYPE_F64: { BF_ASSERT(c_type == BF_DTYPE_F64, BF_STATUS_UNSUPPORTED_DTYPE); BF_CHECK_CUBLAS(cublasDsyrk(handle->cublas(), uplo, trans, n, k, &alpha, (double*)a_data, a_stride, &beta, (double*)c_data, c_stride)); break; } #if CUDART_VERSION >= 8000 case BF_DTYPE_CI8: { BF_ASSERT(c_type == BF_DTYPE_CF32, BF_STATUS_UNSUPPORTED_DTYPE); float alpha_f = (float)alpha; float beta_f = (float)beta; if( get_cuda_device_cc() >= 50 ) { BF_CHECK_CUBLAS(cublasCherk3mEx(handle->cublas(), uplo, trans, n, k, &alpha_f, (cuComplex*)a_data, CUDA_C_8I, a_stride, &beta_f, (cuComplex*)c_data, CUDA_C_32F, c_stride)); break; } BF_FAIL("Supported dtype for array a", BF_STATUS_UNSUPPORTED_DTYPE); } #endif case BF_DTYPE_CF32: { BF_ASSERT(c_type == BF_DTYPE_CF32, BF_STATUS_UNSUPPORTED_DTYPE); float alpha_f = (float)alpha; float beta_f = (float)beta; #if CUDART_VERSION >= 8000 if( get_cuda_device_cc() >= 50 ) { BF_CHECK_CUBLAS(cublasCherk3mEx(handle->cublas(), uplo, trans, n, k, &alpha_f, (cuComplex*)a_data, CUDA_C_32F, a_stride, &beta_f, (cuComplex*)c_data, CUDA_C_32F, c_stride)); break; } #endif BF_CHECK_CUBLAS(cublasCherk(handle->cublas(), uplo, trans, n, k, &alpha_f, (cuComplex*)a_data, a_stride, &beta_f, (cuComplex*)c_data, 
c_stride)); break; } case BF_DTYPE_CF64: { BF_ASSERT(c_type == BF_DTYPE_CF64, BF_STATUS_UNSUPPORTED_DTYPE); BF_CHECK_CUBLAS(cublasZherk(handle->cublas(), uplo, trans, n, k, &alpha, (cuDoubleComplex*)a_data, a_stride, &beta, (cuDoubleComplex*)c_data, c_stride)); break; } default: BF_FAIL("Supported dtype for array a", BF_STATUS_UNSUPPORTED_DTYPE); } return BF_STATUS_SUCCESS; } BFstatus bfMatMul_aa_exec(BFlinalg handle, cudaStream_t stream, cublasOperation_t trans, long n, long k, long nbatch, double alpha, void const* a_data, BFdtype a_type, long a_stride, long a_batchstride, double beta, void* c_data, BFdtype c_type, long c_stride, long c_batchstride) { // TODO: Use batched algos here where possible //char* use_bf_cherk_str = getenv("BF_CHERK"); //bool use_bf_cherk = use_bf_cherk_str && atoi(use_bf_cherk_str); enum { BF_CUBLAS_CHERK_THRESHOLD = 896 }; if( //use_bf_cherk && (CUDART_VERSION < 8000 || n < BF_CUBLAS_CHERK_THRESHOLD) && trans == CUBLAS_OP_N && n % 2 == 0 && a_stride % 2 == 0 && a_batchstride % 2 == 0 && c_stride % 2 == 0 && c_batchstride % 2 == 0 && (a_type == BF_DTYPE_CI8 || a_type == BF_DTYPE_CI16) && c_type == BF_DTYPE_CF32 ) { BF_TRY_RETURN(bf_cherk_N( n, k, nbatch, alpha, a_data, a_type, a_stride, a_batchstride, beta, c_data, c_type, c_stride, c_batchstride, stream)); } for( long b=0; b<nbatch; ++b ) { cuda::child_stream child_stream(stream); BF_CHECK( bfMatMul_aa_exec_nobatch(handle, child_stream, trans, n, k, alpha, a_data, a_type, a_stride, beta, c_data, c_type, c_stride) ); a_data = (char*)a_data + a_batchstride * BF_DTYPE_NBYTE(a_type); c_data = (char*)c_data + c_batchstride * BF_DTYPE_NBYTE(c_type); } return BF_STATUS_SUCCESS; } BFstatus bfMatMul_aa(BFlinalg handle, double alpha, BFarray const* a, bool adjoint, double beta, BFarray const* c) { BF_TRACE(); BF_ASSERT(c->ndim == a->ndim, BF_STATUS_INVALID_SHAPE); int ndim = a->ndim; BFarray a_mutable; ::memcpy(&a_mutable, a, sizeof(BFarray)); a = &a_mutable; if( adjoint ) { std::swap(a_mutable.shape[ ndim-1], a_mutable.shape[ ndim-2]); std::swap(a_mutable.strides[ndim-1], a_mutable.strides[ndim-2]); a_mutable.conjugated = !a_mutable.conjugated; } // Check that output shape is correct BF_ASSERT(c->shape[ndim-1] == a->shape[ndim-2], BF_STATUS_INVALID_SHAPE); BF_ASSERT(c->shape[ndim-2] == a->shape[ndim-2], BF_STATUS_INVALID_SHAPE); // Handle batch dims by merging the contiguous ones together and selecting // the largest one to be the kernel batch dim. 
long nbatch = 1; int batch_dim = -1; int batch_shape[BF_MAX_DIMS]; BFarray a_flattened, c_flattened; for( int d=0; d<ndim; ++d ) { batch_shape[d] = 1; } if( ndim > 2 ) { // Keep the last 3 dims but attempt to flatten all others unsigned long keep_dims_mask = 0x7 << (ndim-3); keep_dims_mask |= padded_dims_mask(a); keep_dims_mask |= padded_dims_mask(c); flatten(a, &a_flattened, keep_dims_mask); flatten(c, &c_flattened, keep_dims_mask); a = &a_flattened; c = &c_flattened; BF_ASSERT(a_flattened.ndim == c_flattened.ndim, BF_STATUS_INTERNAL_ERROR); ndim = c->ndim; for( int d=0; d<ndim-2; ++d ) { BF_ASSERT(a->shape[d] == c->shape[d] || a->shape[d] == 1, BF_STATUS_INVALID_SHAPE); batch_shape[d] = c->shape[d]; // Find longest dimension to use as kernel batch dim if( c->shape[d] >= nbatch ) { nbatch = c->shape[d]; batch_dim = d; } } // Remove the kernel batch dim from the rest of the batch shape batch_shape[batch_dim] = 1; } // Convert byte strides to element strides int astrides[BF_MAX_DIMS]; int cstrides[BF_MAX_DIMS]; for( int d=0; d<ndim ; ++d ) { astrides[d] = a->strides[d]; cstrides[d] = c->strides[d]; } for( int d=0; d<ndim ; ++d ) { BF_ASSERT(astrides[d] % BF_DTYPE_NBYTE(a->dtype) == 0, BF_STATUS_INVALID_STRIDE); BF_ASSERT(cstrides[d] % BF_DTYPE_NBYTE(c->dtype) == 0, BF_STATUS_INVALID_STRIDE); astrides[d] /= BF_DTYPE_NBYTE(a->dtype); cstrides[d] /= BF_DTYPE_NBYTE(c->dtype); } // Determine transposition based on strides, and update strides cublasOperation_t trans; if( astrides[ndim-1] < astrides[ndim-2] ) { // Note: The fastest dim cannot be a batch dim BF_ASSERT(astrides[ndim-1] == 1, BF_STATUS_UNSUPPORTED_STRIDE); if( BF_DTYPE_IS_COMPLEX(a->dtype) ) { // Note: Because BLAS uses col-major ordering, we can only support // the non-conjugated case here. BF_ASSERT(!a->conjugated, BF_STATUS_UNSUPPORTED); trans = CUBLAS_OP_C; } else { trans = CUBLAS_OP_T; } } else if( astrides[ndim-1] > astrides[ndim-2] ) { // Note: The fastest dim cannot be a batch dim BF_ASSERT(astrides[ndim-2] == 1, BF_STATUS_UNSUPPORTED_STRIDE); // Note: Because BLAS uses col-major ordering, we can only support // the conjugated case here. 
if( BF_DTYPE_IS_COMPLEX(a->dtype) ) { BF_ASSERT(a->conjugated, BF_STATUS_UNSUPPORTED); } trans = CUBLAS_OP_N; std::swap(astrides[ndim-1], astrides[ndim-2]); } else { // TODO: I think this actually occurs legitimately when shape[-1] = 1 BF_ASSERT(false, BF_STATUS_INVALID_STRIDE); } BF_ASSERT(cstrides[ndim-2] >= cstrides[ndim-1], BF_STATUS_UNSUPPORTED_STRIDE); if( nbatch > 1 ) { // Enable broadcasting in the kernel batch dim if( a->shape[batch_dim] == 1 ) { astrides[batch_dim] = 0; } } // Loop over batch dims ShapeIndexer<BF_MAX_DIMS> shape_indexer(batch_shape, ndim); for( long i=0; i<shape_indexer.size(); ++i ) { auto inds = shape_indexer.at(i); void* a_data = array_get_pointer(a, inds); void* c_data = array_get_pointer(c, inds); cuda::child_stream stream(g_cuda_stream); BF_CHECK( bfMatMul_aa_exec(handle, stream, trans, a->shape[ndim-2], a->shape[ndim-1], nbatch, alpha, a_data, a->dtype, astrides[ndim-2], astrides[batch_dim], beta, c_data, c->dtype, cstrides[ndim-2], cstrides[batch_dim]) ); } return BF_STATUS_SUCCESS; } BFstatus bfMatMul_ab_exec_nobatch(BFlinalg handle, cudaStream_t stream, cublasOperation_t trans_a, cublasOperation_t trans_b, long m, long n, long k, double alpha, void const* a_data, BFdtype a_type, long a_stride, void const* b_data, BFdtype b_type, long b_stride, double beta, void* c_data, BFdtype c_type, long c_stride) { BF_TRACE_STREAM(stream); BF_CHECK_CUBLAS(cublasSetStream(handle->cublas(), stream)); BF_CHECK_CUBLAS(cublasSetPointerMode(handle->cublas(), CUBLAS_POINTER_MODE_HOST)); BF_ASSERT(a_data, BF_STATUS_INVALID_POINTER); BF_ASSERT(b_data, BF_STATUS_INVALID_POINTER); BF_ASSERT(c_data, BF_STATUS_INVALID_POINTER); BF_ASSERT(a_type == b_type, BF_STATUS_UNSUPPORTED_DTYPE); // TODO: Look into optimizations using cublasGemmEx algo selection and // batched/strided APIs. 
switch( a_type ) { case BF_DTYPE_F32: { BF_ASSERT(c_type == BF_DTYPE_F32, BF_STATUS_UNSUPPORTED_DTYPE); float alpha_f = (float)alpha; float beta_f = (float)beta; BF_CHECK_CUBLAS(cublasSgemm(handle->cublas(), trans_a, trans_b, m, n, k, &alpha_f, (float*)a_data, a_stride, (float*)b_data, b_stride, &beta_f, (float*)c_data, c_stride)); break; } case BF_DTYPE_F64: { BF_ASSERT(c_type == BF_DTYPE_F64, BF_STATUS_UNSUPPORTED_DTYPE); BF_CHECK_CUBLAS(cublasDgemm(handle->cublas(), trans_a, trans_b, m, n, k, &alpha, (double*)a_data, a_stride, (double*)b_data, b_stride, &beta, (double*)c_data, c_stride)); break; } #if CUDART_VERSION >= 8000 case BF_DTYPE_CI8: { BF_ASSERT(c_type == BF_DTYPE_CF32, BF_STATUS_UNSUPPORTED_DTYPE); cuComplex alpha_cf = make_cuComplex(alpha, 0); cuComplex beta_cf = make_cuComplex(beta, 0); if( get_cuda_device_cc() >= 50 ) { BF_CHECK_CUBLAS(cublasCgemmEx(handle->cublas(), trans_a, trans_b, m, n, k, &alpha_cf, (cuComplex*)a_data, CUDA_C_8I, a_stride, (cuComplex*)b_data, CUDA_C_8I, b_stride, &beta_cf, (cuComplex*)c_data, CUDA_C_32F, c_stride)); break; } BF_FAIL("Supported dtype for input array", BF_STATUS_UNSUPPORTED_DTYPE); } #endif case BF_DTYPE_CF32: { BF_ASSERT(c_type == BF_DTYPE_CF32, BF_STATUS_UNSUPPORTED_DTYPE); cuComplex alpha_cf = make_cuComplex(alpha, 0); cuComplex beta_cf = make_cuComplex(beta, 0); #if CUDART_VERSION >= 8000 if( get_cuda_device_cc() >= 50 ) { BF_CHECK_CUBLAS(cublasCgemm3m(handle->cublas(), trans_a, trans_b, m, n, k, &alpha_cf, (cuComplex*)a_data, a_stride, (cuComplex*)b_data, b_stride, &beta_cf, (cuComplex*)c_data, c_stride)); break; } #endif BF_CHECK_CUBLAS(cublasCgemm(handle->cublas(), trans_a, trans_b, m, n, k, &alpha_cf, (cuComplex*)a_data, a_stride, (cuComplex*)b_data, b_stride, &beta_cf, (cuComplex*)c_data, c_stride)); break; } case BF_DTYPE_CF64: { cuDoubleComplex alpha_cd = make_cuDoubleComplex(alpha, 0); cuDoubleComplex beta_cd = make_cuDoubleComplex(beta, 0); BF_ASSERT(c_type == BF_DTYPE_CF64, BF_STATUS_UNSUPPORTED_DTYPE); BF_CHECK_CUBLAS(cublasZgemm(handle->cublas(), trans_a, trans_b, m, n, k, &alpha_cd, (cuDoubleComplex*)a_data, a_stride, (cuDoubleComplex*)b_data, b_stride, &beta_cd, (cuDoubleComplex*)c_data, c_stride)); break; } default: BF_FAIL("Supported dtype for input array", BF_STATUS_UNSUPPORTED_DTYPE); } return BF_STATUS_SUCCESS; } BFstatus bfMatMul_ab_exec(BFlinalg handle, cudaStream_t stream, cublasOperation_t trans_a, cublasOperation_t trans_b, long m, long n, long k, long nbatch, double alpha, void const* a_data, BFdtype a_type, long a_stride, long a_batchstride, void const* b_data, BFdtype b_type, long b_stride, long b_batchstride, double beta, void* c_data, BFdtype c_type, long c_stride, long c_batchstride) { // TODO: Use batched algos here where possible //char* use_bf_cgemm_str = getenv("BF_CGEMM"); //bool use_bf_cgemm = use_bf_cgemm_str && atoi(use_bf_cgemm_str); if( //use_bf_cgemm && n <= 12 && trans_a == CUBLAS_OP_T && trans_b == CUBLAS_OP_N && (a_type == BF_DTYPE_CI4 || a_type == BF_DTYPE_CI8) && (b_type == BF_DTYPE_CI16 || b_type == BF_DTYPE_CF16 || b_type == BF_DTYPE_CF32) && c_type == BF_DTYPE_CF32 ) { BF_TRY_RETURN(bf_cgemm_TN_smallM( m, n, k, nbatch, alpha, a_data, a_type, a_stride, a_batchstride, b_data, b_type, b_stride, b_batchstride, beta, c_data, c_type, c_stride, c_batchstride, stream)); } for( long b=0; b<nbatch; ++b ) { cuda::child_stream child_stream(stream); BF_CHECK( bfMatMul_ab_exec_nobatch(handle, child_stream, trans_a, trans_b, m, n, k, alpha, a_data, a_type, a_stride, b_data, b_type, b_stride, beta, 
c_data, c_type, c_stride) ); a_data = (char*)a_data + a_batchstride * BF_DTYPE_NBYTE(a_type); b_data = (char*)b_data + b_batchstride * BF_DTYPE_NBYTE(b_type); c_data = (char*)c_data + c_batchstride * BF_DTYPE_NBYTE(c_type); } return BF_STATUS_SUCCESS; } BFstatus bfMatMul_ab(BFlinalg handle, double alpha, BFarray const* a, BFarray const* b, double beta, BFarray const* c) { BF_TRACE(); BF_ASSERT(c->ndim == a->ndim, BF_STATUS_INVALID_SHAPE); BF_ASSERT(c->ndim == b->ndim, BF_STATUS_INVALID_SHAPE); int ndim = a->ndim; // Check that shapes are correct BF_ASSERT(c->shape[ndim-2] == a->shape[ndim-2], BF_STATUS_INVALID_SHAPE); BF_ASSERT(c->shape[ndim-1] == b->shape[ndim-1], BF_STATUS_INVALID_SHAPE); BF_ASSERT(a->shape[ndim-1] == b->shape[ndim-2], BF_STATUS_INVALID_SHAPE); // Handle batch dims by merging the contiguous ones together and selecting // the largest one to be the kernel batch dim. long nbatch = 1; int batch_dim = -1; int batch_shape[BF_MAX_DIMS]; BFarray a_flattened, b_flattened, c_flattened; for( int d=0; d<ndim; ++d ) { batch_shape[d] = 1; } if( ndim > 2 ) { // Keep the last 3 dims but attempt to flatten all others unsigned long keep_dims_mask = 0x7 << (ndim-3); keep_dims_mask |= padded_dims_mask(a); keep_dims_mask |= padded_dims_mask(b); keep_dims_mask |= padded_dims_mask(c); flatten(a, &a_flattened, keep_dims_mask); flatten(b, &b_flattened, keep_dims_mask); flatten(c, &c_flattened, keep_dims_mask); a = &a_flattened; b = &b_flattened; c = &c_flattened; BF_ASSERT(a_flattened.ndim == b_flattened.ndim, BF_STATUS_INTERNAL_ERROR); BF_ASSERT(c_flattened.ndim == b_flattened.ndim, BF_STATUS_INTERNAL_ERROR); ndim = c->ndim; for( int d=0; d<ndim-2; ++d ) { BF_ASSERT(a->shape[d] == c->shape[d] || a->shape[d] == 1, BF_STATUS_INVALID_SHAPE); BF_ASSERT(b->shape[d] == c->shape[d] || b->shape[d] == 1, BF_STATUS_INVALID_SHAPE); batch_shape[d] = c->shape[d]; // Find longest dimension to use as kernel batch dim if( c->shape[d] >= nbatch ) { nbatch = c->shape[d]; batch_dim = d; } } // Remove the kernel batch dim from the rest of the batch shape batch_shape[batch_dim] = 1; } // Convert byte strides to element strides int astrides[BF_MAX_DIMS]; int bstrides[BF_MAX_DIMS]; int cstrides[BF_MAX_DIMS]; for( int d=0; d<ndim ; ++d ) { astrides[d] = a->strides[d]; bstrides[d] = b->strides[d]; cstrides[d] = c->strides[d]; } for( int d=0; d<ndim ; ++d ) { BF_ASSERT(astrides[d] % BF_DTYPE_NBYTE(a->dtype) == 0, BF_STATUS_INVALID_STRIDE); BF_ASSERT(bstrides[d] % BF_DTYPE_NBYTE(b->dtype) == 0, BF_STATUS_INVALID_STRIDE); BF_ASSERT(cstrides[d] % BF_DTYPE_NBYTE(c->dtype) == 0, BF_STATUS_INVALID_STRIDE); astrides[d] /= BF_DTYPE_NBYTE(a->dtype); bstrides[d] /= BF_DTYPE_NBYTE(b->dtype); cstrides[d] /= BF_DTYPE_NBYTE(c->dtype); } // Determine transposition based on strides, and update strides cublasOperation_t trans_a; cublasOperation_t trans_b; if( astrides[ndim-1] < astrides[ndim-2] ) { // Note: The fastest dim cannot be a batch dim BF_ASSERT(astrides[ndim-1] == 1, BF_STATUS_UNSUPPORTED_STRIDE); // TODO: Check behaviour with conjugated arrays BF_ASSERT(!BF_DTYPE_IS_COMPLEX(a->dtype) || !a->conjugated, BF_STATUS_UNSUPPORTED); trans_a = CUBLAS_OP_N; } else if( astrides[ndim-1] > astrides[ndim-2] ) { // Note: The fastest dim cannot be a batch dim BF_ASSERT(astrides[ndim-2] == 1, BF_STATUS_UNSUPPORTED_STRIDE); trans_a = (BF_DTYPE_IS_COMPLEX(a->dtype) && a->conjugated ? 
CUBLAS_OP_C : CUBLAS_OP_T); std::swap(astrides[ndim-1], astrides[ndim-2]); } else { // TODO: I think this actually occurs legitimately when shape[-1] = 1 BF_ASSERT(false, BF_STATUS_INVALID_STRIDE); } if( bstrides[ndim-1] < bstrides[ndim-2] ) { // Note: The fastest dim cannot be a batch dim BF_ASSERT(bstrides[ndim-1] == 1, BF_STATUS_UNSUPPORTED_STRIDE); // TODO: Check behaviour with conjugated arrays BF_ASSERT(!BF_DTYPE_IS_COMPLEX(b->dtype) || !b->conjugated, BF_STATUS_UNSUPPORTED); trans_b = CUBLAS_OP_N; } else if( bstrides[ndim-1] > bstrides[ndim-2] ) { // Note: The fastest dim cannot be a batch dim BF_ASSERT(bstrides[ndim-2] == 1, BF_STATUS_UNSUPPORTED_STRIDE); trans_b = (BF_DTYPE_IS_COMPLEX(b->dtype) && b->conjugated ? CUBLAS_OP_C : CUBLAS_OP_T); std::swap(bstrides[ndim-1], bstrides[ndim-2]); } else { BF_ASSERT(false, BF_STATUS_INVALID_STRIDE); } BF_ASSERT(cstrides[ndim-2] >= cstrides[ndim-1], BF_STATUS_UNSUPPORTED_STRIDE); if( nbatch > 1 ) { // Enable broadcasting in the kernel batch dim if( a->shape[batch_dim] == 1 ) { astrides[batch_dim] = 0; } if( b->shape[batch_dim] == 1 ) { bstrides[batch_dim] = 0; } } ShapeIndexer<BF_MAX_DIMS> shape_indexer(batch_shape, ndim); for( long i=0; i<shape_indexer.size(); ++i ) { auto inds = shape_indexer.at(i); void* a_data = array_get_pointer(a, inds); void* b_data = array_get_pointer(b, inds); void* c_data = array_get_pointer(c, inds); cuda::child_stream stream(g_cuda_stream); BF_CHECK( bfMatMul_ab_exec(handle, stream, trans_b, trans_a, c->shape[ndim-1], // m c->shape[ndim-2], // n a->shape[ndim-1], // k nbatch, alpha, // Note: We swap a and b here because // CUBLAS uses column-major // while we use row-major order. b_data, b->dtype, bstrides[ndim-2], bstrides[batch_dim], a_data, a->dtype, astrides[ndim-2], astrides[batch_dim], beta, c_data, c->dtype, cstrides[ndim-2], cstrides[batch_dim]) ); } return BF_STATUS_SUCCESS; } BFstatus bfLinAlgCreate(BFlinalg* handle_ptr) { BF_TRACE(); BF_ASSERT(handle_ptr, BF_STATUS_INVALID_POINTER); BF_TRY_RETURN_ELSE(*handle_ptr = new BFlinalg_impl(), *handle_ptr = 0); } BFstatus bfLinAlgDestroy(BFlinalg handle) { BF_TRACE(); BF_ASSERT(handle, BF_STATUS_INVALID_HANDLE); delete handle; return BF_STATUS_SUCCESS; } // Computes c = a.b, or a.a^H or b^H.b if either a or b are NULL BFstatus bfLinAlgMatMul(BFlinalg handle, double alpha, BFarray const* a, // [...,i,j] BFarray const* b, // [...,j,k] double beta, BFarray const* c) { // [...,i,k] BF_TRACE(); BF_ASSERT(handle, BF_STATUS_INVALID_HANDLE); BF_ASSERT(a || b, BF_STATUS_INVALID_ARGUMENT); BF_ASSERT(c, BF_STATUS_INVALID_POINTER); BF_ASSERT(space_accessible_from(c->space, BF_SPACE_CUDA), BF_STATUS_UNSUPPORTED_SPACE); if( a && b ) { BF_ASSERT(space_accessible_from(a->space, BF_SPACE_CUDA), BF_STATUS_UNSUPPORTED_SPACE); BF_ASSERT(space_accessible_from(b->space, BF_SPACE_CUDA), BF_STATUS_UNSUPPORTED_SPACE); return bfMatMul_ab(handle, alpha, a, b, beta, c); //BF_TRY_RETURN(bfMatMul_ab(handle, alpha, a, b, beta, c)); } else { BFarray const* input = a ? a : b; BF_ASSERT(space_accessible_from(input->space, BF_SPACE_CUDA), BF_STATUS_UNSUPPORTED_SPACE); bool adjoint = (input == b); // TODO: BF_TRY_RETURN return bfMatMul_aa(handle, alpha, input, adjoint, beta, c); } }
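// Standalone sketch (not Bifrost code): the row-major/column-major trick used in
// bfMatMul_ab above. cuBLAS expects column-major data, so a row-major product
// C[M][N] = A[M][K] * B[K][N] is obtained by computing C^T = B^T * A^T, i.e. by
// passing B first and A second. Device pointers d_A, d_B, d_C are assumed to be
// allocated and populated by the caller.
#include <cublas_v2.h>

cublasStatus_t sgemm_rowmajor(cublasHandle_t handle, int M, int N, int K,
                              const float* d_A, const float* d_B, float* d_C) {
  const float alpha = 1.0f;
  const float beta  = 0.0f;
  // Leading dimensions are the row-major row lengths: B is KxN (ld N),
  // A is MxK (ld K), and the result C is MxN (ld N).
  return cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                     N, M, K,
                     &alpha,
                     d_B, N,
                     d_A, K,
                     &beta,
                     d_C, N);
}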
// Avoid warnings in Qt includes with CUDA compiler #pragma GCC diagnostic ignored "-Wattributes" // Avoid warnings in Eigen includes with CUDA compiler #pragma diag_suppress code_is_unreachable #include "surfel_meshing/cuda_depth_processing.cuh" #include <libvis/cuda/cuda_util.h> #include <math_constants.h> #include "surfel_meshing/cuda_util.cuh" // Uncomment this to run CUDA kernels sequentially for debugging. // #define CUDA_SEQUENTIAL_CHECKS #ifdef WIN32 #define M_PI 3.14159265358979323846 #endif namespace vis { __global__ void BilateralFilteringAndDepthCutoffCUDAKernel( float denom_xy, float sigma_value_factor, int radius, int radius_squared, u16 value_to_ignore, u16 max_depth, float depth_valid_region_radius_squared, CUDABuffer_<u16> input_depth, CUDABuffer_<u16> output_depth) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < output_depth.width() && y < output_depth.height()) { int image_half_width = output_depth.width() / 2; int image_half_height = output_depth.height() / 2; float center_distance_squared = (x - image_half_width) * (x - image_half_width) + (y - image_half_height) * (y - image_half_height); if (center_distance_squared > depth_valid_region_radius_squared) { output_depth(y, x) = value_to_ignore; return; } // Depth cutoff. u16 center_value = input_depth(y, x); if (center_value == value_to_ignore || center_value > max_depth) { output_depth(y, x) = value_to_ignore; return; } // Bilateral filtering. const float adapted_sigma_value = center_value * sigma_value_factor; const float adapted_denom_value = 2.0f * adapted_sigma_value * adapted_sigma_value; float sum = 0; float weight = 0; const int min_y = max(static_cast<int>(0), static_cast<int>(y - radius)); const int max_y = min(static_cast<int>(output_depth.height() - 1), static_cast<int>(y + radius)); for (int sample_y = min_y; sample_y <= max_y; ++ sample_y) { const int dy = sample_y - y; const int min_x = max(static_cast<int>(0), static_cast<int>(x - radius)); const int max_x = min(static_cast<int>(output_depth.width() - 1), static_cast<int>(x + radius)); for (int sample_x = min_x; sample_x <= max_x; ++ sample_x) { const int dx = sample_x - x; const int grid_distance_squared = dx * dx + dy * dy; if (grid_distance_squared > radius_squared) { continue; } u16 sample = input_depth(sample_y, sample_x); if (sample == value_to_ignore) { continue; } float value_distance_squared = center_value - sample; value_distance_squared *= value_distance_squared; float w = exp(-grid_distance_squared / denom_xy + -value_distance_squared / adapted_denom_value); sum += w * sample; weight += w; } } output_depth(y, x) = (weight == 0) ? 
value_to_ignore : (sum / weight + 0.5f); } } void BilateralFilteringAndDepthCutoffCUDA( cudaStream_t stream, float sigma_xy, float sigma_value_factor, u16 value_to_ignore, float radius_factor, u16 max_depth, float depth_valid_region_radius, const CUDABuffer_<u16>& input_depth, CUDABuffer_<u16>* output_depth) { #ifdef CUDA_SEQUENTIAL_CHECKS cudaDeviceSynchronize(); #endif CHECK_CUDA_NO_ERROR(); int radius = radius_factor * sigma_xy + 0.5f; constexpr int kBlockWidth = 32; constexpr int kBlockHeight = 32; dim3 grid_dim(GetBlockCount(output_depth->width(), kBlockWidth), GetBlockCount(output_depth->height(), kBlockHeight)); dim3 block_dim(kBlockWidth, kBlockHeight); BilateralFilteringAndDepthCutoffCUDAKernel <<<grid_dim, block_dim, 0, stream>>>( 2.0f * sigma_xy * sigma_xy, sigma_value_factor, radius, radius * radius, value_to_ignore, max_depth, depth_valid_region_radius * depth_valid_region_radius, input_depth, *output_depth); #ifdef CUDA_SEQUENTIAL_CHECKS cudaDeviceSynchronize(); #endif CHECK_CUDA_NO_ERROR(); } // Helper to pass arrays to the kernel. template <int count, typename DepthT> struct OutlierDepthMapFusionCUDAKernelParam { CUDAMatrix3x4 other_TR_reference[count - 1]; CUDABuffer_<DepthT> other_depths[count - 1]; }; template <int count, typename DepthT> __global__ void OutlierDepthMapFusionCUDAKernel( float max_tolerance_factor, float min_tolerance_factor, CUDABuffer_<DepthT> input_depth, float fx, float fy, float cx, float cy, float fx_inv, float fy_inv, float cx_inv, float cy_inv, OutlierDepthMapFusionCUDAKernelParam<count, DepthT> p, CUDABuffer_<DepthT> output_depth) { constexpr int kOtherCount = count - 1; unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < output_depth.width() && y < output_depth.height()) { DepthT depth_value = input_depth(y, x); if (depth_value == 0) { output_depth(y, x) = 0; return; } float3 reference_point = make_float3(depth_value * (fx_inv * x + cx_inv), depth_value * (fy_inv * y + cy_inv), depth_value); // Project the pixel into the other depth maps to verify that there are enough close other depth estimates. bool ok = true; for (int other_index = 0; other_index < kOtherCount; ++ other_index) { float3 other_point = p.other_TR_reference[other_index] * reference_point; if (other_point.z <= 0) { ok = false; break; } float2 pixel_pos = make_float2(fx * (other_point.x / other_point.z) + cx, fy * (other_point.y / other_point.z) + cy); // TODO: for pixel_pos.x or .y in ]-1, 0] this will also treat the pixel as in the image int px = static_cast<int>(pixel_pos.x); int py = static_cast<int>(pixel_pos.y); if (px < 0 || py < 0 || px >= output_depth.width() || py >= output_depth.height()) { ok = false; break; } DepthT other_depth_value = p.other_depths[other_index](py, px); if (other_depth_value <= 0 || other_depth_value > max_tolerance_factor * other_point.z || other_depth_value < min_tolerance_factor * other_point.z) { ok = false; break; } } output_depth(y, x) = ok ? 
depth_value : 0; } } template <int count, typename DepthT> void OutlierDepthMapFusionCUDA( cudaStream_t stream, float tolerance, const CUDABuffer_<DepthT>& input_depth, float depth_fx, float depth_fy, float depth_cx, float depth_cy, const CUDABuffer_<DepthT>** other_depths, const CUDAMatrix3x4* others_TR_reference, CUDABuffer_<u16>* output_depth) { #ifdef CUDA_SEQUENTIAL_CHECKS cudaDeviceSynchronize(); #endif CHECK_CUDA_NO_ERROR(); constexpr int kOtherCount = count - 1; OutlierDepthMapFusionCUDAKernelParam<count, DepthT> p; for (int i = 0; i < kOtherCount; ++ i) { p.other_TR_reference[i] = others_TR_reference[i]; p.other_depths[i] = *other_depths[i]; } const float max_tolerance_factor = 1 + tolerance; const float min_tolerance_factor = 1 - tolerance; // Unprojection intrinsics for pixel center convention. const float fx_inv = 1.0f / depth_fx; const float fy_inv = 1.0f / depth_fy; const float cx_pixel_center = depth_cx - 0.5f; const float cy_pixel_center = depth_cy - 0.5f; const float cx_inv_pixel_center = -cx_pixel_center / depth_fx; const float cy_inv_pixel_center = -cy_pixel_center / depth_fy; constexpr int kBlockWidth = 32; constexpr int kBlockHeight = 32; dim3 grid_dim(GetBlockCount(output_depth->width(), kBlockWidth), GetBlockCount(output_depth->height(), kBlockHeight)); dim3 block_dim(kBlockWidth, kBlockHeight); OutlierDepthMapFusionCUDAKernel<count, DepthT> <<<grid_dim, block_dim, 0, stream>>>( max_tolerance_factor, min_tolerance_factor, input_depth, depth_fx, depth_fy, depth_cx, depth_cy, fx_inv, fy_inv, cx_inv_pixel_center, cy_inv_pixel_center, p, *output_depth); #ifdef CUDA_SEQUENTIAL_CHECKS cudaDeviceSynchronize(); #endif CHECK_CUDA_NO_ERROR(); } template void OutlierDepthMapFusionCUDA<9, u16>( cudaStream_t stream, float tolerance, const CUDABuffer_<u16>& input_depth, float depth_fx, float depth_fy, float depth_cx, float depth_cy, const CUDABuffer_<u16>** other_depths, const CUDAMatrix3x4* others_TR_reference, CUDABuffer_<u16>* output_depth); template void OutlierDepthMapFusionCUDA<7, u16>( cudaStream_t stream, float tolerance, const CUDABuffer_<u16>& input_depth, float depth_fx, float depth_fy, float depth_cx, float depth_cy, const CUDABuffer_<u16>** other_depths, const CUDAMatrix3x4* others_TR_reference, CUDABuffer_<u16>* output_depth); template void OutlierDepthMapFusionCUDA<5, u16>( cudaStream_t stream, float tolerance, const CUDABuffer_<u16>& input_depth, float depth_fx, float depth_fy, float depth_cx, float depth_cy, const CUDABuffer_<u16>** other_depths, const CUDAMatrix3x4* others_TR_reference, CUDABuffer_<u16>* output_depth); template void OutlierDepthMapFusionCUDA<3, u16>( cudaStream_t stream, float tolerance, const CUDABuffer_<u16>& input_depth, float depth_fx, float depth_fy, float depth_cx, float depth_cy, const CUDABuffer_<u16>** other_depths, const CUDAMatrix3x4* others_TR_reference, CUDABuffer_<u16>* output_depth); template <int count, typename DepthT> __global__ void OutlierDepthMapFusionCUDAKernel( int required_count, float max_tolerance_factor, float min_tolerance_factor, CUDABuffer_<DepthT> input_depth, float fx, float fy, float cx, float cy, float fx_inv, float fy_inv, float cx_inv, float cy_inv, OutlierDepthMapFusionCUDAKernelParam<count, DepthT> p, CUDABuffer_<DepthT> output_depth) { constexpr int kOtherCount = count - 1; unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < output_depth.width() && y < output_depth.height()) { DepthT depth_value = input_depth(y, x); if (depth_value == 0) { 
output_depth(y, x) = 0; return; } float3 reference_point = make_float3(depth_value * (fx_inv * x + cx_inv), depth_value * (fy_inv * y + cy_inv), depth_value); // Project the pixel into the other depth maps to verify that there are enough close other depth estimates. int ok_count = 0; for (int other_index = 0; other_index < kOtherCount; ++ other_index) { float3 other_point = p.other_TR_reference[other_index] * reference_point; if (other_point.z <= 0) { continue; } float2 pixel_pos = make_float2(fx * (other_point.x / other_point.z) + cx, fy * (other_point.y / other_point.z) + cy); // TODO: for pixel_pos.x or .y in ]-1, 0] this will also treat the pixel as in the image int px = static_cast<int>(pixel_pos.x); int py = static_cast<int>(pixel_pos.y); if (px < 0 || py < 0 || px >= output_depth.width() || py >= output_depth.height()) { continue; } DepthT other_depth_value = p.other_depths[other_index](py, px); if (other_depth_value <= 0 || other_depth_value > max_tolerance_factor * other_point.z || other_depth_value < min_tolerance_factor * other_point.z) { continue; } // TODO: Break if required_count cannot be achieved anymore given the number of remaining other depth maps to check? ++ ok_count; } output_depth(y, x) = (ok_count >= required_count) ? depth_value : 0; } } template <int count, typename DepthT> void OutlierDepthMapFusionCUDA( cudaStream_t stream, int required_count, float tolerance, const CUDABuffer_<DepthT>& input_depth, float depth_fx, float depth_fy, float depth_cx, float depth_cy, const CUDABuffer_<DepthT>** other_depths, const CUDAMatrix3x4* others_TR_reference, CUDABuffer_<u16>* output_depth) { #ifdef CUDA_SEQUENTIAL_CHECKS cudaDeviceSynchronize(); #endif CHECK_CUDA_NO_ERROR(); constexpr int kOtherCount = count - 1; OutlierDepthMapFusionCUDAKernelParam<count, DepthT> p; for (int i = 0; i < kOtherCount; ++ i) { p.other_TR_reference[i] = others_TR_reference[i]; p.other_depths[i] = *other_depths[i]; } const float max_tolerance_factor = 1 + tolerance; const float min_tolerance_factor = 1 - tolerance; // Unprojection intrinsics for pixel center convention. 
const float fx_inv = 1.0f / depth_fx; const float fy_inv = 1.0f / depth_fy; const float cx_pixel_center = depth_cx - 0.5f; const float cy_pixel_center = depth_cy - 0.5f; const float cx_inv_pixel_center = -cx_pixel_center / depth_fx; const float cy_inv_pixel_center = -cy_pixel_center / depth_fy; constexpr int kBlockWidth = 32; constexpr int kBlockHeight = 32; dim3 grid_dim(GetBlockCount(output_depth->width(), kBlockWidth), GetBlockCount(output_depth->height(), kBlockHeight)); dim3 block_dim(kBlockWidth, kBlockHeight); OutlierDepthMapFusionCUDAKernel<count, DepthT> <<<grid_dim, block_dim, 0, stream>>>( required_count, max_tolerance_factor, min_tolerance_factor, input_depth, depth_fx, depth_fy, depth_cx, depth_cy, fx_inv, fy_inv, cx_inv_pixel_center, cy_inv_pixel_center, p, *output_depth); #ifdef CUDA_SEQUENTIAL_CHECKS cudaDeviceSynchronize(); #endif CHECK_CUDA_NO_ERROR(); } template void OutlierDepthMapFusionCUDA<9, u16>( cudaStream_t stream, int required_count, float tolerance, const CUDABuffer_<u16>& input_depth, float depth_fx, float depth_fy, float depth_cx, float depth_cy, const CUDABuffer_<u16>** other_depths, const CUDAMatrix3x4* others_TR_reference, CUDABuffer_<u16>* output_depth); template void OutlierDepthMapFusionCUDA<7, u16>( cudaStream_t stream, int required_count, float tolerance, const CUDABuffer_<u16>& input_depth, float depth_fx, float depth_fy, float depth_cx, float depth_cy, const CUDABuffer_<u16>** other_depths, const CUDAMatrix3x4* others_TR_reference, CUDABuffer_<u16>* output_depth); template void OutlierDepthMapFusionCUDA<5, u16>( cudaStream_t stream, int required_count, float tolerance, const CUDABuffer_<u16>& input_depth, float depth_fx, float depth_fy, float depth_cx, float depth_cy, const CUDABuffer_<u16>** other_depths, const CUDAMatrix3x4* others_TR_reference, CUDABuffer_<u16>* output_depth); template void OutlierDepthMapFusionCUDA<3, u16>( cudaStream_t stream, int required_count, float tolerance, const CUDABuffer_<u16>& input_depth, float depth_fx, float depth_fy, float depth_cx, float depth_cy, const CUDABuffer_<u16>** other_depths, const CUDAMatrix3x4* others_TR_reference, CUDABuffer_<u16>* output_depth); // TODO: This is potentially faster using a box filter. template <int radius, typename DepthT> __global__ void ErodeDepthMapCUDAKernel( CUDABuffer_<DepthT> input_depth, CUDABuffer_<DepthT> output_depth) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < output_depth.width() && y < output_depth.height()) { if (x < radius || y < radius || x >= output_depth.width() - radius || y >= output_depth.height() - radius) { output_depth(y, x) = 0; } else { bool all_valid = true; for (int dy = y - radius; dy <= y + radius; ++ dy) { for (int dx = x - radius; dx <= x + radius; ++ dx) { if (input_depth(dy, dx) == 0) { all_valid = false; } } } output_depth(y, x) = all_valid ? 
input_depth(y, x) : 0; } } } template <typename DepthT> void ErodeDepthMapCUDA( cudaStream_t stream, int radius, const CUDABuffer_<DepthT>& input_depth, CUDABuffer_<DepthT>* output_depth) { #ifdef CUDA_SEQUENTIAL_CHECKS cudaDeviceSynchronize(); #endif CHECK_CUDA_NO_ERROR(); constexpr int kBlockWidth = 32; constexpr int kBlockHeight = 32; dim3 grid_dim(GetBlockCount(output_depth->width(), kBlockWidth), GetBlockCount(output_depth->height(), kBlockHeight)); dim3 block_dim(kBlockWidth, kBlockHeight); if (radius == 1) { ErodeDepthMapCUDAKernel<1, DepthT> <<<grid_dim, block_dim, 0, stream>>>( input_depth, *output_depth); } else if (radius == 2) { ErodeDepthMapCUDAKernel<2, DepthT> <<<grid_dim, block_dim, 0, stream>>>( input_depth, *output_depth); } else if (radius == 3) { ErodeDepthMapCUDAKernel<3, DepthT> <<<grid_dim, block_dim, 0, stream>>>( input_depth, *output_depth); } else { LOG(FATAL) << "radius value of " << radius << " is not supported."; } #ifdef CUDA_SEQUENTIAL_CHECKS cudaDeviceSynchronize(); #endif CHECK_CUDA_NO_ERROR(); } template void ErodeDepthMapCUDA<u16>( cudaStream_t stream, int radius, const CUDABuffer_<u16>& input_depth, CUDABuffer_<u16>* output_depth); template <typename DepthT> __global__ void CopyWithoutBorderCUDAKernel( CUDABuffer_<DepthT> input_depth, CUDABuffer_<DepthT> output_depth) { constexpr int kBorderSize = 1; unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < output_depth.width() && y < output_depth.height()) { if (x < kBorderSize || y < kBorderSize || x >= output_depth.width() - kBorderSize || y >= output_depth.height() - kBorderSize) { output_depth(y, x) = 0; } else { output_depth(y, x) = input_depth(y, x); } } } template <typename DepthT> void CopyWithoutBorderCUDA( cudaStream_t stream, const CUDABuffer_<DepthT>& input_depth, CUDABuffer_<DepthT>* output_depth) { #ifdef CUDA_SEQUENTIAL_CHECKS cudaDeviceSynchronize(); #endif CHECK_CUDA_NO_ERROR(); constexpr int kBlockWidth = 32; constexpr int kBlockHeight = 32; dim3 grid_dim(GetBlockCount(output_depth->width(), kBlockWidth), GetBlockCount(output_depth->height(), kBlockHeight)); dim3 block_dim(kBlockWidth, kBlockHeight); CopyWithoutBorderCUDAKernel<DepthT> <<<grid_dim, block_dim, 0, stream>>>( input_depth, *output_depth); #ifdef CUDA_SEQUENTIAL_CHECKS cudaDeviceSynchronize(); #endif CHECK_CUDA_NO_ERROR(); } template void CopyWithoutBorderCUDA<u16>( cudaStream_t stream, const CUDABuffer_<u16>& input_depth, CUDABuffer_<u16>* output_depth); __global__ void ComputeNormalsAndDropBadPixelsCUDAKernel( float normal_dot_threshold, float inv_depth_scaling, float fx_inv, float fy_inv, float cx_inv, float cy_inv, CUDABuffer_<u16> in_depth, CUDABuffer_<u16> out_depth, CUDABuffer_<float2> out_normals) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < in_depth.width() && y < in_depth.height()) { if (in_depth(y, x) == 0) { out_depth(y, x) = 0; out_normals(y, x) = make_float2(0, 0); return; } u16 right_depth = in_depth(y, x + 1); u16 left_depth = in_depth(y, x - 1); u16 bottom_depth = in_depth(y + 1, x); u16 top_depth = in_depth(y - 1, x); if (right_depth == 0 || left_depth == 0 || bottom_depth == 0 || top_depth == 0) { out_depth(y, x) = 0; out_normals(y, x) = make_float2(0, 0); return; } float3 left_point; UnprojectPoint(x - 1, y, inv_depth_scaling * left_depth, fx_inv, fy_inv, cx_inv, cy_inv, &left_point); float3 top_point; UnprojectPoint(x, y - 1, inv_depth_scaling * top_depth, fx_inv, 
fy_inv, cx_inv, cy_inv, &top_point); float3 right_point; UnprojectPoint(x + 1, y, inv_depth_scaling * right_depth, fx_inv, fy_inv, cx_inv, cy_inv, &right_point); float3 bottom_point; UnprojectPoint(x, y + 1, inv_depth_scaling * bottom_depth, fx_inv, fy_inv, cx_inv, cy_inv, &bottom_point); float3 left_to_right = make_float3(right_point.x - left_point.x, right_point.y - left_point.y, right_point.z - left_point.z); float3 bottom_to_top = make_float3(top_point.x - bottom_point.x, top_point.y - bottom_point.y, top_point.z - bottom_point.z); float3 normal; CrossProduct(left_to_right, bottom_to_top, &normal); float length = Norm(normal); if (!(length > 1e-6f)) { normal = make_float3(0, 0, -1); // avoid NaNs } else { float inv_length = ((fy_inv < 0) ? -1.0f : 1.0f) / length; // Account for negative fy in ICL-NUIM data normal = make_float3(normal.x * inv_length, normal.y * inv_length, normal.z * inv_length); } out_normals(y, x) = make_float2(normal.x, normal.y); // Discard depth if the normal points too far away from the viewing direction. float3 viewing_direction = make_float3(fx_inv * x + cx_inv, fy_inv * y + cy_inv, 1); float inv_dir_length = 1.0f / Norm(viewing_direction); viewing_direction = make_float3(inv_dir_length * viewing_direction.x, inv_dir_length * viewing_direction.y, inv_dir_length * viewing_direction.z); float dot = viewing_direction.x * normal.x + viewing_direction.y * normal.y + viewing_direction.z * normal.z; out_depth(y, x) = (dot >= normal_dot_threshold) ? 0 : in_depth(y, x); } } void ComputeNormalsAndDropBadPixelsCUDA( cudaStream_t stream, float observation_angle_threshold_deg, float depth_scaling, float depth_fx, float depth_fy, float depth_cx, float depth_cy, const CUDABuffer_<u16>& in_depth, CUDABuffer_<u16>* out_depth, CUDABuffer_<float2>* out_normals) { #ifdef CUDA_SEQUENTIAL_CHECKS cudaDeviceSynchronize(); #endif CHECK_CUDA_NO_ERROR(); // Unprojection intrinsics for pixel center convention. 
const float fx_inv = 1.0f / depth_fx; const float fy_inv = 1.0f / depth_fy; const float cx_pixel_center = depth_cx - 0.5f; const float cy_pixel_center = depth_cy - 0.5f; const float cx_inv_pixel_center = -cx_pixel_center / depth_fx; const float cy_inv_pixel_center = -cy_pixel_center / depth_fy; constexpr int kBlockWidth = 32; constexpr int kBlockHeight = 32; dim3 grid_dim(GetBlockCount(out_depth->width(), kBlockWidth), GetBlockCount(out_depth->height(), kBlockHeight)); dim3 block_dim(kBlockWidth, kBlockHeight); ComputeNormalsAndDropBadPixelsCUDAKernel <<<grid_dim, block_dim, 0, stream>>>( -1 * cosf(M_PI / 180.f * observation_angle_threshold_deg), 1.0f / depth_scaling, fx_inv, fy_inv, cx_inv_pixel_center, cy_inv_pixel_center, in_depth, *out_depth, *out_normals); #ifdef CUDA_SEQUENTIAL_CHECKS cudaDeviceSynchronize(); #endif CHECK_CUDA_NO_ERROR(); } __global__ void ComputePointRadiiAndRemoveIsolatedPixelsCUDAKernel( float point_radius_extension_factor_squared, float clamp_factor_term, float inv_depth_scaling, float fx_inv, float fy_inv, float cx_inv, float cy_inv, CUDABuffer_<u16> depth_buffer, CUDABuffer_<float> radius_buffer, CUDABuffer_<u16> out_depth) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < depth_buffer.width() && y < depth_buffer.height()) { if (depth_buffer(y, x) == 0) { out_depth(y, x) = 0; return; } float depth = inv_depth_scaling * depth_buffer(y, x); float3 local_position = make_float3(depth * (fx_inv * x + cx_inv), depth * (fy_inv * y + cy_inv), depth); // Determine the radius of the surfel such that it can connect to its // 8-neighborhood, however clamp the radius such that it is not larger // than the distance to a 4-neighbor at a given maximum angle to the // camera's optical axis. int neighbor_count = 0; float radius_squared = 0; float min_neighbor_distance_squared = CUDART_INF_F; for (int dy = y - 1, end_dy = y + 2; dy < end_dy; ++ dy) { for (int dx = x - 1, end_dx = x + 2; dx < end_dx; ++ dx) { float ddepth = inv_depth_scaling * depth_buffer(dy, dx); if ((dx == x && dy == y) || ddepth <= 0) { continue; } ++ neighbor_count; float3 other_point = make_float3(ddepth * (fx_inv * dx + cx_inv), ddepth * (fy_inv * dy + cy_inv), ddepth); float3 local_to_other = make_float3(other_point.x - local_position.x, other_point.y - local_position.y, other_point.z - local_position.z); float distance_squared = local_to_other.x * local_to_other.x + local_to_other.y * local_to_other.y + local_to_other.z * local_to_other.z; if (distance_squared > radius_squared) { radius_squared = distance_squared; } if (distance_squared < min_neighbor_distance_squared) { min_neighbor_distance_squared = distance_squared; } } } radius_squared *= point_radius_extension_factor_squared; float distance_squared_clamp = clamp_factor_term * min_neighbor_distance_squared; if (radius_squared > distance_squared_clamp) { radius_squared = distance_squared_clamp; } // If we only have neighbors on one side, the radius computation will be // affected since the angle of the surface cannot be determined properly. // Require at least a reasonable number of neighbors. Use 8 for the most // noise-free results. constexpr int kMinNeighborPixelsForRadiusComputation = 8; radius_buffer(y, x) = radius_squared; out_depth(y, x) = (neighbor_count < kMinNeighborPixelsForRadiusComputation) ? 
0 : depth_buffer(y, x); } } void ComputePointRadiiAndRemoveIsolatedPixelsCUDA( cudaStream_t stream, float point_radius_extension_factor, float point_radius_clamp_factor, float depth_scaling, float depth_fx, float depth_fy, float depth_cx, float depth_cy, const CUDABuffer_<u16>& depth_buffer, CUDABuffer_<float>* radius_buffer, CUDABuffer_<u16>* out_depth) { #ifdef CUDA_SEQUENTIAL_CHECKS cudaDeviceSynchronize(); #endif CHECK_CUDA_NO_ERROR(); // Unprojection intrinsics for pixel center convention. const float fx_inv = 1.0f / depth_fx; const float fy_inv = 1.0f / depth_fy; const float cx_pixel_center = depth_cx - 0.5f; const float cy_pixel_center = depth_cy - 0.5f; const float cx_inv_pixel_center = -cx_pixel_center / depth_fx; const float cy_inv_pixel_center = -cy_pixel_center / depth_fy; constexpr int kBlockWidth = 32; constexpr int kBlockHeight = 32; dim3 grid_dim(GetBlockCount(depth_buffer.width(), kBlockWidth), GetBlockCount(depth_buffer.height(), kBlockHeight)); dim3 block_dim(kBlockWidth, kBlockHeight); ComputePointRadiiAndRemoveIsolatedPixelsCUDAKernel <<<grid_dim, block_dim, 0, stream>>>( point_radius_extension_factor * point_radius_extension_factor, point_radius_clamp_factor * point_radius_clamp_factor * sqrtf(2) * sqrtf(2), 1.0f / depth_scaling, fx_inv, fy_inv, cx_inv_pixel_center, cy_inv_pixel_center, depth_buffer, *radius_buffer, *out_depth); #ifdef CUDA_SEQUENTIAL_CHECKS cudaDeviceSynchronize(); #endif CHECK_CUDA_NO_ERROR(); } }
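// --- Added illustrative sketch -----------------------------------------------
// A minimal host-side sketch of the per-pixel test performed by
// ComputeNormalsAndDropBadPixelsCUDAKernel above. It assumes the same pinhole
// unprojection model as the kernels (X = d*(fx_inv*x + cx_inv), ...), uses plain
// std types instead of CUDABuffer_/float3, and omits the ICL-NUIM fy sign
// handling. Names such as Vec3 and PixelPassesObservationAngleTest are
// illustrative only, not part of the project's API.
#include <cmath>

struct Vec3 { float x, y, z; };

static Vec3 Unproject(float px, float py, float d,
                      float fx_inv, float fy_inv, float cx_inv, float cy_inv) {
  return {d * (fx_inv * px + cx_inv), d * (fy_inv * py + cy_inv), d};
}

// Returns true if the pixel is kept, i.e. its surface normal faces the camera
// closely enough; mirrors the kernel's "dot >= normal_dot_threshold -> drop"
// rule, where normal_dot_threshold = -cos(observation_angle_threshold).
static bool PixelPassesObservationAngleTest(
    float x, float y, float d_left, float d_right, float d_top, float d_bottom,
    float fx_inv, float fy_inv, float cx_inv, float cy_inv,
    float normal_dot_threshold) {
  Vec3 l = Unproject(x - 1, y, d_left,   fx_inv, fy_inv, cx_inv, cy_inv);
  Vec3 r = Unproject(x + 1, y, d_right,  fx_inv, fy_inv, cx_inv, cy_inv);
  Vec3 t = Unproject(x, y - 1, d_top,    fx_inv, fy_inv, cx_inv, cy_inv);
  Vec3 b = Unproject(x, y + 1, d_bottom, fx_inv, fy_inv, cx_inv, cy_inv);

  // Normal = (right - left) x (top - bottom), normalized.
  Vec3 dx{r.x - l.x, r.y - l.y, r.z - l.z};
  Vec3 dy{t.x - b.x, t.y - b.y, t.z - b.z};
  Vec3 n{dx.y * dy.z - dx.z * dy.y,
         dx.z * dy.x - dx.x * dy.z,
         dx.x * dy.y - dx.y * dy.x};
  float len = std::sqrt(n.x * n.x + n.y * n.y + n.z * n.z);
  if (!(len > 1e-6f)) {
    n = {0.f, 0.f, -1.f};  // degenerate case, as in the kernel
  } else {
    n = {n.x / len, n.y / len, n.z / len};
  }

  // Unit viewing direction through the pixel.
  Vec3 v{fx_inv * x + cx_inv, fy_inv * y + cy_inv, 1.f};
  float vlen = std::sqrt(v.x * v.x + v.y * v.y + v.z * v.z);
  v = {v.x / vlen, v.y / vlen, v.z / vlen};

  float dot = v.x * n.x + v.y * n.y + v.z * n.z;
  return dot < normal_dot_threshold;  // keep only sufficiently front-facing points
}
// -----------------------------------------------------------------------------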
#include <raft/cuda_utils.cuh> #include <rmm/device_uvector.hpp> #include <thrust/for_each.h> #include <numeric> namespace cugraph { namespace detail { template <typename T> rmm::device_uvector<T> append_all(raft::handle_t const& handle, std::vector<rmm::device_uvector<T>>&& input) { size_t size{0}; // for (size_t i = 0; i < input.size(); ++i) size += input[i].size(); for (auto& element : input) size += element.size(); rmm::device_uvector<T> output(size, handle.get_stream()); auto output_iter = output.begin(); for (auto& element : input) { raft::copy(output_iter, element.begin(), element.size(), handle.get_stream()); output_iter += element.size(); } /* for (size_t i = 0; i < input.size(); ++i) { raft::copy(output_iter, input[i].begin(), input[i].size(), handle.get_stream()); output_iter += input[i].size(); } */ return output; } } // namespace detail template <typename vertex_t> void scramble_vertex_ids(raft::handle_t const& handle, rmm::device_uvector<vertex_t>& d_src_v, rmm::device_uvector<vertex_t>& d_dst_v, vertex_t vertex_id_offset, uint64_t seed) { vertex_t scale = 1 + raft::log2(d_src_v.size()); auto pair_first = thrust::make_zip_iterator(thrust::make_tuple(d_src_v.begin(), d_dst_v.begin())); thrust::transform(handle.get_thrust_policy(), pair_first, pair_first + d_src_v.size(), pair_first, [scale] __device__(auto pair) { return thrust::make_tuple(detail::scramble(thrust::get<0>(pair), scale), detail::scramble(thrust::get<1>(pair), scale)); }); } template <typename vertex_t, typename weight_t> std::tuple<rmm::device_uvector<vertex_t>, rmm::device_uvector<vertex_t>, std::optional<rmm::device_uvector<weight_t>>> combine_edgelists(raft::handle_t const& handle, std::vector<rmm::device_uvector<vertex_t>>&& sources, std::vector<rmm::device_uvector<vertex_t>>&& dests, std::optional<std::vector<rmm::device_uvector<weight_t>>>&& optional_d_weights, bool remove_multi_edges) { CUGRAPH_EXPECTS(sources.size() == dests.size(), "sources and dests vertex lists must be the same size"); if (optional_d_weights) { CUGRAPH_EXPECTS(sources.size() == optional_d_weights.value().size(), "has_weights is specified, sources and weights must be the same size"); thrust::for_each_n( thrust::host, thrust::make_zip_iterator( thrust::make_tuple(sources.begin(), dests.begin(), optional_d_weights.value().begin())), sources.size(), [](auto tuple) { CUGRAPH_EXPECTS(thrust::get<0>(tuple).size() != thrust::get<1>(tuple).size(), "source vertex and dest vertex uvectors must be same size"); CUGRAPH_EXPECTS(thrust::get<0>(tuple).size() != thrust::get<2>(tuple).size(), "source vertex and weights uvectors must be same size"); }); } else { thrust::for_each_n( thrust::host, thrust::make_zip_iterator(thrust::make_tuple(sources.begin(), dests.begin())), sources.size(), [](auto tuple) { CUGRAPH_EXPECTS(thrust::get<0>(tuple).size() == thrust::get<1>(tuple).size(), "source vertex and dest vertex uvectors must be same size"); }); } std::vector<rmm::device_uvector<weight_t>> d_weights; rmm::device_uvector<vertex_t> srcs_v(0, handle.get_stream()); rmm::device_uvector<vertex_t> dsts_v(0, handle.get_stream()); rmm::device_uvector<weight_t> weights_v(0, handle.get_stream()); srcs_v = detail::append_all<vertex_t>(handle, std::move(sources)); dsts_v = detail::append_all<vertex_t>(handle, std::move(dests)); if (optional_d_weights) { weights_v = detail::append_all(handle, std::move(optional_d_weights.value())); } if (remove_multi_edges) { size_t number_of_edges{srcs_v.size()}; if (optional_d_weights) { thrust::sort( handle.get_thrust_policy(), 
thrust::make_zip_iterator( thrust::make_tuple(srcs_v.begin(), dsts_v.begin(), weights_v.begin())), thrust::make_zip_iterator(thrust::make_tuple(srcs_v.end(), dsts_v.end(), weights_v.end()))); auto pair_first = thrust::make_zip_iterator(thrust::make_tuple(srcs_v.begin(), dsts_v.begin())); auto end_iter = thrust::unique_by_key( handle.get_thrust_policy(), pair_first, pair_first + srcs_v.size(), weights_v.begin()); number_of_edges = thrust::distance(pair_first, thrust::get<0>(end_iter)); } else { thrust::sort(handle.get_thrust_policy(), thrust::make_zip_iterator(thrust::make_tuple(srcs_v.begin(), dsts_v.begin())), thrust::make_zip_iterator(thrust::make_tuple(srcs_v.end(), dsts_v.end()))); auto pair_first = thrust::make_zip_iterator(thrust::make_tuple(srcs_v.begin(), dsts_v.begin())); auto end_iter = thrust::unique( handle.get_thrust_policy(), thrust::make_zip_iterator(thrust::make_tuple(srcs_v.begin(), dsts_v.begin())), thrust::make_zip_iterator(thrust::make_tuple(srcs_v.end(), dsts_v.end()))); number_of_edges = thrust::distance(pair_first, end_iter); } srcs_v.resize(number_of_edges, handle.get_stream()); srcs_v.shrink_to_fit(handle.get_stream()); dsts_v.resize(number_of_edges, handle.get_stream()); dsts_v.shrink_to_fit(handle.get_stream()); if (optional_d_weights) { weights_v.resize(number_of_edges, handle.get_stream()); weights_v.shrink_to_fit(handle.get_stream()); } } return std::make_tuple( std::move(srcs_v), std::move(dsts_v), optional_d_weights ? std::move(std::optional<rmm::device_uvector<weight_t>>(std::move(weights_v))) : std::nullopt); } template <typename vertex_t, typename weight_t> std::tuple<rmm::device_uvector<vertex_t>, rmm::device_uvector<vertex_t>, std::optional<rmm::device_uvector<weight_t>>> symmetrize_edgelist_from_triangular( raft::handle_t const& handle, rmm::device_uvector<vertex_t>&& d_src_v, rmm::device_uvector<vertex_t>&& d_dst_v, std::optional<rmm::device_uvector<weight_t>>&& optional_d_weights_v, bool check_diagonal) { auto num_strictly_triangular_edges = d_src_v.size(); if (check_diagonal) { if (optional_d_weights_v) { auto edge_first = thrust::make_zip_iterator( thrust::make_tuple(d_src_v.begin(), d_dst_v.begin(), (*optional_d_weights_v).begin())); auto strictly_triangular_last = thrust::partition( handle.get_thrust_policy(), edge_first, edge_first + d_src_v.size(), [] __device__(auto e) { return thrust::get<0>(e) != thrust::get<1>(e); }); num_strictly_triangular_edges = static_cast<size_t>(thrust::distance(edge_first, strictly_triangular_last)); } else { auto edge_first = thrust::make_zip_iterator(thrust::make_tuple(d_src_v.begin(), d_dst_v.begin())); auto strictly_triangular_last = thrust::partition( handle.get_thrust_policy(), edge_first, edge_first + d_src_v.size(), [] __device__(auto e) { return thrust::get<0>(e) != thrust::get<1>(e); }); num_strictly_triangular_edges = static_cast<size_t>(thrust::distance(edge_first, strictly_triangular_last)); } } auto offset = d_src_v.size(); d_src_v.resize(offset + num_strictly_triangular_edges, handle.get_stream_view()); d_dst_v.resize(offset + num_strictly_triangular_edges, handle.get_stream_view()); thrust::copy(handle.get_thrust_policy(), d_dst_v.begin(), d_dst_v.begin() + num_strictly_triangular_edges, d_src_v.begin() + offset); thrust::copy(handle.get_thrust_policy(), d_src_v.begin(), d_src_v.begin() + num_strictly_triangular_edges, d_dst_v.begin() + offset); if (optional_d_weights_v) { optional_d_weights_v->resize(d_src_v.size(), handle.get_stream_view()); thrust::copy(handle.get_thrust_policy(), 
optional_d_weights_v->begin(), optional_d_weights_v->begin() + num_strictly_triangular_edges, optional_d_weights_v->begin() + offset); } return std::make_tuple(std::move(d_src_v), std::move(d_dst_v), optional_d_weights_v ? std::move(optional_d_weights_v) : std::nullopt); } template void scramble_vertex_ids(raft::handle_t const& handle, rmm::device_uvector<int32_t>& d_src_v, rmm::device_uvector<int32_t>& d_dst_v, int32_t vertex_id_offset, uint64_t seed); template void scramble_vertex_ids(raft::handle_t const& handle, rmm::device_uvector<int64_t>& d_src_v, rmm::device_uvector<int64_t>& d_dst_v, int64_t vertex_id_offset, uint64_t seed); template std::tuple<rmm::device_uvector<int32_t>, rmm::device_uvector<int32_t>, std::optional<rmm::device_uvector<float>>> combine_edgelists(raft::handle_t const& handle, std::vector<rmm::device_uvector<int32_t>>&& sources, std::vector<rmm::device_uvector<int32_t>>&& dests, std::optional<std::vector<rmm::device_uvector<float>>>&& optional_d_weights, bool remove_multi_edges); template std::tuple<rmm::device_uvector<int64_t>, rmm::device_uvector<int64_t>, std::optional<rmm::device_uvector<float>>> combine_edgelists(raft::handle_t const& handle, std::vector<rmm::device_uvector<int64_t>>&& sources, std::vector<rmm::device_uvector<int64_t>>&& dests, std::optional<std::vector<rmm::device_uvector<float>>>&& optional_d_weights, bool remove_multi_edges); template std::tuple<rmm::device_uvector<int32_t>, rmm::device_uvector<int32_t>, std::optional<rmm::device_uvector<double>>> combine_edgelists(raft::handle_t const& handle, std::vector<rmm::device_uvector<int32_t>>&& sources, std::vector<rmm::device_uvector<int32_t>>&& dests, std::optional<std::vector<rmm::device_uvector<double>>>&& optional_d_weights, bool remove_multi_edges); template std::tuple<rmm::device_uvector<int64_t>, rmm::device_uvector<int64_t>, std::optional<rmm::device_uvector<double>>> combine_edgelists(raft::handle_t const& handle, std::vector<rmm::device_uvector<int64_t>>&& sources, std::vector<rmm::device_uvector<int64_t>>&& dests, std::optional<std::vector<rmm::device_uvector<double>>>&& optional_d_weights, bool remove_multi_edges); template std::tuple<rmm::device_uvector<int32_t>, rmm::device_uvector<int32_t>, std::optional<rmm::device_uvector<float>>> symmetrize_edgelist_from_triangular( raft::handle_t const& handle, rmm::device_uvector<int32_t>&& d_src_v, rmm::device_uvector<int32_t>&& d_dst_v, std::optional<rmm::device_uvector<float>>&& optional_d_weights_v, bool check_diagonal); template std::tuple<rmm::device_uvector<int64_t>, rmm::device_uvector<int64_t>, std::optional<rmm::device_uvector<float>>> symmetrize_edgelist_from_triangular( raft::handle_t const& handle, rmm::device_uvector<int64_t>&& d_src_v, rmm::device_uvector<int64_t>&& d_dst_v, std::optional<rmm::device_uvector<float>>&& optional_d_weights_v, bool check_diagonal); template std::tuple<rmm::device_uvector<int32_t>, rmm::device_uvector<int32_t>, std::optional<rmm::device_uvector<double>>> symmetrize_edgelist_from_triangular( raft::handle_t const& handle, rmm::device_uvector<int32_t>&& d_src_v, rmm::device_uvector<int32_t>&& d_dst_v, std::optional<rmm::device_uvector<double>>&& optional_d_weights_v, bool check_diagonal); template std::tuple<rmm::device_uvector<int64_t>, rmm::device_uvector<int64_t>, std::optional<rmm::device_uvector<double>>> symmetrize_edgelist_from_triangular( raft::handle_t const& handle, rmm::device_uvector<int64_t>&& d_src_v, rmm::device_uvector<int64_t>&& d_dst_v, std::optional<rmm::device_uvector<double>>&& 
optional_d_weights_v, bool check_diagonal); } // namespace cugraph
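// --- Added illustrative sketch -----------------------------------------------
// A host-only sketch (std::vector instead of rmm::device_uvector, no thrust) of
// what symmetrize_edgelist_from_triangular does conceptually: optionally move
// self-loops aside, then append the mirrored copy (dst, src) of every strictly
// triangular edge. Weights are omitted for brevity; the function name is
// illustrative only and is not the cuGraph API.
#include <cstddef>
#include <utility>
#include <vector>

inline void SymmetrizeTriangularHost(std::vector<int>& src,
                                     std::vector<int>& dst,
                                     bool check_diagonal) {
  std::size_t n = src.size();
  std::size_t num_strict = n;
  if (check_diagonal) {
    // Partition self-loops (u == v) to the back; only the strictly triangular
    // prefix gets mirrored, so diagonal edges are not duplicated.
    std::size_t write = 0;
    for (std::size_t read = 0; read < n; ++read) {
      if (src[read] != dst[read]) {
        std::swap(src[write], src[read]);
        std::swap(dst[write], dst[read]);
        ++write;
      }
    }
    num_strict = write;
  }
  src.reserve(n + num_strict);
  dst.reserve(n + num_strict);
  for (std::size_t i = 0; i < num_strict; ++i) {
    src.push_back(dst[i]);  // mirrored edge (v, u); indices < num_strict still
    dst.push_back(src[i]);  // hold their original values, we only append
  }
}
// -----------------------------------------------------------------------------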
#pragma once #include <libgen.h> #include <math.h> #include <stdio.h> #include <vector> #include <time.h> #include <iostream> #include <sstream> #include <string> #include <gunrock/util/parameters.h> #include <gunrock/graph/coo.cuh> namespace gunrock { namespace graphio { namespace labels { /** * @brief Reads a user->labels file from an input-stream into a CSR sparse * * Here is an example of the labels format * +----------------------------------------------+ * |%%Labels Formatted File | <--- header line * |% | <--+ * |% comments | |-- 0 or more comment * lines * |% | <--+ * | N L L | <--- nodes, labels, labels * | I1 L1A L1B | <--+ * | I2 L2A L2B | | * | . . . | | * | IN LNA LNB | <--+ * +----------------------------------------------+ * * * @param[in] f_in Input labels graph file. * @param[in] labels_a Array for first labels column * @param[in] labels_b Array for second labels column (optional) * * \return If there is any File I/O error along the way. */ template <typename ArrayT> cudaError_t ReadLabelsStream(FILE *f_in, util::Parameters &parameters, ArrayT &labels_a, ArrayT &labels_b) { typedef typename ArrayT::ValueT ValueT; cudaError_t retval = cudaSuccess; bool quiet = parameters.Get<bool>("quiet"); int labels_read = -1; long long nodes = 0; // bool label_b_exists = false; // change this to a parameter // util::Array1D<SizeT, EdgePairT> temp_edge_pairs; // temp_edge_pairs.SetName("graphio::market::ReadMarketStream::temp_edge_pairs"); // EdgeTupleType *coo = NULL; // read in COO format time_t mark0 = time(NULL); util::PrintMsg(" Parsing LABELS", !quiet); char line[1024]; while (true) { if (fscanf(f_in, "%[^\n]\n", line) <= 0) { break; } if (line[0] == '%') { // Comment if (strlen(line) >= 2 && line[1] == '%') { // Header -> Can be used to extract info for labels } } // -> if else if (!util::isValid(labels_read)) { // Problem description-> First line // with nodes and labels info long long ll_nodes, ll_label_x, ll_label_y; int items_scanned = sscanf(line, "%lld %lld %lld", &ll_nodes, &ll_label_x, &ll_label_y); if (ll_label_x != ll_label_y) { return util::GRError( "Error parsing LABELS, problem description invalid (" + std::to_string(ll_label_x) + " =/= " + std::to_string(ll_label_y) + ")", __FILE__, __LINE__); } nodes = ll_nodes; util::PrintMsg(" (" + std::to_string(ll_nodes) + " nodes) ", !quiet); for (int k = 0; k < nodes; k++) { labels_a[k] = util::PreDefinedValues<ValueT>::InvalidValue; labels_b[k] = util::PreDefinedValues<ValueT>::InvalidValue; } labels_read = 0; } // -> else if else { // Now we can start storing labels if (labels_read >= nodes) { return util::GRError( "Error parsing LABELS: " "encountered more than " + std::to_string(nodes) + " nodes", __FILE__, __LINE__); } long long ll_node; // Active node // Used for sscanf double lf_label_a = util::PreDefinedValues<ValueT>::InvalidValue; double lf_label_b = util::PreDefinedValues<ValueT>::InvalidValue; ValueT ll_label_a, ll_label_b; // Used to parse float/double int num_input = sscanf(line, "%lld %lf %lf", &ll_node, &lf_label_a, &lf_label_b); if (typeid(ValueT) == typeid(float) || typeid(ValueT) == typeid(double) || typeid(ValueT) == typeid(long double)) { ll_label_a = (ValueT)lf_label_a; ll_label_b = (ValueT)lf_label_b; } else { ll_label_a = lf_label_a; ll_label_b = lf_label_b; } if (!util::isValid( ll_label_a)) { // Populate the missing labels as invalid (-1) ll_label_a = util::PreDefinedValues<ValueT>::InvalidValue; } if (!util::isValid( ll_label_b)) { // Populate the missing label b as invalid (-1) ll_label_b = 
util::PreDefinedValues<ValueT>::InvalidValue; } labels_a[ll_node - 1] = ll_label_a; labels_b[ll_node - 1] = ll_label_b; labels_read++; } // -> else } // -> while if (labels_read != nodes) { return util::GRError( "Error parsing LABELS: " "only " + std::to_string(labels_read) + "/" + std::to_string(nodes) + " nodes read", __FILE__, __LINE__); } time_t mark1 = time(NULL); util::PrintMsg("Done parsing (" + std::to_string(mark1 - mark0) + " s).", !quiet); return retval; } template <typename SizeT, typename ValueT> cudaError_t ReadLabelsStream(FILE *f_in, util::Parameters &parameters, util::Array1D<SizeT, ValueT>& labels) { cudaError_t retval = cudaSuccess; bool quiet = parameters.Get<bool>("quiet"); bool transpose = parameters.Get<bool>("transpose"); if (transpose) printf("table is gonna be tranposed\n"); else printf("table is not tranposed\n"); long long dim; long long num_labels; long long labels_read = -1; time_t mark0 = time(NULL); long long ll_node = 0; char line[10000]; while (true) { if (fscanf(f_in, "%[^\n]\n", line) <= 0){ break; } #if DEBUG_LABEL std::cerr << line << std::endl; #endif if (line[0] == '%' || line[0] == '#') { // Comment if (strlen(line) >= 2 && line[1] == '%'){ } } // -> if comment else if (!util::isValid(labels_read)) { // Problem description-> First line // with nodes and labels info long long ll_num_labels, ll_dim; int items_scanned = sscanf(line, "%lld %lld", &ll_num_labels, &ll_dim); if ((!util::isValid(ll_num_labels)) or (!util::isValid(ll_dim))){ return util::GRError( "Error parsing LABELS, problem description invalid (" + std::to_string(ll_num_labels) + " < 0" + std::to_string(ll_dim) + " < 0", __FILE__, __LINE__); } num_labels = ll_num_labels; dim = ll_dim; util::PrintMsg("Number of labels " + std::to_string(num_labels) + ", dimension " + std::to_string(dim), !quiet); parameters.Set("n", num_labels); parameters.Set("dim", dim); // Allocation memory for points GUARD_CU(labels.Allocate(num_labels*dim, util::HOST)); for (int k = 0; k < num_labels; k++) { for (int d = 0; d < dim; d++){ labels[k * dim + d] = util::PreDefinedValues<ValueT>::InvalidValue; } } labels_read = 0; } // -> else if problem description else { // Now we can start storing labels if (labels_read >= num_labels) { // Reading labels is done break; } int d = 0; while (d < dim){ double lf_label = util::PreDefinedValues<ValueT>::InvalidValue; int num_input = sscanf(line, "%lf", &lf_label); if (d < dim-1){ int i=0; while (line[i] != ' ' && line[i] != '\n') ++i; memmove(line, line+i+1, 10000-(i+2)); } ValueT ll_label; if (typeid(ValueT) == typeid(float) || typeid(ValueT) == typeid(double) || typeid(ValueT) == typeid(long double)) { ll_label = (ValueT)lf_label; }else{ ll_label = lf_label; } if (!util::isValid(ll_label)){ return util::GRError( "Error parsing LABELS: " "Invalid " + std::to_string(d) + "th element of label: " + std::to_string(ll_label), __FILE__, __LINE__); } if (!transpose){ //N M // DA DB .. DM //I1 L1A L1B .. L1M //I2 L2A L2B .. L2M //.. .. .. .. .. //IN LNA LNB .. LNM labels[ll_node * dim + d] = ll_label; }else{ //N M // I1 I2 .. IN //DA L1A L2A .. LNA //DB L1B L2B .. LNB //.. .. .. .. .. //DM L1M L2M .. 
LNM labels[d * num_labels + ll_node] = ll_label; } ++d; } // -> while reading line ++ll_node; if (d < dim){ return util::GRError( "Error parsing LABELS: " "Invalid length of label: " + std::to_string(d), __FILE__, __LINE__); } labels_read++; } // -> else storing labels } // -> while if (labels_read != num_labels) { return util::GRError( "Error parsing LABELS: " "only " + std::to_string(labels_read) + "/" + std::to_string(num_labels) + " nodes read", __FILE__, __LINE__); } time_t mark1 = time(NULL); util::PrintMsg("Done parsing (" + std::to_string(mark1 - mark0) + " s).", !quiet); return retval; } /** * \defgroup Public Interface * @{ */ /** * @brief Loads a LABELS-formatted array(s) from the specified file. * * @param[in] filename Labels file name, if empty, it is loaded from STDIN. * @param[in] parameters Idk if we need any parameters (placeholder). * @param[in] labels_a Array that we can populate with the 2nd column values. * @param[in] labels_b (optional) Array that we can populate with 3rd column * values. * * \return If there is any File I/O error along the way. 0 for no error. */ template <typename ArrayT> cudaError_t BuildLabelsArray(std::string filename, util::Parameters &parameters, ArrayT &labels_a, ArrayT &labels_b) { typedef typename ArrayT::ValueT ValueT; cudaError_t retval = cudaSuccess; bool quiet = parameters.Get<bool>("quiet"); FILE *f_in = fopen(filename.c_str(), "r"); if (f_in) { util::PrintMsg("Reading from " + filename + ":", !quiet); if (retval = ReadLabelsStream(f_in, parameters, labels_a, labels_b)) { fclose(f_in); return retval; } } else { return util::GRError("Unable to open file " + filename, __FILE__, __LINE__); } return retval; } template <typename SizeT, typename ValueT> cudaError_t BuildLabelsArray(std::string filename, util::Parameters &parameters, util::Array1D<SizeT, ValueT> &labels) { cudaError_t retval = cudaSuccess; bool quiet = parameters.Get<bool>("quiet"); FILE *f_in = fopen(filename.c_str(), "r"); if (f_in) { util::PrintMsg("Reading from " + filename + ":", !quiet); if (retval = ReadLabelsStream(f_in, parameters, labels)) { fclose(f_in); return retval; } } else { return util::GRError("Unable to open file " + filename, __FILE__, __LINE__); } return retval; } cudaError_t UseParameters(util::Parameters &parameters, std::string graph_prefix = "") { cudaError_t retval = cudaSuccess; return retval; } template <typename ArrayT> cudaError_t Read(util::Parameters &parameters, ArrayT &labels_a, ArrayT &labels_b) { cudaError_t retval = cudaSuccess; bool quiet = parameters.Get<bool>("quiet"); util::PrintMsg("Loading Labels into an array ...", !quiet); std::string filename = parameters.Get<std::string>("labels-file"); std::ifstream fp(filename.c_str()); if (filename == "" || !fp.is_open()) { return util::GRError("Input labels file " + filename + " does not exist.", __FILE__, __LINE__); } if (parameters.UseDefault("dataset")) { std::string dir, file, extension; util::SeperateFileName(filename, dir, file, extension); // util::PrintMsg("filename = " + filename // + ", dir = " + dir // + ", file = " + file // + ", extension = " + extension); parameters.Set("dataset", file); } GUARD_CU(BuildLabelsArray(filename, parameters, labels_a, labels_b)); return retval; } template <typename SizeT, typename ValueT> cudaError_t Read(util::Parameters &parameters, util::Array1D<SizeT, ValueT> &labels) { // TO DO initialized graph cudaError_t retval = cudaSuccess; bool quiet = parameters.Get<bool>("quiet"); util::PrintMsg("Loading Labels into an array ...", !quiet); std::string 
filename = parameters.Get<std::string>("labels-file"); std::ifstream fp(filename.c_str()); if (filename == "" || !fp.is_open()) { return util::GRError("Input labels file " + filename + " does not exist.", __FILE__, __LINE__); } if (parameters.UseDefault("dataset")) { std::string dir, file, extension; util::SeperateFileName(filename, dir, file, extension); // util::PrintMsg("filename = " + filename // + ", dir = " + dir // + ", file = " + file // + ", extension = " + extension); parameters.Set("dataset", file); } GUARD_CU(BuildLabelsArray(filename, parameters, labels)); return retval; } /**@}*/ } // namespace labels } // namespace graphio } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
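// --- Added illustrative sketch -----------------------------------------------
// A stripped-down, std-only illustration of the two-column labels format
// documented in the header comment above ("%%..." header, optional "%" comment
// lines, a "N L L" problem line, then one "index label_a label_b" row per
// node, 1-based indices). This is only a sketch of the format, not Gunrock's
// reader; missing labels are represented with a -1.0 sentinel instead of
// util::PreDefinedValues<ValueT>::InvalidValue, and error handling is minimal.
#include <cstdio>
#include <vector>

struct LabelsSketch {
  std::vector<double> labels_a;
  std::vector<double> labels_b;
};

inline bool ReadLabelsSketch(FILE* f_in, LabelsSketch* out) {
  char line[1024];
  long long nodes = -1;
  while (fscanf(f_in, "%1023[^\n]\n", line) > 0) {
    if (line[0] == '%') continue;                      // header / comment lines
    if (nodes < 0) {                                   // "N L L" problem line
      long long l1 = 0, l2 = 0;
      if (sscanf(line, "%lld %lld %lld", &nodes, &l1, &l2) < 1 || nodes < 0)
        return false;
      out->labels_a.assign(static_cast<size_t>(nodes), -1.0);
      out->labels_b.assign(static_cast<size_t>(nodes), -1.0);
      continue;
    }
    long long node = 0;
    double a = -1.0, b = -1.0;                         // label_b is optional
    if (sscanf(line, "%lld %lf %lf", &node, &a, &b) < 2) return false;
    if (node < 1 || node > nodes) return false;
    out->labels_a[static_cast<size_t>(node - 1)] = a;
    out->labels_b[static_cast<size_t>(node - 1)] = b;
  }
  return true;
}
// -----------------------------------------------------------------------------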
#include <stdio.h> #include <memory.h> #include "cuda_helper.h" #ifdef __INTELLISENSE__ #define __CUDA_ARCH__ 500 #define __funnelshift_r(x,y,n) (x >> n) #define atomicExch(p,x) x #endif #if __CUDA_ARCH__ >= 300 // 64 Registers Variant for Compute 3.0 #include "quark/groestl_functions_quad.h" #include "quark/groestl_transf_quad.h" #endif // globaler Speicher für alle HeftyHashes aller Threads static uint32_t *d_outputHashes[MAX_GPUS]; static uint32_t *d_resultNonces[MAX_GPUS]; __constant__ uint32_t pTarget[2]; // Same for all GPU __constant__ uint32_t myriadgroestl_gpu_msg[32]; // muss expandiert werden __constant__ uint32_t myr_sha256_gpu_constantTable[64]; __constant__ uint32_t myr_sha256_gpu_constantTable2[64]; const uint32_t myr_sha256_cpu_constantTable[] = { 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2, }; const uint32_t myr_sha256_cpu_w2Table[] = { 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000200, 0x80000000, 0x01400000, 0x00205000, 0x00005088, 0x22000800, 0x22550014, 0x05089742, 0xa0000020, 0x5a880000, 0x005c9400, 0x0016d49d, 0xfa801f00, 0xd33225d0, 0x11675959, 0xf6e6bfda, 0xb30c1549, 0x08b2b050, 0x9d7c4c27, 0x0ce2a393, 0x88e6e1ea, 0xa52b4335, 0x67a16f49, 0xd732016f, 0x4eeb2e91, 0x5dbf55e5, 0x8eee2335, 0xe2bc5ec2, 0xa83f4394, 0x45ad78f7, 0x36f3d0cd, 0xd99c05e8, 0xb0511dc7, 0x69bc7ac4, 0xbd11375b, 0xe3ba71e5, 0x3b209ff2, 0x18feee17, 0xe25ad9e7, 0x13375046, 0x0515089d, 0x4f0d0f04, 0x2627484e, 0x310128d2, 0xc668b434, 0x420841cc, 0x62d311b8, 0xe59ba771, 0x85a7a484 }; #define SWAB32(x) cuda_swab32(x) #if __CUDA_ARCH__ < 320 // Kepler (Compute 3.0) #define ROTR32(x, n) (((x) >> (n)) | ((x) << (32 - (n)))) #else // Kepler (Compute 3.5) #define ROTR32(x, n) __funnelshift_r( (x), (x), (n) ) #endif #define R(x, n) ((x) >> (n)) #define Ch(x, y, z) ((x & (y ^ z)) ^ z) #define Maj(x, y, z) ((x & (y | z)) | (y & z)) #define S0(x) (ROTR32(x, 2) ^ ROTR32(x, 13) ^ ROTR32(x, 22)) #define S1(x) (ROTR32(x, 6) ^ ROTR32(x, 11) ^ ROTR32(x, 25)) #define s0(x) (ROTR32(x, 7) ^ ROTR32(x, 18) ^ R(x, 3)) #define s1(x) (ROTR32(x, 17) ^ ROTR32(x, 19) ^ R(x, 10)) __device__ __forceinline__ void myriadgroestl_gpu_sha256(uint32_t *message) { uint32_t W1[16]; #pragma unroll for(int k=0; k<16; k++) W1[k] = SWAB32(message[k]); uint32_t regs[8] = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 }; // Progress W1 #pragma unroll for(int j=0; j<16; j++) { uint32_t T1 = regs[7] + S1(regs[4]) + Ch(regs[4], regs[5], regs[6]) + myr_sha256_gpu_constantTable[j] + W1[j]; uint32_t T2 = S0(regs[0]) + Maj(regs[0], regs[1], regs[2]); #pragma unroll 7 for (int k=6; k >= 0; k--) regs[k+1] = regs[k]; regs[0] = T1 + T2; regs[4] += T1; } // 
Progress W2...W3 uint32_t W2[16]; ////// PART 1 #pragma unroll for(int j=0; j<2; j++) W2[j] = s1(W1[14+j]) + W1[9+j] + s0(W1[1+j]) + W1[j]; #pragma unroll 5 for(int j=2; j<7;j++) W2[j] = s1(W2[j-2]) + W1[9+j] + s0(W1[1+j]) + W1[j]; #pragma unroll for(int j=7; j<15; j++) W2[j] = s1(W2[j-2]) + W2[j-7] + s0(W1[1+j]) + W1[j]; W2[15] = s1(W2[13]) + W2[8] + s0(W2[0]) + W1[15]; // Round function #pragma unroll for(int j=0; j<16; j++) { uint32_t T1 = regs[7] + S1(regs[4]) + Ch(regs[4], regs[5], regs[6]) + myr_sha256_gpu_constantTable[j + 16] + W2[j]; uint32_t T2 = S0(regs[0]) + Maj(regs[0], regs[1], regs[2]); #pragma unroll 7 for (int l=6; l >= 0; l--) regs[l+1] = regs[l]; regs[0] = T1 + T2; regs[4] += T1; } ////// PART 2 #pragma unroll for(int j=0; j<2; j++) W1[j] = s1(W2[14+j]) + W2[9+j] + s0(W2[1+j]) + W2[j]; #pragma unroll 5 for(int j=2; j<7; j++) W1[j] = s1(W1[j-2]) + W2[9+j] + s0(W2[1+j]) + W2[j]; #pragma unroll for(int j=7; j<15; j++) W1[j] = s1(W1[j-2]) + W1[j-7] + s0(W2[1+j]) + W2[j]; W1[15] = s1(W1[13]) + W1[8] + s0(W1[0]) + W2[15]; // Round function #pragma unroll for(int j=0; j<16; j++) { uint32_t T1 = regs[7] + S1(regs[4]) + Ch(regs[4], regs[5], regs[6]) + myr_sha256_gpu_constantTable[j + 32] + W1[j]; uint32_t T2 = S0(regs[0]) + Maj(regs[0], regs[1], regs[2]); #pragma unroll 7 for (int l=6; l >= 0; l--) regs[l+1] = regs[l]; regs[0] = T1 + T2; regs[4] += T1; } ////// PART 3 #pragma unroll for(int j=0; j<2; j++) W2[j] = s1(W1[14+j]) + W1[9+j] + s0(W1[1+j]) + W1[j]; #pragma unroll 5 for(int j=2; j<7; j++) W2[j] = s1(W2[j-2]) + W1[9+j] + s0(W1[1+j]) + W1[j]; #pragma unroll for(int j=7; j<15; j++) W2[j] = s1(W2[j-2]) + W2[j-7] + s0(W1[1+j]) + W1[j]; W2[15] = s1(W2[13]) + W2[8] + s0(W2[0]) + W1[15]; // Round function #pragma unroll for(int j=0; j<16; j++) { uint32_t T1 = regs[7] + S1(regs[4]) + Ch(regs[4], regs[5], regs[6]) + myr_sha256_gpu_constantTable[j + 48] + W2[j]; uint32_t T2 = S0(regs[0]) + Maj(regs[0], regs[1], regs[2]); #pragma unroll 7 for (int l=6; l >= 0; l--) regs[l+1] = regs[l]; regs[0] = T1 + T2; regs[4] += T1; } uint32_t hash[8] = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 }; #pragma unroll 8 for(int k=0; k<8; k++) hash[k] += regs[k]; ///// ///// 2nd Round (wegen Msg-Padding) ///// #pragma unroll for(int k=0; k<8; k++) regs[k] = hash[k]; // Progress W1 #pragma unroll for(int j=0; j<64; j++) { uint32_t T1 = regs[7] + S1(regs[4]) + Ch(regs[4], regs[5], regs[6]) + myr_sha256_gpu_constantTable2[j]; uint32_t T2 = S0(regs[0]) + Maj(regs[0], regs[1], regs[2]); #pragma unroll 7 for (int k=6; k >= 0; k--) regs[k+1] = regs[k]; regs[0] = T1 + T2; regs[4] += T1; } #if 0 // Full sha hash #pragma unroll for(int k=0; k<8; k++) hash[k] += regs[k]; #pragma unroll for(int k=0; k<8; k++) message[k] = SWAB32(hash[k]); #else message[6] = SWAB32(hash[6] + regs[6]); message[7] = SWAB32(hash[7] + regs[7]); #endif } __global__ //__launch_bounds__(256, 6) // we want <= 40 regs void myriadgroestl_gpu_hash_sha(uint32_t threads, uint32_t startNounce, uint32_t *hashBuffer, uint32_t *resNonces) { #if __CUDA_ARCH__ >= 300 const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); if (thread < threads) { const uint32_t nonce = startNounce + thread; uint32_t out_state[16]; uint32_t *inpHash = &hashBuffer[16 * thread]; #pragma unroll 16 for (int i=0; i < 16; i++) out_state[i] = inpHash[i]; myriadgroestl_gpu_sha256(out_state); if (out_state[7] <= pTarget[1] && out_state[6] <= pTarget[0]) { uint32_t tmp = atomicExch(&resNonces[0], nonce); if (tmp != 
UINT32_MAX) resNonces[1] = tmp; } } #endif } __global__ __launch_bounds__(256, 4) void myriadgroestl_gpu_hash_quad(uint32_t threads, uint32_t startNounce, uint32_t *hashBuffer) { #if __CUDA_ARCH__ >= 300 // durch 4 dividieren, weil jeweils 4 Threads zusammen ein Hash berechnen uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x) / 4; if (thread < threads) { // GROESTL uint32_t paddedInput[8]; #pragma unroll 8 for(int k=0; k<8; k++) paddedInput[k] = myriadgroestl_gpu_msg[4*k+threadIdx.x%4]; uint32_t nounce = startNounce + thread; if ((threadIdx.x % 4) == 3) paddedInput[4] = SWAB32(nounce); // 4*4+3 = 19 uint32_t msgBitsliced[8]; to_bitslice_quad(paddedInput, msgBitsliced); uint32_t state[8]; groestl512_progressMessage_quad(state, msgBitsliced); uint32_t out_state[16]; from_bitslice_quad(state, out_state); if ((threadIdx.x & 0x03) == 0) { uint32_t *outpHash = &hashBuffer[16 * thread]; #pragma unroll 16 for(int k=0; k<16; k++) outpHash[k] = out_state[k]; } } #endif } // Setup Function __host__ void myriadgroestl_cpu_init(int thr_id, uint32_t threads) { uint32_t temp[64]; for(int i=0; i<64; i++) temp[i] = myr_sha256_cpu_w2Table[i] + myr_sha256_cpu_constantTable[i]; cudaMemcpyToSymbol( myr_sha256_gpu_constantTable2, temp, sizeof(uint32_t) * 64 ); cudaMemcpyToSymbol( myr_sha256_gpu_constantTable, myr_sha256_cpu_constantTable, sizeof(uint32_t) * 64 ); // to check if the binary supports SM3+ cuda_get_arch(thr_id); cudaMalloc(&d_outputHashes[thr_id], (size_t) 64 * threads); cudaMalloc(&d_resultNonces[thr_id], 2 * sizeof(uint32_t)); } __host__ void myriadgroestl_cpu_free(int thr_id) { cudaFree(d_outputHashes[thr_id]); cudaFree(d_resultNonces[thr_id]); } __host__ void myriadgroestl_cpu_setBlock(int thr_id, void *data, uint32_t *pTargetIn) { uint32_t msgBlock[32] = { 0 }; memcpy(&msgBlock[0], data, 80); msgBlock[20] = 0x80; msgBlock[31] = 0x01000000; cudaMemcpyToSymbol(myriadgroestl_gpu_msg, msgBlock, 128); cudaMemcpyToSymbol(pTarget, &pTargetIn[6], 2 * sizeof(uint32_t)); } __host__ void myriadgroestl_cpu_hash(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *resNounce) { uint32_t threadsperblock = 256; cudaMemset(d_resultNonces[thr_id], 0xFF, 2 * sizeof(uint32_t)); // Compute 3.0 benutzt die registeroptimierte Quad Variante mit Warp Shuffle // mit den Quad Funktionen brauchen wir jetzt 4 threads pro Hash, daher Faktor 4 bei der Blockzahl const int factor = 4; dim3 grid(factor*((threads + threadsperblock-1)/threadsperblock)); dim3 block(threadsperblock); int dev_id = device_map[thr_id]; if (device_sm[dev_id] < 300 || cuda_arch[dev_id] < 300) { printf("Sorry, This algo is not supported by this GPU arch (SM 3.0 required)"); return; } myriadgroestl_gpu_hash_quad <<< grid, block >>> (threads, startNounce, d_outputHashes[thr_id]); dim3 grid2((threads + threadsperblock-1)/threadsperblock); myriadgroestl_gpu_hash_sha <<< grid2, block >>> (threads, startNounce, d_outputHashes[thr_id], d_resultNonces[thr_id]); cudaMemcpy(resNounce, d_resultNonces[thr_id], 2 * sizeof(uint32_t), cudaMemcpyDeviceToHost); }
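// --- Added illustrative sketch -----------------------------------------------
// A host-side sketch of the single SHA-256 round that myriadgroestl_gpu_sha256()
// unrolls 64 times per block, written with the same S0/S1/Ch/Maj definitions as
// the macros above. Purely illustrative: the real kernel keeps the state in
// registers and fuses the round constants with the fixed padded message
// schedule (myr_sha256_gpu_constantTable2) for the second compression.
#include <stdint.h>

static inline uint32_t rotr32(uint32_t x, int n) { return (x >> n) | (x << (32 - n)); }
static inline uint32_t sha_ch (uint32_t x, uint32_t y, uint32_t z) { return (x & (y ^ z)) ^ z; }
static inline uint32_t sha_maj(uint32_t x, uint32_t y, uint32_t z) { return (x & (y | z)) | (y & z); }
static inline uint32_t sha_S0 (uint32_t x) { return rotr32(x, 2) ^ rotr32(x, 13) ^ rotr32(x, 22); }
static inline uint32_t sha_S1 (uint32_t x) { return rotr32(x, 6) ^ rotr32(x, 11) ^ rotr32(x, 25); }

// One round: regs[0..7] = (a,b,c,d,e,f,g,h), k = round constant, w = schedule word.
static inline void sha256_round(uint32_t regs[8], uint32_t k, uint32_t w) {
  uint32_t T1 = regs[7] + sha_S1(regs[4]) + sha_ch(regs[4], regs[5], regs[6]) + k + w;
  uint32_t T2 = sha_S0(regs[0]) + sha_maj(regs[0], regs[1], regs[2]);
  for (int i = 6; i >= 0; --i) regs[i + 1] = regs[i];  // rotate h <- g <- ... <- a
  regs[0] = T1 + T2;
  regs[4] += T1;
}
// -----------------------------------------------------------------------------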
* \test Tests vector operations (BLAS level 1) for floating point arithmetic. **/ // // *** System // #include <iostream> #include <iomanip> #include <cmath> // // *** ViennaCL // //#define VIENNACL_DEBUG_ALL #include "viennacl/vector.hpp" #include "viennacl/vector_proxy.hpp" #include "viennacl/linalg/inner_prod.hpp" #include "viennacl/linalg/norm_1.hpp" #include "viennacl/linalg/norm_2.hpp" #include "viennacl/linalg/norm_inf.hpp" #include "viennacl/linalg/maxmin.hpp" #include "viennacl/linalg/sum.hpp" #include "viennacl/tools/random.hpp" /* Inject a couple of functions into std-namespace to make tests work with C++ 11 */ #if __cplusplus > 199711L namespace std { template<typename T> T exp10(T x) { return std::exp(x*T(2.302585092994045684017991454684364207601101488628772976033)); } template<typename T> T rsqrt(T x) { return std::pow(x, T(-0.5)); } template<typename T> T sign(T x) { return (x > T(0)) ? T(1) : (x < T(0) ? T(-1) : T(0)); } } #endif template<typename NumericT> class vector_proxy { public: vector_proxy(NumericT * p_values, std::size_t start_idx, std::size_t increment, std::size_t num_elements) : values_(p_values), start_(start_idx), inc_(increment), size_(num_elements) {} NumericT const & operator[](std::size_t index) const { return values_[start_ + index * inc_]; } NumericT & operator[](std::size_t index) { return values_[start_ + index * inc_]; } std::size_t size() const { return size_; } private: NumericT * values_; std::size_t start_; std::size_t inc_; std::size_t size_; }; template<typename NumericT> void proxy_copy(vector_proxy<NumericT> const & host_vec, viennacl::vector_base<NumericT> & vcl_vec) { std::vector<NumericT> std_vec(host_vec.size()); for (std::size_t i=0; i<host_vec.size(); ++i) std_vec[i] = host_vec[i]; viennacl::copy(std_vec.begin(), std_vec.end(), vcl_vec.begin()); } template<typename NumericT> void proxy_copy(viennacl::vector_base<NumericT> const & vcl_vec, vector_proxy<NumericT> & host_vec) { std::vector<NumericT> std_vec(vcl_vec.size()); viennacl::copy(vcl_vec.begin(), vcl_vec.end(), std_vec.begin()); for (std::size_t i=0; i<host_vec.size(); ++i) host_vec[i] = std_vec[i]; } // // ------------------------------------------------------------- // template<typename ScalarType> ScalarType diff(ScalarType const & s1, ScalarType const & s2) { viennacl::backend::finish(); if (std::fabs(s1 - s2) > 0 ) return (s1 - s2) / std::max(std::fabs(s1), std::fabs(s2)); return 0; } // // ------------------------------------------------------------- // template<typename ScalarType> ScalarType diff(ScalarType const & s1, viennacl::scalar<ScalarType> const & s2) { viennacl::backend::finish(); if (std::fabs(s1 - s2) > 0 ) return (s1 - s2) / std::max(std::fabs(s1), std::fabs(s2)); return 0; } // // ------------------------------------------------------------- // template<typename ScalarType> ScalarType diff(ScalarType const & s1, viennacl::entry_proxy<ScalarType> const & s2) { viennacl::backend::finish(); if (std::fabs(s1 - s2) > 0 ) return (s1 - s2) / std::max(std::fabs(s1), std::fabs(s2)); return 0; } // // ------------------------------------------------------------- // template<typename ScalarType, typename ViennaCLVectorType> ScalarType diff(vector_proxy<ScalarType> const & v1, ViennaCLVectorType const & vcl_vec) { std::vector<ScalarType> v2_cpu(vcl_vec.size()); viennacl::backend::finish(); viennacl::copy(vcl_vec, v2_cpu); for (unsigned int i=0;i<v1.size(); ++i) { if ( std::max( std::fabs(v2_cpu[i]), std::fabs(v1[i]) ) > 0 ) v2_cpu[i] = std::fabs(v2_cpu[i] - v1[i]) / 
std::max( std::fabs(v2_cpu[i]), std::fabs(v1[i]) ); else v2_cpu[i] = 0.0; } ScalarType ret = 0; for (std::size_t i=0; i<v2_cpu.size(); ++i) ret = std::max(ret, std::fabs(v2_cpu[i])); return ret; } template<typename T1, typename T2> int check(T1 const & t1, T2 const & t2, double epsilon) { int retval = EXIT_SUCCESS; double temp = std::fabs(diff(t1, t2)); if (temp > epsilon) { std::cout << "# Error! Relative difference: " << temp << std::endl; retval = EXIT_FAILURE; } return retval; } // // ------------------------------------------------------------- // template< typename NumericT, typename Epsilon, typename HostVectorType, typename ViennaCLVectorType1, typename ViennaCLVectorType2 > int test(Epsilon const& epsilon, HostVectorType & host_v1, HostVectorType & host_v2, ViennaCLVectorType1 & vcl_v1, ViennaCLVectorType2 & vcl_v2) { int retval = EXIT_SUCCESS; viennacl::tools::uniform_random_numbers<NumericT> randomNumber; NumericT cpu_result = NumericT(42.0); viennacl::scalar<NumericT> gpu_result = NumericT(43.0); // // Initializer: // std::cout << "Checking for zero_vector initializer..." << std::endl; for (std::size_t i=0; i<host_v1.size(); ++i) host_v1[i] = NumericT(0); vcl_v1 = viennacl::zero_vector<NumericT>(vcl_v1.size()); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Checking for scalar_vector initializer..." << std::endl; for (std::size_t i=0; i<host_v1.size(); ++i) host_v1[i] = NumericT(cpu_result); vcl_v1 = viennacl::scalar_vector<NumericT>(vcl_v1.size(), cpu_result); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i<host_v1.size(); ++i) host_v1[i] = NumericT(gpu_result); vcl_v1 = viennacl::scalar_vector<NumericT>(vcl_v1.size(), gpu_result); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Checking for unit_vector initializer..." << std::endl; for (std::size_t i=0; i<host_v1.size(); ++i) host_v1[i] = NumericT(0); host_v1[5] = NumericT(1); vcl_v1 = viennacl::unit_vector<NumericT>(vcl_v1.size(), 5); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i<host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(1.0) + randomNumber(); } proxy_copy(host_v1, vcl_v1); //resync proxy_copy(host_v2, vcl_v2); std::cout << "Checking for successful copy..." << std::endl; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; if (check(host_v2, vcl_v2, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // Part 1: Norms and inner product // // -------------------------------------------------------------------------- std::cout << "Testing inner_prod..." 
<< std::endl; cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += host_v1[i] * host_v2[i]; NumericT cpu_result2 = viennacl::linalg::inner_prod(vcl_v1, vcl_v2); gpu_result = viennacl::linalg::inner_prod(vcl_v1, vcl_v2); std::cout << "Reference: " << cpu_result << std::endl; std::cout << cpu_result2 << std::endl; std::cout << gpu_result << std::endl; if (check(cpu_result, cpu_result2, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += (host_v1[i] + host_v2[i]) * (host_v2[i] - host_v1[i]); NumericT cpu_result3 = viennacl::linalg::inner_prod(vcl_v1 + vcl_v2, vcl_v2 - vcl_v1); gpu_result = viennacl::linalg::inner_prod(vcl_v1 + vcl_v2, vcl_v2 - vcl_v1); std::cout << "Reference: " << cpu_result << std::endl; std::cout << cpu_result3 << std::endl; std::cout << gpu_result << std::endl; if (check(cpu_result, cpu_result3, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- std::cout << "Testing norm_1..." << std::endl; cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += std::fabs(host_v1[i]); gpu_result = viennacl::linalg::norm_1(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; gpu_result = 2 * cpu_result; //reset cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += std::fabs(host_v1[i]); gpu_result = cpu_result; cpu_result = 0; cpu_result = viennacl::linalg::norm_1(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += std::fabs(host_v1[i] + host_v2[i]); gpu_result = cpu_result; cpu_result = 0; cpu_result = viennacl::linalg::norm_1(vcl_v1 + vcl_v2); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- std::cout << "Testing norm_2..." << std::endl; cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += host_v1[i] * host_v1[i]; cpu_result = std::sqrt(cpu_result); gpu_result = viennacl::linalg::norm_2(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; gpu_result = 2 * cpu_result; //reset cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += host_v1[i] * host_v1[i]; gpu_result = std::sqrt(cpu_result); cpu_result = viennacl::linalg::norm_2(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += (host_v1[i] + host_v2[i]) * (host_v1[i] + host_v2[i]); gpu_result = std::sqrt(cpu_result); cpu_result = viennacl::linalg::norm_2(vcl_v1 + vcl_v2); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- std::cout << "Testing norm_inf..." 
<< std::endl; cpu_result = std::fabs(host_v1[0]); for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = std::max(std::fabs(host_v1[i]), cpu_result); gpu_result = viennacl::linalg::norm_inf(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; gpu_result = 2 * cpu_result; //reset cpu_result = std::fabs(host_v1[0]); for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = std::max(std::fabs(host_v1[i]), cpu_result); gpu_result = cpu_result; cpu_result = 0; cpu_result = viennacl::linalg::norm_inf(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = std::fabs(host_v1[0]); for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = std::max(std::fabs(host_v1[i] + host_v2[i]), cpu_result); gpu_result = cpu_result; cpu_result = 0; cpu_result = viennacl::linalg::norm_inf(vcl_v1 + vcl_v2); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- std::cout << "Testing index_norm_inf..." << std::endl; std::size_t cpu_index = 0; cpu_result = std::fabs(host_v1[0]); for (std::size_t i=0; i<host_v1.size(); ++i) { if (std::fabs(host_v1[i]) > cpu_result) { cpu_result = std::fabs(host_v1[i]); cpu_index = i; } } std::size_t gpu_index = viennacl::linalg::index_norm_inf(vcl_v1); if (check(static_cast<NumericT>(cpu_index), static_cast<NumericT>(gpu_index), epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- cpu_result = host_v1[cpu_index]; gpu_result = vcl_v1[viennacl::linalg::index_norm_inf(vcl_v1)]; if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = std::fabs(host_v1[0] + host_v2[0]); for (std::size_t i=0; i<host_v1.size(); ++i) { if (std::fabs(host_v1[i] + host_v2[i]) > cpu_result) { cpu_result = std::fabs(host_v1[i] + host_v2[i]); cpu_index = i; } } cpu_result = host_v1[cpu_index]; gpu_result = vcl_v1[viennacl::linalg::index_norm_inf(vcl_v1 + vcl_v2)]; if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- std::cout << "Testing max..." << std::endl; cpu_result = host_v1[0]; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = std::max<NumericT>(cpu_result, host_v1[i]); gpu_result = viennacl::linalg::max(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = host_v1[0]; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = std::max<NumericT>(cpu_result, host_v1[i]); gpu_result = cpu_result; cpu_result *= 2; //reset cpu_result = viennacl::linalg::max(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = host_v1[0] + host_v2[0]; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = std::max<NumericT>(cpu_result, host_v1[i] + host_v2[i]); gpu_result = cpu_result; cpu_result *= 2; //reset cpu_result = viennacl::linalg::max(vcl_v1 + vcl_v2); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- std::cout << "Testing min..." 
<< std::endl; cpu_result = host_v1[0]; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = std::min<NumericT>(cpu_result, host_v1[i]); gpu_result = viennacl::linalg::min(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = host_v1[0]; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = std::min<NumericT>(cpu_result, host_v1[i]); gpu_result = cpu_result; cpu_result *= 2; //reset cpu_result = viennacl::linalg::min(vcl_v1); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = host_v1[0] + host_v2[0]; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result = std::min<NumericT>(cpu_result, host_v1[i] + host_v2[i]); gpu_result = cpu_result; cpu_result *= 2; //reset cpu_result = viennacl::linalg::min(vcl_v1 + vcl_v2); if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- std::cout << "Testing sum..." << std::endl; cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += host_v1[i]; cpu_result2 = viennacl::linalg::sum(vcl_v1); gpu_result = viennacl::linalg::sum(vcl_v1); if (check(cpu_result, cpu_result2, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; cpu_result = 0; for (std::size_t i=0; i<host_v1.size(); ++i) cpu_result += host_v1[i] + host_v2[i]; cpu_result3 = viennacl::linalg::sum(vcl_v1 + vcl_v2); gpu_result = viennacl::linalg::sum(vcl_v1 + vcl_v2); if (check(cpu_result, cpu_result3, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; if (check(cpu_result, gpu_result, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // Plane rotation and assignments // // -------------------------------------------------------------------------- for (std::size_t i=0; i<host_v1.size(); ++i) { NumericT temp = NumericT(1.1) * host_v1[i] + NumericT(2.3) * host_v2[i]; host_v2[i] = - NumericT(2.3) * host_v1[i] + NumericT(1.1) * host_v2[i]; host_v1[i] = temp; } viennacl::linalg::plane_rotation(vcl_v1, vcl_v2, NumericT(1.1), NumericT(2.3)); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; if (check(host_v2, vcl_v2, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- std::cout << "Testing assignments..." << std::endl; NumericT val = static_cast<NumericT>(1e-1); for (size_t i=0; i < host_v1.size(); ++i) host_v1[i] = val; for (size_t i=0; i < vcl_v1.size(); ++i) vcl_v1(i) = val; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing assignments via iterators..." << std::endl; host_v1[2] = static_cast<NumericT>(1.9); vcl_v1[2] = static_cast<NumericT>(1.9); host_v1[2] = static_cast<NumericT>(1.5); typename ViennaCLVectorType1::iterator vcl_v1_it = vcl_v1.begin(); ++vcl_v1_it; ++vcl_v1_it; *vcl_v1_it = static_cast<NumericT>(1.5); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // multiplication and division of vectors by scalars // for (std::size_t i=0; i < host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(3.1415) * host_v1[i]; } proxy_copy(host_v1, vcl_v1); //resync proxy_copy(host_v2, vcl_v2); std::cout << "Testing scaling with CPU scalar..." 
<< std::endl; NumericT alpha = static_cast<NumericT>(1.7182); viennacl::scalar<NumericT> gpu_alpha = alpha; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] *= NumericT(long(alpha)); vcl_v1 *= long(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] *= NumericT(float(alpha)); vcl_v1 *= float(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] *= NumericT(double(alpha)); vcl_v1 *= double(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing scaling with GPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] *= alpha; vcl_v1 *= gpu_alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing scaling with scalar expression..." << std::endl; cpu_result = 0; for (std::size_t i=0; i < host_v1.size(); ++i) cpu_result += host_v1[i] * host_v2[i]; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] *= cpu_result; vcl_v1 *= viennacl::linalg::inner_prod(vcl_v1, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; NumericT beta = static_cast<NumericT>(1.4153); viennacl::scalar<NumericT> gpu_beta = beta; std::cout << "Testing shrinking with CPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] /= NumericT(long(beta)); vcl_v1 /= long(beta); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] /= NumericT(float(beta)); vcl_v1 /= float(beta); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] /= NumericT(double(beta)); vcl_v1 /= double(beta); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing shrinking with GPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] /= beta; vcl_v1 /= gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // add and inplace_add of vectors // for (size_t i=0; i < host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(3.1415) * host_v1[i]; } proxy_copy(host_v1, vcl_v1); //resync proxy_copy(host_v2, vcl_v2); std::cout << "Testing add on vector..." << std::endl; std::cout << "Checking for successful copy..." << std::endl; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; if (check(host_v2, vcl_v2, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + host_v2[i]; vcl_v1 = vcl_v1 + vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing add on vector with flipsign..." << std::endl; for (size_t i=0; i < host_v1.size(); ++i) host_v1[i] = - host_v1[i] + host_v2[i]; vcl_v1 = - vcl_v1 + vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace-add on vector..." << std::endl; for (size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v2[i]; vcl_v1 += vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing assignment to vector with vector multiplied by scalar expression..." 
<< std::endl; cpu_result = 0; for (std::size_t i=0; i < host_v1.size(); ++i) cpu_result += host_v1[i] * host_v2[i]; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = cpu_result * host_v2[i]; //host_v1 = inner_prod(host_v1, host_v2) * host_v2; vcl_v1 = viennacl::linalg::inner_prod(vcl_v1, vcl_v2) * vcl_v2; // // subtract and inplace_subtract of vectors // std::cout << "Testing sub on vector..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] - host_v2[i]; vcl_v1 = vcl_v1 - vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace-sub on vector..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v2[i]; vcl_v1 -= vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // multiply-add // std::cout << "Testing multiply-add on vector with CPU scalar (right)..." << std::endl; for (size_t i=0; i < host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(3.1415) * host_v1[i]; } proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + host_v2[i] * NumericT(float(alpha)); vcl_v1 = vcl_v1 + vcl_v2 * float(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + host_v2[i] * NumericT(double(alpha)); vcl_v1 = vcl_v1 + vcl_v2 * double(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-add on vector with CPU scalar (left)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = NumericT(long(alpha)) * host_v1[i] + host_v2[i]; vcl_v1 = long(alpha) * vcl_v1 + vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = NumericT(float(alpha)) * host_v1[i] + host_v2[i]; vcl_v1 = float(alpha) * vcl_v1 + vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = NumericT(double(alpha)) * host_v1[i] + host_v2[i]; vcl_v1 = double(alpha) * vcl_v1 + vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-add on vector with CPU scalar (both)..." 
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = NumericT(long(alpha)) * host_v1[i] + NumericT(long(beta)) * host_v2[i]; vcl_v1 = long(alpha) * vcl_v1 + long(beta) * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = NumericT(float(alpha)) * host_v1[i] + NumericT(float(beta)) * host_v2[i]; vcl_v1 = float(alpha) * vcl_v1 + float(beta) * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = NumericT(double(alpha)) * host_v1[i] + NumericT(double(beta)) * host_v2[i]; vcl_v1 = double(alpha) * vcl_v1 + double(beta) * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-add on vector with CPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v2[i] * NumericT(long(alpha)); vcl_v1 += vcl_v2 * long(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v2[i] * NumericT(float(alpha)); vcl_v1 += vcl_v2 * float(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += NumericT(double(alpha)) * host_v2[i]; vcl_v1 += double(alpha) * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-add on vector with GPU scalar (right)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + alpha * host_v2[i]; vcl_v1 = vcl_v1 + gpu_alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-add on vector with GPU scalar (left)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + alpha * host_v2[i]; vcl_v1 = vcl_v1 + gpu_alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-add on vector with GPU scalar (both)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = alpha * host_v1[i] + beta * host_v2[i]; vcl_v1 = gpu_alpha * vcl_v1 + gpu_beta * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-add on vector with GPU scalar (both, adding)..." 
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += alpha * host_v1[i] + beta * host_v2[i]; vcl_v1 += gpu_alpha * vcl_v1 + gpu_beta * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-add on vector with GPU scalar (both, subtracting)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += alpha * host_v1[i] - beta * host_v2[i]; vcl_v1 += gpu_alpha * vcl_v1 - gpu_beta * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-add on vector with GPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += alpha * host_v2[i]; vcl_v1 += gpu_alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // division-add // std::cout << "Testing division-add on vector with CPU scalar (right)..." << std::endl; for (size_t i=0; i < host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(3.1415) * host_v1[i]; } proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + host_v2[i] / NumericT(long(alpha)); vcl_v1 = vcl_v1 + vcl_v2 / long(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + host_v2[i] / NumericT(float(alpha)); vcl_v1 = vcl_v1 + vcl_v2 / float(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + host_v2[i] / NumericT(double(alpha)); vcl_v1 = vcl_v1 + vcl_v2 / double(alpha); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-add on vector with CPU scalar (left)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / NumericT(float(alpha)) + host_v2[i]; vcl_v1 = vcl_v1 / float(alpha) + vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / NumericT(double(alpha)) + host_v2[i]; vcl_v1 = vcl_v1 / double(alpha) + vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-add on vector with CPU scalar (both)..." 
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / NumericT(float(alpha)) + host_v2[i] / NumericT(float(beta)); vcl_v1 = vcl_v1 / float(alpha) + vcl_v2 / float(beta); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / NumericT(double(alpha)) + host_v2[i] / NumericT(double(beta)); vcl_v1 = vcl_v1 / double(alpha) + vcl_v2 / double(beta); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-multiply-add on vector with CPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / alpha + host_v2[i] * beta; vcl_v1 = vcl_v1 / alpha + vcl_v2 * beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-division-add on vector with CPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] * alpha + host_v2[i] / beta; vcl_v1 = vcl_v1 * alpha + vcl_v2 / beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-add on vector with CPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v2[i] / alpha; vcl_v1 += vcl_v2 / alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-add on vector with GPU scalar (right)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + host_v2[i] / alpha; vcl_v1 = vcl_v1 + vcl_v2 / gpu_alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-add on vector with GPU scalar (left)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] + host_v2[i] / alpha; vcl_v1 = vcl_v1 + vcl_v2 / gpu_alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-add on vector with GPU scalar (both)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / alpha + host_v2[i] / beta; vcl_v1 = vcl_v1 / gpu_alpha + vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-add on vector with GPU scalar (both, adding)..." 
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v1[i] / alpha + host_v2[i] / beta; vcl_v1 += vcl_v1 / gpu_alpha + vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-add on vector with GPU scalar (both, subtracting)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v1[i] / alpha - host_v2[i] / beta; vcl_v1 += vcl_v1 / gpu_alpha - vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-multiply-add on vector with GPU scalar (adding)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v1[i] / alpha + host_v2[i] * beta; vcl_v1 += vcl_v1 / gpu_alpha + vcl_v2 * gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-division-add on vector with GPU scalar (subtracting)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v1[i] * alpha - host_v2[i] / beta; vcl_v1 += vcl_v1 * gpu_alpha - vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-add on vector with GPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v2[i] * alpha; vcl_v1 += vcl_v2 * gpu_alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // multiply-subtract // std::cout << "Testing multiply-subtract on vector with CPU scalar (right)..." << std::endl; for (size_t i=0; i < host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(3.1415) * host_v1[i]; } proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] - alpha * host_v2[i]; vcl_v1 = vcl_v1 - alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-subtract on vector with CPU scalar (left)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = alpha * host_v1[i] - host_v2[i]; vcl_v1 = alpha * vcl_v1 - vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-subtract on vector with CPU scalar (both)..." 
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = alpha * host_v1[i] - beta * host_v2[i]; vcl_v1 = alpha * vcl_v1 - beta * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-subtract on vector with CPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= alpha * host_v2[i]; vcl_v1 -= alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-subtract on vector with GPU scalar (right)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] - alpha * host_v2[i]; vcl_v1 = vcl_v1 - gpu_alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-subtract on vector with GPU scalar (left)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] - alpha * host_v2[i]; vcl_v1 = vcl_v1 - gpu_alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-subtract on vector with GPU scalar (both)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = alpha * host_v1[i] - beta * host_v2[i]; vcl_v1 = gpu_alpha * vcl_v1 - gpu_beta * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-subtract on vector with GPU scalar (both, adding)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= alpha * host_v1[i] + beta * host_v2[i]; vcl_v1 -= gpu_alpha * vcl_v1 + gpu_beta * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-subtract on vector with GPU scalar (both, subtracting)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= alpha * host_v1[i] - beta * host_v2[i]; vcl_v1 -= gpu_alpha * vcl_v1 - gpu_beta * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-subtract on vector with GPU scalar..." 
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= alpha * host_v2[i]; vcl_v1 -= gpu_alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // division-subtract // std::cout << "Testing division-subtract on vector with CPU scalar (right)..." << std::endl; for (size_t i=0; i < host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(3.1415) * host_v1[i]; } proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] - host_v2[i] / alpha; vcl_v1 = vcl_v1 - vcl_v2 / alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-subtract on vector with CPU scalar (left)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / alpha - host_v2[i]; vcl_v1 = vcl_v1 / alpha - vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-subtract on vector with CPU scalar (both)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / alpha - host_v2[i] / alpha; vcl_v1 = vcl_v1 / alpha - vcl_v2 / alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-subtract on vector with CPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v2[i] / alpha; vcl_v1 -= vcl_v2 / alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-subtract on vector with GPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v2[i] / alpha; vcl_v1 -= vcl_v2 / gpu_alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-subtract on vector with GPU scalar (right)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] - host_v2[i] / alpha; vcl_v1 = vcl_v1 - vcl_v2 / gpu_alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-subtract on vector with GPU scalar (left)..." 
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] - host_v2[i] / alpha; vcl_v1 = vcl_v1 - vcl_v2 / gpu_alpha; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-subtract on vector with GPU scalar (both)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / alpha - host_v2[i] / beta; vcl_v1 = vcl_v1 / gpu_alpha - vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-subtract on vector with GPU scalar (both, adding)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] / alpha + host_v2[i] / beta; vcl_v1 -= vcl_v1 / gpu_alpha + vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-subtract on vector with GPU scalar (both, subtracting)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] / alpha - host_v2[i] / beta; vcl_v1 -= vcl_v1 / gpu_alpha - vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing multiply-division-subtract on vector with GPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] * alpha - host_v2[i] / beta; vcl_v1 = vcl_v1 * gpu_alpha - vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing division-multiply-subtract on vector with GPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / alpha - host_v2[i] * beta; vcl_v1 = vcl_v1 / gpu_alpha - vcl_v2 * gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-division-subtract on vector with GPU scalar (adding)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] * alpha + host_v2[i] / beta; vcl_v1 -= vcl_v1 * gpu_alpha + vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-multiply-subtract on vector with GPU scalar (adding)..." 
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] / alpha + host_v2[i] * beta; vcl_v1 -= vcl_v1 / gpu_alpha + vcl_v2 * gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace multiply-division-subtract on vector with GPU scalar (subtracting)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] * alpha - host_v2[i] / beta; vcl_v1 -= vcl_v1 * gpu_alpha - vcl_v2 / gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-multiply-subtract on vector with GPU scalar (subtracting)..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] / alpha - host_v2[i] * beta; vcl_v1 -= vcl_v1 / gpu_alpha - vcl_v2 * gpu_beta; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing inplace division-subtract on vector with GPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= alpha * host_v2[i]; vcl_v1 -= gpu_alpha * vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // More complicated expressions (for ensuring the operator overloads work correctly) // for (std::size_t i=0; i < host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(3.1415) * host_v1[i]; } proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); std::cout << "Testing three vector additions..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v2[i] + host_v1[i] + host_v2[i]; vcl_v1 = vcl_v2 + vcl_v1 + vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); std::cout << "Testing complicated vector expression with CPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = beta * (host_v1[i] - alpha * host_v2[i]); vcl_v1 = beta * (vcl_v1 - alpha * vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing complicated vector expression with GPU scalar..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = beta * (host_v1[i] - alpha * host_v2[i]); vcl_v1 = gpu_beta * (vcl_v1 - gpu_alpha * vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- for (std::size_t i=0; i < host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); std::cout << "Testing swap..." 
<< std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) { NumericT temp = host_v1[i]; host_v1[i] = host_v2[i]; host_v2[i] = temp; } swap(vcl_v1, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- for (std::size_t i=0; i<host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(5.0) + randomNumber(); } proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); std::cout << "Testing unary operator-..." << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = - host_v2[i]; vcl_v1 = - vcl_v2; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing elementwise multiplication..." << std::endl; std::cout << " v1 = element_prod(v1, v2);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] * host_v2[i]; vcl_v1 = viennacl::linalg::element_prod(vcl_v1, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " v1 += element_prod(v1, v2);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v1[i] * host_v2[i]; vcl_v1 += viennacl::linalg::element_prod(vcl_v1, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " v1 -= element_prod(v1, v2);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] * host_v2[i]; vcl_v1 -= viennacl::linalg::element_prod(vcl_v1, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; /////// std::cout << " v1 = element_prod(v1 + v2, v2);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = (host_v1[i] + host_v2[i]) * host_v2[i]; vcl_v1 = viennacl::linalg::element_prod(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " v1 += element_prod(v1 + v2, v2);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += (host_v1[i] + host_v2[i]) * host_v2[i]; vcl_v1 += viennacl::linalg::element_prod(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " v1 -= element_prod(v1 + v2, v2);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= (host_v1[i] + host_v2[i]) * host_v2[i]; vcl_v1 -= viennacl::linalg::element_prod(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; /////// std::cout << " v1 = element_prod(v1, v2 + v1);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] * (host_v2[i] + host_v1[i]); vcl_v1 = viennacl::linalg::element_prod(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " v1 += element_prod(v1, v2 + v1);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v1[i] * (host_v2[i] + host_v1[i]); vcl_v1 += viennacl::linalg::element_prod(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " v1 -= element_prod(v1, v2 + v1);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] * (host_v2[i] + host_v1[i]); vcl_v1 -= viennacl::linalg::element_prod(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; /////// std::cout << " v1 = element_prod(v1 + v2, v2 + v1);" << std::endl; for (std::size_t 
i=0; i < host_v1.size(); ++i) host_v1[i] = (host_v1[i] + host_v2[i]) * (host_v2[i] + host_v1[i]); vcl_v1 = viennacl::linalg::element_prod(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " v1 += element_prod(v1 + v2, v2 + v1);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += (host_v1[i] + host_v2[i]) * (host_v2[i] + host_v1[i]); vcl_v1 += viennacl::linalg::element_prod(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " v1 -= element_prod(v1 + v2, v2 + v1);" << std::endl; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= (host_v1[i] + host_v2[i]) * (host_v2[i] + host_v1[i]); vcl_v1 -= viennacl::linalg::element_prod(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing elementwise division..." << std::endl; for (std::size_t i=0; i<host_v1.size(); ++i) { host_v1[i] = NumericT(1.0) + randomNumber(); host_v2[i] = NumericT(5.0) + randomNumber(); } proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / host_v2[i]; vcl_v1 = viennacl::linalg::element_div(vcl_v1, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v1[i] / host_v2[i]; vcl_v1 += viennacl::linalg::element_div(vcl_v1, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] / host_v2[i]; vcl_v1 -= viennacl::linalg::element_div(vcl_v1, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; /////// for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = (host_v1[i] + host_v2[i]) / host_v2[i]; vcl_v1 = viennacl::linalg::element_div(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += (host_v1[i] + host_v2[i]) / host_v2[i]; vcl_v1 += viennacl::linalg::element_div(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= (host_v1[i] + host_v2[i]) / host_v2[i]; vcl_v1 -= viennacl::linalg::element_div(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; /////// for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = host_v1[i] / (host_v2[i] + host_v1[i]); vcl_v1 = viennacl::linalg::element_div(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += host_v1[i] / (host_v2[i] + host_v1[i]); vcl_v1 += viennacl::linalg::element_div(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= host_v1[i] / (host_v2[i] + host_v1[i]); vcl_v1 -= viennacl::linalg::element_div(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; /////// for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] = (host_v1[i] + host_v2[i]) / (host_v2[i] + host_v1[i]); vcl_v1 = viennacl::linalg::element_div(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for 
(std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] += (host_v1[i] + host_v2[i]) / (host_v2[i] + host_v1[i]); vcl_v1 += viennacl::linalg::element_div(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; for (std::size_t i=0; i < host_v1.size(); ++i) host_v1[i] -= (host_v1[i] + host_v2[i]) / (host_v2[i] + host_v1[i]); vcl_v1 -= viennacl::linalg::element_div(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing elementwise power function..." << std::endl; for (std::size_t i=0; i<host_v1.size(); ++i) { host_v1[i] = NumericT(1.1) + NumericT(0.5) * randomNumber(); host_v2[i] = NumericT(1.1) + NumericT(0.5) * randomNumber(); } std::vector<NumericT> std_v3(host_v1.size()); vector_proxy<NumericT> host_v3(&std_v3[0], 0, 1, host_v1.size()); proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = std::pow(host_v1[i], host_v2[i]); vcl_v1 = viennacl::linalg::element_pow(vcl_v1, vcl_v2); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 = pow(v1, v2);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] += std::pow(host_v1[i], host_v2[i]); vcl_v1 += viennacl::linalg::element_pow(vcl_v1, vcl_v2); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 += pow(v1, v2);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] -= std::pow(host_v1[i], host_v2[i]); vcl_v1 -= viennacl::linalg::element_pow(vcl_v1, vcl_v2); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 -= pow(v1, v2);" << std::endl; return EXIT_FAILURE; } /////// proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = std::pow(host_v1[i] + host_v2[i], host_v2[i]); vcl_v1 = viennacl::linalg::element_pow(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 = pow(v1 + v2, v2);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] += std::pow(host_v1[i] + host_v2[i], host_v2[i]); vcl_v1 += viennacl::linalg::element_pow(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 += pow(v1 + v2, v2);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] -= std::pow(host_v1[i] + host_v2[i], host_v2[i]); vcl_v1 -= viennacl::linalg::element_pow(vcl_v1 + vcl_v2, vcl_v2); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 -= pow(v1 + v2, v2);" << std::endl; return EXIT_FAILURE; } /////// proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = std::pow(host_v1[i], host_v2[i] + host_v1[i]); vcl_v1 = viennacl::linalg::element_pow(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v3, 
vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 = pow(v1, v2 + v1);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] += std::pow(host_v1[i], host_v2[i] + host_v1[i]); vcl_v1 += viennacl::linalg::element_pow(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 += pow(v1, v2 + v1);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] -= std::pow(host_v1[i], host_v2[i] + host_v1[i]); vcl_v1 -= viennacl::linalg::element_pow(vcl_v1, vcl_v2 + vcl_v1); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 -= pow(v1, v2 + v1);" << std::endl; return EXIT_FAILURE; } /////// proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = std::pow(host_v1[i] + host_v2[i], host_v2[i] + host_v1[i]); vcl_v1 = viennacl::linalg::element_pow(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 = pow(v1 + v2, v2 + v1);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] += std::pow(host_v1[i] + host_v2[i], host_v2[i] + host_v1[i]); vcl_v1 += viennacl::linalg::element_pow(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 += pow(v1 + v2, v2 + v1);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] -= std::pow(host_v1[i] + host_v2[i], host_v2[i] + host_v1[i]); vcl_v1 -= viennacl::linalg::element_pow(vcl_v1 + vcl_v2, vcl_v2 + vcl_v1); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 -= pow(v1 + v2, v2 + v1);" << std::endl; return EXIT_FAILURE; } std::cout << "Testing elementwise power function with alpha on the left..." 
<< std::endl; for (std::size_t i=0; i<host_v1.size(); ++i) { host_v1[i] = NumericT(1.1) + NumericT(0.5) * randomNumber(); host_v2[i] = NumericT(1.1) + NumericT(0.5) * randomNumber(); } proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = std::pow(alpha, host_v2[i]); vcl_v1 = viennacl::linalg::element_pow(alpha, vcl_v2); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 = pow(alpha, v2);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] += std::pow(alpha, host_v2[i]); vcl_v1 += viennacl::linalg::element_pow(alpha, vcl_v2); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 += pow(alpha, v2);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] -= std::pow(alpha, host_v2[i]); vcl_v1 -= viennacl::linalg::element_pow(alpha, vcl_v2); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 -= pow(alpha, v2);" << std::endl; return EXIT_FAILURE; } /////// proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = std::pow(alpha, host_v2[i] + host_v1[i]); vcl_v1 = viennacl::linalg::element_pow(alpha, vcl_v2 + vcl_v1); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 = pow(alpha, v2 + v1);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] += std::pow(alpha, host_v2[i] + host_v1[i]); vcl_v1 += viennacl::linalg::element_pow(alpha, vcl_v2 + vcl_v1); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 += pow(alpha, v2 + v1);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] -= std::pow(alpha, host_v2[i] + host_v1[i]); vcl_v1 -= viennacl::linalg::element_pow(alpha, vcl_v2 + vcl_v1); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 -= pow(alpha, v2 + v1);" << std::endl; return EXIT_FAILURE; } std::cout << "Testing elementwise power function with alpha on the right..." 
<< std::endl; for (std::size_t i=0; i<host_v1.size(); ++i) { host_v1[i] = NumericT(1.1) + NumericT(0.5) * randomNumber(); host_v2[i] = NumericT(1.1) + NumericT(0.5) * randomNumber(); } proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = std::pow(host_v2[i], alpha); vcl_v1 = viennacl::linalg::element_pow(vcl_v2, alpha); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 = pow(v2, alpha);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] += std::pow(host_v2[i], alpha); vcl_v1 += viennacl::linalg::element_pow(vcl_v2, alpha); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 += pow(v2, alpha);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] -= std::pow(host_v2[i], alpha); vcl_v1 -= viennacl::linalg::element_pow(vcl_v2, alpha); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 -= pow(v2, alpha);" << std::endl; return EXIT_FAILURE; } /////// proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = std::pow(host_v2[i] + host_v1[i], alpha); vcl_v1 = viennacl::linalg::element_pow(vcl_v2 + vcl_v1, alpha); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 = pow(v2 + v1, alpha);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] += std::pow(host_v2[i] + host_v1[i], alpha); vcl_v1 += viennacl::linalg::element_pow(vcl_v2 + vcl_v1, alpha); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 += pow(v2 + v1, alpha);" << std::endl; return EXIT_FAILURE; } proxy_copy(host_v1, vcl_v1); for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] = host_v1[i]; for (std::size_t i=0; i<host_v3.size(); ++i) host_v3[i] -= std::pow(host_v2[i] + host_v1[i], alpha); vcl_v1 -= viennacl::linalg::element_pow(vcl_v2 + vcl_v1, alpha); if (check(host_v3, vcl_v1, epsilon) != EXIT_SUCCESS) { std::cerr << "** Failure in v1 -= pow(v2 + v1, alpha);" << std::endl; return EXIT_FAILURE; } std::cout << "Testing unary elementwise operations..." 
<< std::endl; #define GENERATE_UNARY_OP_TEST(FUNCNAME) \ for (std::size_t i=0; i < host_v1.size(); ++i) \ host_v1[i] = NumericT(0.01) + randomNumber() / NumericT(8); \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v2[i] = NumericT(3.1415) * host_v1[i]; \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v3[i] = host_v1[i]; \ proxy_copy(host_v1, vcl_v1); \ proxy_copy(host_v2, vcl_v2); \ \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v1[i] = std::FUNCNAME(host_v2[i]); \ vcl_v1 = viennacl::linalg::element_##FUNCNAME(vcl_v2); \ \ if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) \ { \ std::cout << "Failure at v1 = " << #FUNCNAME << "(v2)" << std::endl; \ return EXIT_FAILURE; \ } \ \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v1[i] = host_v3[i]; \ proxy_copy(host_v3, vcl_v1); \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v1[i] = std::FUNCNAME(host_v1[i] + host_v2[i]); \ vcl_v1 = viennacl::linalg::element_##FUNCNAME(vcl_v1 + vcl_v2); \ \ if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) \ { \ std::cout << "Failure at v1 = " << #FUNCNAME << "(v1 + v2)" << std::endl; \ return EXIT_FAILURE; \ } \ \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v1[i] = host_v3[i]; \ proxy_copy(host_v3, vcl_v1); \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v1[i] += std::FUNCNAME(host_v1[i]); \ vcl_v1 += viennacl::linalg::element_##FUNCNAME(vcl_v1); \ \ if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) \ { \ std::cout << "Failure at v1 += " << #FUNCNAME << "(v2)" << std::endl; \ return EXIT_FAILURE; \ } \ \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v1[i] = host_v3[i]; \ proxy_copy(host_v3, vcl_v1); \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v1[i] += std::FUNCNAME(host_v1[i] + host_v2[i]); \ vcl_v1 += viennacl::linalg::element_##FUNCNAME(vcl_v1 + vcl_v2); \ \ if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) \ { \ std::cout << "Failure at v1 += " << #FUNCNAME << "(v1 + v2)" << std::endl; \ return EXIT_FAILURE; \ } \ \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v1[i] = host_v3[i]; \ proxy_copy(host_v3, vcl_v1); \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v1[i] -= std::FUNCNAME(host_v2[i]); \ vcl_v1 -= viennacl::linalg::element_##FUNCNAME(vcl_v2); \ \ if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) \ { \ std::cout << "Failure at v1 -= " << #FUNCNAME << "(v2)" << std::endl; \ return EXIT_FAILURE; \ } \ \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v1[i] = host_v3[i]; \ proxy_copy(host_v3, vcl_v1); \ for (std::size_t i=0; i<host_v1.size(); ++i) \ host_v1[i] -= std::FUNCNAME(host_v1[i] + host_v2[i]); \ vcl_v1 -= viennacl::linalg::element_##FUNCNAME(vcl_v1 + vcl_v2); \ \ if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) \ { \ std::cout << "Failure at v1 -= " << #FUNCNAME << "(v1 + v2)" << std::endl; \ return EXIT_FAILURE; \ } \ GENERATE_UNARY_OP_TEST(acos); GENERATE_UNARY_OP_TEST(asin); GENERATE_UNARY_OP_TEST(atan); GENERATE_UNARY_OP_TEST(cos); GENERATE_UNARY_OP_TEST(cosh); GENERATE_UNARY_OP_TEST(exp); GENERATE_UNARY_OP_TEST(floor); GENERATE_UNARY_OP_TEST(fabs); GENERATE_UNARY_OP_TEST(log); GENERATE_UNARY_OP_TEST(log10); GENERATE_UNARY_OP_TEST(sin); GENERATE_UNARY_OP_TEST(sinh); GENERATE_UNARY_OP_TEST(fabs); //GENERATE_UNARY_OP_TEST(abs); //OpenCL allows abs on integers only GENERATE_UNARY_OP_TEST(sqrt); GENERATE_UNARY_OP_TEST(tan); GENERATE_UNARY_OP_TEST(tanh); #if __cplusplus > 199711L GENERATE_UNARY_OP_TEST(acosh); GENERATE_UNARY_OP_TEST(asinh); GENERATE_UNARY_OP_TEST(atanh); GENERATE_UNARY_OP_TEST(erf); 
GENERATE_UNARY_OP_TEST(erfc); GENERATE_UNARY_OP_TEST(exp2); GENERATE_UNARY_OP_TEST(exp10); GENERATE_UNARY_OP_TEST(log2); GENERATE_UNARY_OP_TEST(round); GENERATE_UNARY_OP_TEST(rsqrt); GENERATE_UNARY_OP_TEST(sign); GENERATE_UNARY_OP_TEST(trunc); #endif // -------------------------------------------------------------------------- for (std::size_t i=0; i<host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); std::cout << "Testing another complicated vector expression with CPU scalars..." << std::endl; for (std::size_t i=0; i<host_v1.size(); ++i) host_v1[i] = host_v2[i] / alpha + beta * (host_v1[i] - alpha*host_v2[i]); vcl_v1 = vcl_v2 / alpha + beta * (vcl_v1 - alpha*vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing another complicated vector expression with GPU scalars..." << std::endl; for (std::size_t i=0; i<host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i<host_v1.size(); ++i) host_v1[i] = host_v2[i] / alpha + beta * (host_v1[i] - alpha*host_v2[i]); vcl_v1 = vcl_v2 / gpu_alpha + gpu_beta * (vcl_v1 - gpu_alpha*vcl_v2); if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << "Testing lenghty sum of scaled vectors..." << std::endl; for (std::size_t i=0; i<host_v1.size(); ++i) host_v2[i] = NumericT(3.1415) * host_v1[i]; proxy_copy(host_v1, vcl_v1); proxy_copy(host_v2, vcl_v2); for (std::size_t i=0; i<host_v1.size(); ++i) host_v1[i] = host_v2[i] / alpha + beta * host_v1[i] - alpha * host_v2[i] + beta * host_v1[i] - alpha * host_v1[i]; vcl_v1 = vcl_v2 / gpu_alpha + gpu_beta * vcl_v1 - alpha * vcl_v2 + beta * vcl_v1 - alpha * vcl_v1; if (check(host_v1, vcl_v1, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // -------------------------------------------------------------------------- return retval; } template< typename NumericT, typename Epsilon > int test(Epsilon const& epsilon) { int retval = EXIT_SUCCESS; std::size_t size = 24656; viennacl::tools::uniform_random_numbers<NumericT> randomNumber; std::cout << "Running tests for vector of size " << size << std::endl; // // Set up host objects // std::vector<NumericT> std_full_vec(size); std::vector<NumericT> std_full_vec2(std_full_vec.size()); for (std::size_t i=0; i<std_full_vec.size(); ++i) { std_full_vec[i] = NumericT(1.0) + randomNumber(); std_full_vec2[i] = NumericT(1.0) + randomNumber(); } std::size_t r1_start = std_full_vec.size() / 4; std::size_t r1_stop = 2 * std_full_vec.size() / 4; std::size_t r2_start = 2 * std_full_vec2.size() / 4; std::size_t r2_stop = 3 * std_full_vec2.size() / 4; vector_proxy<NumericT> host_range_vec (&std_full_vec[0], r1_start, 1, r1_stop - r1_start); vector_proxy<NumericT> host_range_vec2(&std_full_vec2[0], r2_start, 1, r2_stop - r2_start); std::size_t s1_start = std_full_vec.size() / 4; std::size_t s1_inc = 3; std::size_t s1_size = std_full_vec.size() / 4; std::size_t s2_start = 2 * std_full_vec2.size() / 4; std::size_t s2_inc = 2; std::size_t s2_size = std_full_vec2.size() / 4; vector_proxy<NumericT> host_slice_vec (&std_full_vec[0], s1_start, s1_inc, s1_size); vector_proxy<NumericT> host_slice_vec2(&std_full_vec2[0], s2_start, s2_inc, s2_size); // // Set up ViennaCL objects // viennacl::vector<NumericT> vcl_full_vec(std_full_vec.size()); viennacl::vector<NumericT> vcl_full_vec2(std_full_vec2.size()); viennacl::fast_copy(std_full_vec.begin(), std_full_vec.end(), 
vcl_full_vec.begin()); viennacl::copy(std_full_vec2.begin(), std_full_vec2.end(), vcl_full_vec2.begin()); viennacl::range vcl_r1( vcl_full_vec.size() / 4, 2 * vcl_full_vec.size() / 4); viennacl::range vcl_r2(2 * vcl_full_vec2.size() / 4, 3 * vcl_full_vec2.size() / 4); viennacl::vector_range< viennacl::vector<NumericT> > vcl_range_vec(vcl_full_vec, vcl_r1); viennacl::vector_range< viennacl::vector<NumericT> > vcl_range_vec2(vcl_full_vec2, vcl_r2); { viennacl::vector<NumericT> vcl_short_vec(vcl_range_vec); viennacl::vector<NumericT> vcl_short_vec2 = vcl_range_vec2; std::vector<NumericT> std_short_vec(host_range_vec.size()); for (std::size_t i=0; i<std_short_vec.size(); ++i) std_short_vec[i] = host_range_vec[i]; vector_proxy<NumericT> host_short_vec(&std_short_vec[0], 0, 1, std_short_vec.size()); std::vector<NumericT> std_short_vec2(host_range_vec2.size()); for (std::size_t i=0; i<std_short_vec2.size(); ++i) std_short_vec2[i] = host_range_vec2[i]; vector_proxy<NumericT> host_short_vec2(&std_short_vec2[0], 0, 1, std_short_vec.size()); std::cout << "Testing creation of vectors from range..." << std::endl; if (check(host_short_vec, vcl_short_vec, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; if (check(host_short_vec2, vcl_short_vec2, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; } viennacl::slice vcl_s1( vcl_full_vec.size() / 4, 3, vcl_full_vec.size() / 4); viennacl::slice vcl_s2(2 * vcl_full_vec2.size() / 4, 2, vcl_full_vec2.size() / 4); viennacl::vector_slice< viennacl::vector<NumericT> > vcl_slice_vec(vcl_full_vec, vcl_s1); viennacl::vector_slice< viennacl::vector<NumericT> > vcl_slice_vec2(vcl_full_vec2, vcl_s2); viennacl::vector<NumericT> vcl_short_vec(vcl_slice_vec); viennacl::vector<NumericT> vcl_short_vec2 = vcl_slice_vec2; std::vector<NumericT> std_short_vec(host_slice_vec.size()); for (std::size_t i=0; i<std_short_vec.size(); ++i) std_short_vec[i] = host_slice_vec[i]; vector_proxy<NumericT> host_short_vec(&std_short_vec[0], 0, 1, std_short_vec.size()); std::vector<NumericT> std_short_vec2(host_slice_vec2.size()); for (std::size_t i=0; i<std_short_vec2.size(); ++i) std_short_vec2[i] = host_slice_vec2[i]; vector_proxy<NumericT> host_short_vec2(&std_short_vec2[0], 0, 1, std_short_vec.size()); std::cout << "Testing creation of vectors from slice..." 
<< std::endl; if (check(host_short_vec, vcl_short_vec, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; if (check(host_short_vec2, vcl_short_vec2, epsilon) != EXIT_SUCCESS) return EXIT_FAILURE; // // Now start running tests for vectors, ranges and slices: // std::cout << " ** vcl_v1 = vector, vcl_v2 = vector **" << std::endl; retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_short_vec, vcl_short_vec2); if (retval != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " ** vcl_v1 = vector, vcl_v2 = range **" << std::endl; retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_short_vec, vcl_range_vec2); if (retval != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " ** vcl_v1 = vector, vcl_v2 = slice **" << std::endl; retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_short_vec, vcl_slice_vec2); if (retval != EXIT_SUCCESS) return EXIT_FAILURE; /////// std::cout << " ** vcl_v1 = range, vcl_v2 = vector **" << std::endl; retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_range_vec, vcl_short_vec2); if (retval != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " ** vcl_v1 = range, vcl_v2 = range **" << std::endl; retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_range_vec, vcl_range_vec2); if (retval != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " ** vcl_v1 = range, vcl_v2 = slice **" << std::endl; retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_range_vec, vcl_slice_vec2); if (retval != EXIT_SUCCESS) return EXIT_FAILURE; /////// std::cout << " ** vcl_v1 = slice, vcl_v2 = vector **" << std::endl; retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_slice_vec, vcl_short_vec2); if (retval != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " ** vcl_v1 = slice, vcl_v2 = range **" << std::endl; retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_slice_vec, vcl_range_vec2); if (retval != EXIT_SUCCESS) return EXIT_FAILURE; std::cout << " ** vcl_v1 = slice, vcl_v2 = slice **" << std::endl; retval = test<NumericT>(epsilon, host_short_vec, host_short_vec2, vcl_slice_vec, vcl_slice_vec2); if (retval != EXIT_SUCCESS) return EXIT_FAILURE; return EXIT_SUCCESS; } // // ------------------------------------------------------------- // int main() { std::cout << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << "## Test :: Vector" << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << std::endl; int retval = EXIT_SUCCESS; std::cout << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << std::endl; { typedef float NumericT; NumericT epsilon = static_cast<NumericT>(1.0E-2); std::cout << "# Testing setup:" << std::endl; std::cout << " eps: " << epsilon << std::endl; std::cout << " numeric: float" << std::endl; retval = test<NumericT>(epsilon); if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl; else return retval; } std::cout << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << std::endl; #ifdef VIENNACL_WITH_OPENCL if ( viennacl::ocl::current_device().double_support() ) #endif { { typedef double NumericT; NumericT epsilon = 1.0E-10; std::cout << "# Testing setup:" << std::endl; std::cout << " eps: " << 
epsilon << std::endl; std::cout << " numeric: double" << std::endl; retval = test<NumericT>(epsilon); if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl; else return retval; } std::cout << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << std::endl; } std::cout << std::endl; std::cout << "------- Test completed --------" << std::endl; std::cout << std::endl; return retval; }
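// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original test file): every test above ends
// with check(host_vX, vcl_vX, epsilon), which compares a host-side reference
// vector against a ViennaCL vector using a relative tolerance. The helper
// below only illustrates the general shape of such a comparison; the real
// check(), vector_proxy and proxy_copy helpers are defined earlier in this
// file and may differ in detail. It assumes the ViennaCL and standard headers
// already included above and requires only operator[] / size() on the
// host-side type.
// ---------------------------------------------------------------------------
template<typename HostVectorT, typename NumericT>
int check_sketch(HostVectorT const & host, viennacl::vector<NumericT> const & vcl_vec, double epsilon)
{
  std::vector<NumericT> tmp(vcl_vec.size());
  viennacl::copy(vcl_vec, tmp);   // transfer the device data back to the host

  for (std::size_t i = 0; i < tmp.size(); ++i)
  {
    double diff  = std::fabs(double(host[i]) - double(tmp[i]));
    double scale = std::fabs(double(host[i])) > std::fabs(double(tmp[i]))
                 ? std::fabs(double(host[i])) : std::fabs(double(tmp[i]));
    if (scale > 0.0 && diff / scale > epsilon)
      return EXIT_FAILURE;        // relative deviation exceeds the tolerance
  }
  return EXIT_SUCCESS;
}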
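// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original source below): the Chunk kernels
// in the following file work on a bit-sliced representation, where chunk cell
// v[b] appears to hold bit b of every element, one element per bit lane, and
// k_chunkToNormal / k_normalToChunk convert between that layout and the usual
// per-element layout. The toy host function below shows the per-element
// gather for a hypothetical 32x32 bit block; the name, the fixed size 32 and
// the plain unsigned types are illustrative assumptions only.
// ---------------------------------------------------------------------------
static inline unsigned int gather_element_sketch(const unsigned int sliced[32], unsigned int e)
{
    unsigned int out = 0u;
    for (unsigned int b = 0u; b < 32u; ++b)
        out |= ((sliced[b] >> e) & 1u) << b;   // bit lane e of word b becomes bit b of element e
    return out;
}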
//#include "Tests.h" #include "Chunk.cuh" #include <bitset> #include <iostream> #include <cstdio> using namespace std; #ifdef __GPU namespace FFF{ /*------- CHUNK ------*/ /* * Chunk constant memory */ __constant__ idx_t p_Mod[max_nonzero_coefs_in_mod]; __constant__ idx_t p_ModLen; __constant__ Element element_mul; __constant__ Chunk c; /* * FFT Constant Memory */ __constant__ chunk_cell_t d_chunk_cell_mask[Chunk::log_elements_in_chunk+2]; __constant__ chunk_cell_t d_partition[1<<bits_in_byte]; __constant__ chunk_cell_t d_alter_mask[Chunk::log_elements_in_chunk+1]; __constant__ Chunk d_linear_mul[1]; __constant__ Chunk d_ilinear_mul[1]; #define DUP_ODD_BITS(cell) cell = ((cell) & 0x55555555) | (((cell)&0x55555555)<<1) #define DUP_EVEN_BITS(cell) cell = ((cell) & 0xaaaaaaaa) | (((cell)&0xaaaaaaaa)>>1) __device__ void a_chunkToNormal(Chunk *d_a, Elements_Chunk *d_b, idx_t idx) { cell_t ans = 0; idx_t element_idx = idx & andMask(Chunk::log_elements_in_chunk); idx_t cell_idx = idx >> Chunk::log_elements_in_chunk; for(unsigned int i = cell_idx<<Element::log_bits_in_cell ; i < ((cell_idx+1)<<Element::log_bits_in_cell); ++i) ans^=(((cell_t)(((d_a->v[i])>>(element_idx))&1))<<(i-(cell_idx<<Element::log_bits_in_cell))); d_b->e[element_idx].c[cell_idx]=ans; } __global__ void k_chunkToNormal(Chunk *d_a,Elements_Chunk *d_b , len_t len) { const unsigned int threads_in_chunk = Chunk::elements_in_chunk * Element::element_len; __shared__ Chunk input[max_block_size / threads_in_chunk]; idx_t idx = threadIdx.x + blockDim.x*blockIdx.x; if(idx >= len*threads_in_chunk) return; idx_t chunkIdx = (idx) / (Element::element_len*Chunk::elements_in_chunk); idx_t in_chunkIdx = (idx & (Element::element_len * Chunk::elements_in_chunk - 1)); idx_t chunks_in_block = blockDim.x / Chunk::cells_in_chunk; idx_t inBlockChunkIdx = chunkIdx & (threads_in_chunk-1); for(unsigned int i = 0 ; i < sizeof(cell_t)/sizeof(chunk_cell_t) ; ++i){ input[inBlockChunkIdx].v[in_chunkIdx + i*threads_in_chunk] = d_a[chunkIdx].v[in_chunkIdx+i*threads_in_chunk]; } a_chunkToNormal(&(input[inBlockChunkIdx]), &(d_b[chunkIdx]),in_chunkIdx); } __host__ void Chunk::chunkToNormal(Chunk(*h_a), Elements_Chunk(*h_b), len_t len, bool copy) { //Declare device variables Chunk (*d_a); Elements_Chunk (*d_b); const unsigned int num_element = len*elements_in_chunk; const unsigned int threads = Element::element_len * num_element; //Define Block and Grid Size. dim3 blockSize(max_block_size,1,1); dim3 gridSize(sizeCiel(threads,max_block_size),1,1); if(copy){ //Allocate Memory on GPU. (global) cudaMalloc(&d_a,sizeof(Chunk)*len); cudaMalloc(&d_b,sizeof(Elements_Chunk)*len); //Copy memory to GPU. cudaMemcpy(d_a,h_a,sizeof(Chunk)*len,cudaMemcpyHostToDevice); } else { d_a = h_a; d_b = h_b; } //Launch Kernel k_chunkToNormal<<<gridSize,blockSize>>>(d_a,d_b,len); if(copy){ //Copy results back to memory cudaMemcpy(h_b,d_b,sizeof(Elements_Chunk)*len,cudaMemcpyDeviceToHost); //Free allocated memory. 
cudaFree(d_a); cudaFree(d_b); } } __device__ void a_normalToChunk(Elements_Chunk *d_a, Chunk *d_b, idx_t idx) { chunk_cell_t ans = 0; idx_t cell_idx = idx>>Element::log_bits_in_cell; for(unsigned int i = 0 ; i < Chunk::elements_in_chunk ; ++i) ans^=((((d_a->e[i].c[cell_idx])>>(idx& andMask(Element::log_bits_in_cell)))&1)<<i); d_b->v[idx]=ans; } __global__ void k_normalToChunk(Elements_Chunk *d_a,Chunk *d_b , len_t len) { idx_t idx = threadIdx.x + blockDim.x*blockIdx.x; if(idx >= (len<<Chunk::log_cells_in_chunk)) return; idx_t chunkIdx = (idx) >> Chunk::log_cells_in_chunk; idx_t in_chunkIdx = (idx & andMask(Chunk::log_cells_in_chunk)); a_normalToChunk(&(d_a[chunkIdx]),&(d_b[chunkIdx]),in_chunkIdx); } __host__ void Chunk::normalToChunk(Elements_Chunk(*h_a), Chunk (*h_b), len_t len,bool copy) { //Declare device variables Elements_Chunk (*d_a); Chunk (*d_b); const unsigned int threads = len<<Chunk::log_cells_in_chunk; //Define Block and Grid Size. dim3 blockSize(max_block_size,1,1); dim3 gridSize(sizeCiel(threads,max_block_size),1,1); //Allocate Memory on GPU. (global) if(copy){ cudaMalloc(&d_a,sizeof(Elements_Chunk)*len); cudaMalloc(&d_b,sizeof(Chunk)*len); //Copy memory to GPU. cudaMemcpy(d_a,h_a,sizeof(Elements_Chunk)*len,cudaMemcpyHostToDevice); } else{ d_a = h_a; d_b = h_b; } //Launch Kernel k_normalToChunk<<<gridSize,blockSize>>>(d_a,d_b,len); //Copy results back to memory if(copy){ cudaMemcpy(h_b,d_b,sizeof(Chunk)*len,cudaMemcpyDeviceToHost); //Free allocated memory. cudaFree(d_a); cudaFree(d_b); } } __host__ void Chunk::setMod(){ cudaMemcpyToSymbol(p_Mod,&(Element::irr_poly_index[ord>>log_warp_size]),sizeof(idx_t)*max_nonzero_coefs_in_mod); cudaMemcpyToSymbol(p_ModLen,&(Element::mod_len[ord>>log_warp_size]),sizeof(idx_t)); } //__device__ void Chunk::chunk_reduce_xor(Chunk *a, Chunk *c_bottom, Chunk*c_top, idx_t idx) //{ // chunk_cell_t ans=c_bottom->v[idx]; // unsigned int temp_idx; // for(idx_t i = 0 ; i < p_ModLen ; ++i) // { // for(idx_t j = 0 ; j < p_ModLen ; ++j) // { // temp_idx = idx+(ord<<1)-p_Mod[i]-p_Mod[j]; // if(temp_idx >= (ord<<1)-p_Mod[j] && temp_idx < (ord<<1)) // ans^=c_top->v[temp_idx-ord]; // } // } // a->v[idx]^=ans; //} __device__ void Chunk::chunk_xor(Chunk *a, Chunk* b, idx_t idx){ a->v[idx]^=b->v[idx]; } __device__ void Chunk::chunk_reduce_xor(Chunk *a, Chunk *c_bottom, idx_t idx,Chunk* to_xor ,int shift) { // replaced p_ModLen-1 by 4 for(unsigned int i = 0 ; i < (ord>>1); i+=warp_size) #pragma unroll for(unsigned int j = 0 ; j < 4 ; ++j) { c_bottom->v[(ord>>1)+idx+i+p_Mod[j]]^=c_bottom->v[(ord>>1)+ord+idx+i]; } for(unsigned int i = 0 ; i < (ord>>1); i+=warp_size) #pragma unroll for(unsigned int j = 0 ; (j) < 4 ; ++j) { c_bottom->v[idx+i+p_Mod[j]]^=c_bottom->v[ord+idx+i]; } for(unsigned int i = 0 ; i < ord ; i+=warp_size){ to_xor->v[idx+i]^=(c_bottom->v[idx+i]>>shift); } } __device__ void Chunk::chunk_reduce(Chunk *a, Chunk *c_bottom, idx_t idx) { //replaced p_ModLen with 5 for(unsigned int i = 0 ; i < (ord>>1); i+=warp_size) for(unsigned int j = 0 ; j+1 < 5 ; ++j) { c_bottom->v[(ord>>1)+idx+i+p_Mod[j]]^=c_bottom->v[(ord>>1)+ord+idx+i]; } for(unsigned int i = 0 ; i < (ord>>1); i+=warp_size) for(unsigned int j = 0 ; (j+1) < 5 ; ++j) { c_bottom->v[idx+i+p_Mod[j]]^=c_bottom->v[ord+idx+i]; } for(unsigned int i = 0 ; i < ord ; i+=warp_size){ a->v[idx+i]=c_bottom->v[idx+i]; } } __device__ void Chunk::chunkClmul(Chunk (*a), Element (*e), idx_t idx, Chunk (*c)) { chunk_cell_t my_ans[2][(ord>>(log_warp_size))]={0}; for(unsigned int k = 0 ; k < ord ; ++k) { 
if(EXTRACT_BIT(e->c,k)) for(unsigned int t = 0 ; t < (ord>>log_warp_size); ++t) { int b = (k>(idx+warp_size*t)); my_ans[b][t]^=a->v[idx+warp_size*t+(b<<log_ord)-k]; } } for(unsigned int i = 0 ; i < (ord>>log_warp_size); ++i) { c->v[idx+i*warp_size] = my_ans[0][i]; c->v[ord+idx+i*warp_size] = my_ans[1][i]; } } __device__ void Chunk::aux_k_clmul(Chunk *a, Element* e, len_t len,Chunk* c_shared) { idx_t idx = blockDim.x*blockIdx.x+threadIdx.x; const idx_t chunk_idx = (idx >> Chunk::log_threads_in_chunk); const idx_t in_chunk_idx = idx & andMask(Chunk::log_threads_in_chunk); const idx_t shared_chunk_idx = ((idx & andMask(log_max_block_size)) >> (Chunk::log_threads_in_chunk)); Chunk* my_shared_chunk = c_shared+(shared_chunk_idx<<1); for(unsigned int i =0 ; i < Chunk::ord ;i+=warp_size) my_shared_chunk->v[in_chunk_idx+i]=a[chunk_idx].v[in_chunk_idx+i]; Chunk::chunkClmul(my_shared_chunk,e,in_chunk_idx,my_shared_chunk); Chunk::chunk_reduce(a+chunk_idx,my_shared_chunk,in_chunk_idx); } __global__ void k_clmul(Chunk *a,Element *e,len_t len ) { const idx_t shared_len = max_block_size>>Chunk::log_threads_in_chunk; __shared__ Chunk c_shared[shared_len<<1]; idx_t idx = blockDim.x*blockIdx.x+threadIdx.x; if(idx>=(len<<Chunk::log_threads_in_chunk)) return; Chunk::aux_k_clmul(a,e,len,c_shared); } __host__ void Chunk::mul(Chunk (*h_a),Element (*h_e),len_t len, Chunk (*h_res)){ #ifdef __MEASURE cudaEvent_t start,stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); #endif //Declare device variables Chunk (*d_a); Element (*d_e); //Define Block and Grid Size. dim3 blockSize(max_block_size,1,1); dim3 gridSize(sizeCiel(len<<Chunk::log_threads_in_chunk,max_block_size),1,1); //Allocate Memory on GPU. (global) cudaMalloc(&d_a,sizeof(Chunk)*len); cudaMalloc(&d_e,sizeof(Element)); //Copy memory to GPU. cudaMemcpy(d_a,h_a,sizeof(Chunk)*len,cudaMemcpyHostToDevice); cudaMemcpy(d_e,h_e,sizeof(Element),cudaMemcpyHostToDevice); //Set Mod setMod(); // setElementMul(h_e); //Launch Kernel #ifdef __MEASURE cudaEventRecord(start,0); #endif k_clmul<<<gridSize,blockSize>>>(d_a,d_e,len); #ifdef __MEASURE cudaEventRecord(stop,0); #endif //Copy results to host cudaMemcpy(h_res,d_a,sizeof(Chunk)*len,cudaMemcpyDeviceToHost); //Free allocated memory. cudaFree(d_a); cudaFree(d_e); #ifdef __MEASURE cudaEventElapsedTime(&time,start,stop); printf("Time for the mul: %f ms on %d chunks \n",time,len); #endif } __global__ void k_add(Chunk (*a), Chunk (*b), len_t l) { unsigned int idx = threadIdx.x+blockIdx.x*blockDim.x; if(idx>=l*Chunk::cells_in_chunk) return; ((chunk_cell_t*)a)[idx]^=((chunk_cell_t*)b)[idx]; } __host__ void Chunk::add(Chunk (*h_a),Chunk (*h_b),len_t len) { //Declare device variables Chunk (*d_a); Chunk (*d_b); //Define Block and Grid Size. dim3 blockSize(max_block_size,1,1); dim3 gridSize(sizeCiel(max_block_size,len),1,1); //Allocate Memory on GPU. (global) cudaMalloc(&d_a,sizeof(Chunk)*len); cudaMalloc(&d_b,sizeof(Chunk)*len); //Copy memory to GPU. cudaMemcpy(d_a,h_a,sizeof(Chunk)*len,cudaMemcpyHostToDevice); cudaMemcpy(d_b,h_b,sizeof(Chunk)*len,cudaMemcpyHostToDevice); //Launch Kernel k_add<<<gridSize,blockSize>>>(d_a,d_b,len); //Copy results to CPU memory cudaMemcpy(h_a,d_a,sizeof(Chunk)*len,cudaMemcpyDeviceToHost); //Free allocated memory. 
cudaFree(d_a); cudaFree(d_b); } __host__ void Chunk::print() const { for(unsigned int i = 0 ; i < cells_in_chunk ; ++i){ cout << bitset<bits_in_byte*sizeof(chunk_cell_t)>(this->v[i])<<endl; } } __host__ void Elements_Chunk::print() const{ for(unsigned int i = 0 ; i < elements_in_elements_chunk ; ++i){ Element::printElement(this->e[i]); cout<<endl; } } //Mul chunk by another chunk __device__ void Chunk::clmul_by_chunk_bShuffle_ixor_mask(const Chunk& a,const Chunk& e, const idx_t in_chunk_idx, Chunk& c, const int shift, const idx_t mask_idx){ /* * Carryles multiplication */ chunk_cell_t a_reg[2]; chunk_cell_t e_reg; a_reg[0] = a.v[in_chunk_idx]; a_reg[1] = a.v[in_chunk_idx+warp_size]; a_reg[0]^=(a_reg[0]&d_alter_mask[mask_idx+1])<<(1<<mask_idx); a_reg[1]^=(a_reg[1]&d_alter_mask[mask_idx+1])<<(1<<mask_idx); chunk_cell_t my_ans[2][2]={0}; int b; for(unsigned k = 0 ; k < warp_size; ++k){ e_reg=e.v[k]; b= (in_chunk_idx>= k); my_ans[0][0] ^= (b*__shfl_up(a_reg[0],k)) & e_reg; my_ans[0][1] ^= ((1-b)*__shfl_down(a_reg[0],warp_size-k))& e_reg; my_ans[0][1] ^= (b*__shfl_up(a_reg[1],k)) & e_reg; my_ans[1][0] ^= ((1-b)*__shfl_down(a_reg[1],32-k))& e_reg; e_reg=e.v[k+warp_size];; my_ans[0][1] ^= (b*__shfl_up(a_reg[0],k)) & e_reg; my_ans[1][0] ^= ((1-b)*__shfl_down(a_reg[0],32-k))& e_reg; my_ans[1][0] ^= (b*__shfl_up(a_reg[1],k)) & e_reg; my_ans[1][1] ^= ((1-b)*__shfl_down(a_reg[1],32-k))& e_reg; } /* * Reduce */ #pragma unroll 4 for(unsigned int i = 0 ; i < 4 ; ++i){ b=(in_chunk_idx<p_Mod[i]); my_ans[1][0]^=(b*__shfl_down(my_ans[1][1],32-p_Mod[i])); my_ans[0][1]^=((1-b)*__shfl_up(my_ans[1][1],p_Mod[i])); } #pragma unroll 4 for(unsigned int i = 0 ; i < 4 ; ++i){ b=(in_chunk_idx<p_Mod[i]); my_ans[0][1]^=(b * __shfl_down(my_ans[1][0],32-p_Mod[i])); my_ans[0][0]^=((1-b) * __shfl_up(my_ans[1][0],p_Mod[i])); } a_reg[0] ^= (my_ans[0][0] >> shift); a_reg[1] ^= (my_ans[0][1] >> shift); c.v[in_chunk_idx] = a_reg[0]; c.v[in_chunk_idx+warp_size] = a_reg[1]; } __device__ void Chunk::clmul_by_chunk_bShuffle_xor_mask(const Chunk& a,const Chunk& e, const idx_t in_chunk_idx, Chunk& c, const int shift, const idx_t mask_idx){ /* * Carryles multiplication */ chunk_cell_t a_reg[2]; chunk_cell_t e_reg; a_reg[0] = a.v[in_chunk_idx]; a_reg[1] = a.v[in_chunk_idx+warp_size]; chunk_cell_t my_ans[2][2]={0}; int b; for(unsigned k = 0 ; k < warp_size; ++k){ e_reg=e.v[k]; b= (in_chunk_idx>= k); my_ans[0][0] ^= (b*__shfl_up(a_reg[0],k)) & e_reg; my_ans[0][1] ^= ((1-b)*__shfl_down(a_reg[0],warp_size-k))& e_reg; my_ans[0][1] ^= (b*__shfl_up(a_reg[1],k)) & e_reg; my_ans[1][0] ^= ((1-b)*__shfl_down(a_reg[1],32-k))& e_reg; e_reg=e.v[k+warp_size];; my_ans[0][1] ^= (b*__shfl_up(a_reg[0],k)) & e_reg; my_ans[1][0] ^= ((1-b)*__shfl_down(a_reg[0],32-k))& e_reg; my_ans[1][0] ^= (b*__shfl_up(a_reg[1],k)) & e_reg; my_ans[1][1] ^= ((1-b)*__shfl_down(a_reg[1],32-k))& e_reg; } /* * Reduce */ #pragma unroll 4 for(unsigned int i = 0 ; i < 4 ; ++i){ b=(in_chunk_idx<p_Mod[i]); my_ans[1][0]^=(b*__shfl_down(my_ans[1][1],32-p_Mod[i])); my_ans[0][1]^=((1-b)*__shfl_up(my_ans[1][1],p_Mod[i])); } #pragma unroll 4 for(unsigned int i = 0 ; i < 4 ; ++i){ b=(in_chunk_idx<p_Mod[i]); my_ans[0][1]^=(b * __shfl_down(my_ans[1][0],32-p_Mod[i])); my_ans[0][0]^=((1-b) * __shfl_up(my_ans[1][0],p_Mod[i])); } a_reg[0] ^= (my_ans[0][0] >> shift); a_reg[1] ^= (my_ans[0][1] >> shift); a_reg[0]^=(a_reg[0]&d_alter_mask[mask_idx+1])<<(1<<mask_idx); a_reg[1]^=(a_reg[1]&d_alter_mask[mask_idx+1])<<(1<<mask_idx); c.v[in_chunk_idx] = a_reg[0]; c.v[in_chunk_idx+warp_size] = a_reg[1]; } 
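/*
 * Illustrative reference (added for exposition, not part of the original
 * library): a scalar, host-side carry-less multiply-and-reduce over GF(2^64),
 * i.e. the per-element operation that the bit-sliced warp-shuffle routines
 * above evaluate for a whole chunk at once (one element per bit position of
 * the 32-bit cells).  The low-order exponents of the irreducible polynomial
 * come from p_Mod in the real code; the pentanomial x^64 + x^4 + x^3 + x + 1
 * used below is an assumption chosen for illustration only.
 */
static inline void clmul64_sketch(unsigned long long a, unsigned long long b,
                                  unsigned long long out[2])
{
    unsigned long long lo = 0, hi = 0;
    for (int k = 0; k < 64; ++k)                  // schoolbook shift-and-xor clmul
        if ((b >> k) & 1ULL) {
            lo ^= a << k;                         // low 64 bits of a * x^k
            if (k) hi ^= a >> (64 - k);           // bits spilling past degree 63
        }
    out[0] = lo;
    out[1] = hi;
}

static inline unsigned long long gf64_reduce_sketch(const unsigned long long prod[2])
{
    static const int mod[4] = { 4, 3, 1, 0 };     // assumed low-order exponents (cf. p_Mod)
    unsigned long long lo = prod[0], hi = prod[1];
    for (int round = 0; round < 2; ++round) {     // two folds suffice for such small exponents
        unsigned long long carry = 0;
        for (int j = 0; j < 4; ++j) {
            lo ^= hi << mod[j];                   // x^64 == x^4 + x^3 + x + 1 (mod p)
            if (mod[j]) carry ^= hi >> (64 - mod[j]);
        }
        hi = carry;
    }
    return lo;
}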
__device__ void Chunk::clmul_by_chunk_bShuffle_xor(const Chunk& a,const Chunk& e, const idx_t in_chunk_idx, Chunk& c, const int shift){ /* * Carryles multiplication */ chunk_cell_t a_reg[2]; chunk_cell_t e_reg; a_reg[0] = a.v[in_chunk_idx]; a_reg[1] = a.v[in_chunk_idx+warp_size]; chunk_cell_t my_ans[2][2]={0}; int b; for(unsigned k = 0 ; k < warp_size; ++k){ e_reg=e.v[k]; b= (in_chunk_idx>= k); my_ans[0][0] ^= (b*__shfl_up(a_reg[0],k)) & e_reg; my_ans[0][1] ^= ((1-b)*__shfl_down(a_reg[0],warp_size-k))& e_reg; my_ans[0][1] ^= (b*__shfl_up(a_reg[1],k)) & e_reg; my_ans[1][0] ^= ((1-b)*__shfl_down(a_reg[1],32-k))& e_reg; e_reg=e.v[k+warp_size];; my_ans[0][1] ^= (b*__shfl_up(a_reg[0],k)) & e_reg; my_ans[1][0] ^= ((1-b)*__shfl_down(a_reg[0],32-k))& e_reg; my_ans[1][0] ^= (b*__shfl_up(a_reg[1],k)) & e_reg; my_ans[1][1] ^= ((1-b)*__shfl_down(a_reg[1],32-k))& e_reg; } /* * Reduce */ #pragma unroll 4 for(unsigned int i = 0 ; i < 4 ; ++i){ b=(in_chunk_idx<p_Mod[i]); my_ans[1][0]^=(b*__shfl_down(my_ans[1][1],32-p_Mod[i])); my_ans[0][1]^=((1-b)*__shfl_up(my_ans[1][1],p_Mod[i])); } #pragma unroll 4 for(unsigned int i = 0 ; i < 4 ; ++i){ b=(in_chunk_idx<p_Mod[i]); my_ans[0][1]^=(b * __shfl_down(my_ans[1][0],32-p_Mod[i])); my_ans[0][0]^=((1-b) * __shfl_up(my_ans[1][0],p_Mod[i])); } c.v[in_chunk_idx] ^= (my_ans[0][0] >> shift); c.v[in_chunk_idx+warp_size] ^= (my_ans[0][1] >> shift); } __device__ void Chunk::clmul_by_chunk_bShuffle(const Chunk& a,const Chunk& e, const idx_t in_chunk_idx, Chunk& c){ /* * Carryles multiplication */ chunk_cell_t a_reg[2]; chunk_cell_t e_reg; a_reg[0] = a.v[in_chunk_idx]; a_reg[1] = a.v[in_chunk_idx+warp_size]; chunk_cell_t my_ans[2][2]={0}; int b; for(unsigned k = 0 ; k < warp_size; ++k){ e_reg=e.v[k]; b= (in_chunk_idx>= k); my_ans[0][0] ^= (b*__shfl_up(a_reg[0],k)) & e_reg; my_ans[0][1] ^= ((1-b)*__shfl_down(a_reg[0],warp_size-k))& e_reg; my_ans[0][1] ^= (b*__shfl_up(a_reg[1],k)) & e_reg; my_ans[1][0] ^= ((1-b)*__shfl_down(a_reg[1],32-k))& e_reg; e_reg=e.v[k+warp_size]; my_ans[0][1] ^= (b*__shfl_up(a_reg[0],k)) & e_reg; my_ans[1][0] ^= ((1-b)*__shfl_down(a_reg[0],32-k))& e_reg; my_ans[1][0] ^= (b*__shfl_up(a_reg[1],k)) & e_reg; my_ans[1][1] ^= ((1-b)*__shfl_down(a_reg[1],32-k))& e_reg; } /* * Reduce */ #pragma unroll 4 for(unsigned int i = 0 ; i < 4 ; ++i){ b=(in_chunk_idx<p_Mod[i]); my_ans[1][0]^=(b*__shfl_down(my_ans[1][1],32-p_Mod[i])); my_ans[0][1]^=((1-b)*__shfl_up(my_ans[1][1],p_Mod[i])); } #pragma unroll 4 for(unsigned int i = 0 ; i < 4 ; ++i){ b=(in_chunk_idx<p_Mod[i]); my_ans[0][1]^=(b * __shfl_down(my_ans[1][0],32-p_Mod[i])); my_ans[0][0]^=((1-b) * __shfl_up(my_ans[1][0],p_Mod[i])); } c.v[in_chunk_idx] = my_ans[0][0]; c.v[in_chunk_idx+warp_size] = my_ans[0][1]; } __device__ void Chunk::clmul_by_chunk(Chunk& a, Chunk& e, idx_t idx, Chunk* c){ chunk_cell_t my_ans[2][(ord>>(log_warp_size))]={0}; int b; // const int l = ord>>log_warp_size; for(unsigned int k = 0 ; k < ord ; ++k) #pragma unroll 2 for(unsigned int t = 0 ; t < 2; ++t) { b = (k>(idx+warp_size*t)); my_ans[b][t]^=a.v[idx+warp_size*t+(b<<log_ord)-k]&e.v[k]; } #pragma unroll 2 for(unsigned int i = 0 ; i < 2; ++i) { c->v[idx+i*warp_size] = my_ans[0][i]; c->v[ord+idx+i*warp_size] = my_ans[1][i]; } } __global__ void k_mul_chunk(Chunk* cs, Chunk* c, len_t cs_len) { const idx_t shared_len = max_block_size>>Chunk::log_threads_in_chunk; __shared__ Chunk c_shared[shared_len<<1]; idx_t idx = blockDim.x*blockIdx.x+threadIdx.x; if(idx>=(cs_len<<Chunk::log_threads_in_chunk)) return; const idx_t chunk_idx = (idx >> 
Chunk::log_threads_in_chunk); const idx_t in_chunk_idx = idx & andMask(Chunk::log_threads_in_chunk); const idx_t shared_chunk_idx = ((idx & andMask(log_max_block_size)) >> (Chunk::log_threads_in_chunk)); Chunk* my_shared_chunk = c_shared+(shared_chunk_idx<<1); for(unsigned int i =0 ; i < Chunk::ord ;i+=warp_size){ my_shared_chunk->v[in_chunk_idx+i]=cs[chunk_idx].v[in_chunk_idx+i]; my_shared_chunk[1].v[in_chunk_idx+i]=c->v[in_chunk_idx+i]; } Chunk::clmul_by_chunk(my_shared_chunk[0],my_shared_chunk[1],in_chunk_idx,my_shared_chunk); Chunk::chunk_reduce(cs+chunk_idx,my_shared_chunk,in_chunk_idx); } __global__ void k_mul_chunk_xor(Chunk* cs, Chunk* c, len_t cs_len,Chunk* to_xor, int shift = 0) { const idx_t shared_len = max_block_size>>Chunk::log_threads_in_chunk; __shared__ Chunk c_shared[shared_len<<1]; idx_t idx = blockDim.x*blockIdx.x+threadIdx.x; if(idx>=(cs_len<<Chunk::log_threads_in_chunk)) return; const idx_t chunk_idx = (idx >> Chunk::log_threads_in_chunk); const idx_t in_chunk_idx = idx & andMask(Chunk::log_threads_in_chunk); const idx_t shared_chunk_idx = ((idx & andMask(log_max_block_size)) >> (Chunk::log_threads_in_chunk)); Chunk* my_shared_chunk = c_shared+(shared_chunk_idx<<1); for(unsigned int i =0 ; i < Chunk::ord ;i+=warp_size){ my_shared_chunk->v[in_chunk_idx+i]=cs[chunk_idx].v[in_chunk_idx+i]; my_shared_chunk[1].v[in_chunk_idx+i]=c->v[in_chunk_idx+i]; } Chunk::clmul_by_chunk(my_shared_chunk[0],my_shared_chunk[1],in_chunk_idx,my_shared_chunk); Chunk::chunk_reduce_xor(cs+chunk_idx,my_shared_chunk,in_chunk_idx,to_xor,shift); } //Mul a chunk by a chunk void Chunk::chunk_mul(Chunk (* h_a), Chunk (*h_b) , len_t len, Chunk (*h_res), bool copy, bool do_xor, int shift){ #ifdef __MEASURE cudaEvent_t start,stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); #endif //Declare device variables Chunk (*d_a); Chunk (*d_b); //Define Block and Grid Size. dim3 blockSize(max_block_size,1,1); dim3 gridSize(sizeCiel(len<<Chunk::log_threads_in_chunk,max_block_size),1,1); if(copy){ //Allocate Memory on GPU. (global) cudaMalloc(&d_a,sizeof(Chunk)*len); cudaMalloc(&d_b,sizeof(Chunk)); //Copy memory to GPU. cudaMemcpy(d_a,h_a,sizeof(Chunk)*len,cudaMemcpyHostToDevice); cudaMemcpy(d_b,h_b,sizeof(Chunk),cudaMemcpyHostToDevice); } else { d_a = h_a; d_b = h_b; } //Set Mod setMod(); // setElementMul(h_e); //Launch Kernel #ifdef __MEASURE cudaEventRecord(start,0); #endif if(do_xor) k_mul_chunk_xor<<<gridSize,blockSize>>>(d_a,d_b,len,d_a,shift); else k_mul_chunk<<<gridSize,blockSize>>>(d_a,d_b,len); #ifdef __MEASURE cudaEventRecord(stop,0); #endif if(copy){ //Copy results to host cudaMemcpy(h_res,d_a,sizeof(Chunk)*len,cudaMemcpyDeviceToHost); //Free allocated memory. 
cudaFree(d_a); cudaFree(d_b); } #ifdef __MEASURE cudaEventElapsedTime(&time,start,stop); printf("Time for the mul: %f ms on %d chunks \n",time,len); #endif } /*-------------------------------------*/ /*** GPU FFT ***/ const unsigned int multThreadsInBlock = 1024; __global__ void k_multiExp_mult_bShuffle(Chunk* d_a, Chunk* d_b , len_t b_len , len_t a_len){ idx_t idx = blockDim.x*blockIdx.x+threadIdx.x; if(idx>=(a_len<<Chunk::log_threads_in_chunk)) return; const idx_t chunk_idx = (idx >> Chunk::log_threads_in_chunk); const idx_t in_chunk_idx = idx & andMask(Chunk::log_threads_in_chunk); // const idx_t shared_chunk_idx = ; Chunk::clmul_by_chunk_bShuffle(d_a[chunk_idx],d_b[chunk_idx & (b_len-1)],in_chunk_idx,d_a[chunk_idx]); // Chunk::chunk_reduce(d_a+chunk_idx,my_shared_chunk,in_chunk_idx); } __global__ void k_multiExp_mult(Chunk* d_a, Chunk* d_b , len_t b_len , len_t a_len){ // const idx_t shared_len = max_block_size>>Chunk::log_threads_in_chunk; // __shared__ Chunk c_shared[shared_len<<1]; idx_t idx = blockDim.x*blockIdx.x+threadIdx.x; if(idx>=(a_len<<Chunk::log_threads_in_chunk)) return; const idx_t chunk_idx = (idx >> Chunk::log_threads_in_chunk); const idx_t in_chunk_idx = idx & andMask(Chunk::log_threads_in_chunk); // const idx_t shared_chunk_idx = ; // Chunk* my_shared_chunk = c_shared+(((idx & andMask(log_max_block_size)) >> (Chunk::log_threads_in_chunk))<<1); // for(unsigned int i =0 ; i < Chunk::ord ;i+=warp_size){ // my_shared_chunk->v[in_chunk_idx+i]=d_a[chunk_idx].v[in_chunk_idx+i]; // my_shared_chunk[1].v[in_chunk_idx+i]=d_b[chunk_idx & (b_len - 1)].v[in_chunk_idx+i]; // } Chunk::clmul_by_chunk_bShuffle(d_a[chunk_idx],d_b[chunk_idx & (b_len - 1)],in_chunk_idx,d_a[chunk_idx]); // Chunk::chunk_reduce(d_a+chunk_idx,my_shared_chunk,in_chunk_idx); } /* * Gets as input: * d_a - the polynomial. * d_b - the multiexponent of current b_m for on strip of elements. * b_len - length of d_b in chunks. * * Multiplies chunk d_a[i] by chunk d_b[i mod b_len]. */ void GPU_FFT::multiExp_mult(len_t a_len, Chunk* d_a , Chunk* d_b , len_t b_len){ unsigned int threads = (a_len<<Chunk::log_elements_in_chunk); dim3 blockSize(multThreadsInBlock,1,1); dim3 gridSize(sizeCiel(threads,multThreadsInBlock),1,1); //Launch Kernel k_multiExp_mult_bShuffle<<<gridSize,blockSize>>>(d_a,d_b,b_len, threads>>Chunk::log_elements_in_chunk); } void GPU_FFT::multiExponentiate_gpu(const FFT* fft,Chunk* d_chunk_P, len_t p_len, len_t dim, Chunk* d_exp ){ //Copy exps to memory. if(dim<=Chunk::log_elements_in_chunk){ cudaMemcpy(d_exp,fft->gpu_exp[fft->basis.getSize()-dim], sizeof(Chunk),cudaMemcpyHostToDevice); multiExp_mult(p_len,d_chunk_P,d_exp,1); } else { cudaMemcpy(d_exp,fft->gpu_exp[fft->basis.getSize()-dim], sizeof(Chunk)*(1<<(dim-Chunk::log_elements_in_chunk)),cudaMemcpyHostToDevice); multiExp_mult(p_len,d_chunk_P,d_exp,1<<(dim-Chunk::log_elements_in_chunk)); } } void GPU_FFT::imultiExponentiate_gpu(const FFT* fft,Chunk* d_chunk_P, len_t p_len, len_t dim, Chunk* d_exp ){ //Copy exps to memory. 
if(dim<=Chunk::log_elements_in_chunk){ cudaMemcpy(d_exp,fft->gpu_i_exp[fft->basis.getSize()-dim], sizeof(Chunk),cudaMemcpyHostToDevice); multiExp_mult(p_len,d_chunk_P,d_exp,1); } else { cudaMemcpy(d_exp,fft->gpu_i_exp[fft->basis.getSize()-dim], sizeof(Chunk)*(1<<(dim-Chunk::log_elements_in_chunk)),cudaMemcpyHostToDevice); multiExp_mult(p_len,d_chunk_P,d_exp,1<<(dim-Chunk::log_elements_in_chunk)); } } __device__ void taylor_smaller_than_chunk(Chunk* chunk,idx_t in_chunk_idx, len_t t_dim, len_t p_len, idx_t idx){ if(idx >= ((p_len) << (Chunk::log_cells_in_chunk))) return; //Performs the rest of the expansion. chunk_cell_t cell = chunk->v[in_chunk_idx]; for(; t_dim >=2 ; --t_dim){ cell ^= (cell & (d_chunk_cell_mask[t_dim]<<((1<<t_dim)-(1<<(t_dim-2)))))>>(1<<(t_dim-2)); cell ^= (cell & (d_chunk_cell_mask[t_dim]<<(1<<(t_dim-1))))>>(1<<(t_dim-2)); } chunk->v[in_chunk_idx]= cell; } __global__ void k_taylorExpansion_iteration_large(Chunk* d_chunk_P , len_t p_len , len_t t_dim){ idx_t idx = threadIdx.x + blockIdx.x*blockDim.x; idx_t in_chunk_idx = idx & (Chunk::cells_in_chunk-1); /* * flag = true if current sub-polynomial fits in a thread block. */ bool flag = (t_dim-Chunk::log_elements_in_chunk <= log_max_block_size-Chunk::log_cells_in_chunk + 2); if(idx >= ((p_len) << (Chunk::log_cells_in_chunk-2))) return; do { len_t sub_len = (1<<(t_dim-Chunk::log_elements_in_chunk)); idx_t chunk_idx = idx >> (Chunk::log_cells_in_chunk); chunk_idx = (chunk_idx /(sub_len>>2))*(sub_len) + ((chunk_idx) & ((sub_len>>2)-1)); d_chunk_P[(sub_len>>1) + chunk_idx ].v[in_chunk_idx] ^= d_chunk_P[(3*(sub_len>>2)) + chunk_idx].v[in_chunk_idx]; d_chunk_P[(sub_len>>2) + chunk_idx ].v[in_chunk_idx] ^= d_chunk_P[(sub_len>>1) + chunk_idx].v[in_chunk_idx]; if(flag) __syncthreads(); --t_dim; } while ( flag && t_dim > Chunk::log_elements_in_chunk + 1); //If number of threads needed is less than a thread block - we can just continue! } __global__ void k_taylorExpansion_iteration_twoChunks(Chunk* d_chunk_P, len_t p_len, len_t t_dim){ idx_t idx = threadIdx.x + blockIdx.x*blockDim.x; idx_t in_chunk_idx = idx & (Chunk::cells_in_chunk-1); if(idx >= ((p_len) << (Chunk::log_cells_in_chunk-1))) return; idx_t chunk_idx= (idx >> (Chunk::log_cells_in_chunk) ) << 1; d_chunk_P[(chunk_idx)+1].v[in_chunk_idx]^= (d_chunk_P[(chunk_idx)+1].v[in_chunk_idx]>>(Chunk::elements_in_chunk>>1)); d_chunk_P[(chunk_idx)].v[in_chunk_idx]^= (d_chunk_P[(chunk_idx)+1].v[in_chunk_idx]<<(Chunk::elements_in_chunk>>1)); } __global__ void k_taylorExpansion_iteration_singleChunk(Chunk* d_chunk_P, len_t p_len, len_t t_dim){ idx_t idx = threadIdx.x + blockIdx.x*blockDim.x; idx_t in_chunk_idx = idx & (Chunk::cells_in_chunk-1); //Performs the rest of the expansion. 
idx_t chunk_idx = (idx >> (Chunk::log_cells_in_chunk)); taylor_smaller_than_chunk(d_chunk_P+chunk_idx,in_chunk_idx,t_dim,p_len,idx); } void taylorExpansion_iteration(const FFT* fft, Chunk * d_chunk_P , len_t p_len , len_t t_dim){ unsigned int threads; dim3 blockSize(max_block_size,1,1); if( t_dim >= Chunk::log_elements_in_chunk + 2){ threads = p_len<<(Chunk::log_cells_in_chunk-2); dim3 gridSize(sizeCiel(threads,max_block_size),1,1); k_taylorExpansion_iteration_large<<<gridSize,blockSize>>>(d_chunk_P,p_len,t_dim); } else if (t_dim == Chunk::log_elements_in_chunk + 1){ threads = p_len <<(Chunk::log_cells_in_chunk-1); dim3 gridSize(sizeCiel(threads,max_block_size),1,1); k_taylorExpansion_iteration_twoChunks<<<gridSize,blockSize>>>(d_chunk_P,p_len,t_dim); } else { threads = p_len << Chunk::log_cells_in_chunk; dim3 gridSize(sizeCiel(threads,max_block_size),1,1); k_taylorExpansion_iteration_singleChunk<<<gridSize,blockSize>>>(d_chunk_P,p_len,t_dim); } // k_taylorExpansion_iteration<<<gridSize,blockSize>>>(d_chunk_P,p_len,t_dim); } void GPU_FFT::taylorExpansion_gpu(const FFT* fft, Chunk* d_chunk_P, len_t p_len , len_t dim){ len_t t_dim = dim; /* * The sub-polynomial requires more than a single thread block. */ while(t_dim+Chunk::log_cells_in_chunk > Chunk::log_elements_in_chunk+log_max_block_size + 2){ taylorExpansion_iteration(fft,d_chunk_P,p_len,t_dim); --t_dim; } /* * The sub-polynomial requires more than 2 chunks BUT less than a single thread block. */ if(t_dim >= Chunk::log_elements_in_chunk + 2){ taylorExpansion_iteration(fft,d_chunk_P,p_len,t_dim); t_dim = Chunk::log_elements_in_chunk+1; } /* * Each sub-polynomial takes exactly two chunks. */ if(t_dim == Chunk::log_elements_in_chunk + 1){ taylorExpansion_iteration(fft,d_chunk_P,p_len,t_dim); --t_dim; } /* * Each sub-polynomial takes at most one chunk. */ taylorExpansion_iteration(fft,d_chunk_P,p_len,t_dim); } /* * Input: * 1) d_chunk_p - The polynomial on device's memory. * 2) p_len - number of chunks in d_chunk_p. * 3) chunk_idx - the chunk current thread has to deal with. * 4) in_chunk_idx - the number of the cell the current thread deals with. * 5) t_dim - The dim of the original (input) subpolynomial. * * This function performs what a single thread does when performing the partition function, on a single chunk. 
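 *
 * Semantically, partition de-interleaves the coefficients of each
 * sub-polynomial: even-indexed coefficients are gathered into the lower half
 * and odd-indexed ones into the upper half, e.g. a size-8 sub-polynomial
 * (c0,c1,...,c7) becomes (c0,c2,c4,c6,c1,c3,c5,c7).  (Worked example added
 * for illustration; inside a chunk the shuffle is carried out with the
 * precomputed d_chunk_cell_mask bit masks, across chunks by
 * partition_general below.)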
*/ __device__ void partition_in_chunk(Chunk* d_chunk_p, len_t p_len , idx_t in_chunk_idx, len_t t_dim){ chunk_cell_t ans = d_chunk_p->v[in_chunk_idx]; for(unsigned int i = 2 ; i <= Chunk::log_elements_in_chunk && i<=t_dim ; ++i){ ans = (ans & d_chunk_cell_mask[i]) | (ans & (d_chunk_cell_mask[i]<<((1<<(i))-(1<<(i-2))))) | (ans & (d_chunk_cell_mask[i]<<(1<<(i-1))))>>(1<<(i-2)) | (ans & (d_chunk_cell_mask[i]<<(1<<(i-2))))<<(1<<(i-2)); } d_chunk_p->v[in_chunk_idx]=ans; } __device__ void partition_two_chunks(Chunk* d_chunk_p, idx_t in_chunk_idx){ chunk_cell_t ans[2]; chunk_cell_t load[2]; const chunk_cell_t mask = d_chunk_cell_mask[Chunk::log_elements_in_chunk+1]; load[0]=d_chunk_p->v[in_chunk_idx]; load[1]=d_chunk_p[1].v[in_chunk_idx]; ans[0]=(load[0] & mask) | ((load[1] & mask)<<(Chunk::elements_in_chunk>>1)); ans[1]=(load[1] & (mask<< (Chunk::elements_in_chunk>>1)) )| ((load[0] >> (Chunk::elements_in_chunk>>1)) & mask); d_chunk_p->v[in_chunk_idx] = ans[0]; d_chunk_p[1].v[in_chunk_idx] = ans[1]; } __device__ void partition_general(Chunk* d_chunk_p_src, Chunk* d_chunk_p_dst, idx_t chunk_idx, idx_t in_chunk_idx, idx_t t_dim){ if(chunk_idx & 1) d_chunk_p_dst[(chunk_idx>>1) + (1<<(t_dim-1))].v[in_chunk_idx] = d_chunk_p_src[chunk_idx].v[in_chunk_idx]; else d_chunk_p_dst[chunk_idx>>1].v[in_chunk_idx] = d_chunk_p_src[chunk_idx].v[in_chunk_idx]; } __global__ void k_partition_iteration_two_chunks(Chunk* d_chunk_p_src, len_t p_len){ idx_t idx = threadIdx.x + blockIdx.x * blockDim.x; idx_t chunk_idx = (idx >> Chunk::log_cells_in_chunk)<<1; idx_t in_chunk_idx = idx & andMask(Chunk::log_cells_in_chunk); if(chunk_idx >= p_len){ return; } partition_two_chunks(d_chunk_p_src+chunk_idx, in_chunk_idx); } __global__ void k_partition_iteration_in_chunk(Chunk* d_chunk_p_src, len_t p_len, len_t t_dim){ idx_t idx = threadIdx.x + blockIdx.x * blockDim.x; idx_t chunk_idx = idx >> Chunk::log_cells_in_chunk; idx_t in_chunk_idx = idx & andMask(Chunk::log_cells_in_chunk); if(chunk_idx >= p_len){ return; } partition_in_chunk(d_chunk_p_src+chunk_idx,p_len,in_chunk_idx,t_dim); } __global__ void k_partition_iteration_general(Chunk* d_chunk_p_src, Chunk* d_chunk_p_dst, len_t p_len, len_t t_dim){ idx_t idx = threadIdx.x + blockIdx.x * blockDim.x; idx_t chunk_idx = idx >> Chunk::log_cells_in_chunk; idx_t in_chunk_idx = idx & andMask(Chunk::log_cells_in_chunk); idx_t base = chunk_idx ^ (chunk_idx & andMask(t_dim-Chunk::log_elements_in_chunk)); chunk_idx &= andMask(t_dim-Chunk::log_elements_in_chunk); if(base+chunk_idx >= p_len) return; partition_general(d_chunk_p_src+base,d_chunk_p_dst+base, chunk_idx, in_chunk_idx,t_dim-Chunk::log_elements_in_chunk ); } /* * If t_dim > Chunk::log_elements_in_chunk+1 the result is written in dst, otherwise it will be written in src. 
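 * For example, assuming 32 elements per chunk (log_elements_in_chunk = 5, an
 * illustrative value), a call with t_dim = 8 also runs the general
 * cross-chunk pass, writes its output to dst and returns true, so the caller
 * must continue on dst (fft_gpu swaps the two buffers in that case); with
 * t_dim = 5 or 6 only the in-chunk / two-chunk kernels run, the data stays
 * in src, and false is returned.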
*/ bool GPU_FFT::partition(Chunk* d_chunk_p_src, Chunk* d_chunk_p_dst, len_t p_len, len_t t_dim){ len_t threads = p_len << Chunk::log_cells_in_chunk; dim3 blockSize(max_block_size,1,1); dim3 gridSize(sizeCiel(threads,max_block_size),1,1); k_partition_iteration_in_chunk<<<gridSize,blockSize>>>(d_chunk_p_src,p_len,t_dim); if(t_dim >= Chunk::log_elements_in_chunk +1){ dim3 gridSizeTwoChunks(sizeCiel(threads>>1,max_block_size),1,1); k_partition_iteration_two_chunks<<<gridSizeTwoChunks,blockSize>>>(d_chunk_p_src,p_len); } if(t_dim > Chunk::log_elements_in_chunk+1){ k_partition_iteration_general<<<gridSize,blockSize>>>(d_chunk_p_src,d_chunk_p_dst,p_len,t_dim); return true; } return false; } //__global__ void k_xor_chunk_vector_by_single_chuk(Chunk* chunk_vec, Chunk* single_chunk, len_t chunk_vec_len){ // idx_t idx = threadIdx.x + blockIdx.x*blockDim.x; // idx_t chunk_idx = idx >> Chunk::log_cells_in_chunk; // idx_t in_chunk_idx = idx & andMask(Chunk::log_cells_in_chunk); // if(chunk_idx >= chunk_vec_len){ // return; // } // chunk_vec[chunk_idx].v[in_chunk_idx] ^= single_chunk->v[in_chunk_idx]; // return; //} __global__ void k_copy_and_shift_vec(Chunk* d_chunk_src, Chunk* d_chunk_dst, len_t p_len){ const idx_t shared_len = max_block_size>>Chunk::log_threads_in_chunk; __shared__ Chunk c_shared[shared_len<<1]; idx_t idx = blockDim.x*blockIdx.x+threadIdx.x; const idx_t chunk_idx = (idx >> Chunk::log_threads_in_chunk); if(chunk_idx >= p_len){ return; } const idx_t in_chunk_idx = idx & andMask(Chunk::log_threads_in_chunk); const idx_t shared_chunk_idx = ((idx & andMask(log_max_block_size)) >> (Chunk::log_threads_in_chunk)); Chunk* my_shared_chunk = c_shared+(shared_chunk_idx<<1); for(unsigned int i =0 ; i < Chunk::ord ;i+=warp_size){ my_shared_chunk->v[in_chunk_idx+i]=d_chunk_src[chunk_idx].v[in_chunk_idx+i]; my_shared_chunk[1].v[in_chunk_idx+i]=d_linear_mul->v[in_chunk_idx+i]; } chunk_cell_t tmp; for(unsigned int i = 0 ; i < Element::ord ; i+=warp_size){ tmp = my_shared_chunk->v[in_chunk_idx+i]; my_shared_chunk->v[in_chunk_idx+i] = (tmp & 0xaaaaaaaa) ^ ((tmp & 0xaaaaaaaa)>>1); d_chunk_dst[chunk_idx].v[in_chunk_idx+i] = (tmp & 0x55555555) ^ ((tmp & 0x55555555)<<1); } Chunk::clmul_by_chunk(my_shared_chunk[0],my_shared_chunk[1],in_chunk_idx,my_shared_chunk); Chunk::chunk_reduce(d_chunk_src+chunk_idx,my_shared_chunk,in_chunk_idx); for(unsigned int i = 0 ; i < Element::ord ; i+=warp_size){ d_chunk_src[chunk_idx].v[in_chunk_idx+i] ^= d_chunk_dst[chunk_idx].v[in_chunk_idx+i]; } return; } //__global__ void k_xor_chunk_vectors(Chunk* d_chunk, Chunk* x, len_t p_len){ // idx_t idx = threadIdx.x + blockIdx.x*blockDim.x; // idx_t chunk_idx = idx >> Chunk::log_cells_in_chunk; // idx_t in_chunk_idx = idx & andMask(Chunk::log_cells_in_chunk); // if(chunk_idx >= p_len){ // return; // } // d_chunk[chunk_idx].v[in_chunk_idx] ^= x[chunk_idx].v[in_chunk_idx]; //} void GPU_FFT::linearEvaluation(Chunk* d_chunk_p,Chunk* d_chunk_p_cpy, len_t p_len){ len_t threads = p_len << Chunk::log_threads_in_chunk; dim3 blockSize(max_block_size,1,1); dim3 gridSize(sizeCiel(threads,max_block_size),1,1); k_copy_and_shift_vec<<<gridSize,blockSize>>>(d_chunk_p,d_chunk_p_cpy,p_len); } __global__ void k_subspaceAdd_general(Chunk* d_a, len_t a_len, len_t b_len){ idx_t idx = blockDim.x*blockIdx.x+threadIdx.x; idx_t chunk_idx = (idx >> Chunk::log_cells_in_chunk); chunk_idx = ((chunk_idx / b_len)*(2*b_len)) +b_len+(chunk_idx & (b_len-1)); if(chunk_idx >= a_len){ return; } const idx_t in_chunk_idx = idx & andMask(Chunk::log_cells_in_chunk); 
d_a[chunk_idx].v[in_chunk_idx]^=d_a[chunk_idx-b_len].v[in_chunk_idx]; } __global__ void k_subspaceMult_general(Chunk* d_a, Chunk* d_b , len_t b_len , len_t a_len){ const idx_t shared_len = max_block_size>>Chunk::log_threads_in_chunk; __shared__ Chunk c_shared[shared_len<<1]; idx_t idx = blockDim.x*blockIdx.x+threadIdx.x; idx_t chunk_idx = (((idx >> Chunk::log_threads_in_chunk))/b_len)*(2*b_len)+b_len; const idx_t b_chunk_idx = (idx>>Chunk::log_threads_in_chunk) & (b_len - 1); chunk_idx+=b_chunk_idx; if(chunk_idx >= a_len){ return; } const idx_t in_chunk_idx = idx & andMask(Chunk::log_threads_in_chunk); const idx_t shared_chunk_idx = ((idx & andMask(log_max_block_size)) >> (Chunk::log_threads_in_chunk)); Chunk* my_shared_chunk = c_shared+(shared_chunk_idx<<1); for(unsigned int i =0 ; i < Chunk::ord ;i+=warp_size){ my_shared_chunk->v[in_chunk_idx+i]=d_a[chunk_idx].v[in_chunk_idx+i]; my_shared_chunk[1].v[in_chunk_idx+i]=d_b[b_chunk_idx].v[in_chunk_idx+i]; } Chunk::clmul_by_chunk_bShuffle_xor(d_a[chunk_idx],d_b[b_chunk_idx],in_chunk_idx,d_a[chunk_idx-b_len],0); // Chunk::clmul_by_chunk(my_shared_chunk[0],my_shared_chunk[1],in_chunk_idx,my_shared_chunk); // Chunk::chunk_reduce_xor(d_a+chunk_idx,my_shared_chunk,in_chunk_idx,d_a+chunk_idx-b_len,0); } __global__ void k_subspaceMult_chunk_and_add(Chunk* d_a, Chunk* d_b , len_t log_elements_in_b ,len_t a_len ){ idx_t idx = blockDim.x*blockIdx.x+threadIdx.x; const idx_t chunk_idx = (idx >> Chunk::log_threads_in_chunk); if(chunk_idx >= a_len){ return; } const idx_t in_chunk_idx = idx & andMask(Chunk::log_threads_in_chunk); Chunk::clmul_by_chunk_bShuffle_xor_mask(d_a[chunk_idx],*d_b,in_chunk_idx,d_a[chunk_idx],1<<log_elements_in_b,log_elements_in_b); } /* * Dim = Size of U. */ void GPU_FFT::WFromUV(Chunk* d_chunk_p, len_t p_len, Chunk* subspace, len_t dim){ if(dim>=Chunk::log_elements_in_chunk){ len_t threadsMul = p_len << (Chunk::log_threads_in_chunk-1); len_t threadsAdd = p_len << (Chunk::log_cells_in_chunk-1); dim3 block(multThreadsInBlock,1,1); dim3 gridMul(sizeCiel(threadsMul,multThreadsInBlock),1,1); dim3 gridAdd(sizeCiel(threadsAdd,multThreadsInBlock),1,1); k_subspaceMult_general<<<gridMul,block>>>(d_chunk_p, subspace, (1<<(dim-Chunk::log_elements_in_chunk)),p_len); k_subspaceAdd_general<<<gridAdd,block>>>(d_chunk_p,p_len,1<<(dim-Chunk::log_elements_in_chunk)); } else { len_t threadsMul = p_len << Chunk::log_threads_in_chunk; dim3 block(multThreadsInBlock,1,1); dim3 gridMul(sizeCiel(threadsMul,multThreadsInBlock),1,1); k_subspaceMult_chunk_and_add<<<gridMul,block>>>(d_chunk_p,subspace, dim , p_len); } } void GPU_FFT::setUpConstantMemory(const FFT* fft){ /* * Masks for taylor expansion on small dimensions. */ cudaMemcpyToSymbol(d_chunk_cell_mask,taylorExp_masks,sizeof(chunk_cell_t)*(Chunk::log_elements_in_chunk+2)); /* * Masks for WFromUV */ cudaMemcpyToSymbol(d_alter_mask,alter_masks,sizeof(chunk_cell_t)*(Chunk::log_elements_in_chunk+1)); /* * Table for partition operation on small dimension. */ cudaMemcpyToSymbol(d_partition,partition_byte,sizeof(chunk_cell_t)*(1<<bits_in_byte)); /* * Chunks to multiply and add when calculating linear functions. */ cudaMemcpyToSymbol(d_linear_mul,&fft->linear_mul,sizeof(Chunk)); cudaMemcpyToSymbol(d_ilinear_mul,&fft->ilinear_mul,sizeof(Chunk)); } /* * p is a pointer to a chunk array on gpu, l is its length. * prints that array. 
*/ void printChunkOnGPU(Chunk* p , len_t l){ // TODO: No real need for printing and this break the building of FFT as separate library /* Chunk* h_p = Tests::copyFromGPU(p,l); for(unsigned int i = 0 ; i < l ; ++i){ h_p[i].print(); std::cout << std::endl << std::endl; } free(h_p); std::cout << std::endl << std::endl; */ } /* * This is the gpu implementation of the FFT when it fits into a single thread block. * In that case, the whole FFT can be calculated using a single kernel invocation and some additional * synchronization primitives. * * In this implementation, each WARP is responsible for a single chunk, therefore if 32 threads compose a single warp and * we use 1024 threads in a thread block, then each thread block is responsible for 32 chunks. * * 0) First we load ALL subspaces and exponents into global memory in a special pre-allocated array. * We also allocate 64 Chunks long array in shared memory. * * 1) We load the shared memory with the relevant polynomial (depends on the index). * 2) Series of Multiexp -> taylorExp -> partition, all inside local memory. * 2.1) Before Each Multiexp we load 32 chunks of exps to shared memory. * 3) Linear evaluation. * 4) Series of WFromUV, while loading subspaces from global memory. * * This whole implementation assumes that: * Chunk::log_cells_in_chunk = 1+ Chunk::log_elements_in_chunk */ __device__ void InTB_preMulExpand( Chunk* const d_a, const Chunk* const d_b, const len_t b_len , const idx_t idx , const idx_t chunkIdx , const idx_t in_chunk_idx){ chunk_cell_t load[2]; load[0]=d_a[chunkIdx].v[in_chunk_idx]; load[1]=d_a[chunkIdx].v[in_chunk_idx+Chunk::elements_in_chunk]; __syncthreads(); d_a[chunkIdx<<1].v[in_chunk_idx] = load[0]; d_a[chunkIdx<<1].v[in_chunk_idx + Chunk::elements_in_chunk] = load[1]; d_a[(chunkIdx<<1)+1].v[in_chunk_idx] = d_b[chunkIdx & (b_len-1)].v[in_chunk_idx]; d_a[(chunkIdx<<1)+1].v[in_chunk_idx + Chunk::elements_in_chunk] = d_b[chunkIdx & (b_len-1)].v[in_chunk_idx + Chunk::elements_in_chunk]; __syncthreads(); } /* * Takes every second chunk and puts all of them in the first half. * Second half output is unknown. * 1) d_a - Pointer for shmem array of the polynomial. * 2) idx - index of the thread. * 3) chunkIdx - (idx / Chunk::elemetns_in_chunk). * 4) in_chunk_idx - (idx % Chunk::elements_in_chunk). */ __device__ void InTB_postMulShrink( Chunk * const d_a, const idx_t idx , const idx_t chunkIdx , const idx_t in_chunk_idx){ chunk_cell_t load[2]; load[0]=d_a[chunkIdx<<1].v[in_chunk_idx]; load[1]=d_a[chunkIdx<<1].v[in_chunk_idx+Chunk::elements_in_chunk]; __syncthreads(); d_a[chunkIdx].v[in_chunk_idx]=load[0]; d_a[chunkIdx].v[in_chunk_idx+Chunk::elements_in_chunk]=load[1]; __syncthreads(); } /* * Multiplies d_a[0] by d_a[1] and writes the answer at d_a[0]. * The state of d_a[1] is unknown at the end. * * 1) d_a - the chunk the mult. * 2) in_chunk_idx - Is thread index % Chunk::elements_in_chunk */ __device__ void InTB_Mult(Chunk* const d_a, const idx_t in_chunk_idx){ Chunk::clmul_by_chunk(*d_a,d_a[1],in_chunk_idx,d_a); Chunk::chunk_reduce(d_a,d_a,in_chunk_idx); __syncthreads(); } /* * This is the multi exponentiation. * 1) d_a - pointer to the shmem. * 2) exp - pointer to the exponents to multiply (global mem). * 3) dim - dimension of multiexponentiation. * 4) idx - thread index. * 5) chunkIdx - idx/Chunk::elements_in_chunk. * 6) in_chunk_idx - idx%Chunk::elements_in_chunk. 
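 * 7) half_d_a_len - number of chunks in each half of the shared-memory
 *    polynomial buffer (the buffer holds 2*half_d_a_len chunks); the routine
 *    processes both halves, using the first half of the buffer as the
 *    working area for both.
 * (Note added for clarity: in the signature below the exponent parameter is
 *  exp_len, the number of exponent chunks, rather than a dimension.)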
*/ __device__ void InTB_MultiExp( Chunk* const d_a, const Chunk* const exp, const len_t exp_len, const idx_t idx, const idx_t chunkIdx, const idx_t in_chunk_idx, const len_t half_d_a_len){ chunk_cell_t load[2]; chunk_cell_t swap; load[0] = d_a[half_d_a_len + chunkIdx].v[in_chunk_idx]; load[1] = d_a[half_d_a_len + chunkIdx].v[in_chunk_idx+Chunk::elements_in_chunk]; InTB_preMulExpand(d_a,exp,exp_len,idx,chunkIdx,in_chunk_idx); InTB_Mult(d_a+(chunkIdx<<1),in_chunk_idx); InTB_postMulShrink(d_a,idx,chunkIdx,in_chunk_idx); swap = load[0]; load[0]=d_a[chunkIdx].v[in_chunk_idx]; d_a[chunkIdx].v[in_chunk_idx] = swap; swap = load[1]; load[1]=d_a[chunkIdx].v[in_chunk_idx+Chunk::elements_in_chunk]; d_a[chunkIdx].v[in_chunk_idx + Chunk::elements_in_chunk] = swap; InTB_preMulExpand(d_a,exp+((half_d_a_len) & (exp_len>>1)),exp_len,idx,chunkIdx,in_chunk_idx); InTB_Mult(d_a+(chunkIdx<<1),in_chunk_idx); InTB_postMulShrink(d_a,idx,chunkIdx,in_chunk_idx); d_a[half_d_a_len + chunkIdx].v[in_chunk_idx] = d_a[chunkIdx].v[in_chunk_idx]; d_a[half_d_a_len + chunkIdx].v[in_chunk_idx + Chunk::elements_in_chunk] = d_a[chunkIdx].v[in_chunk_idx + Chunk::elements_in_chunk]; d_a[chunkIdx].v[in_chunk_idx] = load[0]; d_a[chunkIdx].v[in_chunk_idx + Chunk::elements_in_chunk] = load[1]; __syncthreads(); } /* * Taylor Expansion * * 1) d_a is the pointer to the beginning of the polynomial in shmem. * 2) dim>=2. Also dim<=10. * 3) idx - the idx of the thread. * 4) in_chunk_idx - (idx % (Chunk::chunk_cells_in_chunk)). * 5) chunkIdx - index of chunk INSIDE sub-polynomial. */ __device__ void InTB_TaylorExp( Chunk* const d_a, idx_t dim, const idx_t idx, const idx_t in_chunk_idx, const idx_t chunkIdx, const idx_t half_d_a_len){ len_t l,s; /* * If more than a single chunk is needed for a subpolynomial */ if(dim> Chunk::log_elements_in_chunk){ for(; dim > Chunk::log_elements_in_chunk+1 ; --dim){ l=dim-Chunk::log_elements_in_chunk; s=1<<(l-2); l=((chunkIdx>>(l-2))<<l)+(chunkIdx&andMask(l-2)); d_a[l+(2*s)].v[in_chunk_idx]^=d_a[l+(3*s)].v[in_chunk_idx]; d_a[l+(s)].v[in_chunk_idx]^=d_a[l+(2*s)].v[in_chunk_idx]; __syncthreads(); } /* * Subpolynomial of size 2 chunks */ s=Chunk::cells_in_chunk>>1; l=chunkIdx*2; for(unsigned int i = 0 ; i < 2 ; ++i){ d_a[l+1+i*half_d_a_len].v[in_chunk_idx]^= (d_a[l+1+i*half_d_a_len].v[in_chunk_idx]>>(Chunk::elements_in_chunk>>1)); d_a[l+i*half_d_a_len].v[in_chunk_idx]^= (d_a[l+1+i*half_d_a_len].v[in_chunk_idx]<<(Chunk::elements_in_chunk>>1)); } --dim; __syncthreads(); } unsigned int i = dim; /* * Code duplicated to prevent additional register usage for loop counter, assuming that: * Chunk::log_cells_in_chunk = Chunk::log_elements_in_chunk+1. */ for(unsigned int j = 0 ; j < 4 ; ++j ) { chunk_cell_t cell = d_a[chunkIdx+j*(half_d_a_len>>1)].v[in_chunk_idx]; for(; dim >=2 ; --dim){ cell ^= (cell & (d_chunk_cell_mask[dim]<<((1<<dim)-(1<<(dim-2)))))>>(1<<(dim-2)); cell ^= (cell & (d_chunk_cell_mask[dim]<<(1<<(dim-1))))>>(1<<(dim-2)); } d_a[chunkIdx+j*(half_d_a_len>>1)].v[in_chunk_idx] = cell ; dim = i; } __syncthreads(); } /* * This is the partition operation * 1)d_a - the shmem ptr for the polynomial. * 2) dim - the dimension of partition. * 3) idx - the thread index. * 4) chunk_idx - (idx >> Chunk::log_elements_in_chunk). * 5) in_chunk_idx - (idx % Chunk::elements_in_chunk). * 6) d_a_len - log length of subpoly in chunks. 
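 * 7) half_d_a_len - number of chunks in each half of the shared-memory
 *    buffer; the routine partitions both halves in the same pass.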
*/ __device__ void InTB_partition( Chunk* const d_a, const len_t dim, const idx_t idx, const idx_t chunk_idx, const idx_t in_chunk_idx, const len_t d_a_len, const len_t half_d_a_len){ /* * Assumes: * Chunk::log_cells_in_chunk = 1+ Chunk::log_elements_in_chunk */ idx_t i=2; chunk_cell_t ans[2]; for(unsigned int j = 0 ; j < 2 ; ++j){ ans[0]= d_a[chunk_idx + j*half_d_a_len].v[in_chunk_idx]; ans[1]=d_a[chunk_idx + j*half_d_a_len].v[in_chunk_idx + (Chunk::cells_in_chunk>>1)]; for(i=2; i <= Chunk::log_elements_in_chunk && i<=dim ; ++i){ ans[0] = (ans[0] & d_chunk_cell_mask[i]) | (ans[0] & (d_chunk_cell_mask[i]<<((1<<(i))-(1<<(i-2))))) | (ans[0] & (d_chunk_cell_mask[i]<<(1<<(i-1))))>>(1<<(i-2)) | (ans[0] & (d_chunk_cell_mask[i]<<(1<<(i-2))))<<(1<<(i-2)); ans[1] = (ans[1] & d_chunk_cell_mask[i]) | (ans[1] & (d_chunk_cell_mask[i]<<((1<<(i))-(1<<(i-2))))) | (ans[1] & (d_chunk_cell_mask[i]<<(1<<(i-1))))>>(1<<(i-2)) | (ans[1] & (d_chunk_cell_mask[i]<<(1<<(i-2))))<<(1<<(i-2)); } d_a[chunk_idx + j*half_d_a_len].v[in_chunk_idx]=ans[0]; d_a[chunk_idx + j*half_d_a_len].v[in_chunk_idx+(Chunk::cells_in_chunk>>1)] = ans[1]; } __syncthreads(); if(dim<=Chunk::log_elements_in_chunk){ return; } idx_t s = (idx& andMask(log_max_block_size))>> Chunk::log_cells_in_chunk; s<<=1; idx_t t = idx & andMask(Chunk::log_cells_in_chunk); for(unsigned int j = 0 ; j < 2 ; ++j){ ans[0]=d_a[s + j*half_d_a_len].v[t]; ans[1]=d_a[s+1 + j*half_d_a_len].v[t]; d_a[s+j*half_d_a_len].v[t] = (ans[0] & d_chunk_cell_mask[Chunk::log_elements_in_chunk+1]) | ((ans[1] & d_chunk_cell_mask[Chunk::log_elements_in_chunk+1])<<(Chunk::elements_in_chunk>>1)); d_a[s+1 + j*half_d_a_len].v[t] = (ans[1] & (d_chunk_cell_mask[Chunk::log_elements_in_chunk+1]<< (Chunk::elements_in_chunk>>1)) )| ((ans[0] >> (Chunk::elements_in_chunk>>1)) & d_chunk_cell_mask[Chunk::log_elements_in_chunk+1]); } __syncthreads(); if(dim<=Chunk::log_elements_in_chunk+1){ return; } ans[0] = d_a[chunk_idx].v[in_chunk_idx]; ans[1] = d_a[chunk_idx].v[in_chunk_idx+Chunk::elements_in_chunk]; chunk_cell_t ans_u[2]; ans_u[0] = d_a[chunk_idx+half_d_a_len].v[in_chunk_idx]; ans_u[1] = d_a[chunk_idx+ half_d_a_len].v[in_chunk_idx+Chunk::elements_in_chunk]; __syncthreads(); s = (chunk_idx >> d_a_len)<<(d_a_len); t = chunk_idx & andMask(d_a_len); if(t & 1){ d_a[s+(t>>1) + (1<<(d_a_len-1))].v[in_chunk_idx] = ans[0]; d_a[s+(t>>1) + (1<<(d_a_len-1))].v[in_chunk_idx+Chunk::elements_in_chunk] = ans[1]; } else { d_a[s+(t>>1)].v[in_chunk_idx] = ans[0]; d_a[s+(t>>1)].v[in_chunk_idx+Chunk::elements_in_chunk] = ans[1]; } s=((chunk_idx+half_d_a_len) >> d_a_len)<<d_a_len; t=(chunk_idx+half_d_a_len)&andMask(d_a_len); if(t & 1){ d_a[s+(t>>1) + (1<<(d_a_len-1))].v[in_chunk_idx] = ans_u[0]; d_a[s+(t>>1) + (1<<(d_a_len-1))].v[in_chunk_idx+Chunk::elements_in_chunk] = ans_u[1]; } else { d_a[s+(t>>1)].v[in_chunk_idx] = ans_u[0]; d_a[s+(t>>1)].v[in_chunk_idx+Chunk::elements_in_chunk] = ans_u[1]; } __syncthreads(); } /* * This is the linear evaluation phase */ __device__ void InTB_LinearEvaluation( Chunk* const d_a, const idx_t idx , const idx_t chunkIdx , const idx_t in_chunk_idx, const len_t half_d_a_len){ chunk_cell_t load[4]; load[0] = d_a[chunkIdx].v[in_chunk_idx]; load[1] = d_a[chunkIdx].v[in_chunk_idx+Chunk::elements_in_chunk]; load[2] = d_a[half_d_a_len+chunkIdx].v[in_chunk_idx]; load[3] = d_a[half_d_a_len+chunkIdx].v[in_chunk_idx+Chunk::elements_in_chunk]; DUP_ODD_BITS(load[0]); DUP_ODD_BITS(load[1]); DUP_EVEN_BITS(d_a[chunkIdx].v[in_chunk_idx]); 
DUP_EVEN_BITS(d_a[chunkIdx].v[in_chunk_idx+Chunk::elements_in_chunk]); __syncthreads(); InTB_preMulExpand(d_a,d_linear_mul,1,idx,chunkIdx,in_chunk_idx); InTB_Mult(d_a+(chunkIdx<<1),in_chunk_idx); InTB_postMulShrink(d_a,idx,chunkIdx,in_chunk_idx); d_a[chunkIdx].v[in_chunk_idx] ^= load[0]; d_a[chunkIdx].v[in_chunk_idx + Chunk::elements_in_chunk] ^= load[1]; __syncthreads(); load[0]=load[2]; load[2]=d_a[chunkIdx].v[in_chunk_idx]; d_a[chunkIdx].v[in_chunk_idx] = load[0]; load[1]=load[3]; load[3]=d_a[chunkIdx].v[in_chunk_idx+Chunk::elements_in_chunk]; d_a[chunkIdx].v[in_chunk_idx+Chunk::elements_in_chunk] = load[1]; DUP_ODD_BITS(load[0]); DUP_ODD_BITS(load[1]); DUP_EVEN_BITS(d_a[chunkIdx].v[in_chunk_idx]); DUP_EVEN_BITS(d_a[chunkIdx].v[in_chunk_idx+Chunk::elements_in_chunk]); __syncthreads(); InTB_preMulExpand(d_a,d_linear_mul,1,idx,chunkIdx,in_chunk_idx); InTB_Mult(d_a+(chunkIdx<<1),in_chunk_idx); InTB_postMulShrink(d_a,idx,chunkIdx,in_chunk_idx); d_a[chunkIdx].v[in_chunk_idx] ^= load[0]; d_a[chunkIdx].v[in_chunk_idx + Chunk::elements_in_chunk] ^= load[1]; __syncthreads(); d_a[chunkIdx + half_d_a_len].v[in_chunk_idx] = d_a[chunkIdx].v[in_chunk_idx]; d_a[chunkIdx + half_d_a_len].v[in_chunk_idx + Chunk::elements_in_chunk] = d_a[chunkIdx].v[in_chunk_idx + Chunk::elements_in_chunk]; d_a[chunkIdx].v[in_chunk_idx]=load[2]; d_a[chunkIdx].v[in_chunk_idx + Chunk::elements_in_chunk]=load[3]; __syncthreads(); } /* * This is the WFromUV operation in chunk (i.e. dim<5) * dim = dim of subspace. */ __device__ void InTB_WFromUV_inChunk( Chunk* const d_a, const Chunk * const subspace, const len_t dim, const idx_t idx, const idx_t chunkIdx, const idx_t in_chunk_idx, const len_t half_d_a_len){ /* * 1)Backup the WHOLE cell. * 2)Multiply by subspace chunk (will nullify lower top). * 3)Xor by itself when shifting right subspace-times. * 4)Xor the load. */ chunk_cell_t load[4]; /* * Just backing up upper half */ load[2] = d_a[chunkIdx + half_d_a_len].v[in_chunk_idx]; load[3] = d_a[chunkIdx + half_d_a_len].v[in_chunk_idx + Chunk::elements_in_chunk]; load[0] = d_a[chunkIdx].v[in_chunk_idx] ; load[1] = d_a[chunkIdx].v[in_chunk_idx+Chunk::elements_in_chunk] ; load[0] ^= ((load[0]&(d_alter_mask[dim+1]))<<(1<<dim)); load[1] ^= ((load[1]&(d_alter_mask[dim+1]))<<(1<<dim)); __syncthreads(); InTB_preMulExpand(d_a,subspace,1,idx,chunkIdx,in_chunk_idx); InTB_Mult(d_a+(chunkIdx<<1),in_chunk_idx); InTB_postMulShrink(d_a,idx,chunkIdx,in_chunk_idx); d_a[chunkIdx].v[in_chunk_idx] ^= (load[0]^(d_a[chunkIdx].v[in_chunk_idx]>>(1<<dim))); d_a[chunkIdx].v[in_chunk_idx + Chunk::elements_in_chunk] ^= (load[1]^(d_a[chunkIdx].v[in_chunk_idx + Chunk::elements_in_chunk]>>(1<<dim))); __syncthreads(); /* * Switching from calculating lower half , into upper half. 
*/ load[0] = load[2]; load[2] = d_a[chunkIdx].v[in_chunk_idx]; d_a[chunkIdx].v[in_chunk_idx] = load[0]; load[1] = load[3]; load[3] = d_a[chunkIdx].v[in_chunk_idx + Chunk::elements_in_chunk]; d_a[chunkIdx].v[in_chunk_idx + Chunk::elements_in_chunk] = load[1]; load[0] ^= ((load[0]&(d_alter_mask[dim+1]))<<(1<<dim)); load[1] ^= ((load[1]&(d_alter_mask[dim+1]))<<(1<<dim)); __syncthreads(); InTB_preMulExpand(d_a,subspace,1,idx,chunkIdx,in_chunk_idx); InTB_Mult(d_a+(chunkIdx<<1),in_chunk_idx); InTB_postMulShrink(d_a,idx,chunkIdx,in_chunk_idx); d_a[chunkIdx].v[in_chunk_idx] ^= (load[0]^(d_a[chunkIdx].v[in_chunk_idx]>>(1<<dim))); d_a[chunkIdx].v[in_chunk_idx + Chunk::elements_in_chunk] ^= (load[1]^(d_a[chunkIdx].v[in_chunk_idx + Chunk::elements_in_chunk]>>(1<<dim))); __syncthreads(); d_a[chunkIdx + half_d_a_len].v[in_chunk_idx] = d_a[chunkIdx].v[in_chunk_idx]; d_a[chunkIdx + half_d_a_len].v[in_chunk_idx + Chunk::elements_in_chunk] = d_a[chunkIdx].v[in_chunk_idx + Chunk::elements_in_chunk]; d_a[chunkIdx].v[in_chunk_idx] = load[2]; d_a[chunkIdx].v[in_chunk_idx + Chunk::elements_in_chunk] = load[3]; __syncthreads(); } /* * This is when the SUBSPACE dim it >=5. * 1) d_a - shmem ptr. * 2) subspace - global mem ptr of length (1<<(dim-1-Chunk::log_elements_in_chunk)) to a subspace of dim (dim-1). * 3) dim - dimension of subspace. * 4) idx - threadIdx. */ __device__ void InTB_WFromUV_outChunk( Chunk* const d_a, const Chunk* const subspace, const len_t dim, const idx_t idx, const len_t half_d_a_len){ /* * 1) Backup everything on register. * 2) Multiply by subspace only bottom halves of the sub-polynomials. */ idx_t chunkIdx64[2]; idx_t original_idx[2]; original_idx[0] = ((idx& andMask(log_max_block_size)) >> Chunk::log_cells_in_chunk); original_idx[1] = original_idx[0]+(max_block_size>>Chunk::log_cells_in_chunk); chunkIdx64[0] = (((original_idx[0])>>(dim-Chunk::log_elements_in_chunk))<<(dim+1-Chunk::log_elements_in_chunk)) + (original_idx[0] & andMask(dim-Chunk::log_elements_in_chunk)); chunkIdx64[1] = (((original_idx[1])>>(dim-Chunk::log_elements_in_chunk))<<(dim+1-Chunk::log_elements_in_chunk)) + (original_idx[1] & andMask(dim-Chunk::log_elements_in_chunk)); idx_t in_chunk_idx64 = (idx) & andMask(Chunk::log_cells_in_chunk); chunk_cell_t load[4]; //Backing up everything on local memory. load[0]= d_a[chunkIdx64[0]].v[in_chunk_idx64]; load[1]= d_a[chunkIdx64[0] + (1<<(dim-Chunk::log_elements_in_chunk))].v[in_chunk_idx64]; load[2] = d_a[chunkIdx64[1]].v[in_chunk_idx64]; load[3] = d_a[chunkIdx64[1]+ (1<<(dim-Chunk::log_elements_in_chunk))].v[in_chunk_idx64]; __syncthreads(); //Preparing data for multiplication. d_a[(original_idx[0])<<1].v[in_chunk_idx64]=load[1]; d_a[(original_idx[1])<<1].v[in_chunk_idx64]=load[3]; //If my chunk should be multiplied, I load the relevant subspace chunk to the next chunk, so they will be multiplied. //My chunk should be multiplied if my chunkIdx/(sizeOfSubspace in Chunk) is odd. //chunkIdx is the same for all threads in the warp - so no divergence is possible. 
d_a[(original_idx[0]<<1)+1].v[in_chunk_idx64]= subspace[original_idx[0] & andMask(dim-Chunk::log_elements_in_chunk)].v[in_chunk_idx64]; d_a[(original_idx[1]<<1)+1].v[in_chunk_idx64]= subspace[original_idx[1] & andMask(dim-Chunk::log_elements_in_chunk)].v[in_chunk_idx64]; __syncthreads(); InTB_Mult(d_a+(((idx&andMask(log_max_block_size))>>Chunk::log_threads_in_chunk)<<1),idx&andMask(Chunk::log_threads_in_chunk)); chunk_cell_t t[2]; t[0]= d_a[original_idx[0]<<1].v[in_chunk_idx64]; t[1]= d_a[original_idx[1]<<1].v[in_chunk_idx64]; __syncthreads(); load[1]^=load[0]; load[3]^=load[2]; d_a[chunkIdx64[0]+(1<<(dim-Chunk::log_elements_in_chunk))].v[in_chunk_idx64] = t[0] ^ load[1]; d_a[chunkIdx64[0]].v[in_chunk_idx64] = t[0] ^ load[0]; d_a[chunkIdx64[1]+(1<<(dim-Chunk::log_elements_in_chunk))].v[in_chunk_idx64] = t[1] ^ load[3]; d_a[chunkIdx64[1]].v[in_chunk_idx64] = t[1] ^ load[2]; __syncthreads(); } __global__ void //__launch_bounds__(1<<log_max_block_size) k_gpuFFT_InTB(Chunk* const d_a, Chunk** subspaces, Chunk** exps, len_t dim, const len_t poly_len){ const len_t half_d_a_len = (max_block_size>>Chunk::log_threads_in_chunk); __shared__ Chunk s[half_d_a_len<<1]; const idx_t idx = threadIdx.x + blockIdx.x*blockDim.x; const idx_t poly_idx = (idx >> log_max_block_size)<<(log_max_block_size+1-Chunk::log_elements_in_chunk); const idx_t chunkIdx = (idx & andMask(log_max_block_size))>> Chunk::log_elements_in_chunk; const idx_t in_chunk_idx = idx & andMask(Chunk::log_elements_in_chunk); if(poly_idx+chunkIdx < poly_len){ s[chunkIdx].v[in_chunk_idx] = d_a[(poly_idx) + chunkIdx].v[in_chunk_idx]; s[chunkIdx].v[in_chunk_idx+Chunk::elements_in_chunk] = d_a[(poly_idx) + chunkIdx].v[in_chunk_idx+Chunk::elements_in_chunk]; } if(poly_idx+chunkIdx+half_d_a_len < poly_len){ s[chunkIdx+half_d_a_len].v[in_chunk_idx] = d_a[(poly_idx) + chunkIdx + half_d_a_len].v[in_chunk_idx]; s[chunkIdx+half_d_a_len].v[in_chunk_idx+Chunk::elements_in_chunk] = d_a[(poly_idx) + chunkIdx + half_d_a_len].v[in_chunk_idx+Chunk::elements_in_chunk]; } __syncthreads(); idx_t i = dim; for(; i > Chunk::log_elements_in_chunk ; --i){ InTB_MultiExp(s,exps[i],1<<(i-Chunk::log_elements_in_chunk),idx,chunkIdx,in_chunk_idx,half_d_a_len); //The problem is in TaylorExp - s is nullified! 
InTB_TaylorExp(s,i,idx,idx & andMask(Chunk::log_cells_in_chunk),(idx&andMask(log_max_block_size)) >> Chunk::log_cells_in_chunk,half_d_a_len); InTB_partition(s,i,idx,chunkIdx,in_chunk_idx,(i-Chunk::log_elements_in_chunk),half_d_a_len); } for(; i >= 2; --i){ InTB_MultiExp(s,exps[i],1,idx,chunkIdx,in_chunk_idx,half_d_a_len); InTB_TaylorExp(s,i,idx,idx & andMask(Chunk::log_cells_in_chunk),(idx&andMask(log_max_block_size)) >> Chunk::log_cells_in_chunk,half_d_a_len); InTB_partition(s,i,idx,chunkIdx,in_chunk_idx,0,half_d_a_len); } InTB_LinearEvaluation(s,idx,chunkIdx,in_chunk_idx,half_d_a_len); for(; i < Chunk::log_elements_in_chunk && i<dim ; ++i){ InTB_WFromUV_inChunk(s,subspaces[i],i,idx,chunkIdx,in_chunk_idx,half_d_a_len); } for( ; i < dim ; ++i){ InTB_WFromUV_outChunk(s,subspaces[i],i,idx,half_d_a_len); } if(poly_idx+chunkIdx < poly_len){ d_a[(poly_idx) + chunkIdx].v[in_chunk_idx] = s[chunkIdx].v[in_chunk_idx]; d_a[(poly_idx) + chunkIdx].v[in_chunk_idx+Chunk::elements_in_chunk] = s[chunkIdx].v[in_chunk_idx+Chunk::elements_in_chunk]; } if(poly_idx+chunkIdx+half_d_a_len < poly_len){ d_a[(poly_idx) + chunkIdx + half_d_a_len].v[in_chunk_idx] = s[chunkIdx+half_d_a_len].v[in_chunk_idx]; d_a[(poly_idx) + chunkIdx + half_d_a_len].v[in_chunk_idx+Chunk::elements_in_chunk] = s[chunkIdx+half_d_a_len].v[in_chunk_idx+Chunk::elements_in_chunk]; } } void GPU_FFT::fft_gpu_InTB(len_t dim, const FFT* const fft, Chunk* const p, len_t p_len){ len_t threads = p_len<<Chunk::log_elements_in_chunk; dim3 block(max_block_size,1,1); dim3 grid(sizeCiel(threads,max_block_size),1,1); Chunk* d_subspaces[log_max_block_size+2]; Chunk* d_exps[log_max_block_size+2]; Chunk** d_subspaces_allocated; Chunk** d_exps_allocated; cudaMalloc(&d_subspaces_allocated,sizeof(Chunk*)*(log_max_block_size+2)); cudaMalloc(&d_exps_allocated,sizeof(Chunk*)*(log_max_block_size+2)); len_t fftSize = fft->basis.getSize(); for(unsigned int i = 2 ; i <= dim && i<=Chunk::log_elements_in_chunk ; ++i){ cudaMalloc(&(d_subspaces[i-1]),sizeof(Chunk)); cudaMalloc(&(d_exps[i]),sizeof(Chunk)); cudaMemcpy(d_subspaces[i-1],fft->gpu_subspace[fftSize-i],sizeof(Chunk),cudaMemcpyHostToDevice); cudaMemcpy(d_exps[i],fft->gpu_exp[fftSize-i],sizeof(Chunk),cudaMemcpyHostToDevice); } for(unsigned int i = Chunk::log_elements_in_chunk+1 ; i <= dim ; ++i){ cudaMalloc(&(d_subspaces[i-1]),sizeof(Chunk)*(1<<(i-1-Chunk::log_elements_in_chunk))); cudaMalloc(&(d_exps[i]),sizeof(Chunk)*(1<<(i-Chunk::log_elements_in_chunk))); cudaMemcpy(d_subspaces[i-1],fft->gpu_subspace[fftSize-i],sizeof(Chunk)*(1<<(i-1-Chunk::log_elements_in_chunk)),cudaMemcpyHostToDevice); cudaMemcpy(d_exps[i],fft->gpu_exp[fftSize-i],sizeof(Chunk)*(1<<(i-Chunk::log_elements_in_chunk)),cudaMemcpyHostToDevice); } cudaMemcpy(d_subspaces_allocated,d_subspaces,sizeof(Chunk*)*(log_max_block_size+2),cudaMemcpyHostToDevice); cudaMemcpy(d_exps_allocated,d_exps,sizeof(Chunk*)*(log_max_block_size+2),cudaMemcpyHostToDevice); k_gpuFFT_InTB<<<grid,block>>>(p,d_subspaces_allocated,d_exps_allocated,dim,p_len); for(unsigned int i = 2 ; i<= dim ; ++i){ cudaFree(d_subspaces[i-1]); cudaFree(d_exps[i]); } cudaFree(d_subspaces_allocated); cudaFree(d_exps_allocated); } void GPU_FFT::fft_gpu(const FFT* const fft,Polynomial* P){ Chunk::setMod(); setUpConstantMemory(fft); Element* normalized_P = *P; len_t p_len = 1<<fft->basis.getSize(); len_t fixed_len = MAX((p_len),warp_size); if(p_len < fixed_len) { normalized_P = (Element*)malloc(sizeof(Element)*fixed_len); memset(normalized_P,0,sizeof(Element)*fixed_len); 
memcpy(normalized_P,*P,sizeof(Element)*p_len); } fixed_len >>= Chunk::log_elements_in_chunk; p_len = fixed_len; Elements_Chunk* d_p; Chunk* d_chunk_P; Chunk* d_chunk_P_cpy; Chunk* d_swap; cudaMalloc(&d_p,sizeof(Elements_Chunk)*(fixed_len)); cudaMalloc(&d_chunk_P,sizeof(Chunk)*(fixed_len)); cudaMemcpy(d_p,normalized_P,sizeof(Elements_Chunk)*(fixed_len),cudaMemcpyHostToDevice); Chunk::normalToChunk((Elements_Chunk*)d_p,d_chunk_P,fixed_len,false); cudaFree(d_p); cudaMalloc(&d_chunk_P_cpy,sizeof(Chunk)*(fixed_len)); len_t dim = fft->basis.getSize(); #ifdef __MEASURE #ifdef __GNUC__ timespec start,end; clock_gettime(CLOCK_REALTIME,&start); #endif //#ifdef __GNUC__ #endif //#ifdef __MEASURE /* * The Algorithm */ unsigned int i = dim; for(; i > 1 ; --i){ multiExponentiate_gpu(fft,d_chunk_P,p_len,i,d_chunk_P_cpy); taylorExpansion_gpu(fft,d_chunk_P,p_len,i); if(partition(d_chunk_P,d_chunk_P_cpy,p_len,i)){ d_swap = d_chunk_P; d_chunk_P=d_chunk_P_cpy; d_chunk_P_cpy = d_swap; } } // fft_gpu_InTB(i,fft,d_chunk_P,p_len); linearEvaluation(d_chunk_P,d_chunk_P_cpy,p_len); for(; i < dim ; ++i ){ cudaMemcpy(d_chunk_P_cpy,fft->gpu_subspace[dim-1-i],sizeof(Chunk)<<(MAX((int)i-(int)Chunk::log_elements_in_chunk,0)),cudaMemcpyHostToDevice); WFromUV(d_chunk_P,p_len,d_chunk_P_cpy,i); } /* * End of algorithm */ #ifdef __MEASURE #ifdef __GNUC__ clock_gettime(CLOCK_REALTIME,&end); double elapsed_secs = end.tv_sec - start.tv_sec + ((double) (end.tv_nsec - start.tv_nsec)) / ((double) 1000000000); std::cout << elapsed_secs << "GpU!!" <<std::endl; #endif //#ifdef __GNUC__ #endif //#ifdef __MEASURE cudaFree(d_chunk_P_cpy); cudaMalloc(&d_p,sizeof(Elements_Chunk)*(fixed_len)); Chunk::chunkToNormal(d_chunk_P,(Elements_Chunk*)d_p,fixed_len,false); cudaMemcpy(normalized_P,d_p,sizeof(Elements_Chunk)*(fixed_len),cudaMemcpyDeviceToHost); cudaFree(d_chunk_P); cudaFree(d_p); if(normalized_P != *P){ free(*P); *P=normalized_P; } } /* * This is the inverse FFT implementation */ void GPU_FFT::ifft_gpu(const FFT* const fft,Polynomial* P){ Chunk::setMod(); setUpConstantMemory(fft); Element* normalized_P = *P; len_t p_len = 1<<fft->basis.getSize(); len_t fixed_len = MAX((p_len),warp_size); if(p_len < fixed_len) { normalized_P = (Element*)malloc(sizeof(Element)*fixed_len); memset(normalized_P,0,sizeof(Element)*fixed_len); memcpy(normalized_P,*P,sizeof(Element)*p_len); } fixed_len >>= Chunk::log_elements_in_chunk; p_len = fixed_len; Elements_Chunk* d_p; Chunk* d_chunk_P; Chunk* d_chunk_P_cpy; Chunk* d_swap; cudaMalloc(&d_p,sizeof(Elements_Chunk)*(fixed_len)); cudaMalloc(&d_chunk_P,sizeof(Chunk)*(fixed_len)); cudaMemcpy(d_p,normalized_P,sizeof(Elements_Chunk)*(fixed_len),cudaMemcpyHostToDevice); Chunk::normalToChunk((Elements_Chunk*)d_p,d_chunk_P,fixed_len,false); cudaFree(d_p); cudaMalloc(&d_chunk_P_cpy,sizeof(Chunk)*(fixed_len)); len_t dim = fft->basis.getSize(); #ifdef __MEASURE #ifdef __GNUC__ timespec start,end; clock_gettime(CLOCK_REALTIME,&start); #endif //#ifdef __GNUC__ #endif //#ifdef __MEASURE /* * The Algorithm */ unsigned int i = dim-1; for(; i >= 1 ; --i ){ cudaMemcpy(d_chunk_P_cpy,fft->gpu_subspace[dim-1-i],sizeof(Chunk)<<(MAX((int)i-(int)Chunk::log_elements_in_chunk,0)),cudaMemcpyHostToDevice); UVFromW(d_chunk_P,p_len,d_chunk_P_cpy,i); } ilinearEvaluation(d_chunk_P,d_chunk_P_cpy,p_len); for(i=2; i <= dim ; ++i){ if(ipartition(d_chunk_P,d_chunk_P_cpy,p_len,i)){ d_swap = d_chunk_P; d_chunk_P=d_chunk_P_cpy; d_chunk_P_cpy = d_swap; } itaylorExpansion_gpu(fft,d_chunk_P,p_len,i); 
imultiExponentiate_gpu(fft,d_chunk_P,p_len,i,d_chunk_P_cpy); } /* * End of algorithm */ #ifdef __MEASURE #ifdef __GNUC__ clock_gettime(CLOCK_REALTIME,&end); double elapsed_secs = end.tv_sec - start.tv_sec + ((double) (end.tv_nsec - start.tv_nsec)) / ((double) 1000000000); std::cout << elapsed_secs << "GpU!!" <<std::endl; #endif //#ifdef __GNUC__ #endif //#ifdef __MEASURE cudaFree(d_chunk_P_cpy); cudaMalloc(&d_p,sizeof(Elements_Chunk)*(fixed_len)); Chunk::chunkToNormal(d_chunk_P,(Elements_Chunk*)d_p,fixed_len,false); cudaMemcpy(normalized_P,d_p,sizeof(Elements_Chunk)*(fixed_len),cudaMemcpyDeviceToHost); cudaFree(d_chunk_P); cudaFree(d_p); if(normalized_P != *P){ free(*P); *P=normalized_P; } } /* * No need for multiexponents multiplication, it's the same as the FFT only with the multiexponents of another element. */ /* * inverse-Taylor expansion */ __device__ void itaylor_smaller_than_chunk(Chunk* chunk,idx_t in_chunk_idx, len_t t_dim, len_t p_len, idx_t idx){ if(idx >= ((p_len) << (Chunk::log_cells_in_chunk))) return; //Performs the rest of the expansion. chunk_cell_t cell = chunk->v[in_chunk_idx]; for(len_t i = 2; i<=t_dim && i <=Chunk::log_elements_in_chunk ; ++i){ cell ^= (cell & (d_chunk_cell_mask[i]<<(1<<(i-1))))>>(1<<(i-2)); cell ^= (cell & (d_chunk_cell_mask[i]<<((1<<i)-(1<<(i-2)))))>>(1<<(i-2)); } chunk->v[in_chunk_idx]= cell; } __global__ void k_itaylorExpansion_iteration_large(Chunk* d_chunk_P , len_t p_len , len_t t_dim,len_t i){ idx_t idx = threadIdx.x + blockIdx.x*blockDim.x; idx_t in_chunk_idx = idx & (Chunk::cells_in_chunk-1); /* * flag = true if current sub-polynomial fits in a thread block. */ bool flag = (i-Chunk::log_elements_in_chunk <= log_max_block_size-Chunk::log_cells_in_chunk + 2); if(idx >= ((p_len) << (Chunk::log_cells_in_chunk-2))) return; do { len_t sub_len = (1<<(i-Chunk::log_elements_in_chunk)); idx_t chunk_idx = idx >> (Chunk::log_cells_in_chunk); chunk_idx = (chunk_idx /(sub_len>>2))*(sub_len) + ((chunk_idx) & ((sub_len>>2)-1)); d_chunk_P[(sub_len>>2) + chunk_idx ].v[in_chunk_idx] ^= d_chunk_P[(sub_len>>1) + chunk_idx].v[in_chunk_idx]; d_chunk_P[(sub_len>>1) + chunk_idx ].v[in_chunk_idx] ^= d_chunk_P[(3*(sub_len>>2)) + chunk_idx].v[in_chunk_idx]; if(flag) __syncthreads(); ++i; } while (i-Chunk::log_elements_in_chunk <= log_max_block_size-Chunk::log_cells_in_chunk + 2 && i<=t_dim); //If number of threads needed is less than a thread block - we can just continue! } __global__ void k_itaylorExpansion_iteration_twoChunks(Chunk* d_chunk_P, len_t p_len, len_t t_dim){ idx_t idx = threadIdx.x + blockIdx.x*blockDim.x; idx_t in_chunk_idx = idx & (Chunk::cells_in_chunk-1); if(idx >= ((p_len) << (Chunk::log_cells_in_chunk-1))) return; idx_t chunk_idx= (idx >> (Chunk::log_cells_in_chunk) ) << 1; d_chunk_P[(chunk_idx)].v[in_chunk_idx]^= (d_chunk_P[(chunk_idx)+1].v[in_chunk_idx]<<(Chunk::elements_in_chunk>>1)); d_chunk_P[(chunk_idx)+1].v[in_chunk_idx]^= (d_chunk_P[(chunk_idx)+1].v[in_chunk_idx]>>(Chunk::elements_in_chunk>>1)); } __global__ void k_itaylorExpansion_iteration_singleChunk(Chunk* d_chunk_P, len_t p_len, len_t t_dim){ idx_t idx = threadIdx.x + blockIdx.x*blockDim.x; idx_t in_chunk_idx = idx & (Chunk::cells_in_chunk-1); //Performs the rest of the expansion. 
idx_t chunk_idx = (idx >> (Chunk::log_cells_in_chunk)); itaylor_smaller_than_chunk(d_chunk_P+chunk_idx,in_chunk_idx,t_dim,p_len,idx); } void itaylorExpansion_iteration(const FFT* fft, Chunk * d_chunk_P , len_t p_len , len_t t_dim,len_t i){ unsigned int threads; dim3 blockSize(max_block_size,1,1); if( i >= Chunk::log_elements_in_chunk + 2){ threads = p_len<<(Chunk::log_cells_in_chunk-2); dim3 gridSize(sizeCiel(threads,max_block_size),1,1); k_itaylorExpansion_iteration_large<<<gridSize,blockSize>>>(d_chunk_P,p_len,t_dim,i); } else if (i == Chunk::log_elements_in_chunk + 1){ threads = p_len <<(Chunk::log_cells_in_chunk-1); dim3 gridSize(sizeCiel(threads,max_block_size),1,1); k_itaylorExpansion_iteration_twoChunks<<<gridSize,blockSize>>>(d_chunk_P,p_len,t_dim); } else { threads = p_len << Chunk::log_cells_in_chunk; dim3 gridSize(sizeCiel(threads,max_block_size),1,1); k_itaylorExpansion_iteration_singleChunk<<<gridSize,blockSize>>>(d_chunk_P,p_len,t_dim); } // k_taylorExpansion_iteration<<<gridSize,blockSize>>>(d_chunk_P,p_len,t_dim); } void GPU_FFT::itaylorExpansion_gpu(const FFT* fft, Chunk* d_chunk_P, len_t p_len , len_t t_dim){ /* * The sub-polynomial requires more than a single thread block. */ itaylorExpansion_iteration(fft,d_chunk_P,p_len,t_dim,2); len_t i = Chunk::log_elements_in_chunk+1; if(t_dim>=i){ itaylorExpansion_iteration(fft,d_chunk_P,p_len,i,i); ++i; } if(t_dim>=i){ itaylorExpansion_iteration(fft,d_chunk_P,p_len,t_dim,i); i = log_max_block_size+Chunk::log_elements_in_chunk+3-Chunk::log_cells_in_chunk; } while(t_dim>=i){ itaylorExpansion_iteration(fft,d_chunk_P,p_len,t_dim,i); ++i; } } /* * inverse-partition */ /* * Input: * 1) d_chunk_p - The polynomial on device's memory. * 2) p_len - number of chunks in d_chunk_p. * 3) chunk_idx - the chunk current thread has to deal with. * 4) in_chunk_idx - the number of the cell the current thread deals with. * 5) t_dim - The dim of the original (input) subpolynomial. * * This function performs what a single thread does when performing the partition function, on a single chunk. 
*/ __device__ void ipartition_in_chunk(Chunk* d_chunk_p, len_t p_len , idx_t in_chunk_idx, len_t t_dim){ chunk_cell_t ans = d_chunk_p->v[in_chunk_idx]; for(unsigned int i = MIN(t_dim,Chunk::log_elements_in_chunk) ; i >= 2 ; --i){ ans = (ans & d_chunk_cell_mask[i]) | (ans & (d_chunk_cell_mask[i]<<((1<<(i))-(1<<(i-2))))) | (ans & (d_chunk_cell_mask[i]<<(1<<(i-1))))>>(1<<(i-2)) | (ans & (d_chunk_cell_mask[i]<<(1<<(i-2))))<<(1<<(i-2)); } d_chunk_p->v[in_chunk_idx]=ans; } //__device__ void ipartition_two_chunks(Chunk* d_chunk_p, idx_t in_chunk_idx){ // chunk_cell_t ans[2]; // chunk_cell_t load[2]; // const chunk_cell_t mask = d_chunk_cell_mask[Chunk::log_elements_in_chunk+1]; // load[0]=d_chunk_p->v[in_chunk_idx]; // load[1]=d_chunk_p[1].v[in_chunk_idx]; // ans[0]=(load[0] & mask) | ((load[1] & mask)<<(Chunk::elements_in_chunk>>1)); // ans[1]=(load[1] & (mask<< (Chunk::elements_in_chunk>>1)) )| // ((load[0] >> (Chunk::elements_in_chunk>>1)) & mask); // d_chunk_p->v[in_chunk_idx] = ans[0]; // d_chunk_p[1].v[in_chunk_idx] = ans[1]; //} __device__ void ipartition_general(Chunk* d_chunk_p_src, Chunk* d_chunk_p_dst, idx_t chunk_idx, idx_t in_chunk_idx, idx_t t_dim){ if(chunk_idx & 1) d_chunk_p_dst[chunk_idx].v[in_chunk_idx] = d_chunk_p_src[(chunk_idx>>1) + (1<<(t_dim-1))].v[in_chunk_idx]; else d_chunk_p_dst[chunk_idx].v[in_chunk_idx] = d_chunk_p_src[chunk_idx>>1].v[in_chunk_idx]; } __global__ void k_ipartition_iteration_two_chunks(Chunk* d_chunk_p_src, len_t p_len){ idx_t idx = threadIdx.x + blockIdx.x * blockDim.x; idx_t chunk_idx = (idx >> Chunk::log_cells_in_chunk)<<1; idx_t in_chunk_idx = idx & andMask(Chunk::log_cells_in_chunk); if(chunk_idx >= p_len){ return; } partition_two_chunks(d_chunk_p_src+chunk_idx, in_chunk_idx); } __global__ void k_ipartition_iteration_in_chunk(Chunk* d_chunk_p_src, len_t p_len, len_t t_dim){ idx_t idx = threadIdx.x + blockIdx.x * blockDim.x; idx_t chunk_idx = idx >> Chunk::log_cells_in_chunk; idx_t in_chunk_idx = idx & andMask(Chunk::log_cells_in_chunk); if(chunk_idx >= p_len){ return; } ipartition_in_chunk(d_chunk_p_src+chunk_idx,p_len,in_chunk_idx,t_dim); } __global__ void k_ipartition_iteration_general(Chunk* d_chunk_p_src, Chunk* d_chunk_p_dst, len_t p_len, len_t t_dim){ idx_t idx = threadIdx.x + blockIdx.x * blockDim.x; idx_t chunk_idx = idx >> Chunk::log_cells_in_chunk; idx_t in_chunk_idx = idx & andMask(Chunk::log_cells_in_chunk); idx_t base = chunk_idx ^ (chunk_idx & andMask(t_dim-Chunk::log_elements_in_chunk)); chunk_idx &= andMask(t_dim-Chunk::log_elements_in_chunk); if(base+chunk_idx >= p_len) return; ipartition_general(d_chunk_p_src+base,d_chunk_p_dst+base, chunk_idx, in_chunk_idx,t_dim-Chunk::log_elements_in_chunk ); } /* * If t_dim > Chunk::log_elements_in_chunk+1 the result is written in dst, otherwise it will be written in src. 
*/ bool GPU_FFT::ipartition(Chunk* d_chunk_p_src, Chunk* d_chunk_p_dst, len_t p_len, len_t t_dim){ len_t threads = p_len << Chunk::log_cells_in_chunk; dim3 blockSize(max_block_size,1,1); dim3 gridSize(sizeCiel(threads,max_block_size),1,1); bool flag = false; if(t_dim > Chunk::log_elements_in_chunk+1){ k_ipartition_iteration_general<<<gridSize,blockSize>>>(d_chunk_p_src,d_chunk_p_dst,p_len,t_dim); Chunk* swap; swap = d_chunk_p_src; d_chunk_p_src = d_chunk_p_dst; d_chunk_p_dst = swap; flag= true; } if(t_dim >= Chunk::log_elements_in_chunk +1){ dim3 gridSizeTwoChunks(sizeCiel(threads>>1,max_block_size),1,1); k_ipartition_iteration_two_chunks<<<gridSizeTwoChunks,blockSize>>>(d_chunk_p_src,p_len); } k_ipartition_iteration_in_chunk<<<gridSize,blockSize>>>(d_chunk_p_src,p_len,t_dim); return flag; } /* * inverse linear evaluation */ __global__ void k_icopy_and_shift_vec(Chunk* d_chunk_src, Chunk* d_chunk_dst, len_t p_len) { const idx_t shared_len = max_block_size>>Chunk::log_threads_in_chunk; __shared__ Chunk c_shared[shared_len<<1]; chunk_cell_t t; idx_t idx = blockDim.x*blockIdx.x+threadIdx.x; const idx_t chunk_idx = (idx >> Chunk::log_threads_in_chunk); if(chunk_idx >= p_len){ return; } const idx_t in_chunk_idx = idx & andMask(Chunk::log_threads_in_chunk); const idx_t shared_chunk_idx = ((idx & andMask(log_max_block_size)) >> (Chunk::log_threads_in_chunk)); Chunk* my_shared_chunk = c_shared+(shared_chunk_idx<<1); for(unsigned int i =0 ; i < Chunk::ord ;i+=warp_size){ my_shared_chunk->v[in_chunk_idx+i]=d_chunk_src[chunk_idx].v[in_chunk_idx+i]; my_shared_chunk[1].v[in_chunk_idx+i]=d_ilinear_mul->v[in_chunk_idx+i]; } chunk_cell_t tmp; #pragma unroll for(unsigned int i = 0 ; i < Element::ord ; i+=warp_size){ tmp = my_shared_chunk->v[in_chunk_idx+i]; t = (tmp& 0x55555555) ^ ((tmp & 0xaaaaaaaa)>>1); my_shared_chunk->v[in_chunk_idx+i] = t^(t<<1); d_chunk_dst[chunk_idx].v[in_chunk_idx+i] = (tmp & 0x55555555) ; } Chunk::clmul_by_chunk(my_shared_chunk[0],my_shared_chunk[1],in_chunk_idx,my_shared_chunk); Chunk::chunk_reduce(d_chunk_src+chunk_idx,my_shared_chunk,in_chunk_idx); for(unsigned int i = 0 ; i < Element::ord ; i+=warp_size){ d_chunk_src[chunk_idx].v[in_chunk_idx+i] ^= d_chunk_dst[chunk_idx].v[in_chunk_idx+i]; } return; } void GPU_FFT::ilinearEvaluation(Chunk* d_chunk_p,Chunk* d_chunk_p_cpy, len_t p_len) { len_t threads = p_len << Chunk::log_threads_in_chunk; dim3 blockSize(max_block_size,1,1); dim3 gridSize(sizeCiel(threads,max_block_size),1,1); k_icopy_and_shift_vec<<<gridSize,blockSize>>>(d_chunk_p,d_chunk_p_cpy,p_len); } /* * UVFromW */ __global__ void k_isubspaceMult_chunk_and_add(Chunk* d_a, Chunk* d_b , len_t log_elements_in_b ,len_t a_len ){ idx_t idx = blockDim.x*blockIdx.x+threadIdx.x; const idx_t chunk_idx = (idx >> Chunk::log_threads_in_chunk); if(chunk_idx >= a_len){ return; } const idx_t in_chunk_idx = idx & andMask(Chunk::log_threads_in_chunk); Chunk::clmul_by_chunk_bShuffle_ixor_mask(d_a[chunk_idx],*d_b,in_chunk_idx,d_a[chunk_idx],1<<log_elements_in_b,log_elements_in_b); } /* * Dim = Size of U. 
*/ void GPU_FFT::UVFromW(Chunk* d_chunk_p, len_t p_len, Chunk* subspace, len_t dim){ if(dim>=Chunk::log_elements_in_chunk){ len_t threadsMul = p_len << (Chunk::log_threads_in_chunk-1); len_t threadsAdd = p_len << (Chunk::log_cells_in_chunk-1); dim3 block(multThreadsInBlock,1,1); dim3 gridMul(sizeCiel(threadsMul,multThreadsInBlock),1,1); dim3 gridAdd(sizeCiel(threadsAdd,multThreadsInBlock),1,1); k_subspaceAdd_general<<<gridAdd,block>>>(d_chunk_p,p_len,1<<(dim-Chunk::log_elements_in_chunk)); k_subspaceMult_general<<<gridMul,block>>>(d_chunk_p, subspace, (1<<(dim-Chunk::log_elements_in_chunk)),p_len); } else { len_t threadsMul = p_len << Chunk::log_threads_in_chunk; dim3 block(multThreadsInBlock,1,1); dim3 gridMul(sizeCiel(threadsMul,multThreadsInBlock),1,1); k_isubspaceMult_chunk_and_add<<<gridMul,block>>>(d_chunk_p,subspace, dim , p_len); } } } #endif
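/*
 * Illustration (not part of the library): partition() and ipartition() above
 * return true when they wrote their result into the scratch buffer
 * (d_chunk_P_cpy) instead of in place, and the host loops in fft_gpu() /
 * ifft_gpu() respond by swapping the two device pointers so that d_chunk_P
 * always names the current data. The helper below is a minimal, self-contained
 * sketch of that ping-pong convention; the name pingPongStep is hypothetical
 * and exists only for this example.
 */
template <typename BufferT>
static void pingPongStep(BufferT*& cur, BufferT*& scratch, bool wroteToScratch)
{
    // Mirrors: if (partition(d_chunk_P, d_chunk_P_cpy, p_len, i)) { swap the pointers }
    if (wroteToScratch) {
        BufferT* tmp = cur;
        cur          = scratch;
        scratch      = tmp;
    }
}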
#include <iterator> #include <cstdio> #include <cub/agent/agent_three_way_partition.cuh> #include <cub/config.cuh> #include <cub/device/dispatch/dispatch_scan.cuh> #include <cub/thread/thread_operators.cuh> #include <cub/util_device.cuh> #include <cub/util_math.cuh> #include <thrust/system/cuda/detail/core/triple_chevron_launch.h> CUB_NAMESPACE_BEGIN /****************************************************************************** * Kernel entry points *****************************************************************************/ template <typename AgentThreeWayPartitionPolicyT, typename InputIteratorT, typename FirstOutputIteratorT, typename SecondOutputIteratorT, typename UnselectedOutputIteratorT, typename NumSelectedIteratorT, typename ScanTileStateT, typename SelectFirstPartOp, typename SelectSecondPartOp, typename OffsetT> __launch_bounds__(int(AgentThreeWayPartitionPolicyT::BLOCK_THREADS)) __global__ void DeviceThreeWayPartitionKernel(InputIteratorT d_in, FirstOutputIteratorT d_first_part_out, SecondOutputIteratorT d_second_part_out, UnselectedOutputIteratorT d_unselected_out, NumSelectedIteratorT d_num_selected_out, ScanTileStateT tile_status_1, ScanTileStateT tile_status_2, SelectFirstPartOp select_first_part_op, SelectSecondPartOp select_second_part_op, OffsetT num_items, int num_tiles) { // Thread block type for selecting data from input tiles using AgentThreeWayPartitionT = AgentThreeWayPartition<AgentThreeWayPartitionPolicyT, InputIteratorT, FirstOutputIteratorT, SecondOutputIteratorT, UnselectedOutputIteratorT, SelectFirstPartOp, SelectSecondPartOp, OffsetT>; // Shared memory for AgentSelectIf __shared__ typename AgentThreeWayPartitionT::TempStorage temp_storage; // Process tiles AgentThreeWayPartitionT(temp_storage, d_in, d_first_part_out, d_second_part_out, d_unselected_out, select_first_part_op, select_second_part_op, num_items) .ConsumeRange(num_tiles, tile_status_1, tile_status_2, d_num_selected_out); } /** * @brief Initialization kernel for tile status initialization (multi-block) * * @tparam ScanTileStateT * Tile status interface type * * @tparam NumSelectedIteratorT * Output iterator type for recording the number of items selected * * @param[in] tile_state_1 * Tile status interface * * @param[in] tile_state_2 * Tile status interface * * @param[in] num_tiles * Number of tiles * * @param[out] d_num_selected_out * Pointer to the total number of items selected * (i.e., length of @p d_selected_out) */ template <typename ScanTileStateT, typename NumSelectedIteratorT> __global__ void DeviceThreeWayPartitionInitKernel(ScanTileStateT tile_state_1, ScanTileStateT tile_state_2, int num_tiles, NumSelectedIteratorT d_num_selected_out) { // Initialize tile status tile_state_1.InitializeStatus(num_tiles); tile_state_2.InitializeStatus(num_tiles); // Initialize d_num_selected_out if (blockIdx.x == 0) { if (threadIdx.x < 2) { d_num_selected_out[threadIdx.x] = 0; } } } /****************************************************************************** * Dispatch ******************************************************************************/ template <typename InputIteratorT, typename FirstOutputIteratorT, typename SecondOutputIteratorT, typename UnselectedOutputIteratorT, typename NumSelectedIteratorT, typename SelectFirstPartOp, typename SelectSecondPartOp, typename OffsetT> struct DispatchThreeWayPartitionIf { /***************************************************************************** * Types and constants ****************************************************************************/ 
using InputT = cub::detail::value_t<InputIteratorT>; using ScanTileStateT = cub::ScanTileState<OffsetT>; constexpr static int INIT_KERNEL_THREADS = 256; /***************************************************************************** * Tuning policies ****************************************************************************/ /// SM35 struct Policy350 { constexpr static int ITEMS_PER_THREAD = Nominal4BItemsToItems<InputT>(9); using ThreeWayPartitionPolicy = cub::AgentThreeWayPartitionPolicy<256, ITEMS_PER_THREAD, cub::BLOCK_LOAD_DIRECT, cub::LOAD_DEFAULT, cub::BLOCK_SCAN_WARP_SCANS>; }; /***************************************************************************** * Tuning policies of current PTX compiler pass ****************************************************************************/ using PtxPolicy = Policy350; // "Opaque" policies (whose parameterizations aren't reflected in the type signature) struct PtxThreeWayPartitionPolicyT : PtxPolicy::ThreeWayPartitionPolicy {}; /***************************************************************************** * Utilities ****************************************************************************/ /** * Initialize kernel dispatch configurations with the policies corresponding * to the PTX assembly we will use */ template <typename KernelConfig> CUB_RUNTIME_FUNCTION __forceinline__ static void InitConfigs( int ptx_version, KernelConfig &select_if_config) { if (CUB_IS_DEVICE_CODE) { #if CUB_INCLUDE_DEVICE_CODE (void)ptx_version; // We're on the device, so initialize the kernel dispatch configurations // with the current PTX policy select_if_config.template Init<PtxThreeWayPartitionPolicyT>(); #endif } else { #if CUB_INCLUDE_HOST_CODE // We're on the host, so lookup and initialize the kernel dispatch configurations with the policies that match the device's PTX version // (There's only one policy right now) (void)ptx_version; select_if_config.template Init<typename Policy350::ThreeWayPartitionPolicy>(); #endif } } /** * Kernel dispatch configuration. 
*/ struct KernelConfig { int block_threads; int items_per_thread; int tile_items; template <typename PolicyT> CUB_RUNTIME_FUNCTION __forceinline__ void Init() { block_threads = PolicyT::BLOCK_THREADS; items_per_thread = PolicyT::ITEMS_PER_THREAD; tile_items = block_threads * items_per_thread; } }; /***************************************************************************** * Dispatch entrypoints ****************************************************************************/ template <typename ScanInitKernelPtrT, typename SelectIfKernelPtrT> CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t Dispatch(void *d_temp_storage, std::size_t &temp_storage_bytes, InputIteratorT d_in, FirstOutputIteratorT d_first_part_out, SecondOutputIteratorT d_second_part_out, UnselectedOutputIteratorT d_unselected_out, NumSelectedIteratorT d_num_selected_out, SelectFirstPartOp select_first_part_op, SelectSecondPartOp select_second_part_op, OffsetT num_items, cudaStream_t stream, bool debug_synchronous, int /*ptx_version*/, ScanInitKernelPtrT three_way_partition_init_kernel, SelectIfKernelPtrT three_way_partition_kernel, KernelConfig three_way_partition_config) { cudaError error = cudaSuccess; do { // Get device ordinal int device_ordinal; if (CubDebug(error = cudaGetDevice(&device_ordinal))) { break; } // Number of input tiles int tile_size = three_way_partition_config.block_threads * three_way_partition_config.items_per_thread; int num_tiles = static_cast<int>(DivideAndRoundUp(num_items, tile_size)); // Specify temporary storage allocation requirements size_t allocation_sizes[2]; // bytes needed for tile status descriptors if (CubDebug(error = ScanTileStateT::AllocationSize(num_tiles, allocation_sizes[0]))) { break; } allocation_sizes[1] = allocation_sizes[0]; // Compute allocation pointers into the single storage blob (or compute // the necessary size of the blob) void* allocations[2] = {}; if (CubDebug(error = cub::AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) { break; } if (d_temp_storage == nullptr) { // Return if the caller is simply requesting the size of the storage // allocation break; } // Return if empty problem if (num_items == 0) { break; } // Construct the tile status interface ScanTileStateT tile_status_1; ScanTileStateT tile_status_2; if (CubDebug(error = tile_status_1.Init(num_tiles, allocations[0], allocation_sizes[0]))) { break; } if (CubDebug(error = tile_status_2.Init(num_tiles, allocations[1], allocation_sizes[1]))) { break; } // Log three_way_partition_init_kernel configuration int init_grid_size = CUB_MAX(1, DivideAndRoundUp(num_tiles, INIT_KERNEL_THREADS)); if (debug_synchronous) { _CubLog("Invoking three_way_partition_init_kernel<<<%d, %d, 0, %lld>>>()\n", init_grid_size, INIT_KERNEL_THREADS, reinterpret_cast<long long>(stream)); } // Invoke three_way_partition_init_kernel to initialize tile descriptors THRUST_NS_QUALIFIER::cuda_cub::launcher::triple_chevron( init_grid_size, INIT_KERNEL_THREADS, 0, stream ).doit(three_way_partition_init_kernel, tile_status_1, tile_status_2, num_tiles, d_num_selected_out); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) { break; } // Sync the stream if specified to flush runtime errors if (debug_synchronous) { if (CubDebug(error = cub::SyncStream(stream))) { break; } } // Get max x-dimension of grid int max_dim_x; if (CubDebug(error = cudaDeviceGetAttribute(&max_dim_x, cudaDevAttrMaxGridDimX, device_ordinal))) { break; } // Get grid size for scanning tiles dim3 scan_grid_size; 
scan_grid_size.z = 1; scan_grid_size.y = DivideAndRoundUp(num_tiles, max_dim_x); scan_grid_size.x = CUB_MIN(num_tiles, max_dim_x); // Log select_if_kernel configuration if (debug_synchronous) { // Get SM occupancy for select_if_kernel int range_select_sm_occupancy; if (CubDebug(error = MaxSmOccupancy( range_select_sm_occupancy, // out three_way_partition_kernel, three_way_partition_config.block_threads))) { break; } _CubLog("Invoking three_way_partition_kernel<<<{%d,%d,%d}, %d, 0, %lld>>>(), %d " "items per thread, %d SM occupancy\n", scan_grid_size.x, scan_grid_size.y, scan_grid_size.z, three_way_partition_config.block_threads, reinterpret_cast<long long>(stream), three_way_partition_config.items_per_thread, range_select_sm_occupancy); } // Invoke select_if_kernel THRUST_NS_QUALIFIER::cuda_cub::launcher::triple_chevron( scan_grid_size, three_way_partition_config.block_threads, 0, stream ).doit(three_way_partition_kernel, d_in, d_first_part_out, d_second_part_out, d_unselected_out, d_num_selected_out, tile_status_1, tile_status_2, select_first_part_op, select_second_part_op, num_items, num_tiles); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) { break; } // Sync the stream if specified to flush runtime errors if (debug_synchronous) { if (CubDebug(error = cub::SyncStream(stream))) { break; } } } while (0); return error; } /** * Internal dispatch routine */ CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t Dispatch( void* d_temp_storage, std::size_t& temp_storage_bytes, InputIteratorT d_in, FirstOutputIteratorT d_first_part_out, SecondOutputIteratorT d_second_part_out, UnselectedOutputIteratorT d_unselected_out, NumSelectedIteratorT d_num_selected_out, SelectFirstPartOp select_first_part_op, SelectSecondPartOp select_second_part_op, OffsetT num_items, cudaStream_t stream, bool debug_synchronous) { cudaError error = cudaSuccess; do { // Get PTX version int ptx_version = 0; if (CubDebug(error = cub::PtxVersion(ptx_version))) { break; } // Get kernel kernel dispatch configurations KernelConfig select_if_config; InitConfigs(ptx_version, select_if_config); // Dispatch if (CubDebug(error = Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_first_part_out, d_second_part_out, d_unselected_out, d_num_selected_out, select_first_part_op, select_second_part_op, num_items, stream, debug_synchronous, ptx_version, DeviceThreeWayPartitionInitKernel<ScanTileStateT, NumSelectedIteratorT>, DeviceThreeWayPartitionKernel<PtxThreeWayPartitionPolicyT, InputIteratorT, FirstOutputIteratorT, SecondOutputIteratorT, UnselectedOutputIteratorT, NumSelectedIteratorT, ScanTileStateT, SelectFirstPartOp, SelectSecondPartOp, OffsetT>, select_if_config))) { break; } } while (0); return error; } }; CUB_NAMESPACE_END
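/*
 * Usage sketch (illustration only; not part of CUB): the dispatch layer above
 * follows the standard two-phase CUB convention -- a first call with
 * d_temp_storage == nullptr only reports the required temporary-storage size,
 * and a second call with the allocated buffer performs the three-way
 * partition. The selection functors, the raw-pointer iterator choices, and the
 * name ExampleThreeWayPartition below are assumptions made for this sketch
 * only; user code would normally host something like this, not this header.
 */
namespace detail_three_way_partition_example
{
struct IsNegative   { __host__ __device__ bool operator()(int x) const { return x < 0;   } };
struct IsAtLeastTen { __host__ __device__ bool operator()(int x) const { return x >= 10; } };

inline cudaError_t ExampleThreeWayPartition(const int *d_in,
                                            int *d_first_part,
                                            int *d_second_part,
                                            int *d_unselected,
                                            int *d_num_selected, // holds two counters
                                            int num_items,
                                            cudaStream_t stream = 0)
{
  using DispatchT = cub::DispatchThreeWayPartitionIf<const int*, int*, int*, int*, int*,
                                                     IsNegative, IsAtLeastTen, int>;
  std::size_t temp_storage_bytes = 0;

  // Phase 1: query the temporary-storage requirement.
  cudaError_t error = DispatchT::Dispatch(nullptr, temp_storage_bytes,
                                          d_in, d_first_part, d_second_part, d_unselected,
                                          d_num_selected, IsNegative{}, IsAtLeastTen{},
                                          num_items, stream, false);
  if (error != cudaSuccess) return error;
  if (temp_storage_bytes == 0) temp_storage_bytes = 1; // avoid a zero-byte allocation

  // Phase 2: allocate the blob and run the partition.
  void *d_temp_storage = nullptr;
  error = cudaMalloc(&d_temp_storage, temp_storage_bytes);
  if (error != cudaSuccess) return error;
  error = DispatchT::Dispatch(d_temp_storage, temp_storage_bytes,
                              d_in, d_first_part, d_second_part, d_unselected,
                              d_num_selected, IsNegative{}, IsAtLeastTen{},
                              num_items, stream, false);
  cudaFree(d_temp_storage);
  return error;
}
} // namespace detail_three_way_partition_example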
#include "SortArray.h" #include <iostream> #include <cmath> using namespace std; // Kernel 函数: _bitonicSortByAscendKer(双调升序排序) // 实现并行双调排序,按照升序排序。套用 template 模板以便对多类型数据进行处理。 template < typename Type > static __global__ void _bitonicSortByAscendKer( Type *inarray, // 输入数组。 Type *outarray, // 排序后的输出数组。 int length // 数组长度,必须是 2 的整数次方。 ); // Kernel 函数: _bitonicSortByDescendKer(双调降序排序) // 实现并行双调排序,按照降序排序。套用 template 模板以便对多类型数据进行处理。 template < typename Type > static __global__ void _bitonicSortByDescendKer( Type *inarray, // 输入数组。 Type *outarray, // 排序后的输出数组。 int length // 数组长度,必须是 2 的整数次方。 ); // Host 静态方法:_bitonicSort(并行双调排序模板函数) // 并行双调排序模板函数,CLASS 成员方法调用此 Host 函数模板。 template < typename Type > static __host__ int _bitonicSort( Type *inarray, // 输出数组 Type *outarray, // 输入数组 int ishost, // 判断输入和输出数组位置 int sortflag, // 排序标记 int length // 排序数组长度 ); // Kernel 函数: _oddEvenMergeSortByAscendKer(Batcher's 奇偶合并升序排序) // 实现并行 Batcher's 奇偶合并排序,按照升序排序。套用 template 模板以便对 // 多类型数据进行处理。 template < typename Type > static __global__ void _oddEvenMergeSortByAscendKer( Type *inarray, // 输入数组。 Type *outarray, // 排序后的输出数组。 int length, // 数组长度,必须是 2 的幂次方。 int tempp, // 输入参数。 int tempq // 输入参数。 ); // Kernel 函数: _oddEvenMergeSortByDescendKer(Batcher's 奇偶合并降序排序) // 实现并行 Batcher's 奇偶合并排序,按照降序排序。套用 template 模板以便对 // 多类型数据进行处理。 template < typename Type > static __global__ void _oddEvenMergeSortByDescendKer( Type *inarray, // 输入数组。 Type *outarray, // 排序后的输出数组。 int length, // 数组长度,必须是 2 的幂次方。 int tempp, // 输入参数。 int tempq // 输入参数。 ); // Host 静态方法:_oddEvenMergeSort(Batcher's 奇偶合并降序排序模板函数) // Batcher's 奇偶合并降序排序模板函数,CLASS 成员方法调用此 Host 函数模板。 template < typename Type > static __host__ int _oddEvenMergeSort( Type *inarray, // 输出数组 Type *outarray, // 输入数组 int ishost, // 判断输入和输出数组位置 int sortflag, // 排序标记 int length // 排序数组长度 ); // Kernel 函数: _shearSortRowAscKer(行升序排序) // 对二维数据矩阵的每一行进行双调排序。套用 template 模板以便对多类型数据 // 进行处理。 template < typename Type > static __global__ void _shearSortRowAscKer( Type *inarray, // 输入数组。 int lensec // 矩阵行数。 ); // Kernel 函数: _shearSortRowDesKer(行降序排序) // 对二维数据矩阵的每一行进行双调排序。套用 template 模板以便对多类型数据 // 进行处理。 template < typename Type > static __global__ void _shearSortRowDesKer( Type *inarray, // 输入数组。 int lensec // 矩阵行数。 ); // Kernel 函数: _shearSortColAscKer(列升序排序) // 对二维数据矩阵的每一列进行双调排序。套用 template 模板以便对多类型数据 // 进行处理。 template < typename Type > static __global__ void _shearSortColAscKer( Type *inarray, // 输入数组。 int length, // 矩阵列数。 int lensec // 矩阵行数。 ); // Kernel 函数: _shearSortColDesKer(列降序排序) // 对二维数据矩阵的每一列进行双调排序。套用 template 模板以便对多类型数据 // 进行处理。 template < typename Type > static __global__ void _shearSortColDesKer( Type *inarray, // 输入数组。 int length, // 矩阵列数。 int lensec // 矩阵行数。 ); // Kernel 函数: _shearToPosKer(转换数据形式) // 将一维数据转换成二维矩阵。套用 template 模板以便对多类型数据进行处理。 template < typename Type > static __global__ void _shearToPosKer( Type *inarray, // 输入数组。 Type *outarray, // 矩阵列数。 int lensec // 矩阵行数。 ); // Host 静态方法:_shearSortLoop(shear 排序核心函数) // shear 排序的核心函数,需要判断是升序还是降序。 template < typename Type > static __host__ int _shearSortLoop( Type *inarray, // 输入数组。 Type *outarray, // 输出数组。 int length, // 矩阵列数。 int lensec, // 矩阵行数。 int sortflag // 排序标识。 ); // Host 静态方法:_shearSort(并行 shear 排序模板函数) // 并行双调排序模板函数,CLASS 成员方法调用此 Host 函数模板。 template < typename Type > static __host__ int _shearSort( Type *inarray, // 输出数组 Type *outarray, // 输入数组 int ishost, // 判断输入和输出数组位置 int sortflag, // 排序标记 int length, // 排序数组长度 int lensec // 排序矩阵的宽度 ); // Kernel 函数: _bitonicSortByAscendKer(双调升序排序) template < typename Type > static __global__ void 
_bitonicSortByAscendKer(Type *inarray, Type *outarray, int length)
{
    // Read the thread index.
    int tid = threadIdx.x;

    // Declare shared memory to speed up data access.
    extern __shared__ unsigned char sharedascend[];
    // Reinterpret the shared memory with the template type.
    Type *shareddata = (Type *)sharedascend;

    // Copy the array from global memory into shared memory.
    shareddata[tid] = inarray[tid];
    __syncthreads();

    int k, ixj, j;
    Type temp;

    // Parallel bitonic sort, ascending order.
    for (k = 2; k <= length; k <<= 1) {
        // Bitonic merge.
        for (j = k >> 1; j > 0; j >>= 1) {
            // ixj is the position compared and exchanged with the current
            // position tid.
            ixj = tid ^ j;
            if (ixj > tid) {
                // If (tid & k) == 0, exchange the two items into ascending
                // order.
                if ((tid & k) == 0 && (shareddata[tid] > shareddata[ixj])) {
                    // Swap the array items.
                    temp = shareddata[tid];
                    shareddata[tid] = shareddata[ixj];
                    shareddata[ixj] = temp;
                // If (tid & k) != 0, exchange the two items into descending
                // order.
                } else if ((tid & k) != 0 && shareddata[tid] < shareddata[ixj]) {
                    // Swap the array items.
                    temp = shareddata[tid];
                    shareddata[tid] = shareddata[ixj];
                    shareddata[ixj] = temp;
                }
            }
            __syncthreads();
        }
    }

    // Copy the sorted array from shared memory back to global memory.
    outarray[tid] = shareddata[tid];
}

// Kernel function: _bitonicSortByDescendKer (bitonic sort, descending)
template < typename Type >
static __global__ void _bitonicSortByDescendKer(Type *inarray, Type *outarray,
                                                int length)
{
    // Read the thread index.
    int tid = threadIdx.x;

    // Declare shared memory to speed up data access.
    extern __shared__ unsigned char shareddescend[];
    // Reinterpret the shared memory with the template type.
    Type *shareddata = (Type *)shareddescend;

    // Copy the array from global memory into shared memory.
    shareddata[tid] = inarray[tid];
    __syncthreads();

    int k, ixj, j;
    Type temp;

    // Parallel bitonic sort, descending order.
    for (k = 2; k <= length; k <<= 1) {
        // Bitonic merge.
        for (j = k >> 1; j > 0; j >>= 1) {
            // ixj is the position compared and exchanged with the current
            // position tid.
            ixj = tid ^ j;
            if (ixj > tid) {
                // If (tid & k) == 0, exchange the two items into descending
                // order.
                if ((tid & k) == 0 && (shareddata[tid] < shareddata[ixj])) {
                    // Swap the array items.
                    temp = shareddata[tid];
                    shareddata[tid] = shareddata[ixj];
                    shareddata[ixj] = temp;
                // If (tid & k) != 0, exchange the two items into ascending
                // order.
                } else if ((tid & k) != 0 && shareddata[tid] > shareddata[ixj]) {
                    // Swap the array items.
                    temp = shareddata[tid];
                    shareddata[tid] = shareddata[ixj];
                    shareddata[ixj] = temp;
                }
            }
            __syncthreads();
        }
    }

    // Copy the sorted array from shared memory back to global memory.
    outarray[tid] = shareddata[tid];
}

// Host static method: _bitonicSort (parallel bitonic sort template function)
template < typename Type >
static __host__ int _bitonicSort(Type *inarray, Type *outarray, int ishost,
                                 int sortflag, int length)
{
    // Check whether the input/output pointers are null.
    if (inarray == NULL || outarray == NULL)
        return NULL_POINTER;

    // If the input/output arrays reside on the host.
    if (ishost) {
        // Allocate device memory. All space is requested in one allocation and
        // the individual arrays are addressed through offsets.
        cudaError_t cudaerrcode;
        Type *alldevicedata, *devinarray, *devoutarray;
        cudaerrcode = cudaMalloc((void **)&alldevicedata,
                                 2 * length * sizeof (Type));
        if (cudaerrcode != cudaSuccess)
            return CUDA_ERROR;

        // Address the device memory through offsets.
        devinarray = alldevicedata;
        devoutarray = alldevicedata + length;

        // Copy inarray on the host into devinarray on the device.
        cudaerrcode = cudaMemcpy(devinarray, inarray, length * sizeof (Type),
                                 cudaMemcpyHostToDevice);
        if (cudaerrcode != cudaSuccess) {
            cudaFree(alldevicedata);
            return CUDA_ERROR;
        }

        if (sortflag == SORT_ARRAY_TYPE_ASC) {
            // Bitonic sort, ascending.
            _bitonicSortByAscendKer<Type><<<
                    1, length, length * sizeof (Type)>>>(
                    devinarray, devoutarray, length);
        } else if (sortflag == SORT_ARRAY_TYPE_DESC) {
            // Bitonic sort, descending.
            _bitonicSortByDescendKer<Type><<<
                    1, length, length * sizeof (Type)>>>(
                    devinarray, devoutarray, length);
        }

        // If the CUDA call failed, return an error code.
        if (cudaGetLastError() != cudaSuccess) {
            cudaFree(alldevicedata);
            return CUDA_ERROR;
        }

        // Copy devoutarray on the device back to outarray on the host.
        cudaerrcode = cudaMemcpy(outarray, devoutarray, length * sizeof (Type),
                                 cudaMemcpyDeviceToHost);
        if (cudaerrcode != cudaSuccess) {
            cudaFree(alldevicedata);
            return CUDA_ERROR;
        }

        // Release the temporary device memory.
        cudaFree(alldevicedata);

    // If the input/output arrays reside on the device.
    } else {
        if (sortflag
== SORT_ARRAY_TYPE_ASC) { // 双调升序排序。 _bitonicSortByAscendKer<Type><<< 1, length, length * sizeof (Type)>>>( inarray, outarray, length); } else if (sortflag == SORT_ARRAY_TYPE_DESC) { // 双调降序排序。 _bitonicSortByDescendKer<Type><<< 1, length, length * sizeof (Type)>>>( inarray, outarray, length); } // 若调用 CUDA 出错返回错误代码 if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; } return NO_ERROR; } // 成员方法:bitonicSort(并行双调排序) __host__ int SortArray::bitonicSort(int *inarray, int *outarray) { // 调用模板函数并返回。 return _bitonicSort(inarray, outarray, this->ishost, this->sortflag, this->length); } // 成员方法:bitonicSort(并行双调排序) __host__ int SortArray::bitonicSort(float *inarray, float *outarray) { // 调用模板函数并返回。 return _bitonicSort(inarray, outarray, this->ishost, this->sortflag, this->length); } // 成员方法:bitonicSort(并行双调排序) __host__ int SortArray::bitonicSort(unsigned char *inarray, unsigned char *outarray) { // 调用模板函数并返回。 return _bitonicSort(inarray, outarray, this->ishost, this->sortflag, this->length); } // 成员方法:bitonicSort(并行双调排序) __host__ int SortArray::bitonicSort(char *inarray, char *outarray) { // 调用模板函数并返回。 return _bitonicSort(inarray, outarray, this->ishost, this->sortflag, this->length); } // 成员方法:bitonicSort(并行双调排序) __host__ int SortArray::bitonicSort(double *inarray, double *outarray) { // 调用模板函数并返回。 return _bitonicSort(inarray, outarray, this->ishost, this->sortflag, this->length); } // Kernel 函数: _oddEvenMergeSortByAscendKer(Batcher's 奇偶合并升序排序) template < typename Type > static __global__ void _oddEvenMergeSortByAscendKer( Type *inarray, Type *outarray, int length, int tempp, int tempq) { // 读取线程号CUDA_ERROR。 int tid = threadIdx.x; // 声明共享内存,加快数据存取速度。 extern __shared__ unsigned char sharedoddascend[]; // 转化为模板类型的共享内存。 Type *shared = (Type *)sharedoddascend; shared[tid] = inarray[tid]; __syncthreads(); // 声明临时变量。 int p, q, r, d; Type temp; // 并行Batcher's 奇偶合并排序,升序排序。 for (p = tempp; p >= 1; p >>= 1) { // r 是标记位。 r = 0; // d 是步长。 d = p; for (q = tempq; q >= p; q >>= 1) { if ((tid < length - d) && ((tid & p) == r) && shared[tid] > shared[tid + d]) { // 交换数据项。 temp = shared[tid]; shared[tid] = shared[tid + d]; shared[tid + d] = temp; } d = q - p; r = p; __syncthreads(); } } // 将共享内存中的排序后的数组拷贝到全局内存中。 outarray[tid] = shared[tid]; } // Kernel 函数: _oddEvenMergeSortByDescendKer(Batcher's 奇偶合并降序排序) template < typename Type > static __global__ void _oddEvenMergeSortByDescendKer( Type *inarray, Type *outarray, int length, int tempp, int tempq) { // 读取线程号。 int tid = threadIdx.x; // 声明共享内存,加快数据存取速度。 extern __shared__ unsigned char sharedodddescend[]; // 转化为模板类型的共享内存。 Type *shared = (Type *)sharedodddescend; shared[tid] = inarray[tid]; __syncthreads(); // 声明临时变量。 int p , q, r, d; Type temp; // 并行 Batcher's 奇偶合并排序,降序排序。 for (p = tempp; p >= 1; p >>= 1) { // r 是标记位。 r = 0; // d 是步长。 d = p; for (q = tempq; q >= p; q >>= 1) { if ((tid < length - d) && ((tid & p) == r) && shared[tid] < shared[tid + d]) { // 交换数据项。 temp = shared[tid]; shared[tid] = shared[tid + d]; shared[tid + d] = temp; } d = q - p; r = p; __syncthreads(); } } // 将共享内存中的排序后的数组拷贝到全局内存中。 outarray[tid] = shared[tid]; } // Host 静态方法:_oddEvenMergeSort(Batcher's 奇偶合并降序排序模板函数) template < typename Type > static __host__ int _oddEvenMergeSort(Type *inarray, Type *outarray, int ishost, int sortflag, int length) { // 检查输入输出参数是否为空。 if (inarray == NULL || outarray == NULL) return NULL_POINTER; // 奇偶合并排序参数。 int t, tempp, tempq; t = log((float)length) / log(2.0f); tempp = 1 << (t - 1); tempq = 1 << (t - 1); // 如果输入输出数组在 Host 端。 if (ishost) { // 在 Device 
上分配空间。一次申请所有空间,然后通过偏移索引各个数组。 cudaError_t cudaerrcode; Type *alldevicedata, *devinarray, *devoutarray; cudaerrcode = cudaMalloc((void **)&alldevicedata, 2 * length * sizeof (Type)); if (cudaerrcode != cudaSuccess) return CUDA_ERROR; // 通过偏移读取 Device 端内存空间。 devinarray = alldevicedata; devoutarray = alldevicedata + length; //将 Host 上的 inarray 拷贝到 Device 上的 devinarray 中。 cudaerrcode = cudaMemcpy(devinarray, inarray, length * sizeof (Type), cudaMemcpyHostToDevice); if (cudaerrcode != cudaSuccess) { cudaFree(alldevicedata); return CUDA_ERROR; } if (sortflag == SORT_ARRAY_TYPE_ASC) { // Batcher's 奇偶升序排序。 _oddEvenMergeSortByAscendKer<Type><<< 1, length, length * sizeof (Type)>>>( devinarray, devoutarray, length, tempp, tempq); } else if (sortflag == SORT_ARRAY_TYPE_DESC) { // Batcher's 奇偶降序排序。 _oddEvenMergeSortByDescendKer<Type><<< 1, length, length * sizeof (Type)>>>( devinarray, devoutarray, length, tempp, tempq); } // 若调用 CUDA 出错返回错误代码 if (cudaGetLastError() != cudaSuccess) { cudaFree(alldevicedata); return CUDA_ERROR; } //将 Device上的 devoutarray 拷贝到 Host上。 cudaerrcode = cudaMemcpy(outarray, devoutarray, length * sizeof (Type), cudaMemcpyDeviceToHost); if (cudaerrcode != cudaSuccess) { cudaFree(alldevicedata); return CUDA_ERROR; } // 释放显存上的临时空间。 cudaFree(alldevicedata); // 如果输入输出数组在 Device 端。 } else { if (sortflag == SORT_ARRAY_TYPE_ASC) { // Batcher's 奇偶升序排序。 _oddEvenMergeSortByAscendKer<Type><<< 1, length, length * sizeof (Type)>>>( inarray, outarray, length, tempp, tempq); } else if (sortflag == SORT_ARRAY_TYPE_DESC) { // Batcher's 奇偶降序排序。 _oddEvenMergeSortByDescendKer<Type><<< 1, length, length * sizeof (Type)>>>( inarray, outarray, length, tempp, tempq); } // 若调用 CUDA 出错返回错误代码 if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; } return NO_ERROR; } // 成员方法:oddEvenMergeSort(并行 Batcher's 奇偶合并排序) __host__ int SortArray::oddEvenMergeSort(int *inarray, int *outarray) { // 调用模板函数并返回。 return _oddEvenMergeSort(inarray, outarray, this->ishost, this->sortflag, this->length); } // 成员方法:oddEvenMergeSort(并行 Batcher's 奇偶合并排序) __host__ int SortArray::oddEvenMergeSort(float *inarray, float *outarray) { // 调用模板函数并返回。 return _oddEvenMergeSort(inarray, outarray, this->ishost, this->sortflag, this->length); } // 成员方法:oddEvenMergeSort(并行 Batcher's 奇偶合并排序) __host__ int SortArray::oddEvenMergeSort(unsigned char *inarray, unsigned char *outarray) { // 调用模板函数并返回。 return _oddEvenMergeSort(inarray, outarray, this->ishost, this->sortflag, this->length); } // 成员方法:oddEvenMergeSort(并行 Batcher's 奇偶合并排序) __host__ int SortArray::oddEvenMergeSort(char *inarray, char *outarray) { // 调用模板函数并返回。 return _oddEvenMergeSort(inarray, outarray, this->ishost, this->sortflag, this->length); } // 成员方法:oddEvenMergeSort(并行 Batcher's 奇偶合并排序) __host__ int SortArray::oddEvenMergeSort(double *inarray, double *outarray) { // 调用模板函数并返回。 return _oddEvenMergeSort(inarray, outarray, this->ishost, this->sortflag, this->length); } // Kernel 函数: _shearSortRowAscKer(行升序排序) template < typename Type > static __global__ void _shearSortRowAscKer(Type *inarray, int lensec) { // 读取线程号和块号。 int cid = threadIdx.x; int rid = blockIdx.x; // 将全局内存中的数组拷贝到共享内存了。 extern __shared__ unsigned char sharedrowasc[]; // 转化为模板类型的共享内存。 Type *shared = (Type *)sharedrowasc; if (cid < lensec) shared[cid] = inarray[rid * lensec + cid]; __syncthreads(); // 声明临时变量。 int ixj; Type temp; // 偶数行升序排序。 if (rid % 2 == 0) { for (int k = 2; k <= lensec; k <<= 1) { // 双调合并。 for (int j = k >> 1; j > 0; j >>= 1) { // ixj 是与当前位置 cid 进行比较交换的位置。 ixj = cid ^ j; if (ixj > cid) { // 如果 (cid 
& k) == 0,按照升序交换两项。 if ((cid & k) == 0 && (shared[cid] > shared[ixj])) { // 交换数组项。 temp = shared[cid]; shared[cid] = shared[ixj]; shared[ixj] = temp; // 如果 (cid & k) == 0,按照降序交换两项。 } else if ((cid & k) != 0 && shared[cid] < shared[ixj]) { // 交换数组项。 temp = shared[cid]; shared[cid] = shared[ixj]; shared[ixj] = temp; } } __syncthreads(); } } // 奇数行降序排序。 } else { for (int k = 2; k <= lensec; k <<= 1) { // 双调合并。 for (int j = k >> 1; j > 0; j >>= 1) { // ixj 是与当前位置 cid 进行比较交换的位置。 ixj = cid ^ j; if (ixj > cid) { // 如果 (cid & k) == 0,按照降序交换两项。 if ((cid & k) == 0 && (shared[cid] < shared[ixj])) { // 交换数组项。 temp = shared[cid]; shared[cid] = shared[ixj]; shared[ixj] = temp; // 如果 (cid & k) == 0,按照升序交换两项。 } else if ((cid & k) != 0 && shared[cid] > shared[ixj]) { // 交换数组项。 temp = shared[cid]; shared[cid] = shared[ixj]; shared[ixj] = temp; } } __syncthreads(); } } } // 将共享内存中的排序后的数组拷贝到全局内存中。 if (cid <lensec) inarray[rid * lensec + cid] = shared[cid]; } // Kernel 函数: _shearSortRowDesKer(行降序排序) template < typename Type > static __global__ void _shearSortRowDesKer(Type *inarray, int lensec) { // 读取线程号和块号。 int cid = threadIdx.x; int rid = blockIdx.x; // 将全局内存中的数组拷贝到共享内存了。 extern __shared__ unsigned char sharedrowdes[]; // 转化为模板类型的共享内存。 Type *shared = (Type *)sharedrowdes; if (cid < lensec) shared[cid] = inarray[rid * lensec + cid]; __syncthreads(); // 声明临时变量 int ixj; Type temp; // 偶数行降序排序。 if (rid % 2 == 0) { for (int k = 2; k <= lensec; k <<= 1) { // 双调合并。 for (int j = k >> 1; j > 0; j >>= 1) { // ixj 是与当前位置 cid 进行比较交换的位置。 ixj = cid ^ j; if (ixj > cid) { // 如果 (cid & k) == 0,按照降序交换两项。 if ((cid & k) == 0 && (shared[cid] < shared[ixj])) { // 交换数组项。 temp = shared[cid]; shared[cid] = shared[ixj]; shared[ixj] = temp; // 如果 (cid & k) == 0,按照升序交换两项。 } else if ((cid & k) != 0 && shared[cid] > shared[ixj]) { // 交换数组项。 temp = shared[cid]; shared[cid] = shared[ixj]; shared[ixj] = temp; } } __syncthreads(); } } // 奇数行升序排序。 } else { for (int k = 2; k <= lensec; k <<= 1) { // 双调合并。 for (int j = k >> 1; j > 0; j >>= 1) { // ixj 是与当前位置 cid 进行比较交换的位置。 ixj = cid ^ j; if (ixj > cid) { // 如果 (cid & k) == 0,按照降序交换两项。 if ((cid & k) == 0 && (shared[cid] > shared[ixj])) { // 交换数组项。 temp = shared[cid]; shared[cid] = shared[ixj]; shared[ixj] = temp; // 如果 (cid & k) == 0,按照升序交换两项。 } else if ((cid & k) != 0 && shared[cid] < shared[ixj]) { // 交换数组项。 temp = shared[cid]; shared[cid] = shared[ixj]; shared[ixj] = temp; } } __syncthreads(); } } } // 将共享内存中的排序后的数组拷贝到全局内存中。 if (cid < lensec) inarray[rid * lensec + cid] = shared[cid]; } // Kernel 函数: _shearSortColAscKer(列升序排序) template < typename Type > static __global__ void _shearSortColAscKer(Type *inarray, int length, int lensec) { // 读取线程号和块号。 int cid = threadIdx.x; int rid = blockIdx.x; if (rid >= lensec) return; // 将全局内存中的数组拷贝到共享内存了。 extern __shared__ unsigned char sharedcolasc[]; // 转化为模板类型的共享内存。 Type *shared = (Type *)sharedcolasc; if (cid < length) shared[cid] = inarray[rid + cid * lensec]; __syncthreads(); // 声明临时变量。 int ixj; Type temp; // 并行双调排序,升序排序。 for (int k = 2; k <= length; k <<= 1) { // 双调合并。 for (int j = k >> 1; j > 0; j >>= 1) { // ixj 是与当前位置 cid 进行比较交换的位置。 ixj = cid ^ j; if (ixj > cid) { // 如果 (cid & k) == 0,按照升序交换两项。 if ((cid & k) == 0 && (shared[cid] > shared[ixj])) { // 交换数组项。 temp = shared[cid]; shared[cid] = shared[ixj]; shared[ixj] = temp; // 如果 (cid & k) == 0,按照降序交换两项。 } else if ((cid & k) != 0 && shared[cid] < shared[ixj]) { // 交换数组项。 temp = shared[cid]; shared[cid] = shared[ixj]; shared[ixj] = temp; } } __syncthreads(); } } // 将共享内存中的排序后的数组拷贝到全局内存中。 if (cid < 
length) inarray[rid + cid * lensec] = shared[cid]; } // Kernel 函数: _shearSortColDesKer(列降序排序) template < typename Type > static __global__ void _shearSortColDesKer(Type *inarray, int length, int lensec) { // 读取线程号和块号。 int cid = threadIdx.x; int rid = blockIdx.x; if (rid >= lensec) return; // 将全局内存中的数组拷贝到共享内存了。 extern __shared__ unsigned char sharedcoldes[]; // 转化为模板类型的共享内存。 Type *shared = (Type *)sharedcoldes; if (cid < length) shared[cid] = inarray[rid + cid * lensec]; __syncthreads(); // 声明临时变量。 int ixj; Type temp; // 并行双调排序,降序排序。 for (int k = 2; k <= length; k <<= 1) { // 双调合并。 for (int j = k >> 1; j > 0; j >>= 1) { // ixj 是与当前位置 cid 进行比较交换的位置。 ixj = cid ^ j; if (ixj > cid) { // 如果 (cid & k) == 0,按照降序交换两项。 if ((cid & k) == 0 && (shared[cid] < shared[ixj])) { // 交换数组项。 temp = shared[cid]; shared[cid] = shared[ixj]; shared[ixj] = temp; // 如果 (cid & k) == 0,按照升序交换两项。 } else if ((cid & k) != 0 && shared[cid] > shared[ixj]) { // 交换数组项。 temp = shared[cid]; shared[cid] = shared[ixj]; shared[ixj] = temp; } } __syncthreads(); } } // 将共享内存中的排序后的数组拷贝到全局内存中。 if (cid < length) inarray[rid + cid * lensec] = shared[cid]; } // Kernel 函数: _shearToPosKer(转换数据形式) template < typename Type > static __global__ void _shearToPosKer(Type *inarray, Type *outarray, int lensec) { // 读取线程号和块号。 int cid = threadIdx.x; int rid = blockIdx.x; // 将全局内存中的数组拷贝到共享内存了。 extern __shared__ unsigned char sharedpos[]; // 转化为模板类型的共享内存。 Type *shared = (Type *)sharedpos; shared[cid] = inarray[rid * lensec + cid]; __syncthreads(); // 偶数行赋值。 if (rid % 2 == 0) outarray[rid * lensec + cid] = shared[cid]; // 奇数行赋值。 else outarray[rid * lensec + cid] = shared[lensec - 1 - cid]; } // Host 静态方法:_shearSortLoop(shear 排序核心函数) template < typename Type > static __host__ int _shearSortLoop(Type *inarray, Type * outarray, int length, int lensec, int sortflag) { // 计算二维数组中长和宽的较大值。 int judge = (length > lensec) ? 
length : lensec; // 将输入的一维数组转换成二维数组,便于后面的排序操作。 _shearToPosKer<Type><<<length, lensec, judge * sizeof (Type)>>>( inarray, outarray, lensec); // 若调用 CUDA 出错返回错误代码 if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; for (int i = length; i >= 1; i >>= 1) { if (sortflag == 2) { // 首先进行列排序。 _shearSortColAscKer<Type><<<judge, judge, judge * sizeof (Type)>>>( outarray, length, lensec); // 若调用 CUDA 出错返回错误代码 if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; // 然后进行行排序。 _shearSortRowAscKer<Type><<<judge, judge, judge * sizeof (Type)>>>( outarray, lensec); // 若调用 CUDA 出错返回错误代码 if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; } else { // 首先进行列排序。 _shearSortColDesKer<Type><<<judge, judge, judge * sizeof (Type)>>>( outarray, length, lensec); // 若调用 CUDA 出错返回错误代码 if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; // 然后进行行排序。 _shearSortRowDesKer<Type><<<judge, judge, judge * sizeof (Type)>>>( outarray, lensec); // 若调用 CUDA 出错返回错误代码 if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; } } // 整理排序后的数组。 _shearToPosKer<Type><<<length, lensec, judge * sizeof (Type)>>>( outarray, outarray, lensec); // 若调用 CUDA 出错返回错误代码。 if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; return NO_ERROR; } // Host 静态方法:_shearSort(并行 shear 排序模板函数) template < typename Type > static __host__ int _shearSort(Type *inarray, Type *outarray, bool ishost, int sortflag, int length, int lensec) { // 检查输入输出数组是否为空。 if (inarray == NULL || outarray == NULL) return NULL_POINTER; // 检查算法参数的有效性。 if ((sortflag != 1 && sortflag != 2) || (length % 2 != 0) || lensec < 0) return INVALID_DATA; // 数据项总个数。 int datalength = length * lensec; // 局部变量,错误码。 int errcode; if (ishost) { // 在 Device 上分配空间。一次申请所有空间,然后通过偏移索引各个数组。 cudaError_t cudaerrcode; Type *alldevicedata, *devinarray, *devoutarray; cudaerrcode = cudaMalloc((void **)&alldevicedata, 2 * datalength * sizeof (Type)); if (cudaerrcode != cudaSuccess) return CUDA_ERROR; // 通过偏移读取 Device 端内存空间。 devinarray = alldevicedata; devoutarray = alldevicedata + datalength; //将 Host 上的 inarray 拷贝到 Device 上的 devinarray 中。 cudaerrcode = cudaMemcpy(devinarray, inarray, datalength * sizeof (Type), cudaMemcpyHostToDevice); if (cudaerrcode != cudaSuccess) { cudaFree(alldevicedata); return CUDA_ERROR; } // 调用排序核心函数。 errcode = _shearSortLoop<Type>(devinarray, devoutarray, length, lensec, sortflag); if (errcode != NO_ERROR) { cudaFree(alldevicedata); return errcode; } //将 Device上的 devoutarray 拷贝到 Host 上。 cudaerrcode = cudaMemcpy(outarray, devoutarray, datalength * sizeof (Type), cudaMemcpyDeviceToHost); if (cudaerrcode != cudaSuccess) { cudaFree(alldevicedata); return CUDA_ERROR; } // 释放显存上的临时空间。 cudaFree(alldevicedata); return NO_ERROR; } else { // 调用排序核心函数。 errcode = _shearSortLoop<Type>(inarray, outarray, length, lensec, sortflag); if (errcode != NO_ERROR) return errcode; } return NO_ERROR; } // 成员方法:shearSort(并行 shear 排序) __host__ int SortArray::shearSort(int *inarray, int *outarray) { // 调用模板函数并返回。 return _shearSort(inarray, outarray, this->ishost, this->sortflag, this->length, this->lensec); } // 成员方法:shearSort(并行 shear 排序) __host__ int SortArray::shearSort(float *inarray, float *outarray) { // 调用模板函数并返回。 return _shearSort(inarray, outarray, this->ishost, this->sortflag, this->length, this->lensec); } // 成员方法:shearSort(并行 shear 排序) __host__ int SortArray::shearSort(unsigned char *inarray, unsigned char *outarray) { // 调用模板函数并返回。 return _shearSort(inarray, outarray, this->ishost, this->sortflag, this->length, this->lensec); } // 成员方法:shearSort(并行 shear 排序) __host__ int 
SortArray::shearSort(char *inarray, char *outarray)
{
    // Forward to the template function and return its error code.
    return _shearSort(inarray, outarray, this->ishost, this->sortflag,
                      this->length, this->lensec);
}

// Member method: shearSort (parallel shear sort)
__host__ int SortArray::shearSort(double *inarray, double *outarray)
{
    // Forward to the template function and return its error code.
    return _shearSort(inarray, outarray, this->ishost, this->sortflag,
                      this->length, this->lensec);
}
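/*
 * Reference sketch (illustration only; not part of the class interface): a
 * host-side, single-threaded version of the compare-exchange network used by
 * _bitonicSortByAscendKer above. It performs the same (k, j, tid ^ j) sweeps
 * sequentially, so it can be used to cross-check the kernel output for a
 * power-of-two length. The name hostBitonicSortAscend is hypothetical.
 */
template < typename Type >
static void hostBitonicSortAscend(Type *data, int length)
{
    // length must be a power of two, matching the kernel's requirement.
    for (int k = 2; k <= length; k <<= 1) {             // size of the bitonic sequences
        for (int j = k >> 1; j > 0; j >>= 1) {          // compare-exchange distance
            for (int tid = 0; tid < length; ++tid) {
                int ixj = tid ^ j;                      // partner index
                if (ixj > tid) {
                    bool ascending = ((tid & k) == 0);
                    if (( ascending && data[tid] > data[ixj]) ||
                        (!ascending && data[tid] < data[ixj])) {
                        Type temp = data[tid];
                        data[tid] = data[ixj];
                        data[ixj] = temp;
                    }
                }
            }
        }
    }
}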
#include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/functional.hpp" #include "opencv2/core/cuda/emulation.hpp" #include "opencv2/core/cuda/transform.hpp" using namespace cv::cuda; using namespace cv::cuda::device; namespace hist { __global__ void histogram256Kernel(const uchar* src, int cols, int rows, size_t step, int* hist) { __shared__ int shist[256]; const int y = blockIdx.x * blockDim.y + threadIdx.y; const int tid = threadIdx.y * blockDim.x + threadIdx.x; shist[tid] = 0; __syncthreads(); if (y < rows) { const unsigned int* rowPtr = (const unsigned int*) (src + y * step); const int cols_4 = cols / 4; for (int x = threadIdx.x; x < cols_4; x += blockDim.x) { unsigned int data = rowPtr[x]; Emulation::smem::atomicAdd(&shist[(data >> 0) & 0xFFU], 1); Emulation::smem::atomicAdd(&shist[(data >> 8) & 0xFFU], 1); Emulation::smem::atomicAdd(&shist[(data >> 16) & 0xFFU], 1); Emulation::smem::atomicAdd(&shist[(data >> 24) & 0xFFU], 1); } if (cols % 4 != 0 && threadIdx.x == 0) { for (int x = cols_4 * 4; x < cols; ++x) { unsigned int data = ((const uchar*)rowPtr)[x]; Emulation::smem::atomicAdd(&shist[data], 1); } } } __syncthreads(); const int histVal = shist[tid]; if (histVal > 0) ::atomicAdd(hist + tid, histVal); } void histogram256(PtrStepSzb src, int* hist, cudaStream_t stream) { const dim3 block(32, 8); const dim3 grid(divUp(src.rows, block.y)); histogram256Kernel<<<grid, block, 0, stream>>>(src.data, src.cols, src.rows, src.step, hist); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } __global__ void histogram256Kernel(const uchar* src, int cols, int rows, size_t srcStep, const uchar* mask, size_t maskStep, int* hist) { __shared__ int shist[256]; const int y = blockIdx.x * blockDim.y + threadIdx.y; const int tid = threadIdx.y * blockDim.x + threadIdx.x; shist[tid] = 0; __syncthreads(); if (y < rows) { const unsigned int* rowPtr = (const unsigned int*) (src + y * srcStep); const unsigned int* maskRowPtr = (const unsigned int*) (mask + y * maskStep); const int cols_4 = cols / 4; for (int x = threadIdx.x; x < cols_4; x += blockDim.x) { unsigned int data = rowPtr[x]; unsigned int m = maskRowPtr[x]; if ((m >> 0) & 0xFFU) Emulation::smem::atomicAdd(&shist[(data >> 0) & 0xFFU], 1); if ((m >> 8) & 0xFFU) Emulation::smem::atomicAdd(&shist[(data >> 8) & 0xFFU], 1); if ((m >> 16) & 0xFFU) Emulation::smem::atomicAdd(&shist[(data >> 16) & 0xFFU], 1); if ((m >> 24) & 0xFFU) Emulation::smem::atomicAdd(&shist[(data >> 24) & 0xFFU], 1); } if (cols % 4 != 0 && threadIdx.x == 0) { for (int x = cols_4 * 4; x < cols; ++x) { unsigned int data = ((const uchar*)rowPtr)[x]; unsigned int m = ((const uchar*)maskRowPtr)[x]; if (m) Emulation::smem::atomicAdd(&shist[data], 1); } } } __syncthreads(); const int histVal = shist[tid]; if (histVal > 0) ::atomicAdd(hist + tid, histVal); } void histogram256(PtrStepSzb src, PtrStepSzb mask, int* hist, cudaStream_t stream) { const dim3 block(32, 8); const dim3 grid(divUp(src.rows, block.y)); histogram256Kernel<<<grid, block, 0, stream>>>(src.data, src.cols, src.rows, src.step, mask.data, mask.step, hist); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } } ///////////////////////////////////////////////////////////////////////// namespace hist { __device__ __forceinline__ void histEvenInc(int* shist, uint data, int binSize, int lowerLevel, int upperLevel) { if (data >= lowerLevel && data <= upperLevel) { const uint ind = (data - lowerLevel) / binSize; 
Emulation::smem::atomicAdd(shist + ind, 1); } } __global__ void histEven8u(const uchar* src, const size_t step, const int rows, const int cols, int* hist, const int binCount, const int binSize, const int lowerLevel, const int upperLevel) { extern __shared__ int shist[]; const int y = blockIdx.x * blockDim.y + threadIdx.y; const int tid = threadIdx.y * blockDim.x + threadIdx.x; if (tid < binCount) shist[tid] = 0; __syncthreads(); if (y < rows) { const uchar* rowPtr = src + y * step; const uint* rowPtr4 = (uint*) rowPtr; const int cols_4 = cols / 4; for (int x = threadIdx.x; x < cols_4; x += blockDim.x) { const uint data = rowPtr4[x]; histEvenInc(shist, (data >> 0) & 0xFFU, binSize, lowerLevel, upperLevel); histEvenInc(shist, (data >> 8) & 0xFFU, binSize, lowerLevel, upperLevel); histEvenInc(shist, (data >> 16) & 0xFFU, binSize, lowerLevel, upperLevel); histEvenInc(shist, (data >> 24) & 0xFFU, binSize, lowerLevel, upperLevel); } if (cols % 4 != 0 && threadIdx.x == 0) { for (int x = cols_4 * 4; x < cols; ++x) { const uchar data = rowPtr[x]; histEvenInc(shist, data, binSize, lowerLevel, upperLevel); } } } __syncthreads(); if (tid < binCount) { const int histVal = shist[tid]; if (histVal > 0) ::atomicAdd(hist + tid, histVal); } } void histEven8u(PtrStepSzb src, int* hist, int binCount, int lowerLevel, int upperLevel, cudaStream_t stream) { const dim3 block(32, 8); const dim3 grid(divUp(src.rows, block.y)); const int binSize = divUp(upperLevel - lowerLevel, binCount); const size_t smem_size = binCount * sizeof(int); histEven8u<<<grid, block, smem_size, stream>>>(src.data, src.step, src.rows, src.cols, hist, binCount, binSize, lowerLevel, upperLevel); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } } ///////////////////////////////////////////////////////////////////////// namespace hist { struct EqualizeHist : unary_function<uchar, uchar> { const uchar* lut; __host__ EqualizeHist(const uchar* _lut) : lut(_lut) {} __device__ __forceinline__ uchar operator ()(uchar val) const { return lut[val]; } }; } namespace cv { namespace cuda { namespace device { template <> struct TransformFunctorTraits<hist::EqualizeHist> : DefaultTransformFunctorTraits<hist::EqualizeHist> { enum { smart_shift = 4 }; }; }}} namespace hist { void equalizeHist(PtrStepSzb src, PtrStepSzb dst, const uchar* lut, cudaStream_t stream) { device::transform(src, dst, EqualizeHist(lut), WithOutMask(), stream); } __global__ void buildLutKernel(int* hist, unsigned char* lut, int size) { __shared__ int warp_smem[8]; __shared__ int hist_smem[8][33]; #define HIST_SMEM_NO_BANK_CONFLICT(idx) hist_smem[(idx) >> 5][(idx) & 31] const int tId = threadIdx.x; const int warpId = threadIdx.x / 32; const int laneId = threadIdx.x % 32; // Step1 - Find minimum non-zero value in hist and make it zero HIST_SMEM_NO_BANK_CONFLICT(tId) = hist[tId]; int nonZeroIdx = HIST_SMEM_NO_BANK_CONFLICT(tId) > 0 ? 
tId : 256; __syncthreads(); for (int delta = 16; delta > 0; delta /= 2) { #if __CUDACC_VER_MAJOR__ >= 9 int shflVal = __shfl_down_sync(0xFFFFFFFF, nonZeroIdx, delta); #else int shflVal = __shfl_down(nonZeroIdx, delta); #endif if (laneId < delta) nonZeroIdx = min(nonZeroIdx, shflVal); } if (laneId == 0) warp_smem[warpId] = nonZeroIdx; __syncthreads(); if (tId < 8) { int warpVal = warp_smem[tId]; for (int delta = 4; delta > 0; delta /= 2) { #if __CUDACC_VER_MAJOR__ >= 9 int shflVal = __shfl_down_sync(0x000000FF, warpVal, delta); #else int shflVal = __shfl_down(warpVal, delta); #endif if (tId < delta) warpVal = min(warpVal, shflVal); } if (tId == 0) { warp_smem[0] = warpVal; // warpVal - minimum index } } __syncthreads(); const int minNonZeroIdx = warp_smem[0]; const int minNonZeroVal = HIST_SMEM_NO_BANK_CONFLICT(minNonZeroIdx); if (minNonZeroVal == size) { // This is a special case: the whole image has the same color lut[tId] = 0; if (tId == minNonZeroIdx) lut[tId] = minNonZeroIdx; return; } if (tId == 0) HIST_SMEM_NO_BANK_CONFLICT(minNonZeroIdx) = 0; __syncthreads(); // Step2 - Inclusive sum // Algorithm from GPU Gems 3 (A Work-Efficient Parallel Scan) // https://developer.nvidia.com/gpugems/gpugems3/part-vi-gpu-computing/chapter-39-parallel-prefix-sum-scan-cuda // Step2 Phase1 - The Up-Sweep Phase for (int delta = 1; delta < 256; delta *= 2) { if (tId < 128 / delta) { int idx = 255 - 2 * tId * delta; HIST_SMEM_NO_BANK_CONFLICT(idx) += HIST_SMEM_NO_BANK_CONFLICT(idx - delta); } __syncthreads(); } // Step2 Phase2 - The Down-Sweep Phase if (tId == 0) HIST_SMEM_NO_BANK_CONFLICT(255) = 0; for (int delta = 128; delta >= 1; delta /= 2) { if (tId < 128 / delta) { int rootIdx = 255 - tId * delta * 2; int leftIdx = rootIdx - delta; int tmp = HIST_SMEM_NO_BANK_CONFLICT(leftIdx); HIST_SMEM_NO_BANK_CONFLICT(leftIdx) = HIST_SMEM_NO_BANK_CONFLICT(rootIdx); HIST_SMEM_NO_BANK_CONFLICT(rootIdx) += tmp; } __syncthreads(); } // Step2 Phase3 - Convert exclusive sum to inclusive sum int tmp = HIST_SMEM_NO_BANK_CONFLICT(tId); __syncthreads(); if (tId >= 1) HIST_SMEM_NO_BANK_CONFLICT(tId - 1) = tmp; if (tId == 255) HIST_SMEM_NO_BANK_CONFLICT(tId) = tmp + hist[tId]; __syncthreads(); // Step3 - Scale values to build lut lut[tId] = saturate_cast<unsigned char>(HIST_SMEM_NO_BANK_CONFLICT(tId) * (255.0f / (size - minNonZeroVal))); #undef HIST_SMEM_NO_BANK_CONFLICT } void buildLut(PtrStepSzi hist, PtrStepSzb lut, int size, cudaStream_t stream) { buildLutKernel<<<1, 256, 0, stream>>>(hist.data, lut.data, size); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } } #endif /* CUDA_DISABLER */
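/*
 * Host-side reference sketch (illustration only; not part of OpenCV): the same
 * LUT construction that buildLutKernel performs with a warp-shuffle minimum
 * and a work-efficient (Blelloch) up-sweep/down-sweep scan, written
 * sequentially. It zeroes the first non-zero histogram bin, takes an inclusive
 * prefix sum, and rescales by 255 / (size - minNonZeroVal), matching the
 * kernel's arithmetic (rounding is approximated with round-half-up instead of
 * saturate_cast). The name buildLutReference is hypothetical.
 */
static void buildLutReference(const int hist[256], unsigned char lut[256], int size)
{
    // Step 1: locate the first non-zero bin and remember its count.
    int minNonZeroIdx = 256, minNonZeroVal = 0;
    for (int i = 0; i < 256; ++i)
        if (hist[i] > 0) { minNonZeroIdx = i; minNonZeroVal = hist[i]; break; }

    // Special case handled by the kernel: the whole image has a single color.
    if (minNonZeroIdx < 256 && minNonZeroVal == size)
    {
        for (int i = 0; i < 256; ++i) lut[i] = 0;
        lut[minNonZeroIdx] = (unsigned char)minNonZeroIdx;
        return;
    }

    // Step 2: inclusive prefix sum with the first non-zero bin forced to zero
    // (the kernel does this with the shared-memory scan).
    int sum = 0;
    for (int i = 0; i < 256; ++i)
    {
        int binVal = (i == minNonZeroIdx) ? 0 : hist[i];
        sum += binVal;

        // Step 3: scale to [0, 255] and clamp.
        float scaled = sum * (255.0f / (size - minNonZeroVal));
        int rounded = (int)(scaled + 0.5f);
        lut[i] = (unsigned char)(rounded < 0 ? 0 : (rounded > 255 ? 255 : rounded));
    }
}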
#include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include "box_convolution.h" // for `enum class Parameter` #define BLOCK_SIZE 256 #define NUM_THREADS 1024 using std::min; using std::max; namespace gpu { template <typename T, size_t N> using CudaAcsr = const at::PackedTensorAccessor32<T, N, torch::RestrictPtrTraits>; // TODO switch to square blocks template <bool normalize, bool exact, typename scalar_t> __global__ void boxConvUpdateGradInputKernel( CudaAcsr<scalar_t,3> gradOutputInt, scalar_t * __restrict__ tmpArray, const int32_t * __restrict__ xMinInt , const int32_t * __restrict__ xMaxInt , const int32_t * __restrict__ yMinInt , const int32_t * __restrict__ yMaxInt , const scalar_t * __restrict__ xMinFrac, const scalar_t * __restrict__ xMaxFrac, const scalar_t * __restrict__ yMinFrac, const scalar_t * __restrict__ yMaxFrac, const scalar_t * __restrict__ area, const int nParams) { int32_t id = NUM_THREADS * blockIdx.x + threadIdx.x; tmpArray += id; const int32_t h = gradOutputInt.size(1) - 1; const int32_t w = gradOutputInt.size(2) - 1; const int32_t y = id % w; id /= w; const int32_t x = id % h; id /= h; const int32_t paramIdx = id % nParams; // `id` is now the current plane number auto gradOutputIntPlane = gradOutputInt[id]; if (id < gradOutputInt.size(0)) { const int32_t xMinCurr = xMinInt[paramIdx]; const int32_t xMaxCurr = xMaxInt[paramIdx]; const int32_t yMinCurr = yMinInt[paramIdx]; const int32_t yMaxCurr = yMaxInt[paramIdx]; const int t = max(0, min(x+xMinCurr, h)); const int b = max(0, min(x+xMaxCurr, h)); const int l = max(0, min(y+yMinCurr, w)); const int r = max(0, min(y+yMaxCurr, w)); scalar_t outValue; outValue = gradOutputIntPlane[b][r] - gradOutputIntPlane[t][r] - gradOutputIntPlane[b][l] + gradOutputIntPlane[t][l]; if (exact) { const scalar_t xMinCurrFrac = xMinFrac[paramIdx]; const scalar_t xMaxCurrFrac = xMaxFrac[paramIdx]; const scalar_t yMinCurrFrac = yMinFrac[paramIdx]; const scalar_t yMaxCurrFrac = yMaxFrac[paramIdx]; const int tAdv = x+xMinCurr-1 < h ? max(0, min(t-1, h)) : t; const int bAdv = x+xMaxCurr >= 0 ? max(0, min(b+1, h)) : b; const int lAdv = y+yMinCurr-1 < w ? max(0, min(l-1, w)) : l; const int rAdv = y+yMaxCurr >= 0 ? max(0, min(r+1, w)) : r; // -- xMax border outValue += ( gradOutputIntPlane[bAdv][r] - gradOutputIntPlane[b ][r] - gradOutputIntPlane[bAdv][l] + gradOutputIntPlane[b ][l] ) * xMaxCurrFrac; // -- yMax border outValue += ( gradOutputIntPlane[b][rAdv] - gradOutputIntPlane[b][r ] - gradOutputIntPlane[t][rAdv] + gradOutputIntPlane[t][r ] ) * yMaxCurrFrac; // -- xMin border outValue += ( gradOutputIntPlane[t ][r] - gradOutputIntPlane[tAdv][r] - gradOutputIntPlane[t ][l] + gradOutputIntPlane[tAdv][l] ) * xMinCurrFrac; // -- yMin border outValue += ( gradOutputIntPlane[b][l ] - gradOutputIntPlane[b][lAdv] - gradOutputIntPlane[t][l ] + gradOutputIntPlane[t][lAdv] ) * yMinCurrFrac; // -- corner pixels outValue += xMaxCurrFrac*yMaxCurrFrac * ( (x+xMaxCurr >= h or y+yMaxCurr >= w or x+xMaxCurr < 0 or y+yMaxCurr < 0 or b == bAdv or r == rAdv) ? static_cast<scalar_t>(0) : ( gradOutputIntPlane[b+1][r+1] - gradOutputIntPlane[b ][r+1] - gradOutputIntPlane[b+1][r ] + gradOutputIntPlane[b ][r ])); outValue += xMinCurrFrac*yMaxCurrFrac * ( (x+xMinCurr > h or y+yMaxCurr >= w or x+xMinCurr <= 0 or y+yMaxCurr < 0 or t == tAdv or r == rAdv) ? 
static_cast<scalar_t>(0) : ( gradOutputIntPlane[tAdv+1][r+1] - gradOutputIntPlane[tAdv+1][r ] - gradOutputIntPlane[tAdv ][r+1] + gradOutputIntPlane[tAdv ][r ])); outValue += xMaxCurrFrac*yMinCurrFrac * ( (x+xMaxCurr >= h or y+yMinCurr > w or x+xMaxCurr < 0 or y+yMinCurr <= 0 or b == bAdv or l == lAdv) ? static_cast<scalar_t>(0) : ( gradOutputIntPlane[b+1][lAdv+1] - gradOutputIntPlane[b ][lAdv+1] - gradOutputIntPlane[b+1][lAdv ] + gradOutputIntPlane[b ][lAdv ])); outValue += xMinCurrFrac*yMinCurrFrac * ( (x+xMinCurr > h or y+yMinCurr > w or x+xMinCurr <= 0 or y+yMinCurr <= 0 or t == tAdv or l == lAdv) ? static_cast<scalar_t>(0) : ( gradOutputIntPlane[tAdv+1][lAdv+1] - gradOutputIntPlane[tAdv+1][lAdv ] - gradOutputIntPlane[tAdv ][lAdv+1] + gradOutputIntPlane[tAdv ][lAdv ])); } *tmpArray = outValue * (normalize ? area[paramIdx] : static_cast<scalar_t>(1)); } } template <bool normalize, bool exact> void boxConvUpdateGradInput( at::Tensor & xMinInt , at::Tensor & xMaxInt , at::Tensor & yMinInt , at::Tensor & yMaxInt , at::Tensor & xMinFrac, at::Tensor & xMaxFrac, at::Tensor & yMinFrac, at::Tensor & yMaxFrac, at::Tensor & area, at::Tensor & grad_output_integrated, at::Tensor & tmpArray) { // TODO use square blocks as in `boxConvUpdateOutput`? const int threadsNeeded = tmpArray.numel(); int numBlocks = (threadsNeeded + NUM_THREADS - 1) / NUM_THREADS; AT_DISPATCH_FLOATING_TYPES_AND_HALF(tmpArray.scalar_type(), "gpu::boxConvUpdateGradInput", ([&] { auto gradOutputIntFlattened = grad_output_integrated.view( {-1, grad_output_integrated.size(-2), grad_output_integrated.size(-1)}); auto gradOutputIntAcsr = gradOutputIntFlattened.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>(); boxConvUpdateGradInputKernel <normalize, exact> <<<numBlocks, NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>> ( gradOutputIntAcsr, tmpArray.data_ptr<scalar_t>(), xMinInt.data_ptr<int32_t>(), xMaxInt.data_ptr<int32_t>(), yMinInt.data_ptr<int32_t>(), yMaxInt.data_ptr<int32_t>(), xMinFrac.data_ptr<scalar_t>(), xMaxFrac.data_ptr<scalar_t>(), yMinFrac.data_ptr<scalar_t>(), yMaxFrac.data_ptr<scalar_t>(), normalize ? 
area.data_ptr<scalar_t>() : nullptr, xMinInt.numel()); THCudaCheck(cudaGetLastError()); })); } // explicitly instantiate template void boxConvUpdateGradInput<true, true>( at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &); template void boxConvUpdateGradInput<false, true>( at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &); template void boxConvUpdateGradInput<true, false>( at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &); template void boxConvUpdateGradInput<false, false>( at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &); // TODO overload for exact/truncated mode // TODO accept only three pairs of parameter arrays, not four (one is always redundant) template <Parameter parameter, bool exact, typename scalar_t> __global__ void boxConvAccGradParametersKernel( CudaAcsr<scalar_t,3> inputInt, scalar_t * __restrict__ tmpArray, const int32_t * __restrict__ xMinInt , const int32_t * __restrict__ xMaxInt , const int32_t * __restrict__ yMinInt , const int32_t * __restrict__ yMaxInt , const scalar_t * __restrict__ xMinFrac, const scalar_t * __restrict__ xMaxFrac, const scalar_t * __restrict__ yMinFrac, const scalar_t * __restrict__ yMaxFrac, const int nParams) { int32_t id = NUM_THREADS * blockIdx.x + threadIdx.x; tmpArray += id; const int32_t h = inputInt.size(1) - 1; const int32_t w = inputInt.size(2) - 1; const int32_t y = id % w + 1; id /= w; const int32_t x = id % h + 1; id /= h; const int32_t paramIdx = id % nParams; id /= nParams; // `id` is now the current absolute input plane number auto inputIntPlane = inputInt[id]; if (id < inputInt.size(0)) { const int32_t xMinCurr = xMinInt[paramIdx]; const int32_t xMaxCurr = xMaxInt[paramIdx]; const int32_t yMinCurr = yMinInt[paramIdx]; const int32_t yMaxCurr = yMaxInt[paramIdx]; // TODO only define these if `exact == true` const scalar_t xMinCurrFrac = xMinFrac[paramIdx]; const scalar_t xMaxCurrFrac = xMaxFrac[paramIdx]; const scalar_t yMinCurrFrac = yMinFrac[paramIdx]; const scalar_t yMaxCurrFrac = yMaxFrac[paramIdx]; int valid; int cornerX, cornerY; scalar_t delta = 0; if (parameter == Parameter::xMin) { if (exact) { // TODO maybe use `input` instead of `inputInt` valid = not (y+yMinCurr < 1) & not (y+yMinCurr > w) & not (x+xMinCurr < 1); cornerX = max(0,min(h-1,x+xMinCurr-1)); cornerY = max(0,min(w-1,y+yMinCurr-1)); const scalar_t tlCorner = valid * ( inputIntPlane[cornerX+1][cornerY+1] - inputIntPlane[cornerX ][cornerY+1] - inputIntPlane[cornerX+1][cornerY ] + inputIntPlane[cornerX ][cornerY ]); valid = not (y+yMaxCurr < 0) & not (y+yMaxCurr >= w) & not (x+xMinCurr < 1); cornerX = max(0,min(h-1,x+xMinCurr -1)); cornerY = max(0,min(w-1,y+yMaxCurr )); const scalar_t trCorner = valid * ( inputIntPlane[cornerX+1][cornerY+1] - inputIntPlane[cornerX ][cornerY+1] - inputIntPlane[cornerX+1][cornerY ] + inputIntPlane[cornerX ][cornerY ]); delta += trCorner * yMaxCurrFrac; delta += tlCorner * yMinCurrFrac; } // if (exact) delta += inputIntPlane [max(0,min(x+xMinCurr , h))][max(0,min(y+yMaxCurr , w))]; delta -= inputIntPlane [max(0,min(x+xMinCurr -1, h))][max(0,min(y+yMaxCurr , w))]; delta -= inputIntPlane [max(0,min(x+xMinCurr , 
h))][max(0,min(y+yMinCurr , w))]; delta += inputIntPlane [max(0,min(x+xMinCurr -1, h))][max(0,min(y+yMinCurr , w))]; delta *= (x+xMinCurr >= 1) & (x+xMinCurr <= h); *tmpArray = -delta; } else if (parameter == Parameter::xMax) { if (exact) { valid = not (y+yMinCurr < 1) & not (y+yMinCurr > w) & not (x+xMaxCurr >= h); cornerX = max(0,min(h-1,x+xMaxCurr )); cornerY = max(0,min(w-1,y+yMinCurr -1)); const scalar_t blCorner = valid * ( inputIntPlane[cornerX+1][cornerY+1] - inputIntPlane[cornerX ][cornerY+1] - inputIntPlane[cornerX+1][cornerY ] + inputIntPlane[cornerX ][cornerY ]); valid = not (y+yMaxCurr < 0) & not (y+yMaxCurr >= w) & not (x+xMaxCurr >= h); cornerX = max(0,min(h-1,x+xMaxCurr )); cornerY = max(0,min(w-1,y+yMaxCurr )); const scalar_t brCorner = valid * ( inputIntPlane[cornerX+1][cornerY+1] - inputIntPlane[cornerX ][cornerY+1] - inputIntPlane[cornerX+1][cornerY ] + inputIntPlane[cornerX ][cornerY ]); delta += brCorner * yMaxCurrFrac; delta += blCorner * yMinCurrFrac; } // if (exact) delta += inputIntPlane [max(0,min(x+xMaxCurr +1, h))][max(0,min(y+yMaxCurr , w))]; delta -= inputIntPlane [max(0,min(x+xMaxCurr , h))][max(0,min(y+yMaxCurr , w))]; delta -= inputIntPlane [max(0,min(x+xMaxCurr +1, h))][max(0,min(y+yMinCurr , w))]; delta += inputIntPlane [max(0,min(x+xMaxCurr , h))][max(0,min(y+yMinCurr , w))]; delta *= (x+xMaxCurr >= 0) & (x+xMaxCurr < h); *tmpArray = delta; } else if (parameter == Parameter::yMin) { if (exact) { valid = not (y+yMinCurr < 1) & not (x+xMinCurr < 1) & not (x+xMinCurr > h); cornerX = max(0,min(h-1,x+xMinCurr -1)); cornerY = max(0,min(w-1,y+yMinCurr -1)); const scalar_t tlCorner = valid * ( inputIntPlane[cornerX+1][cornerY+1] - inputIntPlane[cornerX ][cornerY+1] - inputIntPlane[cornerX+1][cornerY ] + inputIntPlane[cornerX ][cornerY ]); valid = not (y+yMinCurr < 1) & not (x+xMaxCurr < 0) & not (x+xMaxCurr >= h); cornerX = max(0,min(h-1,x+xMaxCurr )); cornerY = max(0,min(w-1,y+yMinCurr -1)); const scalar_t blCorner = valid * ( inputIntPlane[cornerX+1][cornerY+1] - inputIntPlane[cornerX ][cornerY+1] - inputIntPlane[cornerX+1][cornerY ] + inputIntPlane[cornerX ][cornerY ]); delta += tlCorner * xMinCurrFrac; delta += blCorner * xMaxCurrFrac; } // if (exact) delta += inputIntPlane [max(0,min(x+xMaxCurr , h))][max(0,min(y+yMinCurr , w))]; delta -= inputIntPlane [max(0,min(x+xMaxCurr , h))][max(0,min(y+yMinCurr -1, w))]; delta -= inputIntPlane [max(0,min(x+xMinCurr , h))][max(0,min(y+yMinCurr , w))]; delta += inputIntPlane [max(0,min(x+xMinCurr , h))][max(0,min(y+yMinCurr -1, w))]; delta *= (y+yMinCurr >= 1) & (y+yMinCurr <= w); *tmpArray = -delta; } else if (parameter == Parameter::yMax) { if (exact) { valid = not (y+yMaxCurr >= w) & not (x+xMinCurr < 1) & not (x+xMinCurr > h); cornerX = max(0,min(h-1,x+xMinCurr -1)); cornerY = max(0,min(w-1,y+yMaxCurr )); const scalar_t trCorner = valid * ( inputIntPlane[cornerX+1][cornerY+1] - inputIntPlane[cornerX ][cornerY+1] - inputIntPlane[cornerX+1][cornerY ] + inputIntPlane[cornerX ][cornerY ]); valid = not (y+yMaxCurr >= w) & not (x+xMaxCurr < 0) & not (x+xMaxCurr >= h); cornerX = max(0,min(h-1,x+xMaxCurr )); cornerY = max(0,min(w-1,y+yMaxCurr )); const scalar_t brCorner = valid * ( inputIntPlane[cornerX+1][cornerY+1] - inputIntPlane[cornerX ][cornerY+1] - inputIntPlane[cornerX+1][cornerY ] + inputIntPlane[cornerX ][cornerY ]); delta += trCorner * xMinCurrFrac; delta += brCorner * xMaxCurrFrac; } // if (exact) delta += inputIntPlane [max(0,min(x+xMaxCurr , h))][max(0,min(y+yMaxCurr +1, w))]; delta -= inputIntPlane 
[max(0,min(x+xMaxCurr , h))][max(0,min(y+yMaxCurr , w))]; delta -= inputIntPlane [max(0,min(x+xMinCurr , h))][max(0,min(y+yMaxCurr +1, w))]; delta += inputIntPlane [max(0,min(x+xMinCurr , h))][max(0,min(y+yMaxCurr , w))]; delta *= (y+yMaxCurr >= 0) & (y+yMaxCurr < w); *tmpArray = delta; } } } template <bool exact> void boxConvAccGradParameters( // tmpArray size: {batchSize, nInputPlanes, numFilters, h, w} at::Tensor & xMinInt , at::Tensor & xMaxInt , at::Tensor & yMinInt , at::Tensor & yMaxInt , at::Tensor & xMinFrac, at::Tensor & xMaxFrac, at::Tensor & yMinFrac, at::Tensor & yMaxFrac, at::Tensor & input_integrated, at::Tensor & tmpArray, Parameter parameter) { // TODO switch to square blocks? const int threadsNeeded = tmpArray.numel(); int numBlocks = (threadsNeeded + NUM_THREADS - 1) / NUM_THREADS; AT_DISPATCH_FLOATING_TYPES_AND_HALF(tmpArray.scalar_type(), "gpu::boxConvAccGradParameters", ([&] { auto inputIntFlattened = input_integrated.view( {-1, input_integrated.size(-2), input_integrated.size(-1)}); auto inputIntAcsr = inputIntFlattened.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>(); switch (parameter) { case Parameter::xMin: boxConvAccGradParametersKernel <Parameter::xMin, exact> <<<numBlocks, NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>> ( inputIntAcsr, tmpArray.data_ptr<scalar_t>(), xMinInt.data_ptr<int32_t>(), xMaxInt.data_ptr<int32_t>(), yMinInt.data_ptr<int32_t>(), yMaxInt.data_ptr<int32_t>(), xMinFrac.data_ptr<scalar_t>(), xMaxFrac.data_ptr<scalar_t>(), yMinFrac.data_ptr<scalar_t>(), yMaxFrac.data_ptr<scalar_t>(), xMinInt.numel()); break; case Parameter::xMax: boxConvAccGradParametersKernel <Parameter::xMax, exact> <<<numBlocks, NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>> ( inputIntAcsr, tmpArray.data_ptr<scalar_t>(), xMinInt.data_ptr<int32_t>(), xMaxInt.data_ptr<int32_t>(), yMinInt.data_ptr<int32_t>(), yMaxInt.data_ptr<int32_t>(), xMinFrac.data_ptr<scalar_t>(), xMaxFrac.data_ptr<scalar_t>(), yMinFrac.data_ptr<scalar_t>(), yMaxFrac.data_ptr<scalar_t>(), xMinInt.numel()); break; case Parameter::yMin: boxConvAccGradParametersKernel <Parameter::yMin, exact> <<<numBlocks, NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>> ( inputIntAcsr, tmpArray.data_ptr<scalar_t>(), xMinInt.data_ptr<int32_t>(), xMaxInt.data_ptr<int32_t>(), yMinInt.data_ptr<int32_t>(), yMaxInt.data_ptr<int32_t>(), xMinFrac.data_ptr<scalar_t>(), xMaxFrac.data_ptr<scalar_t>(), yMinFrac.data_ptr<scalar_t>(), yMaxFrac.data_ptr<scalar_t>(), xMinInt.numel()); break; case Parameter::yMax: boxConvAccGradParametersKernel <Parameter::yMax, exact> <<<numBlocks, NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>> ( inputIntAcsr, tmpArray.data_ptr<scalar_t>(), xMinInt.data_ptr<int32_t>(), xMaxInt.data_ptr<int32_t>(), yMinInt.data_ptr<int32_t>(), yMaxInt.data_ptr<int32_t>(), xMinFrac.data_ptr<scalar_t>(), xMaxFrac.data_ptr<scalar_t>(), yMinFrac.data_ptr<scalar_t>(), yMaxFrac.data_ptr<scalar_t>(), xMinInt.numel()); break; } THCudaCheck(cudaGetLastError()); })); } // explicitly instantiate template void boxConvAccGradParameters<true>( at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, Parameter); template void boxConvAccGradParameters<false>( at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &, Parameter); }
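// ----------------------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): every kernel above evaluates box
// sums through the four-corner identity on an integral image,
//     sum over rows [t,b) and cols [l,r)  =  I[b][r] - I[t][r] - I[b][l] + I[t][l],
// where I carries one extra leading row/column of zeros (hence h = size(1)-1 and
// w = size(2)-1 in the kernels). The plain C++ reference below spells out that
// convention; all names are invented for this sketch.
#include <vector>

static std::vector<float> makeIntegralImage(const std::vector<float>& img, int h, int w)
{
    std::vector<float> I((h + 1) * (w + 1), 0.f);            // row 0 and column 0 stay zero
    for (int x = 0; x < h; ++x)
        for (int y = 0; y < w; ++y)
            I[(x + 1) * (w + 1) + (y + 1)] = img[x * w + y]
                                           + I[ x      * (w + 1) + (y + 1)]   // above
                                           + I[(x + 1) * (w + 1) +  y     ]   // left
                                           - I[ x      * (w + 1) +  y     ];  // above-left, counted twice
    return I;
}

// Sum of img over rows [t,b) and columns [l,r), with 0 <= t <= b <= h and 0 <= l <= r <= w.
static float boxSum(const std::vector<float>& I, int w, int t, int b, int l, int r)
{
    const int W = w + 1;
    return I[b * W + r] - I[t * W + r] - I[b * W + l] + I[t * W + l];
}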
#if ( MODEL == HYDRO ) /******************************************************** 1. Template of a user-defined EoS (EOS_USER) 2. This file is shared by both CPU and GPU GPU_EoS_User_Template.cu -> CPU_EoS_User_Template.cpp 3. Three steps are required to implement an EoS I. Set EoS auxiliary arrays II. Implement EoS conversion functions III. Set EoS initialization functions 4. All EoS conversion functions must be thread-safe and not use any global variable 5. When an EoS conversion function fails, it is recommended to return NAN in order to trigger auto-correction such as "OPT__1ST_FLUX_CORR" and "AUTO_REDUCE_DT" ********************************************************/ // ============================================= // I. Set EoS auxiliary arrays // ============================================= //------------------------------------------------------------------------------------------------------- // Function : EoS_SetAuxArray_User_Template // Description : Set the auxiliary arrays AuxArray_Flt/Int[] // // Note : 1. Invoked by EoS_Init_User_Template() // 2. AuxArray_Flt/Int[] have the size of EOS_NAUX_MAX defined in Macro.h (default = 20) // 3. Add "#ifndef __CUDACC__" since this routine is only useful on CPU // 4. Physical constants such as Const_amu/Const_kB should be set to unity when disabling OPT__UNIT // // Parameter : AuxArray_Flt/Int : Floating-point/Integer arrays to be filled up // // Return : AuxArray_Flt/Int[] //------------------------------------------------------------------------------------------------------- #ifndef __CUDACC__ void EoS_SetAuxArray_User_Template( double AuxArray_Flt[], int AuxArray_Int[] ) { /* AuxArray_Flt[0] = ...; AuxArray_Flt[1] = ...; AuxArray_Int[0] = ...; AuxArray_Int[1] = ...; */ } // FUNCTION : EoS_SetAuxArray_User_Template #endif // #ifndef __CUDACC__ // ============================================= // II. Implement EoS conversion functions // (1) EoS_DensEint2Pres_* // (2) EoS_DensPres2Eint_* // (3) EoS_DensPres2CSqr_* // (4) EoS_DensEint2Temp_* [OPTIONAL] // (5) EoS_DensTemp2Pres_* [OPTIONAL] // (6) EoS_General_* [OPTIONAL] // ============================================= //------------------------------------------------------------------------------------------------------- // Function : EoS_DensEint2Pres_User_Template // Description : Convert gas mass density and internal energy density to gas pressure // // Note : 1. Internal energy density here is per unit volume instead of per unit mass // 2. 
See EoS_SetAuxArray_User_Template() for the values stored in AuxArray_Flt/Int[] // // Parameter : Dens : Gas mass density // Eint : Gas internal energy density // Passive : Passive scalars // AuxArray_* : Auxiliary arrays (see the Note above) // Table : EoS tables // // Return : Gas pressure //------------------------------------------------------------------------------------------------------- GPU_DEVICE_NOINLINE static real EoS_DensEint2Pres_User_Template( const real Dens, const real Eint, const real Passive[], const double AuxArray_Flt[], const int AuxArray_Int[], const real *const Table[EOS_NTABLE_MAX] ) { // check # ifdef GAMER_DEBUG # if ( NCOMP_PASSIVE > 0 ) if ( Passive == NULL ) printf( "ERROR : Passive == NULL in %s !!\n", __FUNCTION__ ); # endif if ( AuxArray_Flt == NULL ) printf( "ERROR : AuxArray_Flt == NULL in %s !!\n", __FUNCTION__ ); if ( AuxArray_Int == NULL ) printf( "ERROR : AuxArray_Int == NULL in %s !!\n", __FUNCTION__ ); if ( Hydro_CheckNegative(Dens) ) printf( "ERROR : invalid input density (%14.7e) at file <%s>, line <%d>, function <%s>\n", Dens, __FILE__, __LINE__, __FUNCTION__ ); // note that some EoS may support Eint<0 if ( Hydro_CheckNegative(Eint) ) printf( "ERROR : invalid input internal energy density (%14.7e) at file <%s>, line <%d>, function <%s>\n", Eint, __FILE__, __LINE__, __FUNCTION__ ); # endif // GAMER_DEBUG real Pres = -1.0; /* Pres = ...; */ // check # ifdef GAMER_DEBUG if ( Hydro_CheckNegative(Pres) ) { printf( "ERROR : invalid output pressure (%13.7e) in %s() !!\n", Pres, __FUNCTION__ ); printf( " Dens=%13.7e, Eint=%13.7e\n", Dens, Eint ); # if ( NCOMP_PASSIVE > 0 ) printf( " Passive scalars:" ); for (int v=0; v<NCOMP_PASSIVE; v++) printf( " %d=%13.7e", v, Passive[v] ); printf( "\n" ); # endif } # endif // GAMER_DEBUG return Pres; } // FUNCTION : EoS_DensEint2Pres_User_Template //------------------------------------------------------------------------------------------------------- // Function : EoS_DensPres2Eint_User_Template // Description : Convert gas mass density and pressure to gas internal energy density // // Note : 1. 
See EoS_DensEint2Pres_User_Template() // // Parameter : Dens : Gas mass density // Pres : Gas pressure // Passive : Passive scalars // AuxArray_* : Auxiliary arrays (see the Note above) // Table : EoS tables // // Return : Gas internal energy density //------------------------------------------------------------------------------------------------------- GPU_DEVICE_NOINLINE static real EoS_DensPres2Eint_User_Template( const real Dens, const real Pres, const real Passive[], const double AuxArray_Flt[], const int AuxArray_Int[], const real *const Table[EOS_NTABLE_MAX] ) { // check # ifdef GAMER_DEBUG # if ( NCOMP_PASSIVE > 0 ) if ( Passive == NULL ) printf( "ERROR : Passive == NULL in %s !!\n", __FUNCTION__ ); # endif if ( AuxArray_Flt == NULL ) printf( "ERROR : AuxArray_Flt == NULL in %s !!\n", __FUNCTION__ ); if ( AuxArray_Int == NULL ) printf( "ERROR : AuxArray_Int == NULL in %s !!\n", __FUNCTION__ ); if ( Hydro_CheckNegative(Dens) ) printf( "ERROR : invalid input density (%14.7e) at file <%s>, line <%d>, function <%s>\n", Dens, __FILE__, __LINE__, __FUNCTION__ ); if ( Hydro_CheckNegative(Pres) ) printf( "ERROR : invalid input pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n", Pres, __FILE__, __LINE__, __FUNCTION__ ); # endif // GAMER_DEBUG real Eint = -1.0; /* Eint = ...; */ // check # ifdef GAMER_DEBUG // note that some EoS may support Eint<0 if ( Hydro_CheckNegative(Eint) ) { printf( "ERROR : invalid output internal energy density (%13.7e) in %s() !!\n", Eint, __FUNCTION__ ); printf( " Dens=%13.7e, Pres=%13.7e\n", Dens, Pres ); # if ( NCOMP_PASSIVE > 0 ) printf( " Passive scalars:" ); for (int v=0; v<NCOMP_PASSIVE; v++) printf( " %d=%13.7e", v, Passive[v] ); printf( "\n" ); # endif } # endif // GAMER_DEBUG return Eint; } // FUNCTION : EoS_DensPres2Eint_User_Template //------------------------------------------------------------------------------------------------------- // Function : EoS_DensPres2CSqr_User_Template // Description : Convert gas mass density and pressure to sound speed squared // // Note : 1. 
See EoS_DensEint2Pres_User_Template() // // Parameter : Dens : Gas mass density // Pres : Gas pressure // Passive : Passive scalars // AuxArray_* : Auxiliary arrays (see the Note above) // Table : EoS tables // // Return : Sound speed squared //------------------------------------------------------------------------------------------------------- GPU_DEVICE_NOINLINE static real EoS_DensPres2CSqr_User_Template( const real Dens, const real Pres, const real Passive[], const double AuxArray_Flt[], const int AuxArray_Int[], const real *const Table[EOS_NTABLE_MAX] ) { // check # ifdef GAMER_DEBUG # if ( NCOMP_PASSIVE > 0 ) if ( Passive == NULL ) printf( "ERROR : Passive == NULL in %s !!\n", __FUNCTION__ ); # endif if ( AuxArray_Flt == NULL ) printf( "ERROR : AuxArray_Flt == NULL in %s !!\n", __FUNCTION__ ); if ( AuxArray_Int == NULL ) printf( "ERROR : AuxArray_Int == NULL in %s !!\n", __FUNCTION__ ); if ( Hydro_CheckNegative(Dens) ) printf( "ERROR : invalid input density (%14.7e) at file <%s>, line <%d>, function <%s>\n", Dens, __FILE__, __LINE__, __FUNCTION__ ); if ( Hydro_CheckNegative(Pres) ) printf( "ERROR : invalid input pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n", Pres, __FILE__, __LINE__, __FUNCTION__ ); # endif // GAMER_DEBUG real Cs2 = -1.0; /* Cs2 = ...; */ // check # ifdef GAMER_DEBUG if ( Hydro_CheckNegative(Cs2) ) { printf( "ERROR : invalid output sound speed squared (%13.7e) in %s() !!\n", Cs2, __FUNCTION__ ); printf( " Dens=%13.7e, Pres=%13.7e\n", Dens, Pres ); # if ( NCOMP_PASSIVE > 0 ) printf( " Passive scalars:" ); for (int v=0; v<NCOMP_PASSIVE; v++) printf( " %d=%13.7e", v, Passive[v] ); printf( "\n" ); # endif } # endif // GAMER_DEBUG return Cs2; } // FUNCTION : EoS_DensPres2CSqr_User_Template //------------------------------------------------------------------------------------------------------- // Function : EoS_DensEint2Temp_User_Template // Description : Convert gas mass density and internal energy density to gas temperature // // Note : 1. Internal energy density here is per unit volume instead of per unit mass // 2. See EoS_SetAuxArray_User_Template() for the values stored in AuxArray_Flt/Int[] // 3. 
Temperature is in kelvin // // Parameter : Dens : Gas mass density // Eint : Gas internal energy density // Passive : Passive scalars (must not used here) // AuxArray_* : Auxiliary arrays (see the Note above) // Table : EoS tables // // Return : Gas temperature in kelvin //------------------------------------------------------------------------------------------------------- GPU_DEVICE_NOINLINE static real EoS_DensEint2Temp_User_Template( const real Dens, const real Eint, const real Passive[], const double AuxArray_Flt[], const int AuxArray_Int[], const real *const Table[EOS_NTABLE_MAX] ) { // check # ifdef GAMER_DEBUG if ( AuxArray_Flt == NULL ) printf( "ERROR : AuxArray_Flt == NULL in %s !!\n", __FUNCTION__ ); if ( Hydro_CheckNegative(Dens) ) printf( "ERROR : invalid input density (%14.7e) at file <%s>, line <%d>, function <%s>\n", Dens, __FILE__, __LINE__, __FUNCTION__ ); if ( Hydro_CheckNegative(Eint) ) printf( "ERROR : invalid input internal energy density (%14.7e) at file <%s>, line <%d>, function <%s>\n", Eint, __FILE__, __LINE__, __FUNCTION__ ); # endif // GAMER_DEBUG real Temp = -1.0; /* Temp = ...; */ // check # ifdef GAMER_DEBUG if ( Hydro_CheckNegative(Temp) ) { printf( "ERROR : invalid output temperature (%13.7e) in %s() !!\n", Temp, __FUNCTION__ ); printf( " Dens=%13.7e, Eint=%13.7e\n", Dens, Eint ); # if ( NCOMP_PASSIVE > 0 ) printf( " Passive scalars:" ); for (int v=0; v<NCOMP_PASSIVE; v++) printf( " %d=%13.7e", v, Passive[v] ); printf( "\n" ); # endif } # endif // GAMER_DEBUG return Temp; } // FUNCTION : EoS_DensEint2Temp_User_Template //------------------------------------------------------------------------------------------------------- // Function : EoS_DensTemp2Pres_User_Template // Description : Convert gas mass density and temperature to gas pressure // // Note : 1. See EoS_SetAuxArray_User_Template() for the values stored in AuxArray_Flt/Int[] // 2. 
Temperature is in kelvin // // Parameter : Dens : Gas mass density // Temp : Gas temperature in kelvin // Passive : Passive scalars (must not used here) // AuxArray_* : Auxiliary arrays (see the Note above) // Table : EoS tables // // Return : Gas pressure //------------------------------------------------------------------------------------------------------- GPU_DEVICE_NOINLINE static real EoS_DensTemp2Pres_User_Template( const real Dens, const real Temp, const real Passive[], const double AuxArray_Flt[], const int AuxArray_Int[], const real *const Table[EOS_NTABLE_MAX] ) { // check # ifdef GAMER_DEBUG if ( AuxArray_Flt == NULL ) printf( "ERROR : AuxArray_Flt == NULL in %s !!\n", __FUNCTION__ ); if ( Hydro_CheckNegative(Dens) ) printf( "ERROR : invalid input density (%14.7e) at file <%s>, line <%d>, function <%s>\n", Dens, __FILE__, __LINE__, __FUNCTION__ ); if ( Hydro_CheckNegative(Temp) ) printf( "ERROR : invalid input temperature (%14.7e) at file <%s>, line <%d>, function <%s>\n", Temp, __FILE__, __LINE__, __FUNCTION__ ); # endif // GAMER_DEBUG real Pres = -1.0; /* Pres = ...; */ // check # ifdef GAMER_DEBUG if ( Hydro_CheckNegative(Pres) ) { printf( "ERROR : invalid output pressure (%13.7e) in %s() !!\n", Pres, __FUNCTION__ ); printf( " Dens=%13.7e, Temp=%13.7e\n", Dens, Temp ); # if ( NCOMP_PASSIVE > 0 ) printf( " Passive scalars:" ); for (int v=0; v<NCOMP_PASSIVE; v++) printf( " %d=%13.7e", v, Passive[v] ); printf( "\n" ); # endif } # endif // GAMER_DEBUG return Pres; } // FUNCTION : EoS_DensTemp2Pres_User_Template //------------------------------------------------------------------------------------------------------- // Function : EoS_General_User_Template // Description : General EoS converter: In_*[] -> Out[] // // Note : 1. See EoS_DensEint2Pres_User_Template() // 2. In_*[] and Out[] must NOT overlap // // Parameter : Mode : To support multiple modes in this general converter // Out : Output array // In_* : Input array // AuxArray_* : Auxiliary arrays (see the Note above) // Table : EoS tables // // Return : Out[] //------------------------------------------------------------------------------------------------------- GPU_DEVICE_NOINLINE static void EoS_General_User_Template( const int Mode, real Out[], const real In_Flt[], const int In_Int[], const double AuxArray_Flt[], const int AuxArray_Int[], const real *const Table[EOS_NTABLE_MAX] ) { // check # ifdef GAMER_DEBUG if ( Out == NULL ) printf( "ERROR : Out == NULL in %s !!\n", __FUNCTION__ ); if ( In_Flt == NULL ) printf( "ERROR : In_Flt == NULL in %s !!\n", __FUNCTION__ ); if ( AuxArray_Flt == NULL ) printf( "ERROR : AuxArray_Flt == NULL in %s !!\n", __FUNCTION__ ); if ( AuxArray_Int == NULL ) printf( "ERROR : AuxArray_Int == NULL in %s !!\n", __FUNCTION__ ); # endif // GAMER_DEBUG /* if ( Mode == ... ) Out[...] = ...; */ } // FUNCTION : EoS_General_User_Template // ============================================= // III. 
Set EoS initialization functions // ============================================= #ifdef __CUDACC__ # define FUNC_SPACE __device__ static #else # define FUNC_SPACE static #endif FUNC_SPACE EoS_DE2P_t EoS_DensEint2Pres_Ptr = EoS_DensEint2Pres_User_Template; FUNC_SPACE EoS_DP2E_t EoS_DensPres2Eint_Ptr = EoS_DensPres2Eint_User_Template; FUNC_SPACE EoS_DP2C_t EoS_DensPres2CSqr_Ptr = EoS_DensPres2CSqr_User_Template; FUNC_SPACE EoS_DE2T_t EoS_DensEint2Temp_Ptr = EoS_DensEint2Temp_User_Template; FUNC_SPACE EoS_DT2P_t EoS_DensTemp2Pres_Ptr = EoS_DensTemp2Pres_User_Template; FUNC_SPACE EoS_GENE_t EoS_General_Ptr = EoS_General_User_Template; //----------------------------------------------------------------------------------------- // Function : EoS_SetCPU/GPUFunc_User_Template // Description : Return the function pointers of the CPU/GPU EoS routines // // Note : 1. Invoked by EoS_Init_User_Template() // 2. Must obtain the CPU and GPU function pointers by **separate** routines // since CPU and GPU functions are compiled completely separately in GAMER // --> In other words, a unified routine like the following won't work // // EoS_SetFunc_User_Template( CPU_FuncPtr, GPU_FuncPtr ); // // 3. Call-by-reference // // Parameter : EoS_DensEint2Pres_CPU/GPUPtr : CPU/GPU function pointers to be set // EoS_DensPres2Eint_CPU/GPUPtr : ... // EoS_DensPres2CSqr_CPU/GPUPtr : ... // EoS_DensEint2Temp_CPU/GPUPtr : ... // EoS_DensTemp2Pres_CPU/GPUPtr : ... // EoS_General_CPU/GPUPtr : ... // // Return : EoS_DensEint2Pres_CPU/GPUPtr, EoS_DensPres2Eint_CPU/GPUPtr, // EoS_DensPres2CSqr_CPU/GPUPtr, EoS_DensEint2Temp_CPU/GPUPtr, // EoS_DensTemp2Pres_CPU/GPUPtr, EoS_General_CPU/GPUPtr //----------------------------------------------------------------------------------------- #ifdef __CUDACC__ __host__ void EoS_SetGPUFunc_User_Template( EoS_DE2P_t &EoS_DensEint2Pres_GPUPtr, EoS_DP2E_t &EoS_DensPres2Eint_GPUPtr, EoS_DP2C_t &EoS_DensPres2CSqr_GPUPtr, EoS_DE2T_t &EoS_DensEint2Temp_GPUPtr, EoS_DT2P_t &EoS_DensTemp2Pres_GPUPtr, EoS_GENE_t &EoS_General_GPUPtr ) { CUDA_CHECK_ERROR( cudaMemcpyFromSymbol( &EoS_DensEint2Pres_GPUPtr, EoS_DensEint2Pres_Ptr, sizeof(EoS_DE2P_t) ) ); CUDA_CHECK_ERROR( cudaMemcpyFromSymbol( &EoS_DensPres2Eint_GPUPtr, EoS_DensPres2Eint_Ptr, sizeof(EoS_DP2E_t) ) ); CUDA_CHECK_ERROR( cudaMemcpyFromSymbol( &EoS_DensPres2CSqr_GPUPtr, EoS_DensPres2CSqr_Ptr, sizeof(EoS_DP2C_t) ) ); CUDA_CHECK_ERROR( cudaMemcpyFromSymbol( &EoS_DensEint2Temp_GPUPtr, EoS_DensEint2Temp_Ptr, sizeof(EoS_DE2T_t) ) ); CUDA_CHECK_ERROR( cudaMemcpyFromSymbol( &EoS_DensTemp2Pres_GPUPtr, EoS_DensTemp2Pres_Ptr, sizeof(EoS_DT2P_t) ) ); CUDA_CHECK_ERROR( cudaMemcpyFromSymbol( &EoS_General_GPUPtr, EoS_General_Ptr, sizeof(EoS_GENE_t) ) ); } #else // #ifdef __CUDACC__ void EoS_SetCPUFunc_User_Template( EoS_DE2P_t &EoS_DensEint2Pres_CPUPtr, EoS_DP2E_t &EoS_DensPres2Eint_CPUPtr, EoS_DP2C_t &EoS_DensPres2CSqr_CPUPtr, EoS_DE2T_t &EoS_DensEint2Temp_CPUPtr, EoS_DT2P_t &EoS_DensTemp2Pres_CPUPtr, EoS_GENE_t &EoS_General_CPUPtr ) { EoS_DensEint2Pres_CPUPtr = EoS_DensEint2Pres_Ptr; EoS_DensPres2Eint_CPUPtr = EoS_DensPres2Eint_Ptr; EoS_DensPres2CSqr_CPUPtr = EoS_DensPres2CSqr_Ptr; EoS_DensEint2Temp_CPUPtr = EoS_DensEint2Temp_Ptr; EoS_DensTemp2Pres_CPUPtr = EoS_DensTemp2Pres_Ptr; EoS_General_CPUPtr = EoS_General_Ptr; } #endif // #ifdef __CUDACC__ ... else ... 
#ifndef __CUDACC__ // local function prototypes void EoS_SetAuxArray_User_Template( double [], int [] ); void EoS_SetCPUFunc_User_Template( EoS_DE2P_t &, EoS_DP2E_t &, EoS_DP2C_t &, EoS_DE2T_t &, EoS_DT2P_t &, EoS_GENE_t & ); #ifdef GPU void EoS_SetGPUFunc_User_Template( EoS_DE2P_t &, EoS_DP2E_t &, EoS_DP2C_t &, EoS_DE2T_t &, EoS_DT2P_t &, EoS_GENE_t & ); #endif //----------------------------------------------------------------------------------------- // Function : EoS_Init_User_Template // Description : Initialize EoS // // Note : 1. Set auxiliary arrays by invoking EoS_SetAuxArray_*() // --> It will be copied to GPU automatically in CUAPI_SetConstMemory() // 2. Set the CPU/GPU EoS routines by invoking EoS_SetCPU/GPUFunc_*() // 3. Invoked by EoS_Init() // --> Enable it by linking to the function pointer "EoS_Init_Ptr" // 4. Add "#ifndef __CUDACC__" since this routine is only useful on CPU // // Parameter : None // // Return : None //----------------------------------------------------------------------------------------- void EoS_Init_User_Template() { EoS_SetAuxArray_User_Template( EoS_AuxArray_Flt, EoS_AuxArray_Int ); EoS_SetCPUFunc_User_Template( EoS_DensEint2Pres_CPUPtr, EoS_DensPres2Eint_CPUPtr, EoS_DensPres2CSqr_CPUPtr, EoS_DensEint2Temp_CPUPtr, EoS_DensTemp2Pres_CPUPtr, EoS_General_CPUPtr ); # ifdef GPU EoS_SetGPUFunc_User_Template( EoS_DensEint2Pres_GPUPtr, EoS_DensPres2Eint_GPUPtr, EoS_DensPres2CSqr_GPUPtr, EoS_DensEint2Temp_GPUPtr, EoS_DensTemp2Pres_GPUPtr, EoS_General_GPUPtr ); # endif } // FUNCTION : EoS_Init_User_Template #endif // #ifndef __CUDACC__ #endif // #if ( MODEL == HYDRO )
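//-------------------------------------------------------------------------------------------------------
// Illustrative sketch (not part of the template) : one possible way to fill in the three mandatory
// conversions above for a constant-gamma ideal gas, assuming AuxArray_Flt[0] stores Gamma-1
// (set in EoS_SetAuxArray_User_Template()). The standalone helpers below use double only to keep
// the sketch self-contained; inside the template the same expressions would be assigned to
// Pres/Eint/Cs2 using the "real" type. Any actual user-defined EoS would replace these formulas.
//-------------------------------------------------------------------------------------------------------
static double IdealGas_DensEint2Pres( const double Dens, const double Eint, const double Gm1 )
{  return Gm1*Eint;  }                       // P = (gamma-1)*Eint   (Dens unused for an ideal gas)

static double IdealGas_DensPres2Eint( const double Dens, const double Pres, const double Gm1 )
{  return Pres/Gm1;  }                       // Eint = P/(gamma-1)

static double IdealGas_DensPres2CSqr( const double Dens, const double Pres, const double Gm1 )
{  return (1.0+Gm1)*Pres/Dens;  }            // Cs^2 = gamma*P/rho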
__constant__ float EPS2; typedef float2 DS; // double single; struct DS4 { DS x, y, z, w; }; struct DS2 { DS x, y; }; // This function computes c = a + b. __device__ DS dsadd(DS a, DS b) { // Compute dsa + dsb using Knuth's trick. float t1 = a.x + b.x; float e = t1 - a.x; float t2 = ((b.x - e) + (a.x - (t1 - e))) + a.y + b.y; // The result is t1 + t2, after normalization. DS c; c.x = e = t1 + t2; c.y = t2 - (e - t1); return c; } // dsadd // This function computes c = a + b. __device__ DS dsadd(DS a, float b) { // Compute dsa + dsb using Knuth's trick. float t1 = a.x + b; float e = t1 - a.x; float t2 = ((b - e) + (a.x - (t1 - e))) + a.y; // The result is t1 + t2, after normalization. DS c; c.x = e = t1 + t2; c.y = t2 - (e - t1); return c; } // dsadd template<bool ngb> __device__ void body_body_interaction(float &ds_min, int &n_ngb, int *ngb_list, float4 &acc_i, float4 &jrk_i, DS4 pos_i, float4 vel_i, DS4 pos_j, float4 vel_j) { float3 dr = {(pos_j.x.x - pos_i.x.x) + (pos_j.x.y - pos_i.x.y), (pos_j.y.x - pos_i.y.x) + (pos_j.y.y - pos_i.y.y), (pos_j.z.x - pos_i.z.x) + (pos_j.z.y - pos_i.z.y)}; // 3x3 = 9 FLOP float ds2 = ((dr.x*dr.x + (dr.y*dr.y)) + dr.z*dr.z); if (ngb) { if (ds2 <= pos_i.w.x) { if (n_ngb < NGB_PB) { if(__float_as_int(pos_i.w.y) != __float_as_int(pos_j.w.y)) //Jeroen, prevent self on neighbour list ngb_list[n_ngb++] = __float_as_int(pos_j.w.y); } } if (ds2 < ds_min*(__float_as_int(pos_i.w.y) != __float_as_int(pos_j.w.y))) { ds_min = ds2; ngb_list[NGB_PB] = __float_as_int(pos_j.w.y); } } float inv_ds = rsqrt(ds2 + EPS2) * (__float_as_int(pos_i.w.y) != __float_as_int(pos_j.w.y)); float mass = pos_j.w.x; float inv_ds2 = inv_ds*inv_ds; // 1 FLOP float inv_ds3 = mass * inv_ds*inv_ds2; // 2 FLOP // 3*4 + 3 = 15 FLOP acc_i.x = ((inv_ds3 * dr.x) + acc_i.x); acc_i.y = ((inv_ds3 * dr.y) + acc_i.y); acc_i.z = ((inv_ds3 * dr.z) + acc_i.z); acc_i.w = (mass * inv_ds + acc_i.w); float3 dv; // 3 FLOP dv.x = vel_j.x - vel_i.x; dv.y = vel_j.y - vel_i.y; dv.z = vel_j.z - vel_i.z; float drdv = -3.0f * (inv_ds3*inv_ds2) * (((dr.x*dv.x) + dr.y*dv.y) + dr.z*dv.z); jrk_i.x = (jrk_i.x + inv_ds3 * dv.x) + drdv * dr.x; jrk_i.y = (jrk_i.y + inv_ds3 * dv.y) + drdv * dr.y; jrk_i.z = (jrk_i.z + inv_ds3 * dv.z) + drdv * dr.z; // TOTAL 50 FLOP (or 60 FLOP if compared against GRAPE6) } /* * blockDim.x = ni * gridDim.x = 16, 32, 64, 128, etc. 
*/ #define ajc(i, j) (i + __mul24(blockDim.x,j)) template<bool ngb> __global__ void dev_evaluate_gravity(int nj_total, int nj, int offset, DS4 *pos_j, float4 *vel_j, DS4 *pos_i, float4 *vel_i, float4 *acc_i, float4 *jrk_i, int *ngb_list) { extern __shared__ DS4 shared_pos[]; float4 *shared_vel = (float4*)&shared_pos[blockDim.x*blockDim.y]; int local_ngb_list[NGB_PB + 1]; int n_ngb = 0; DS4 pos = pos_i[threadIdx.x]; float4 vel = vel_i[threadIdx.x]; #define LARGEnum 1e10f float ds_min = LARGEnum; float4 acc = {0.0f, 0.0f, 0.0f, 0.0f}; float4 jrk = {0.0f, 0.0f, 0.0f, 0.0f}; int i = blockIdx.x * (nj*blockDim.y) + nj*threadIdx.y; int tile = 0; while (i < blockIdx.x * (nj*blockDim.y) + nj*threadIdx.y + nj) { if (i + threadIdx.x < nj_total) { shared_pos[ajc(threadIdx.x, threadIdx.y)] = pos_j[i + threadIdx.x]; shared_vel[ajc(threadIdx.x, threadIdx.y)] = vel_j[i + threadIdx.x]; } else { shared_pos[ajc(threadIdx.x, threadIdx.y)].x = (float2){LARGEnum, 0.0f}; shared_pos[ajc(threadIdx.x, threadIdx.y)].y = (float2){LARGEnum, 0.0f}; shared_pos[ajc(threadIdx.x, threadIdx.y)].z = (float2){LARGEnum, 0.0f}; shared_pos[ajc(threadIdx.x, threadIdx.y)].w = (float2){0.0f, -1.0f}; shared_vel[ajc(threadIdx.x, threadIdx.y)] = (float4){0.0f, 0.0f, 0.0f, 0.0f}; } __syncthreads(); int j = min(nj - tile*blockDim.x, blockDim.x); int j1 = (j/16)*16; // #pragma unroll 16 for (int k = 0; k < j1; k++) { body_body_interaction<ngb>(ds_min, n_ngb, local_ngb_list, acc, jrk, pos, vel, shared_pos[ajc(k, threadIdx.y)], shared_vel[ajc(k, threadIdx.y)]); } for (int k = j1; k < j; k++) { body_body_interaction<ngb>(ds_min, n_ngb, local_ngb_list, acc, jrk, pos, vel, shared_pos[ajc(k, threadIdx.y)], shared_vel[ajc(k, threadIdx.y)]); } __syncthreads(); i += blockDim.x; tile++; } float4 *shared_acc = (float4*)&shared_pos[0]; float4 *shared_jrk = (float4*)&shared_acc[blockDim.x*blockDim.y]; int *shared_ngb = (int* )&shared_jrk[blockDim.x*blockDim.y]; int *shared_ofs = (int* )&shared_ngb[blockDim.x*blockDim.y]; float *shared_ds = (float* )&shared_ofs[blockDim.x*blockDim.y]; acc.w = -acc.w; jrk.w = __int_as_float(local_ngb_list[NGB_PB]); shared_acc[ajc(threadIdx.x, threadIdx.y)] = acc; shared_jrk[ajc(threadIdx.x, threadIdx.y)] = jrk; shared_ngb[ajc(threadIdx.x, threadIdx.y)] = n_ngb; shared_ofs[ajc(threadIdx.x, threadIdx.y)] = 0; shared_ds [ajc(threadIdx.x, threadIdx.y)] = ds_min; __syncthreads(); if (threadIdx.y == 0) { for (int i = 1; i < blockDim.y; i++) { float4 acc1 = shared_acc[ajc(threadIdx.x, i)]; float4 jrk1 = shared_jrk[ajc(threadIdx.x, i)]; float ds1 = shared_ds [ajc(threadIdx.x, i)]; acc.x += acc1.x; acc.y += acc1.y; acc.z += acc1.z; acc.w += acc1.w; jrk.x += jrk1.x; jrk.y += jrk1.y; jrk.z += jrk1.z; if (ds1 < ds_min) { jrk.w = jrk1.w; ds_min = ds1; } shared_ofs[ajc(threadIdx.x, i)] = min(n_ngb + 1, NGB_PB); n_ngb += shared_ngb[ajc(threadIdx.x, i)]; } n_ngb = min(n_ngb, NGB_PB); } __syncthreads(); if (threadIdx.y == 0) { vel_i[offset + blockIdx.x * blockDim.x + threadIdx.x].w = ds_min; acc_i[blockIdx.x * blockDim.x + threadIdx.x] = acc; jrk_i[blockIdx.x * blockDim.x + threadIdx.x] = jrk; } offset = threadIdx.x * NBLOCKS*NGB_PB + blockIdx.x * NGB_PB; offset += shared_ofs[ajc(threadIdx.x, threadIdx.y)]; if (threadIdx.y == 0) ngb_list[offset++] = n_ngb; n_ngb = shared_ngb[ajc(threadIdx.x, threadIdx.y)]; for (int i = 0; i < n_ngb; i++) ngb_list[offset + i] = local_ngb_list[i]; } /* * blockDim.x = #of block in previous kernel * gridDim.x = ni */ __global__ void dev_reduce_forces(float4 *acc_i, float4 *jrk_i, float *ds_i, float4 
*vel_i, int offset_ds, int offset, int *ngb_list) { extern __shared__ float4 shared_acc[]; float4 *shared_jrk = (float4*)&shared_acc[blockDim.x]; int *shared_ngb = (int* )&shared_jrk[blockDim.x]; int *shared_ofs = (int* )&shared_ngb[blockDim.x]; float *shared_ds = (float* )&shared_ofs[blockDim.x]; int index = threadIdx.x * gridDim.x + blockIdx.x; shared_acc[threadIdx.x] = acc_i[index]; shared_jrk[threadIdx.x] = jrk_i[index]; shared_ds [threadIdx.x] = vel_i[offset_ds + index].w; int ngb_index = threadIdx.x * NGB_PB + blockIdx.x * NGB_PB*NBLOCKS; shared_ngb[threadIdx.x] = ngb_list[ngb_index]; shared_ofs[threadIdx.x] = 0; __syncthreads(); int n_ngb = shared_ngb[threadIdx.x]; if (threadIdx.x == 0) { float4 acc0 = shared_acc[0]; float4 jrk0 = shared_jrk[0]; float ds0 = shared_ds [0]; for (int i = 1; i < blockDim.x; i++) { acc0.x += shared_acc[i].x; acc0.y += shared_acc[i].y; acc0.z += shared_acc[i].z; acc0.w += shared_acc[i].w; jrk0.x += shared_jrk[i].x; jrk0.y += shared_jrk[i].y; jrk0.z += shared_jrk[i].z; if (shared_ds[i] < ds0) { ds0 = shared_ds[i]; jrk0.w = shared_jrk[i].w; } shared_ofs[i] = min(n_ngb + 1, NGB_PP); n_ngb += shared_ngb[i]; } n_ngb = min(n_ngb, NGB_PP); jrk0.w = (int)__float_as_int(jrk0.w); acc_i[blockIdx.x] = acc0; jrk_i[blockIdx.x] = jrk0; ds_i [blockIdx.x] = ds0; } __syncthreads(); offset += blockIdx.x * NGB_PP + shared_ofs[threadIdx.x]; int offset_end; if (threadIdx.x == 0) { shared_ofs[0] = offset + NGB_PP; ngb_list[offset++] = n_ngb; } __syncthreads(); offset_end = shared_ofs[0]; n_ngb = shared_ngb[threadIdx.x]; for (int i = 0; i < n_ngb; i++) if (offset + i < offset_end) ngb_list[offset + i] = ngb_list[ngb_index + 1 + i]; } __global__ void dev_copy_particles(int nj, int nj_max, int *address_j, DS2 *t_j, DS4 *Ppos_j, float4 *Pvel_j, DS4 *pos_j, float4 *vel_j, float4 *acc_j, float4 *jrk_j) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < nj) { t_j [address_j[index]] = t_j [nj_max + index]; DS4 pos = pos_j[nj_max + index]; float4 vel = vel_j[nj_max + index]; Ppos_j[address_j[index]] = pos; pos_j[address_j[index]] = pos; Pvel_j[address_j[index]] = vel; vel_j[address_j[index]] = vel; acc_j[address_j[index]] = acc_j[nj_max + index]; jrk_j[address_j[index]] = jrk_j[nj_max + index]; } __syncthreads(); }; __global__ void dev_predictor(int nj, DS t_i, DS2 *t_j, DS4 *Ppos_j, float4 *Pvel_j, DS4 *pos_j, float4 *vel_j, float4 *acc_j, float4 *jrk_j) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < nj) { DS2 t = t_j [index]; DS4 pos = pos_j[index]; float4 vel = vel_j[index]; float4 acc = acc_j[index]; float4 jrk = jrk_j[index]; float dt = (t_i.x - t.x.x) + (t_i.y - t.x.y); float dt2 = dt*dt/2.0f; float dt3 = dt2*dt/3.0f; pos.x = dsadd(pos.x, vel.x * dt + acc.x * dt2 + jrk.x * dt3); pos.y = dsadd(pos.y, vel.y * dt + acc.y * dt2 + jrk.y * dt3); pos.z = dsadd(pos.z, vel.z * dt + acc.z * dt2 + jrk.z * dt3); vel.x += acc.x * dt + jrk.x * dt2; vel.y += acc.y * dt + jrk.y * dt2; vel.z += acc.z * dt + jrk.z * dt2; Ppos_j[index] = pos; Pvel_j[index] = vel; } __syncthreads(); } #endif
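// ----------------------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): DS above is a "double-single"
// number, i.e. a double-precision value stored as an unevaluated sum of two floats,
// ds.x (high part) + ds.y (low part). The host-side helpers below (names invented for
// this sketch) show how such a pair is typically produced and recombined; dsadd() then
// keeps the pair normalized after every update.
static float2 to_DS(double d)
{
  float2 ds;
  ds.x = (float)d;             // high part: d rounded to the nearest float
  ds.y = (float)(d - ds.x);    // low part: the rounding error of the high part
  return ds;
}

static double from_DS(float2 ds)
{
  return (double)ds.x + (double)ds.y;   // recover (nearly) full double precision
}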
#pragma once #include <cuda_runtime.h> #include <libvis/libvis.h> #include "badslam/cuda_util.cuh" #include "badslam/util.cuh" #include "badslam/robust_weighting.cuh" namespace vis { // --- Depth (geometric) residual --- // Weight factor on the depth residual in the cost term. constexpr float kDepthResidualWeight = 1.f; // Default Tukey parameter (= factor on standard deviation at which the // residuals have zero weight). This gets scaled for multi-res pose estimation. constexpr float kDepthResidualDefaultTukeyParam = 10.f; // Expected stereo matching uncertainty in pixels in the depth estimation // process. Determines the final propagated depth uncertainty. constexpr float kDepthUncertaintyEmpiricalFactor = 0.1f; // Computes the "raw" depth (geometric) residual, i.e., without any weighting. __forceinline__ __device__ void ComputeRawDepthResidual( const PixelCenterUnprojector& unprojector, int px, int py, float pixel_calibrated_depth, float raw_residual_inv_stddev_estimate, const float3& surfel_local_position, const float3& surfel_local_normal, float3* local_unproj, float* raw_residual) { *local_unproj = unprojector.UnprojectPoint(px, py, pixel_calibrated_depth); *raw_residual = raw_residual_inv_stddev_estimate * Dot(surfel_local_normal, *local_unproj - surfel_local_position); } // Computes the "raw" depth (geometric) residual, i.e., without any weighting. __forceinline__ __device__ void ComputeRawDepthResidual( float raw_residual_inv_stddev_estimate, const float3& surfel_local_position, const float3& surfel_local_normal, const float3& local_unproj, float* raw_residual) { *raw_residual = raw_residual_inv_stddev_estimate * Dot(surfel_local_normal, local_unproj - surfel_local_position); } // Computes the propagated standard deviation estimate for the depth residual. __forceinline__ __device__ float ComputeDepthResidualStddevEstimate(float nx, float ny, float depth, const float3& surfel_local_normal, float baseline_fx) { return (kDepthUncertaintyEmpiricalFactor * fabs(surfel_local_normal.x * nx + surfel_local_normal.y * ny + surfel_local_normal.z) * (depth * depth)) / baseline_fx; } // Computes the propagated inverse standard deviation estimate for the depth residual. __forceinline__ __device__ float ComputeDepthResidualInvStddevEstimate(float nx, float ny, float depth, const float3& surfel_local_normal, float baseline_fx) { return baseline_fx / (kDepthUncertaintyEmpiricalFactor * fabs(surfel_local_normal.x * nx + surfel_local_normal.y * ny + surfel_local_normal.z) * (depth * depth)); } // Computes the weight of the depth residual in the optimization. __forceinline__ __device__ float ComputeDepthResidualWeight(float raw_residual, float scaling = 1.f) { return kDepthResidualWeight * TukeyWeight(raw_residual, scaling * kDepthResidualDefaultTukeyParam); } // Computes the weighted depth residual for summing up the optimization cost. __forceinline__ __device__ float ComputeWeightedDepthResidual(float raw_residual, float scaling = 1.f) { return kDepthResidualWeight * TukeyResidual(raw_residual, scaling * kDepthResidualDefaultTukeyParam); } // --- Descriptor (photometric) residual --- // Weight factor from the cost term. // TODO: Tune further. Make parameter? constexpr float kDescriptorResidualWeight = 1e-2f; // Parameter for the Huber robust loss function for photometric residuals. // TODO: Make parameter? 
constexpr float kDescriptorResidualHuberParameter = 10.f; // Computes the projections in an image of two (mostly) fixed points on the // border of a surfel, whose direction to the surfel center differs by 90 // degrees. These points are used to compute the descriptor residual. __forceinline__ __device__ void ComputeTangentProjections( const float3& surfel_global_position, const float3& surfel_global_normal, const float surfel_radius_squared, const CUDAMatrix3x4& frame_T_global, const PixelCornerProjector& color_corner_projector, float2* t1_pxy, float2* t2_pxy) { // With scaling 1, the tangent sample points are ca. 0.5 pixels away from the // center point when looking at the surfel from directly above. // TODO: Tune this! I think this has received very little tuning, if any at all. constexpr float kTangentScaling = 2.0f; float3 t1; CrossProduct(surfel_global_normal, (fabs(surfel_global_normal.x) > 0.9f) ? make_float3(0, 1, 0) : make_float3(1, 0, 0), &t1); t1 = t1 * kTangentScaling * sqrtf(surfel_radius_squared / max(1e-12f, SquaredLength(t1))); *t1_pxy = color_corner_projector.Project(frame_T_global * (surfel_global_position + t1)); float3 t2; CrossProduct(surfel_global_normal, t1, &t2); t2 = t2 * kTangentScaling * sqrtf(surfel_radius_squared / max(1e-12f, SquaredLength(t2))); *t2_pxy = color_corner_projector.Project(frame_T_global * (surfel_global_position + t2)); } // Computes the "raw" descriptor (photometric) residual, i.e., without any // weighting. __forceinline__ __device__ void ComputeRawDescriptorResidual( cudaTextureObject_t color_texture, const float2& pxy, const float2& t1_pxy, const float2& t2_pxy, float surfel_descriptor_1, float surfel_descriptor_2, float* raw_residual_1, float* raw_residual_2) { float intensity = tex2D<float4>(color_texture, pxy.x, pxy.y).w; float t1_intensity = tex2D<float4>(color_texture, t1_pxy.x, t1_pxy.y).w; float t2_intensity = tex2D<float4>(color_texture, t2_pxy.x, t2_pxy.y).w; *raw_residual_1 = (180.f * (t1_intensity - intensity)) - surfel_descriptor_1; *raw_residual_2 = (180.f * (t2_intensity - intensity)) - surfel_descriptor_2; } __forceinline__ __device__ void ComputeRawDescriptorResidualWithFloatTexture( cudaTextureObject_t color_texture, const float2& pxy, const float2& t1_pxy, const float2& t2_pxy, float surfel_descriptor_1, float surfel_descriptor_2, float* raw_residual_1, float* raw_residual_2) { float intensity = tex2D<float>(color_texture, pxy.x, pxy.y); float t1_intensity = tex2D<float>(color_texture, t1_pxy.x, t1_pxy.y); float t2_intensity = tex2D<float>(color_texture, t2_pxy.x, t2_pxy.y); *raw_residual_1 = (180.f * (t1_intensity - intensity)) - surfel_descriptor_1; *raw_residual_2 = (180.f * (t2_intensity - intensity)) - surfel_descriptor_2; } // Computes the weight of the descriptor residual in the optimization. __forceinline__ __device__ float ComputeDescriptorResidualWeight(float raw_residual, float scaling = 1.f) { return scaling * kDescriptorResidualWeight * HuberWeight(raw_residual, kDescriptorResidualHuberParameter); } // Computes the weighted descriptor residual for summing up the optimization // cost. __forceinline__ __device__ float ComputeWeightedDescriptorResidual(float raw_residual, float scaling = 1.f) { return scaling * kDescriptorResidualWeight * HuberResidual(raw_residual, kDescriptorResidualHuberParameter); } // Computes the Jacobian of a surfel descriptor with regard to changes in the // projected pixel position of the surfel. 
This function makes the approximation that // the projected positions of all points on the surfel move equally. This should // be valid since those points should all be very close together. __forceinline__ __device__ void DescriptorJacobianWrtProjectedPosition( cudaTextureObject_t color_texture, const float2& color_pxy, const float2& t1_pxy, const float2& t2_pxy, float* grad_x_1, float* grad_y_1, float* grad_x_2, float* grad_y_2) { int ix = static_cast<int>(::max(0.f, color_pxy.x - 0.5f)); int iy = static_cast<int>(::max(0.f, color_pxy.y - 0.5f)); float tx = ::max(0.f, ::min(1.f, color_pxy.x - 0.5f - ix)); // truncated x = trunc(cx + fx*ls.x/ls.z) float ty = ::max(0.f, ::min(1.f, color_pxy.y - 0.5f - iy)); // truncated y = trunc(cy + fy*ls.y/ls.z) float top_left = tex2D<float4>(color_texture, ix + 0.5f, iy + 0.5f).w; float top_right = tex2D<float4>(color_texture, ix + 1.5f, iy + 0.5f).w; float bottom_left = tex2D<float4>(color_texture, ix + 0.5f, iy + 1.5f).w; float bottom_right = tex2D<float4>(color_texture, ix + 1.5f, iy + 1.5f).w; float center_dx = (bottom_right - bottom_left) * ty + (top_right - top_left) * (1 - ty); float center_dy = (bottom_right - top_right) * tx + (bottom_left - top_left) * (1 - tx); ix = static_cast<int>(::max(0.f, t1_pxy.x - 0.5f)); iy = static_cast<int>(::max(0.f, t1_pxy.y - 0.5f)); tx = ::max(0.f, ::min(1.f, t1_pxy.x - 0.5f - ix)); // truncated x = trunc(cx + fx*ls.x/ls.z) ty = ::max(0.f, ::min(1.f, t1_pxy.y - 0.5f - iy)); // truncated y = trunc(cy + fy*ls.y/ls.z) top_left = tex2D<float4>(color_texture, ix + 0.5f, iy + 0.5f).w; top_right = tex2D<float4>(color_texture, ix + 1.5f, iy + 0.5f).w; bottom_left = tex2D<float4>(color_texture, ix + 0.5f, iy + 1.5f).w; bottom_right = tex2D<float4>(color_texture, ix + 1.5f, iy + 1.5f).w; float t1_dx = (bottom_right - bottom_left) * ty + (top_right - top_left) * (1 - ty); float t1_dy = (bottom_right - top_right) * tx + (bottom_left - top_left) * (1 - tx); ix = static_cast<int>(::max(0.f, t2_pxy.x - 0.5f)); iy = static_cast<int>(::max(0.f, t2_pxy.y - 0.5f)); tx = ::max(0.f, ::min(1.f, t2_pxy.x - 0.5f - ix)); // truncated x = trunc(cx + fx*ls.x/ls.z) ty = ::max(0.f, ::min(1.f, t2_pxy.y - 0.5f - iy)); // truncated y = trunc(cy + fy*ls.y/ls.z) top_left = tex2D<float4>(color_texture, ix + 0.5f, iy + 0.5f).w; top_right = tex2D<float4>(color_texture, ix + 1.5f, iy + 0.5f).w; bottom_left = tex2D<float4>(color_texture, ix + 0.5f, iy + 1.5f).w; bottom_right = tex2D<float4>(color_texture, ix + 1.5f, iy + 1.5f).w; float t2_dx = (bottom_right - bottom_left) * ty + (top_right - top_left) * (1 - ty); float t2_dy = (bottom_right - top_right) * tx + (bottom_left - top_left) * (1 - tx); float intensity = tex2D<float4>(color_texture, color_pxy.x, color_pxy.y).w; float t1_intensity = tex2D<float4>(color_texture, t1_pxy.x, t1_pxy.y).w; float t2_intensity = tex2D<float4>(color_texture, t2_pxy.x, t2_pxy.y).w; // NOTE: It is approximate to mix all the center, t1, t2 derivatives // directly since the points would move slightly differently on most // pose changes. However, the approximation is possibly pretty good since // the points are all close to each other. 
*grad_x_1 = 180.f * (t1_dx - center_dx); *grad_y_1 = 180.f * (t1_dy - center_dy); *grad_x_2 = 180.f * (t2_dx - center_dx); *grad_y_2 = 180.f * (t2_dy - center_dy); } __forceinline__ __device__ void DescriptorJacobianWrtProjectedPositionWithFloatTexture( cudaTextureObject_t color_texture, const float2& color_pxy, const float2& t1_pxy, const float2& t2_pxy, float* grad_x_fx_1, float* grad_y_fy_1, float* grad_x_fx_2, float* grad_y_fy_2) { int ix = static_cast<int>(::max(0.f, color_pxy.x - 0.5f)); int iy = static_cast<int>(::max(0.f, color_pxy.y - 0.5f)); float tx = ::max(0.f, ::min(1.f, color_pxy.x - 0.5f - ix)); // truncated x = trunc(cx + fx*ls.x/ls.z) float ty = ::max(0.f, ::min(1.f, color_pxy.y - 0.5f - iy)); // truncated y = trunc(cy + fy*ls.y/ls.z) float top_left = tex2D<float>(color_texture, ix + 0.5f, iy + 0.5f); float top_right = tex2D<float>(color_texture, ix + 1.5f, iy + 0.5f); float bottom_left = tex2D<float>(color_texture, ix + 0.5f, iy + 1.5f); float bottom_right = tex2D<float>(color_texture, ix + 1.5f, iy + 1.5f); float center_dx = (bottom_right - bottom_left) * ty + (top_right - top_left) * (1 - ty); float center_dy = (bottom_right - top_right) * tx + (bottom_left - top_left) * (1 - tx); ix = static_cast<int>(::max(0.f, t1_pxy.x - 0.5f)); iy = static_cast<int>(::max(0.f, t1_pxy.y - 0.5f)); tx = ::max(0.f, ::min(1.f, t1_pxy.x - 0.5f - ix)); // truncated x = trunc(cx + fx*ls.x/ls.z) ty = ::max(0.f, ::min(1.f, t1_pxy.y - 0.5f - iy)); // truncated y = trunc(cy + fy*ls.y/ls.z) top_left = tex2D<float>(color_texture, ix + 0.5f, iy + 0.5f); top_right = tex2D<float>(color_texture, ix + 1.5f, iy + 0.5f); bottom_left = tex2D<float>(color_texture, ix + 0.5f, iy + 1.5f); bottom_right = tex2D<float>(color_texture, ix + 1.5f, iy + 1.5f); float t1_dx = (bottom_right - bottom_left) * ty + (top_right - top_left) * (1 - ty); float t1_dy = (bottom_right - top_right) * tx + (bottom_left - top_left) * (1 - tx); ix = static_cast<int>(::max(0.f, t2_pxy.x - 0.5f)); iy = static_cast<int>(::max(0.f, t2_pxy.y - 0.5f)); tx = ::max(0.f, ::min(1.f, t2_pxy.x - 0.5f - ix)); // truncated x = trunc(cx + fx*ls.x/ls.z) ty = ::max(0.f, ::min(1.f, t2_pxy.y - 0.5f - iy)); // truncated y = trunc(cy + fy*ls.y/ls.z) top_left = tex2D<float>(color_texture, ix + 0.5f, iy + 0.5f); top_right = tex2D<float>(color_texture, ix + 1.5f, iy + 0.5f); bottom_left = tex2D<float>(color_texture, ix + 0.5f, iy + 1.5f); bottom_right = tex2D<float>(color_texture, ix + 1.5f, iy + 1.5f); float t2_dx = (bottom_right - bottom_left) * ty + (top_right - top_left) * (1 - ty); float t2_dy = (bottom_right - top_right) * tx + (bottom_left - top_left) * (1 - tx); float intensity = tex2D<float>(color_texture, color_pxy.x, color_pxy.y); float t1_intensity = tex2D<float>(color_texture, t1_pxy.x, t1_pxy.y); float t2_intensity = tex2D<float>(color_texture, t2_pxy.x, t2_pxy.y); // NOTE: It is approximate to mix all the center, t1, t2 derivatives // directly since the points would move slightly differently on most // pose changes. However, the approximation is possibly pretty good since // the points are all close to each other. *grad_x_fx_1 = 180.f * (t1_dx - center_dx); *grad_y_fy_1 = 180.f * (t1_dy - center_dy); *grad_x_fx_2 = 180.f * (t2_dx - center_dx); *grad_y_fy_2 = 180.f * (t2_dy - center_dy); } // --- Color (photometric) residual for frame-to-frame tracking on precomputed gradient magnitudes --- // Computes the "raw" color residual, i.e., without any weighting. 
__forceinline__ __device__ void ComputeRawColorResidual( cudaTextureObject_t color_texture, const float2& pxy, float surfel_gradmag, float* raw_residual) { *raw_residual = 255.f * tex2D<float>(color_texture, pxy.x, pxy.y) - surfel_gradmag; } // Computes the Jacobian of the color residual with regard to changes in the // projected position of a 3D point. __forceinline__ __device__ void ColorJacobianWrtProjectedPosition( cudaTextureObject_t color_texture, const float2& color_pxy, float* grad_x_fx, float* grad_y_fy) { int ix = static_cast<int>(::max(0.f, color_pxy.x - 0.5f)); int iy = static_cast<int>(::max(0.f, color_pxy.y - 0.5f)); float tx = ::max(0.f, ::min(1.f, color_pxy.x - 0.5f - ix)); // truncated x = trunc(cx + fx*ls.x/ls.z) float ty = ::max(0.f, ::min(1.f, color_pxy.y - 0.5f - iy)); // truncated y = trunc(cy + fy*ls.y/ls.z) float top_left = 255.f * tex2D<float>(color_texture, ix + 0.5f, iy + 0.5f); float top_right = 255.f * tex2D<float>(color_texture, ix + 1.5f, iy + 0.5f); float bottom_left = 255.f * tex2D<float>(color_texture, ix + 0.5f, iy + 1.5f); float bottom_right = 255.f * tex2D<float>(color_texture, ix + 1.5f, iy + 1.5f); *grad_x_fx = (bottom_right - bottom_left) * ty + (top_right - top_left) * (1 - ty); *grad_y_fy = (bottom_right - top_right) * tx + (bottom_left - top_left) * (1 - tx); } }
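// ----------------------------------------------------------------------------------------
// Illustrative sketch (not part of this header): the robust weights used above come from
// badslam/robust_weighting.cuh, which is not shown here. For reference, the textbook forms
// of the two kernels involved (Tukey's biweight for the depth residual, Huber for the
// descriptor residual) look as follows; the actual badslam definitions may differ in
// scaling, so treat these "Sketch" functions only as a reminder of their shape.
__forceinline__ __device__ float SketchHuberWeight(float r, float k) {
  const float abs_r = fabsf(r);
  return (abs_r <= k) ? 1.f : (k / abs_r);        // quadratic core, linear tails
}

__forceinline__ __device__ float SketchTukeyWeight(float r, float k) {
  if (fabsf(r) > k) return 0.f;                   // residuals beyond k are ignored entirely
  const float q = 1.f - (r / k) * (r / k);
  return q * q;
}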
#include "cunumeric/fft/fft.h" #include "cunumeric/fft/fft_template.inl" #include "cunumeric/cuda_help.h" #include "cunumeric/pitches.h" namespace cunumeric { using namespace Legion; using namespace legate; using dim_t = long long int32_t; template <int32_t DIM, typename TYPE> __global__ static void copy_kernel(size_t volume, Buffer<TYPE, DIM> buffer, AccessorRO<TYPE, DIM> acc, Pitches<DIM - 1> pitches, Point<DIM> lo) { size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= volume) return; auto p = pitches.unflatten(offset, Point<DIM>::ZEROES()); buffer[p] = acc[p + lo]; } template <int32_t DIM, typename TYPE> __host__ static inline void copy_into_buffer(Buffer<TYPE, DIM>& buffer, AccessorRO<TYPE, DIM>& acc, const Rect<DIM>& rect, size_t volume, cudaStream_t stream) { if (acc.accessor.is_dense_row_major(rect)) { auto zero = Point<DIM>::ZEROES(); CHECK_CUDA(cudaMemcpyAsync( buffer.ptr(zero), acc.ptr(zero), volume * sizeof(TYPE), cudaMemcpyDefault, stream)); } else { Pitches<DIM - 1> pitches; pitches.flatten(rect); const size_t num_blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; copy_kernel<<<num_blocks, THREADS_PER_BLOCK, 0, stream>>>( volume, buffer, acc, pitches, rect.lo); CHECK_CUDA_STREAM(stream); } } template <int32_t DIM, typename OUTPUT_TYPE, typename INPUT_TYPE> __host__ static inline void cufft_operation(AccessorWO<OUTPUT_TYPE, DIM> out, AccessorRO<INPUT_TYPE, DIM> in, const Rect<DIM>& out_rect, const Rect<DIM>& in_rect, std::vector<int64_t>& axes, CuNumericFFTType type, CuNumericFFTDirection direction) { auto stream = get_cached_stream(); size_t workarea_size = 0; size_t num_elements; dim_t n[DIM]; dim_t inembed[DIM]; dim_t onembed[DIM]; const Point<DIM> zero = Point<DIM>::ZEROES(); const Point<DIM> one = Point<DIM>::ONES(); Point<DIM> fft_size_in = in_rect.hi - in_rect.lo + one; Point<DIM> fft_size_out = out_rect.hi - out_rect.lo + one; num_elements = 1; for (int32_t i = 0; i < DIM; ++i) { n[i] = (type == CUNUMERIC_FFT_R2C || type == CUNUMERIC_FFT_D2Z) ? 
fft_size_in[i] : fft_size_out[i]; inembed[i] = fft_size_in[i]; onembed[i] = fft_size_out[i]; num_elements *= n[i]; } // Create the plan cufftHandle plan; CHECK_CUFFT(cufftCreate(&plan)); CHECK_CUFFT(cufftSetAutoAllocation(plan, 0 /*we'll do the allocation*/)); CHECK_CUFFT(cufftSetStream(plan, stream)); // Create the plan and allocate a temporary buffer for it if it needs one CHECK_CUFFT(cufftMakePlanMany64( plan, DIM, n, inembed, 1, 1, onembed, 1, 1, static_cast<cufftType>(type), 1, &workarea_size)); if (workarea_size > 0) { auto workarea_buffer = create_buffer<uint8_t>(workarea_size, Legion::Memory::Kind::GPU_FB_MEM); CHECK_CUFFT(cufftSetWorkArea(plan, workarea_buffer.ptr(0))); } const void* in_ptr{nullptr}; if (in.accessor.is_dense_row_major(in_rect)) in_ptr = in.ptr(in_rect.lo); else { auto buffer = create_buffer<INPUT_TYPE, DIM>(fft_size_in, Memory::Kind::GPU_FB_MEM); copy_into_buffer(buffer, in, in_rect, in_rect.volume(), stream); in_ptr = buffer.ptr(zero); } // FFT the input data CHECK_CUFFT(cufftXtExec(plan, const_cast<void*>(in_ptr), static_cast<void*>(out.ptr(out_rect.lo)), static_cast<int32_t>(direction))); // Clean up our resources, Buffers are cleaned up by Legion CHECK_CUFFT(cufftDestroy(plan)); } template <int32_t DIM, typename OUTPUT, typename INPUT_TYPE> struct cufft_axes_plan { __host__ static inline void execute(cufftHandle plan, OUTPUT& out, Buffer<INPUT_TYPE, DIM>& in, const Rect<DIM>& out_rect, const Rect<DIM>& in_rect, int32_t axis, CuNumericFFTDirection direction) { const auto zero = Point<DIM>::ZEROES(); CHECK_CUFFT(cufftXtExec(plan, static_cast<void*>(in.ptr(zero)), static_cast<void*>(out.ptr(out_rect.lo)), static_cast<int32_t>(direction))); } }; // For dimensions higher than 2D, we need to iterate through the input volume as 2D slices due to // limitations of cuFFT indexing in 1D template <typename OUTPUT, typename INPUT_TYPE> struct cufft_axes_plan<3, OUTPUT, INPUT_TYPE> { __host__ static inline void execute(cufftHandle plan, OUTPUT& out, Buffer<INPUT_TYPE, 3>& in, const Rect<3>& out_rect, const Rect<3>& in_rect, int32_t axis, CuNumericFFTDirection direction) { bool is_inner_axis = (axis == 1); if (is_inner_axis) { // TODO: use PointInRectIterator<DIM> auto num_slices = in_rect.hi[0] - in_rect.lo[0] + 1; for (uint32_t n = 0; n < num_slices; ++n) { const auto offset = Point<3>(n, 0, 0); CHECK_CUFFT(cufftXtExec(plan, static_cast<void*>(in.ptr(offset)), static_cast<void*>(out.ptr(out_rect.lo + offset)), static_cast<int32_t>(direction))); } } else { const auto zero = Point<3>::ZEROES(); CHECK_CUFFT(cufftXtExec(plan, static_cast<void*>(in.ptr(zero)), static_cast<void*>(out.ptr(out_rect.lo)), static_cast<int32_t>(direction))); } } }; // Perform the FFT operation as multiple 1D FFTs along the specified axes (Complex-to-complex case). // For now, it only supports up to 3D FFTs, but final plan is having support for // N-dimensional FFTs using this approach. // See cufft_over_axis_r2c_c2r for the equivalent on a single R2C/C2R axis. 
template <int32_t DIM, typename OUTPUT_TYPE, typename INPUT_TYPE> __host__ static inline void cufft_over_axes_c2c(AccessorWO<OUTPUT_TYPE, DIM> out, AccessorRO<INPUT_TYPE, DIM> in, const Rect<DIM>& out_rect, const Rect<DIM>& in_rect, std::vector<int64_t>& axes, CuNumericFFTType type, CuNumericFFTDirection direction) { auto stream = get_cached_stream(); size_t workarea_size = 0; dim_t n[DIM]; dim_t inembed[DIM]; dim_t onembed[DIM]; // Full volume dimensions / strides const Point<DIM> zero = Point<DIM>::ZEROES(); const Point<DIM> one = Point<DIM>::ONES(); Point<DIM> fft_size_in = in_rect.hi - in_rect.lo + one; Point<DIM> fft_size_out = out_rect.hi - out_rect.lo + one; size_t num_elements_in = 1; size_t num_elements_out = 1; for (int32_t i = 0; i < DIM; ++i) { n[i] = fft_size_out[i]; inembed[i] = fft_size_in[i]; onembed[i] = fft_size_out[i]; num_elements_in *= fft_size_in[i]; num_elements_out *= fft_size_out[i]; } // Copy input to temporary buffer to perform FFTs one by one auto input_buffer = create_buffer<INPUT_TYPE, DIM>(fft_size_in, Legion::Memory::Kind::GPU_FB_MEM); copy_into_buffer<DIM, INPUT_TYPE>(input_buffer, in, in_rect, num_elements_in, stream); Buffer<uint8_t> workarea_buffer; size_t last_workarea_size = 0; for (auto& ax : axes) { // Create the plan cufftHandle plan; CHECK_CUFFT(cufftCreate(&plan)); CHECK_CUFFT(cufftSetAutoAllocation(plan, 0 /*we'll do the allocation*/)); CHECK_CUFFT(cufftSetStream(plan, stream)); // Single axis dimensions / stridfes dim_t size_1d = n[ax]; // TODO: batches only correct for DIM <= 3. Fix for N-DIM case dim_t batches = (DIM == 3 && ax == 1) ? n[2] : num_elements_in / n[ax]; dim_t istride = 1; dim_t ostride = 1; for (int32_t i = ax + 1; i < DIM; ++i) { istride *= fft_size_in[i]; ostride *= fft_size_out[i]; } dim_t idist = (ax == DIM - 1) ? fft_size_in[ax] : 1; dim_t odist = (ax == DIM - 1) ? fft_size_out[ax] : 1; // Create the plan and allocate a temporary buffer for it if it needs one CHECK_CUFFT(cufftMakePlanMany64(plan, 1, &size_1d, inembed, istride, idist, onembed, ostride, odist, (cufftType)type, batches, &workarea_size)); if (workarea_size > 0) { if (workarea_size > last_workarea_size) { if (last_workarea_size > 0) workarea_buffer.destroy(); workarea_buffer = create_buffer<uint8_t>(workarea_size, Legion::Memory::Kind::GPU_FB_MEM); last_workarea_size = workarea_size; } CHECK_CUFFT(cufftSetWorkArea(plan, workarea_buffer.ptr(0))); } // TODO: following function only correct for DIM <= 3. Fix for N-DIM case cufft_axes_plan<DIM, Buffer<INPUT_TYPE, DIM>, INPUT_TYPE>::execute( plan, input_buffer, input_buffer, out_rect, in_rect, ax, direction); // Clean up our resources, Buffers are cleaned up by Legion CHECK_CUFFT(cufftDestroy(plan)); } CHECK_CUDA(cudaMemcpyAsync(out.ptr(zero), input_buffer.ptr(zero), num_elements_out * sizeof(OUTPUT_TYPE), cudaMemcpyDefault, stream)); } // Perform the FFT operation as multiple 1D FFTs along the specified axes, single R2C/C2R operation. 
template <int32_t DIM, typename OUTPUT_TYPE, typename INPUT_TYPE> __host__ static inline void cufft_over_axis_r2c_c2r(AccessorWO<OUTPUT_TYPE, DIM> out, AccessorRO<INPUT_TYPE, DIM> in, const Rect<DIM>& out_rect, const Rect<DIM>& in_rect, std::vector<int64_t>& axes, CuNumericFFTType type, CuNumericFFTDirection direction) { auto stream = get_cached_stream(); size_t workarea_size = 0; dim_t n[DIM]; dim_t inembed[DIM]; dim_t onembed[DIM]; // Full volume dimensions / strides const Point<DIM> zero = Point<DIM>::ZEROES(); const Point<DIM> one = Point<DIM>::ONES(); Point<DIM> fft_size_in = in_rect.hi - in_rect.lo + one; Point<DIM> fft_size_out = out_rect.hi - out_rect.lo + one; size_t num_elements_in = 1; size_t num_elements_out = 1; for (int32_t i = 0; i < DIM; ++i) { n[i] = (direction == CUNUMERIC_FFT_FORWARD) ? fft_size_in[i] : fft_size_out[i]; inembed[i] = fft_size_in[i]; onembed[i] = fft_size_out[i]; num_elements_in *= fft_size_in[i]; num_elements_out *= fft_size_out[i]; } // cuFFT out-of-place C2R always overwrites the input buffer, // which is not what we want here, so copy // Copy input to temporary buffer to perform FFTs one by one auto input_buffer = create_buffer<INPUT_TYPE, DIM>(fft_size_in, Legion::Memory::Kind::GPU_FB_MEM); copy_into_buffer<DIM, INPUT_TYPE>(input_buffer, in, in_rect, num_elements_in, stream); // Create the plan cufftHandle plan; CHECK_CUFFT(cufftCreate(&plan)); CHECK_CUFFT(cufftSetAutoAllocation(plan, 0 /*we'll do the allocation*/)); CHECK_CUFFT(cufftSetStream(plan, stream)); // Operate over the R2C or C2R axis, which should be the only one in the list assert(axes.size() == 1); auto axis = axes.front(); // Batched 1D dimension dim_t size_1d = n[axis]; // TODO: batch only correct for DIM <= 3. Fix for N-DIM case dim_t batches = (direction == CUNUMERIC_FFT_FORWARD) ? num_elements_in : num_elements_out; batches = (DIM == 3 && axis == 1) ? n[2] : batches / n[axis]; dim_t istride = 1; dim_t ostride = 1; for (int32_t i = axis + 1; i < DIM; ++i) { istride *= fft_size_in[i]; ostride *= fft_size_out[i]; } dim_t idist = (axis == DIM - 1) ? fft_size_in[axis] : 1; dim_t odist = (axis == DIM - 1) ? fft_size_out[axis] : 1; // Create the plan and allocate a temporary buffer for it if it needs one CHECK_CUFFT(cufftMakePlanMany64(plan, 1, &size_1d, inembed, istride, idist, onembed, ostride, odist, (cufftType)type, batches, &workarea_size)); if (workarea_size > 0) { auto workarea_buffer = create_buffer<uint8_t>(workarea_size, Legion::Memory::Kind::GPU_FB_MEM); CHECK_CUFFT(cufftSetWorkArea(plan, workarea_buffer.ptr(0))); } cufft_axes_plan<DIM, AccessorWO<OUTPUT_TYPE, DIM>, INPUT_TYPE>::execute( plan, out, input_buffer, out_rect, in_rect, axis, direction); // Clean up our resources, Buffers are cleaned up by Legion CHECK_CUFFT(cufftDestroy(plan)); } template <CuNumericFFTType FFT_TYPE, LegateTypeCode CODE_OUT, LegateTypeCode CODE_IN, int32_t DIM> struct FFTImplBody<VariantKind::GPU, FFT_TYPE, CODE_OUT, CODE_IN, DIM> { using INPUT_TYPE = legate_type_of<CODE_IN>; using OUTPUT_TYPE = legate_type_of<CODE_OUT>; __host__ void operator()(AccessorWO<OUTPUT_TYPE, DIM> out, AccessorRO<INPUT_TYPE, DIM> in, const Rect<DIM>& out_rect, const Rect<DIM>& in_rect, std::vector<int64_t>& axes, CuNumericFFTDirection direction, bool operate_over_axes) const { // FFTs are computed as 1D over different axes. 
Slower than performing the full FFT in a single // step if (operate_over_axes) { // R2C / C2R always only 1D on a single axis (when performed over axes) if constexpr (FFT_TYPE != CUNUMERIC_FFT_Z2Z && FFT_TYPE != CUNUMERIC_FFT_C2C) { cufft_over_axis_r2c_c2r<DIM, OUTPUT_TYPE, INPUT_TYPE>( out, in, out_rect, in_rect, axes, FFT_TYPE, direction); } // C2C can be multiple 1D dimensions over axes else { cufft_over_axes_c2c<DIM, OUTPUT_TYPE, INPUT_TYPE>( out, in, out_rect, in_rect, axes, FFT_TYPE, direction); } } // If we have one axis per dimension, then it can be done as a single operation (more // performant) else { // FFTs are computed as a single step of DIM cufft_operation<DIM, OUTPUT_TYPE, INPUT_TYPE>( out, in, out_rect, in_rect, axes, FFT_TYPE, direction); } } }; /*static*/ void FFTTask::gpu_variant(TaskContext& context) { fft_template<VariantKind::GPU>(context); }; namespace // unnamed { static void __attribute__((constructor)) register_tasks(void) { FFTTask::register_variants(); } } // namespace } // namespace cunumeric
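// Illustrative sketch (not part of cuNumeric): the bare cuFFT call sequence the
// helpers above are built around -- create a plan, disable auto-allocation,
// attach a manually sized work area, then run the transform with cufftXtExec.
// Shown for a single 1D double-precision C2C transform; the helper name and
// the minimal error handling are this sketch's own, not the library's.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>
#include <cufft.h>
#include <cufftXt.h>

#define SKETCH_CUFFT(call)                                                      \
  do {                                                                          \
    cufftResult r_ = (call);                                                    \
    if (r_ != CUFFT_SUCCESS) {                                                  \
      std::fprintf(stderr, "cuFFT error %d\n", static_cast<int>(r_));           \
      std::abort();                                                             \
    }                                                                           \
  } while (0)

static void Simple1DZ2ZSketch(cufftDoubleComplex* d_in, cufftDoubleComplex* d_out,
                              long long int n, cudaStream_t stream) {
  cufftHandle plan;
  SKETCH_CUFFT(cufftCreate(&plan));
  SKETCH_CUFFT(cufftSetAutoAllocation(plan, 0 /*we allocate the work area*/));
  SKETCH_CUFFT(cufftSetStream(plan, stream));

  size_t workarea_size = 0;
  SKETCH_CUFFT(cufftMakePlanMany64(plan, 1 /*rank*/, &n,
                                   nullptr, 1, n,   // inembed/istride/idist (simple layout)
                                   nullptr, 1, n,   // onembed/ostride/odist (simple layout)
                                   CUFFT_Z2Z, 1 /*batch*/, &workarea_size));

  void* workarea = nullptr;
  if (workarea_size > 0) {
    if (cudaMalloc(&workarea, workarea_size) != cudaSuccess) std::abort();
    SKETCH_CUFFT(cufftSetWorkArea(plan, workarea));
  }

  // Forward transform; pass CUFFT_INVERSE for the inverse direction.
  SKETCH_CUFFT(cufftXtExec(plan, d_in, d_out, CUFFT_FORWARD));

  SKETCH_CUFFT(cufftDestroy(plan));
  if (workarea) cudaFree(workarea);
}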
#include <ops/meta_ops.h> #include <loops/legacy_ops.h> #define GRID_WIDTH 19 // number of pointers within single grid row template <typename T> __device__ inline static void metaPredicateShapeGeneric(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, T *dx, Nd4jLong *xShapeInfo, T *dy, Nd4jLong *yShapeInfo, T *dz, Nd4jLong *zShapeInfo, T *extraA, T *extraB, T scalarA, T scalarB) { __shared__ Nd4jPointer params[2]; __shared__ T *paramsPtr; if (threadIdx.x == 0) { if (opTypeA == 0) { params[0] = (Nd4jPointer *) &scalarA; } else params[0] = (Nd4jPointer *) extraA; if (opTypeB == 0) { params[1] = (Nd4jPointer *) &scalarB; } else params[1] = (Nd4jPointer *) extraB; paramsPtr = (T *) params; } __syncthreads(); if (opTypeA == 2) { if (opTypeB == 0) { // DISPATCH_METAOP(functions::pairwise_transforms::PairWiseTransform<T>::template transformCuda, PARAMS(dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, paramsPtr, nullptr, nullptr, nullptr), InvertedMetaOp, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)); // functions::pairwise_transforms::PairWiseTransform<T>::template transformCuda<simdOps::InvertedMetaOp<T, simdOps::Copy<T>, simdOps::Multiply<T>>>(dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, paramsPtr, nullptr, nullptr, nullptr); } } } template<typename T, typename OpClass> __device__ static inline void invertedMetaPairwiseShapedGeneric(const int opTypeA, const int opTypeB, Nd4jLong N, T *dx, Nd4jLong *xShapeInfo, T *dy, Nd4jLong *yShapeInfo, T *dz, Nd4jLong *zShapeInfo, T *extraA, T *extraB, T scalarA, T scalarB) { __shared__ Nd4jPointer params[2]; __shared__ T *paramsPtr; if (threadIdx.x == 0) { if (opTypeA == 0) { params[0] = (Nd4jPointer *) &scalarA; } else params[0] = (Nd4jPointer *) extraA; if (opTypeB == 0) { params[1] = (Nd4jPointer *) &scalarB; } else params[1] = (Nd4jPointer *) extraB; paramsPtr = (T *) params; } __syncthreads(); functions::grid::GRIDShaped<T>::template transformCuda<OpClass>(dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, paramsPtr, nullptr, nullptr, nullptr); }; template<typename T, typename OpClass> __device__ static inline void invertedMetaPairwiseShapedGeneric(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, T *dx, Nd4jLong *xShapeInfo, T *dy, Nd4jLong *yShapeInfo, T *dz, Nd4jLong *zShapeInfo, T *extraA, T *extraB, T scalarA, T scalarB) { __shared__ Nd4jPointer params[2]; __shared__ T *paramsPtr; if (threadIdx.x == 0) { if (opTypeA == 0) { params[0] = (Nd4jPointer *) &scalarA; } else params[0] = (Nd4jPointer *) extraA; if (opTypeB == 0) { params[1] = (Nd4jPointer *) &scalarB; } else params[1] = (Nd4jPointer *) extraB; paramsPtr = (T *) params; } __syncthreads(); functions::grid::GRIDShaped<T>::template transformCuda<OpClass>(opTypeA, opNumA, opTypeB, opNumB, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, paramsPtr, nullptr, nullptr, nullptr); }; template<typename T> __device__ static inline void invertedMetaPairwiseShapedNumericGeneric(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, T *dx, Nd4jLong *xShapeInfo, T *dy, Nd4jLong *yShapeInfo, T *dz, Nd4jLong *zShapeInfo, T *extraA, T *extraB, T scalarA, T scalarB) { __shared__ Nd4jPointer params[2]; __shared__ T *paramsPtr; if (threadIdx.x == 0) { if (opTypeA == 0) { params[0] = (Nd4jPointer *) &scalarA; } else params[0] = (Nd4jPointer *) extraA; if (opTypeB == 0) { params[1] = (Nd4jPointer *) &scalarB; } else params[1] = (Nd4jPointer *) extraB; paramsPtr = (T *) params; } __syncthreads(); 
functions::grid::GRIDShaped<T>::transformCuda(opTypeA, opNumA, opTypeB, opNumB, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, paramsPtr, nullptr, nullptr, nullptr); }; extern "C" __global__ void invertedMetaPairwiseShapedNumericFloat(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, float *dx, Nd4jLong *xShapeInfo, float *dy, Nd4jLong *yShapeInfo, float *dz, Nd4jLong *zShapeInfo, float *extraA, float *extraB, float scalarA, float scalarB) { invertedMetaPairwiseShapedNumericGeneric<float>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB); } extern "C" __global__ void invertedMetaPairwiseShapedNumericDouble(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, double *dx, Nd4jLong *xShapeInfo, double *dy, Nd4jLong *yShapeInfo, double *dz, Nd4jLong *zShapeInfo, double *extraA, double *extraB, double scalarA, double scalarB) { invertedMetaPairwiseShapedNumericGeneric<double>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB); } extern "C" __global__ void invertedMetaPairwiseShapedNumericHalf(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, float16 *dx, Nd4jLong *xShapeInfo, float16 *dy, Nd4jLong *yShapeInfo, float16 *dz, Nd4jLong *zShapeInfo, float16 *extraA, float16 *extraB, float16 scalarA, float16 scalarB) { invertedMetaPairwiseShapedNumericGeneric<float16>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB); } #ifndef __CLION_IDE__ // kernels set for pairwise + scalar based on shape //DISPATCH_KERNEL_META(invertedMetaPairwiseShaped_Pairwise_Scalar_, invertedMetaPairwiseShapedGeneric, float, metaOps::InvertedMetaOp, INPUT(const int opTypeA, const int opTypeB, Nd4jLong N, float *dx, int *xShapeInfo, float *dy, int *yShapeInfo, float *dz, int *zShapeInfo, float *extraA, float *extraB, float scalarA, float scalarB), PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB), OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)) //DISPATCH_KERNEL_META(invertedMetaPairwiseShaped_Pairwise_Scalar_, invertedMetaPairwiseShapedGeneric, double, metaOps::InvertedMetaOp, INPUT(const int opTypeA, const int opTypeB, Nd4jLong N, double *dx, int *xShapeInfo, double *dy, int *yShapeInfo, double *dz, int *zShapeInfo, double *extraA, double *extraB, double scalarA, double scalarB), PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB), OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)) //DISPATCH_KERNEL_META(invertedMetaPairwiseShaped_Pairwise_Scalar_, invertedMetaPairwiseShapedGeneric, float16, metaOps::InvertedMetaOp, INPUT(const int opTypeA, const int opTypeB, Nd4jLong N, float16 *dx, int *xShapeInfo, float16 *dy, int *yShapeInfo, float16 *dz, int *zShapeInfo, float16 *extraA, float16 *extraB, float16 scalarA, float16 scalarB), PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB), OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)) #endif namespace functions { namespace grid { __device__ void _ind2subC(int rank, Nd4jLong *shape, Nd4jLong idx, Nd4jLong *coords) { shape::ind2subC(rank, shape, idx, coords); } __device__ Nd4jLong _getOffset(Nd4jLong offset, Nd4jLong *shape, Nd4jLong *stride, Nd4jLong *coords, int rank) { return shape::getOffset(offset, 
shape, stride, coords, rank); } __device__ Nd4jLong* _shapeOf(Nd4jLong *shape) { return shape::shapeOf(shape); } __device__ Nd4jLong* _stride(Nd4jLong *shape) { return shape::stride(shape); } __device__ int _rank(Nd4jLong* shape) { return shape::rank(shape); } /** * This method is able to execute various ops that takes 2 operands (x, y) + extras * @tparam T */ template <typename T> __device__ T _execute_2OE(const int opType, const int opNum, T x, T y, T *extras) { T z; switch(opType) { case 2: { EXECUTE_NOE((x, y, extras), OPS_A(PAIRWISE_TRANSFORM_OPS)); }; break; default: { PRINT_FIRST("Unknown opType provided: [%i]\n", opType); } break; } return z; } /** * This method is able to execute various ops that takes 1 operand (x) + extras * @tparam T */ template <typename T> __device__ T _execute_1OE(const int opType, const int opNum, T x, T *extras) { T z; switch(opType) { case 0: { EXECUTE_NOE((x, extras), OPS_A(SCALAR_OPS)); } break; default: { PRINT_FIRST("Unknown opType provided: [%i]\n", opType); } break; } return z; } template <typename T> __device__ T _invertedOpExecutorA(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, T x, T y, T *extras) { // this code is basically InvertedMetaOp, reorganized to suit per-type execution Nd4jPointer *wrap = reinterpret_cast<Nd4jPointer *> (extras); T *paramsA = reinterpret_cast<T *> (wrap[0]); T *paramsB = reinterpret_cast<T *> (wrap[1]); T intermediate; // Executing first op, opA intermediate = _execute_2OE<T>(opTypeA, opNumA, x, y, paramsA); // Executing second op, opB intermediate = _execute_1OE<T>(opTypeB, opNumB, intermediate, paramsB); // just returning result now return intermediate; } template<typename T> __device__ void GRIDShaped<T>::transformCuda(int opTypeA, int opNumA, int opTypeB, int opNumB, T *dx, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, int *allocationPointer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo) { int tid = blockIdx.x * blockDim.x + threadIdx.x; __shared__ int xRank; __shared__ int yRank; __shared__ int resultRank; __shared__ Nd4jLong n; __shared__ Nd4jLong *xShape; __shared__ Nd4jLong *yShape; __shared__ Nd4jLong *zShape; __shared__ Nd4jLong *xStride; __shared__ Nd4jLong *yStride; __shared__ Nd4jLong *zStride; if (threadIdx.x == 0) { xRank = _rank(xShapeBuffer); yRank = _rank(yShapeBuffer); resultRank = _rank(resultShapeBuffer); n = shape::length(xShapeBuffer); xShape = _shapeOf(xShapeBuffer); yShape = _shapeOf(yShapeBuffer); if (dx != result) { zShape = _shapeOf(resultShapeBuffer); zStride = _stride(resultShapeBuffer); } xStride = _stride(xShapeBuffer); yStride = _stride(yShapeBuffer); } __syncthreads(); if (dx == result) { Nd4jLong xCoord[MAX_RANK]; Nd4jLong yCoord[MAX_RANK]; for (Nd4jLong i = tid; i < n; i += gridDim.x * blockDim.x) { _ind2subC(xRank, xShape, i, xCoord); _ind2subC(yRank, yShape, i, yCoord); auto xOffset = _getOffset(0, xShape, xStride, xCoord, xRank); auto yOffset = _getOffset(0, yShape, yStride, yCoord, yRank); result[xOffset] = _invertedOpExecutorA(opTypeA, opNumA, opTypeB, opNumB, dx[xOffset], y[yOffset], extraParams); //OpType::op(dx[xOffset], y[yOffset], extraParams); } } else { Nd4jLong xCoord[MAX_RANK]; Nd4jLong yCoord[MAX_RANK]; Nd4jLong resultCoord[MAX_RANK]; for (Nd4jLong i = tid; i < n; i += gridDim.x * blockDim.x) { _ind2subC(xRank, xShape, i, xCoord); _ind2subC(yRank, yShape, i, yCoord); _ind2subC(resultRank, zShape, i, resultCoord); auto xOffset = _getOffset(0, xShape, xStride, xCoord, 
xRank); auto yOffset = _getOffset(0, yShape, yStride, yCoord, yRank); auto resultOffset = _getOffset(0, zShape, zStride, resultCoord, resultRank); result[0] = _invertedOpExecutorA(opTypeA, opNumA, opTypeB, opNumB, dx[xOffset], y[yOffset], extraParams); //OpType::op(dx[xOffset], y[yOffset], extraParams); } } } template<typename T> template<typename OpType> __device__ void GRIDShaped<T>::transformCuda(T *dx, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, int *allocationPointer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo) { int tid = blockIdx.x * blockDim.x + threadIdx.x; __shared__ int xRank; __shared__ int yRank; __shared__ int resultRank; __shared__ Nd4jLong n; __shared__ Nd4jLong *xShape; __shared__ Nd4jLong *yShape; __shared__ Nd4jLong *zShape; __shared__ Nd4jLong *xStride; __shared__ Nd4jLong *yStride; __shared__ Nd4jLong *zStride; if (threadIdx.x == 0) { xRank = _rank(xShapeBuffer); yRank = _rank(yShapeBuffer); resultRank = _rank(resultShapeBuffer); n = shape::length(xShapeBuffer); xShape = _shapeOf(xShapeBuffer); yShape = _shapeOf(yShapeBuffer); if (dx != result) { zShape = _shapeOf(resultShapeBuffer); zStride = _stride(resultShapeBuffer); } xStride = _stride(xShapeBuffer); yStride = _stride(yShapeBuffer); } __syncthreads(); if (dx == result) { Nd4jLong xCoord[MAX_RANK]; Nd4jLong yCoord[MAX_RANK]; for (Nd4jLong i = tid; i < n; i += gridDim.x * blockDim.x) { _ind2subC(xRank, xShape, i, xCoord); _ind2subC(yRank, yShape, i, yCoord); auto xOffset = _getOffset(0, xShape, xStride, xCoord, xRank); auto yOffset = _getOffset(0, yShape, yStride, yCoord, yRank); result[xOffset] = OpType::op(dx[xOffset], y[yOffset], extraParams); } } else { Nd4jLong xCoord[MAX_RANK]; Nd4jLong yCoord[MAX_RANK]; Nd4jLong resultCoord[MAX_RANK]; for (Nd4jLong i = tid; i < n; i += gridDim.x * blockDim.x) { _ind2subC(xRank, xShape, i, xCoord); _ind2subC(yRank, yShape, i, yCoord); _ind2subC(resultRank, zShape, i, resultCoord); auto xOffset = _getOffset(0, xShape, xStride, xCoord, xRank); auto yOffset = _getOffset(0, yShape, yStride, yCoord, yRank); auto resultOffset = _getOffset(0, zShape, zStride, resultCoord, resultRank); result[resultOffset] = OpType::op(dx[xOffset], y[yOffset], extraParams); } } } template <> void GRIDShaped<float>::execMetaPredicateShaped(cudaStream_t * stream, Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, float *dx, Nd4jLong *xShapeInfo, float *dy, Nd4jLong *yShapeInfo, float *dz, Nd4jLong *zShapeInfo, float *extraA, float *extraB, float scalarA, float scalarB) { invertedMetaPairwiseShapedNumericFloat<<<128, 1024, 2048, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB); DEBUG_KERNEL(stream, opNumA); } template <> void GRIDShaped<float16>::execMetaPredicateShaped(cudaStream_t * stream, Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, float16 *dx, Nd4jLong *xShapeInfo, float16 *dy, Nd4jLong *yShapeInfo, float16 *dz, Nd4jLong *zShapeInfo, float16 *extraA, float16 *extraB, float16 scalarA, float16 scalarB) { invertedMetaPairwiseShapedNumericHalf<<<128, 1024, 2048, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB); DEBUG_KERNEL(stream, opNumB); } template <> void GRIDShaped<double>::execMetaPredicateShaped(cudaStream_t * stream, Nd4jPointer *extras, 
const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, double *dx, Nd4jLong *xShapeInfo, double *dy, Nd4jLong *yShapeInfo, double *dz, Nd4jLong *zShapeInfo, double *extraA, double *extraB, double scalarA, double scalarB) { invertedMetaPairwiseShapedNumericDouble<<<128, 1024, 2048, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB); DEBUG_KERNEL(stream, opNumA); } } }
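// Illustrative sketch (not from libnd4j): the core idea of the "inverted meta
// op" above is to fuse a pairwise op (opA) with a scalar op (opB) into one
// kernel so the intermediate result never touches global memory. The functor
// and kernel names below (AddOp, MulScalarOp, fusedMetaKernel) are hypothetical
// stand-ins for the library's PAIRWISE_TRANSFORM_OPS / SCALAR_OPS dispatch
// machinery.
#include <cuda_runtime.h>

struct AddOp {        // pairwise op: z = x + y
  __device__ static float op(float x, float y) { return x + y; }
};
struct MulScalarOp {  // scalar op: z = x * s
  __device__ static float op(float x, float s) { return x * s; }
};

template <typename OpA, typename OpB>
__global__ void fusedMetaKernel(const float* x, const float* y, float* z,
                                float scalar, int n) {
  const int stride = gridDim.x * blockDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
    const float intermediate = OpA::op(x[i], y[i]);  // first op (pairwise)
    z[i] = OpB::op(intermediate, scalar);            // second op (scalar)
  }
}

// Example launch: z = (x + y) * 0.5f in a single pass over the data.
// fusedMetaKernel<AddOp, MulScalarOp><<<128, 256>>>(d_x, d_y, d_z, 0.5f, n);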
#include "boolean3.cuh" // TODO: make this runtime configurable for quicker debug constexpr bool kVerbose = false; using namespace manifold; namespace { // These two functions (Interpolate and Intersect) are the only places where // floating-point operations take place in the whole Boolean function. These are // carefully designed to minimize rounding error and to eliminate it at edge // cases to ensure consistency. __host__ __device__ glm::vec2 Interpolate(glm::vec3 pL, glm::vec3 pR, float x) { float dxL = x - pL.x; float dxR = x - pR.x; if (dxL * dxR > 0) printf("Not in domain!\n"); bool useL = fabs(dxL) < fabs(dxR); float lambda = (useL ? dxL : dxR) / (pR.x - pL.x); if (!isfinite(lambda)) return glm::vec2(pL.y, pL.z); glm::vec2 yz; yz[0] = (useL ? pL.y : pR.y) + lambda * (pR.y - pL.y); yz[1] = (useL ? pL.z : pR.z) + lambda * (pR.z - pL.z); return yz; } __host__ __device__ glm::vec4 Intersect(const glm::vec3 &pL, const glm::vec3 &pR, const glm::vec3 &qL, const glm::vec3 &qR) { float dyL = qL.y - pL.y; float dyR = qR.y - pR.y; if (dyL * dyR > 0) printf("No intersection!\n"); bool useL = fabs(dyL) < fabs(dyR); float dx = pR.x - pL.x; float lambda = (useL ? dyL : dyR) / (dyL - dyR); if (!isfinite(lambda)) lambda = 0.0f; glm::vec4 xyzz; xyzz.x = (useL ? pL.x : pR.x) + lambda * dx; float pDy = pR.y - pL.y; float qDy = qR.y - qL.y; bool useP = fabs(pDy) < fabs(qDy); xyzz.y = (useL ? (useP ? pL.y : qL.y) : (useP ? pR.y : qR.y)) + lambda * (useP ? pDy : qDy); xyzz.z = (useL ? pL.z : pR.z) + lambda * (pR.z - pL.z); xyzz.w = (useL ? qL.z : qR.z) + lambda * (qR.z - qL.z); return xyzz; } struct CopyFaceEdges { // x can be either vert or edge (0 or 1). thrust::pair<int *, int *> pXq1; const Halfedge *halfedgesQ; __host__ __device__ void operator()(thrust::tuple<int, int, int> in) { int idx = 3 * thrust::get<0>(in); const int pX = thrust::get<1>(in); const int q2 = thrust::get<2>(in); for (const int i : {0, 1, 2}) { pXq1.first[idx + i] = pX; const int q1 = 3 * q2 + i; const Halfedge edge = halfedgesQ[q1]; pXq1.second[idx + i] = edge.IsForward() ? q1 : edge.pairedHalfedge; } } }; SparseIndices Filter11(const Manifold::Impl &inP, const Manifold::Impl &inQ, const SparseIndices &p1q2, const SparseIndices &p2q1) { SparseIndices p1q1(3 * p1q2.size() + 3 * p2q1.size()); thrust::for_each_n(zip(countAt(0), p1q2.beginD(0), p1q2.beginD(1)), p1q2.size(), CopyFaceEdges({p1q1.ptrDpq(), inQ.halfedge_.cptrD()})); p1q1.SwapPQ(); thrust::for_each_n(zip(countAt(p1q2.size()), p2q1.beginD(1), p2q1.beginD(0)), p2q1.size(), CopyFaceEdges({p1q1.ptrDpq(), inP.halfedge_.cptrD()})); p1q1.SwapPQ(); p1q1.Unique(); return p1q1; } __host__ __device__ bool Shadows(float p, float q, float dir) { return p == q ? dir < 0 : p < q; } __host__ __device__ thrust::pair<int, glm::vec2> Shadow01( const int p0, const int q1, const glm::vec3 *vertPosP, const glm::vec3 *vertPosQ, const Halfedge *halfedgeQ, const float expandP, const glm::vec3 *normalP, const bool reverse) { const int q1s = halfedgeQ[q1].startVert; const int q1e = halfedgeQ[q1].endVert; const float p0x = vertPosP[p0].x; const float q1sx = vertPosQ[q1s].x; const float q1ex = vertPosQ[q1e].x; int s01 = reverse ? 
Shadows(q1sx, p0x, expandP * normalP[q1s].x) - Shadows(q1ex, p0x, expandP * normalP[q1e].x) : Shadows(p0x, q1ex, expandP * normalP[p0].x) - Shadows(p0x, q1sx, expandP * normalP[p0].x); glm::vec2 yz01(0.0f / 0.0f); if (s01 != 0) { yz01 = Interpolate(vertPosQ[q1s], vertPosQ[q1e], vertPosP[p0].x); if (reverse) { glm::vec3 diff = vertPosQ[q1s] - vertPosP[p0]; const float start2 = glm::dot(diff, diff); diff = vertPosQ[q1e] - vertPosP[p0]; const float end2 = glm::dot(diff, diff); const float dir = start2 < end2 ? normalP[q1s].y : normalP[q1e].y; if (!Shadows(yz01[0], vertPosP[p0].y, expandP * dir)) s01 = 0; } else { if (!Shadows(vertPosP[p0].y, yz01[0], expandP * normalP[p0].y)) s01 = 0; } } return thrust::make_pair(s01, yz01); } __host__ __device__ int BinarySearch( const thrust::pair<const int *, const int *> keys, const int size, const thrust::pair<int, int> key) { if (size <= 0) return -1; int left = 0; int right = size - 1; int m; thrust::pair<int, int> keyM; while (1) { m = right - (right - left) / 2; keyM = thrust::make_pair(keys.first[m], keys.second[m]); if (left == right) break; if (keyM > key) right = m - 1; else left = m; } if (keyM == key) return m; else return -1; } struct Kernel11 { const glm::vec3 *vertPosP; const glm::vec3 *vertPosQ; const Halfedge *halfedgeP; const Halfedge *halfedgeQ; float expandP; const glm::vec3 *normalP; __host__ __device__ void operator()( thrust::tuple<glm::vec4 &, int &, int, int> inout) { glm::vec4 &xyzz11 = thrust::get<0>(inout); int &s11 = thrust::get<1>(inout); const int p1 = thrust::get<2>(inout); const int q1 = thrust::get<3>(inout); // For pRL[k], qRL[k], k==0 is the left and k==1 is the right. int k = 0; glm::vec3 pRL[2], qRL[2]; // Either the left or right must shadow, but not both. This ensures the // intersection is between the left and right. bool shadows; s11 = 0; const int p0[2] = {halfedgeP[p1].startVert, halfedgeP[p1].endVert}; for (int i : {0, 1}) { const auto syz01 = Shadow01(p0[i], q1, vertPosP, vertPosQ, halfedgeQ, expandP, normalP, false); const int s01 = syz01.first; const glm::vec2 yz01 = syz01.second; // If the value is NaN, then these do not overlap. if (isfinite(yz01[0])) { s11 += s01 * (i == 0 ? -1 : 1); if (k < 2 && (k == 0 || (s01 != 0) != shadows)) { shadows = s01 != 0; pRL[k] = vertPosP[p0[i]]; qRL[k] = glm::vec3(pRL[k].x, yz01); ++k; } } } const int q0[2] = {halfedgeQ[q1].startVert, halfedgeQ[q1].endVert}; for (int i : {0, 1}) { const auto syz10 = Shadow01(q0[i], p1, vertPosQ, vertPosP, halfedgeP, expandP, normalP, true); const int s10 = syz10.first; const glm::vec2 yz10 = syz10.second; // If the value is NaN, then these do not overlap. if (isfinite(yz10[0])) { s11 += s10 * (i == 0 ? -1 : 1); if (k < 2 && (k == 0 || (s10 != 0) != shadows)) { shadows = s10 != 0; qRL[k] = vertPosQ[q0[i]]; pRL[k] = glm::vec3(qRL[k].x, yz10); ++k; } } } if (s11 == 0) { // No intersection xyzz11 = glm::vec4(0.0f / 0.0f); } else { // Assert left and right were both found if (k != 2) { printf("k = %d\n", k); } xyzz11 = Intersect(pRL[0], pRL[1], qRL[0], qRL[1]); const int p1s = halfedgeP[p1].startVert; const int p1e = halfedgeP[p1].endVert; glm::vec3 diff = vertPosP[p1s] - glm::vec3(xyzz11); const float start2 = glm::dot(diff, diff); diff = vertPosP[p1e] - glm::vec3(xyzz11); const float end2 = glm::dot(diff, diff); const float dir = start2 < end2 ? 
normalP[p1s].z : normalP[p1e].z; if (!Shadows(xyzz11.z, xyzz11.w, expandP * dir)) s11 = 0; } } }; std::tuple<VecDH<int>, VecDH<glm::vec4>> Shadow11(SparseIndices &p1q1, const Manifold::Impl &inP, const Manifold::Impl &inQ, float expandP) { VecDH<int> s11(p1q1.size()); VecDH<glm::vec4> xyzz11(p1q1.size()); thrust::for_each_n( zip(xyzz11.beginD(), s11.beginD(), p1q1.beginD(0), p1q1.beginD(1)), p1q1.size(), Kernel11({inP.vertPos_.cptrD(), inQ.vertPos_.cptrD(), inP.halfedge_.cptrD(), inQ.halfedge_.cptrD(), expandP, inP.vertNormal_.cptrD()})); p1q1.KeepFinite(xyzz11, s11); return std::make_tuple(s11, xyzz11); }; struct Kernel02 { const glm::vec3 *vertPosP; const Halfedge *halfedgeQ; const glm::vec3 *vertPosQ; const bool forward; const float expandP; const glm::vec3 *vertNormalP; __host__ __device__ void operator()( thrust::tuple<int &, float &, int, int> inout) { int &s02 = thrust::get<0>(inout); float &z02 = thrust::get<1>(inout); const int p0 = thrust::get<2>(inout); const int q2 = thrust::get<3>(inout); // For yzzLR[k], k==0 is the left and k==1 is the right. int k = 0; glm::vec3 yzzRL[2]; // Either the left or right must shadow, but not both. This ensures the // intersection is between the left and right. bool shadows; int closestVert; float minMetric = 1.0f / 0.0f; s02 = 0; const glm::vec3 posP = vertPosP[p0]; for (const int i : {0, 1, 2}) { const int q1 = 3 * q2 + i; const Halfedge edge = halfedgeQ[q1]; const int q1F = edge.IsForward() ? q1 : edge.pairedHalfedge; if (!forward) { const int qVert = halfedgeQ[q1F].startVert; const glm::vec3 diff = posP - vertPosQ[qVert]; const float metric = glm::dot(diff, diff); if (metric < minMetric) { minMetric = metric; closestVert = qVert; } } const auto syz01 = Shadow01(p0, q1F, vertPosP, vertPosQ, halfedgeQ, expandP, vertNormalP, !forward); const int s01 = syz01.first; const glm::vec2 yz01 = syz01.second; // If the value is NaN, then these do not overlap. if (isfinite(yz01[0])) { s02 += s01 * (forward == edge.IsForward() ? -1 : 1); if (k < 2 && (k == 0 || (s01 != 0) != shadows)) { shadows = s01 != 0; yzzRL[k++] = glm::vec3(yz01[0], yz01[1], yz01[1]); } } } if (s02 == 0) { // No intersection z02 = 0.0f / 0.0f; } else { // Assert left and right were both found if (k != 2) { printf("k = %d\n", k); } glm::vec3 vertPos = vertPosP[p0]; z02 = Interpolate(yzzRL[0], yzzRL[1], vertPos.y)[1]; if (forward) { if (!Shadows(vertPos.z, z02, expandP * vertNormalP[p0].z)) s02 = 0; } else { if (!Shadows(z02, vertPos.z, expandP * vertNormalP[closestVert].z)) s02 = 0; } } } }; std::tuple<VecDH<int>, VecDH<float>> Shadow02(const Manifold::Impl &inP, const Manifold::Impl &inQ, SparseIndices &p0q2, bool forward, float expandP) { VecDH<int> s02(p0q2.size()); VecDH<float> z02(p0q2.size()); auto vertNormalP = forward ? 
inP.vertNormal_.cptrD() : inQ.vertNormal_.cptrD(); thrust::for_each_n( zip(s02.beginD(), z02.beginD(), p0q2.beginD(!forward), p0q2.beginD(forward)), p0q2.size(), Kernel02({inP.vertPos_.cptrD(), inQ.halfedge_.cptrD(), inQ.vertPos_.cptrD(), forward, expandP, vertNormalP})); p0q2.KeepFinite(z02, s02); return std::make_tuple(s02, z02); }; struct Kernel12 { const thrust::pair<const int *, const int *> p0q2; const int *s02; const float *z02; const int size02; const thrust::pair<const int *, const int *> p1q1; const int *s11; const glm::vec4 *xyzz11; const int size11; const Halfedge *halfedgesP; const Halfedge *halfedgesQ; const glm::vec3 *vertPosP; const bool forward; __host__ __device__ void operator()( thrust::tuple<int &, glm::vec3 &, int, int> inout) { int &x12 = thrust::get<0>(inout); glm::vec3 &v12 = thrust::get<1>(inout); const int p1 = thrust::get<2>(inout); const int q2 = thrust::get<3>(inout); // For xzyLR-[k], k==0 is the left and k==1 is the right. int k = 0; glm::vec3 xzyLR0[2]; glm::vec3 xzyLR1[2]; // Either the left or right must shadow, but not both. This ensures the // intersection is between the left and right. bool shadows; x12 = 0; const Halfedge edge = halfedgesP[p1]; for (int vert : {edge.startVert, edge.endVert}) { const auto key = forward ? thrust::make_pair(vert, q2) : thrust::make_pair(q2, vert); const int idx = BinarySearch(p0q2, size02, key); if (idx != -1) { const int s = s02[idx]; x12 += s * ((vert == edge.startVert) == forward ? 1 : -1); if (k < 2 && (k == 0 || (s != 0) != shadows)) { shadows = s != 0; xzyLR0[k] = vertPosP[vert]; thrust::swap(xzyLR0[k].y, xzyLR0[k].z); xzyLR1[k] = xzyLR0[k]; xzyLR1[k][1] = z02[idx]; k++; } } } for (const int i : {0, 1, 2}) { const int q1 = 3 * q2 + i; const Halfedge edge = halfedgesQ[q1]; const int q1F = edge.IsForward() ? q1 : edge.pairedHalfedge; const auto key = forward ? thrust::make_pair(p1, q1F) : thrust::make_pair(q1F, p1); const int idx = BinarySearch(p1q1, size11, key); if (idx != -1) { // s is implicitly zero for anything not found const int s = s11[idx]; x12 -= s * (edge.IsForward() ? 
1 : -1); if (k < 2 && (k == 0 || (s != 0) != shadows)) { shadows = s != 0; const glm::vec4 xyzz = xyzz11[idx]; xzyLR0[k][0] = xyzz.x; xzyLR0[k][1] = xyzz.z; xzyLR0[k][2] = xyzz.y; xzyLR1[k] = xzyLR0[k]; xzyLR1[k][1] = xyzz.w; if (!forward) thrust::swap(xzyLR0[k][1], xzyLR1[k][1]); k++; } } } if (x12 == 0) { // No intersection v12 = glm::vec3(0.0f / 0.0f); } else { // Assert left and right were both found if (k != 2) { printf("k = %d\n", k); } const glm::vec4 xzyy = Intersect(xzyLR0[0], xzyLR0[1], xzyLR1[0], xzyLR1[1]); v12.x = xzyy[0]; v12.y = xzyy[2]; v12.z = xzyy[1]; } } }; std::tuple<VecDH<int>, VecDH<glm::vec3>> Intersect12( const Manifold::Impl &inP, const Manifold::Impl &inQ, const VecDH<int> &s02, const SparseIndices &p0q2, const VecDH<int> &s11, const SparseIndices &p1q1, const VecDH<float> &z02, const VecDH<glm::vec4> &xyzz11, SparseIndices &p1q2, bool forward) { VecDH<int> x12(p1q2.size()); VecDH<glm::vec3> v12(p1q2.size()); thrust::for_each_n( zip(x12.beginD(), v12.beginD(), p1q2.beginD(!forward), p1q2.beginD(forward)), p1q2.size(), Kernel12({p0q2.ptrDpq(), s02.ptrD(), z02.cptrD(), p0q2.size(), p1q1.ptrDpq(), s11.ptrD(), xyzz11.cptrD(), p1q1.size(), inP.halfedge_.cptrD(), inQ.halfedge_.cptrD(), inP.vertPos_.cptrD(), forward})); p1q2.KeepFinite(v12, x12); return std::make_tuple(x12, v12); }; VecDH<int> Winding03(const Manifold::Impl &inP, SparseIndices &p0q2, VecDH<int> &s02, bool reverse) { // verts that are not shadowed (not in p0q2) have winding number zero. VecDH<int> w03(inP.NumVert(), 0); if (!thrust::is_sorted(p0q2.beginD(reverse), p0q2.endD(reverse))) thrust::sort_by_key(p0q2.beginD(reverse), p0q2.endD(reverse), s02.beginD()); VecDH<int> w03val(w03.size()); VecDH<int> w03vert(w03.size()); // sum known s02 values into w03 (winding number) auto endPair = thrust::reduce_by_key(p0q2.beginD(reverse), p0q2.endD(reverse), s02.beginD(), w03vert.beginD(), w03val.beginD()); thrust::scatter(w03val.beginD(), endPair.second, w03vert.beginD(), w03.beginD()); if (reverse) thrust::transform(w03.beginD(), w03.endD(), w03.beginD(), thrust::negate<int>()); return w03; }; } // namespace namespace manifold { Boolean3::Boolean3(const Manifold::Impl &inP, const Manifold::Impl &inQ, Manifold::OpType op) : inP_(inP), inQ_(inQ), expandP_(op == Manifold::OpType::ADD ? 
1.0 : -1.0) { // Symbolic perturbation: // Union -> expand inP // Difference, Intersection -> contract inP Timer filter; filter.Start(); if (inP.IsEmpty() || inQ.IsEmpty() || !inP.bBox_.DoesOverlap(inQ.bBox_)) { if (kVerbose) std::cout << "No overlap, early out" << std::endl; w03_.resize(inP.NumVert(), 0); w30_.resize(inQ.NumVert(), 0); return; } // Level 3 // Find edge-triangle overlaps (broad phase) p1q2_ = inQ_.EdgeCollisions(inP_); p1q2_.Sort(); if (kVerbose) std::cout << "p1q2 size = " << p1q2_.size() << std::endl; p2q1_ = inP_.EdgeCollisions(inQ_); p2q1_.SwapPQ(); p2q1_.Sort(); if (kVerbose) std::cout << "p2q1 size = " << p2q1_.size() << std::endl; // Level 2 // Find vertices that overlap faces in XY-projection SparseIndices p0q2 = inQ.VertexCollisionsZ(inP.vertPos_); p0q2.Sort(); if (kVerbose) std::cout << "p0q2 size = " << p0q2.size() << std::endl; SparseIndices p2q0 = inP.VertexCollisionsZ(inQ.vertPos_); p2q0.SwapPQ(); p2q0.Sort(); if (kVerbose) std::cout << "p2q0 size = " << p2q0.size() << std::endl; // Find involved edge pairs from Level 3 SparseIndices p1q1 = Filter11(inP_, inQ_, p1q2_, p2q1_); if (kVerbose) std::cout << "p1q1 size = " << p1q1.size() << std::endl; filter.Stop(); Timer levels; levels.Start(); // Level 2 // Build up XY-projection intersection of two edges, including the z-value for // each edge, keeping only those whose intersection exists. VecDH<int> s11; VecDH<glm::vec4> xyzz11; std::tie(s11, xyzz11) = Shadow11(p1q1, inP, inQ, expandP_); if (kVerbose) std::cout << "s11 size = " << s11.size() << std::endl; // Build up Z-projection of vertices onto triangles, keeping only those that // fall inside the triangle. VecDH<int> s02; VecDH<float> z02; std::tie(s02, z02) = Shadow02(inP, inQ, p0q2, true, expandP_); if (kVerbose) std::cout << "s02 size = " << s02.size() << std::endl; VecDH<int> s20; VecDH<float> z20; std::tie(s20, z20) = Shadow02(inQ, inP, p2q0, false, expandP_); if (kVerbose) std::cout << "s20 size = " << s20.size() << std::endl; // Level 3 // Build up the intersection of the edges and triangles, keeping only those // that intersect, and record the direction the edge is passing through the // triangle. std::tie(x12_, v12_) = Intersect12(inP, inQ, s02, p0q2, s11, p1q1, z02, xyzz11, p1q2_, true); if (kVerbose) std::cout << "x12 size = " << x12_.size() << std::endl; std::tie(x21_, v21_) = Intersect12(inQ, inP, s20, p2q0, s11, p1q1, z20, xyzz11, p2q1_, false); if (kVerbose) std::cout << "x21 size = " << x21_.size() << std::endl; // Sum up the winding numbers of all vertices. w03_ = Winding03(inP, p0q2, s02, false); w30_ = Winding03(inQ, p2q0, s20, true); levels.Stop(); if (kVerbose) { filter.Print("Filter"); levels.Print("Levels 1-3"); MemUsage(); } } } // namespace manifold
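// Illustrative sketch (not part of the library): a host-only copy of the
// Shadows() predicate used throughout the kernels above, demonstrating how the
// symbolic perturbation (the sign of expandP times a normal component) breaks
// ties when two coordinates compare exactly equal. The demo values are made up.
#include <cstdio>

static bool ShadowsRef(float p, float q, float dir) {
  // Same rule as the device predicate: on an exact tie the perturbation
  // direction decides; otherwise it is a plain comparison.
  return p == q ? dir < 0 : p < q;
}

static void ShadowsDemo() {
  const float expandP = 1.0f;  // ADD (union) expands P; the other ops use -1 (contract)
  std::printf("%d\n", ShadowsRef(0.0f, 1.0f, expandP * 0.3f));   // 1: p < q regardless of dir
  std::printf("%d\n", ShadowsRef(2.0f, 2.0f, expandP * 0.3f));   // 0: tie, positive perturbation
  std::printf("%d\n", ShadowsRef(2.0f, 2.0f, -expandP * 0.3f));  // 1: tie, negative perturbation
}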
using namespace std; #ifndef MAP_FILE #define MAP_FILE MAP_SHARED #endif long fsize(int fd) { struct stat stat; int res = fstat(fd, &stat); return stat.st_size; } int printll(char *s) { while (*s != '\n' && *s != ',' && *s != '\t') { putchar(*s++); } return 0; } long hash(char *str0, int len) { unsigned char *str = (unsigned char *)str0; unsigned long hash = 5381; int c; while ((c = *str++) && len--) hash = ((hash << 5) + hash) + c; /* hash * 33 + c */ return hash; } long HEAP_SIZE_CPU = 10737418260; // 1048576; // 536870912; // 268435456; // 2097152; 1610612739; // 4294967304; // void *mallocBase = calloc(HEAP_SIZE_CPU, 1); void *mallocAddr = mallocBase; void *waterMark = mallocBase; void *myMalloc(size_t bytes) { void *res = mallocAddr; mallocAddr = (void *)((char *)mallocAddr + bytes); if ((long)mallocAddr >= (long)mallocBase + HEAP_SIZE_CPU) fprintf(stderr, "CPU memory breached limit of HEAP_SIZE_CPU\n"); return res; } long HEAP_SIZE = 8589934608; // 4294967304; // this is for GPU int timeval_subtract(struct timeval *result, struct timeval *t2, struct timeval *t1) { long int diff = (t2->tv_usec + 1000000 * t2->tv_sec) - (t1->tv_usec + 1000000 * t1->tv_sec); result->tv_sec = diff / 1000000; result->tv_usec = diff % 1000000; return (diff < 0); } #define CUDA_CALL(f) { \ cudaError_t err = (f); \ if (err != cudaSuccess) { \ fprintf(stderr, "CUDA error occurred: %s (%s:%d)\n", \ cudaGetErrorString(err), __FILE__, __LINE__); \ exit(err); \ } \ } #define CUBLAS_CALL(f) { \ cublasStatus_t stat = (f); \ if (stat != CUBLAS_STATUS_SUCCESS) { \ fprintf(stderr, "cuBLAS error occurred: %d (%s:%d)\n", \ stat, __FILE__, __LINE__); \ exit(stat); \ } \ } void *gpuMallocBase; void *gpuMallocAddr; // Alignment boundary size, in bytes. constexpr int N = 4; // 16 void *myGpuMalloc(size_t bytes) { bytes = ((bytes + (1 << N) - 1) >> N) << N; void *res = gpuMallocAddr; gpuMallocAddr = (void *)((char *)gpuMallocAddr + bytes); if ((long)gpuMallocAddr >= (long)gpuMallocBase + HEAP_SIZE) fprintf(stderr, "GPU breached memory limit of HEAP_SIZE\n"); return res; } void myGpuFree(size_t bytes) { bytes = ((bytes + (1 << N) - 1) >> N) << N; gpuMallocAddr = (void *)((char *)gpuMallocAddr - bytes); cudaMemset((void*)gpuMallocAddr, 0, bytes); return; } template <typename T> __global__ void arrayUpdate(T *data, int index, T value) { data[index] = value; } __global__ void arrayFill(float* data, float value, int size) { int stride = gridDim.x * blockDim.x; int tid = threadIdx.x + blockIdx.x * blockDim.x; for (int i = tid; i < size; i += stride) data[i] = value; } __global__ void hardTanh(float* in, float* out, float min_val, float max_val, int size) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = gridDim.x * blockDim.x; for (int i = tid; i < size; i += stride) { out[i] = in[i] < min_val ? min_val : (in[i] > max_val ? 
max_val : in[i]); } } __global__ void hardTanh_grad(float* in_x, float* in_d, float* out_d, float min_val, float max_val, int size, bool inplace) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = gridDim.x * blockDim.x; for (int i = tid; i < size; i += stride) { if (inplace) { if (in_x[i] < min_val || in_x[i] > max_val) in_d[i] = 0; } else { if (in_x[i] >= min_val && in_x[i] <= max_val) in_d[i] += out_d[i]; } } } __global__ void nllLoss(float *x, int x_stride, float *y, int* target) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int offset = tid * x_stride + target[tid]; y[tid] = -1 * x[offset]; } __global__ void nllLoss_grad(int x_stride, float *yGrad, int* target, float* xGrad) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int offset = tid * x_stride + target[tid]; xGrad[offset] += -1 * yGrad[tid]; } // only for 4D tensor in and 3D tensor out (TODO: incorrect!) __global__ void sum_optimization(float* in, int inStr0, int inStr1, int inStr2, int inStr3, float* out, int outStr0, int outStr1, int outStr2, int dim, int nElementOut, int dimSize) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (int i = tid; i < nElementOut; i += stride) { int outOff0 = i / outStr0; int outOff1temp = i - outOff0 * outStr0; int outOff1 = outOff1temp / outStr1; int outOff2 = outOff1temp - outOff1 * outStr1; for (int j = 0; j < dimSize; j++) { int inOff; if (dim == 0) inOff = j * inStr0 + outOff0 * inStr1 + outOff1 * inStr2 + outOff2 * inStr3; if (dim == 1) inOff = outOff0 * inStr0 + j * inStr1 + outOff1 * inStr2 + outOff2 * inStr3; if (dim == 2) inOff = outOff0 * inStr0 + outOff1 * inStr1 + j * inStr2 + outOff2 * inStr3; if (dim == 3) inOff = outOff0 * inStr0 + outOff1 * inStr1 + outOff2 * inStr2 + j * inStr3; out[i] += in[inOff]; } } } // only for 4D tensor in and 3D tensor out __global__ void sum_grad(float* in, int inSize0, int inSize1, int inSize2, int inSize3, int nElement, float* out, int outStride0, int outStride1, int outStride2, int dim) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (int i = tid; i < nElement; i += stride) { int inOff2 = i / inSize3; int inDim3 = i - inOff2 * inSize3; int inOff1 = inOff2 / inSize2; int inDim2 = inOff2 - inOff1 * inSize2; int inDim0 = inOff1 / inSize1; int inDim1 = inOff1 - inDim0 * inSize1; int outOff = 0; if (dim == 0) outOff = inDim1 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2; if (dim == 1) outOff = inDim0 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2; if (dim == 2) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim3 * outStride2; if (dim == 3) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim2 * outStride2; in[i] += out[outOff]; } } //following - https://github.com/torch/cutorch/blob/master/lib/THC/THCTensorMath.cuh#L49 template <int Dims> static inline __device__ int compute(const int outputSizes[Dims], const int outputStrides[Dims], const int dimSize, const int concatDim, int linearIndex) { int offset = 0; #pragma unroll for (int i = Dims - 1; i >= 1; --i) { int curDimSize = i == concatDim? 
dimSize : outputSizes[i]; int nextDimIndex = linearIndex / curDimSize; int curDimIndex = linearIndex - curDimSize * nextDimIndex; int curDimOffset = curDimIndex * outputStrides[i]; offset += curDimOffset; linearIndex = nextDimIndex; } return offset + linearIndex * outputStrides[0]; } // TODO: Only for Dim of rank 4, and only for 2 inputs __global__ void concat2D_1D_greg(float* in1, int dimSize1, int nElement1, float* in2, int dimSize2, int nElement2, float* out, int concatDim, int outSize0, int outSize1, int outSize2, int outSize3, int outStride0, int outStride1, int outStride2, int outStride3) { int outSizes[] = {outSize0, outSize1, outSize2, outSize3}; int outStrides[] = {outStride0, outStride1, outStride2, outStride3}; int tid = blockIdx.x * blockDim.x + threadIdx.x; int nElement = blockIdx.y == 0 ? nElement1 : nElement2; if (tid >= nElement) return; float* data = blockIdx.y == 0 ? in1 : in2; int offset = blockIdx.y == 0 ? 0 : dimSize1; int dimSize = blockIdx.y == 0 ? dimSize1 : dimSize2; int dataOffset = offset * outStrides[concatDim]; int stride = gridDim.x * blockDim.x; for (; tid < nElement; tid += stride) { int elementOffset = compute<4>(outSizes, //0, outSize1, outSize2, outSize3, outStrides, //0, outStride1, outStride2, outStride3, dimSize, concatDim, tid); out[dataOffset + elementOffset] = data[tid]; } } // TODO: Only for Dim of rank 4, and only for 2 inputs, and only for concat at dim = 1 __global__ void concat2D_1D_greg_grad(float* in1, int dimSize1, int nElement1, float* in2, int dimSize2, int nElement2, float* out, int concatDim, int outSize0, int outSize1, int outSize2, int outSize3, int outStride0, int outStride1, int outStride2, int outStride3) { int outSizes[] = {outSize0, outSize1, outSize2, outSize3}; int outStrides[] = {outStride0, outStride1, outStride2, outStride3}; int tid = blockIdx.x * blockDim.x + threadIdx.x; int nElement = blockIdx.y == 0 ? nElement1 : nElement2; if (tid >= nElement) return; float* data = blockIdx.y == 0 ? in1 : in2; int offset = blockIdx.y == 0 ? 0 : dimSize1; int dimSize = blockIdx.y == 0 ? 
dimSize1 : dimSize2; int dataOffset = offset * outStride1; int stride = gridDim.x * blockDim.x; for (; tid < nElement; tid += stride) { int elementOffset = compute<4>(outSizes, //0, outSize1, outSize2, outSize3, outStrides, //0, outStride1, outStride2, outStride3, dimSize, concatDim, tid); data[tid] += out[dataOffset + elementOffset]; } } __global__ void repeat0(float* in, float* out, int outStride0, int outStride1, int outScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < outScalarCount; tid += stride) { int linearIndex = tid; int outIndex0 = linearIndex / outStride0; linearIndex = linearIndex - outIndex0 * outStride0; int outIndex1 = linearIndex / outStride1; int outIndex2 = linearIndex - outIndex1 * outStride1; int inIndex = outIndex2 + (outIndex0 + outIndex1) * outStride1; out[tid] = in[inIndex]; } } __global__ void shift0(float* in, float* out, int inDim0, int inStride0, int inStride1, int inScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < inScalarCount; tid += stride) { int linearIndex = tid; int inIndex0 = linearIndex / inStride0; linearIndex = linearIndex - inIndex0 * inStride0; int inIndex1 = linearIndex / inStride1; if (inIndex0 + inIndex1 >= inDim0) return; out[tid + inIndex1 * inStride0] = in[tid]; } } __global__ void adagrad_update_1D_1D(float* x, float* d, float* m, float clip, float lr, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) { if (d[tid] > clip) d[tid] = clip; if (d[tid] < -clip) d[tid] = -clip; m[tid] += d[tid] * d[tid]; x[tid] -= lr * d[tid] / sqrt(m[tid] + 0.00000001); d[tid] = 0; } } __global__ void momentum_update_1D_1D(float* x, float* d, float* m, float learning_rate, float momentum, float gradClip, bool nesterov, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) { float temp = d[tid]; if (temp > gradClip) temp = gradClip; if (temp < -gradClip) temp = -gradClip; m[tid] *= momentum; m[tid] += temp; if (nesterov) { temp += momentum * m[tid]; } else { temp = m[tid]; } x[tid] -= learning_rate * temp; d[tid] = 0; } } __global__ void addScalarInArrayInPlace(float* in, float* add, float scale, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in[tid] += add[0] * scale; } __global__ void addScalar(float* in, float* out, float add, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] + add; } __global__ void minusScalar(float* in, float* out, float minus, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] - minus; } __global__ void multScalar(float* in, float* out, float mult, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] * mult; } __global__ void divScalar(float* in, float* out, float div, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] / div; } __global__ void elementwise_1D_1D_mul(float* in1, float* in2, float* out, int 
size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] * in2[tid]; } __global__ void elementwise_1D_1D_mul_mutate(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] += in1[tid] * in2[tid]; } __global__ void elementwise_1D_1D_add(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] + in2[tid]; } __global__ void elementwise_1D_1D_minus(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] - in2[tid]; } __global__ void elementwise_1D_1D_div(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] / in2[tid]; } __global__ void elementwise_1D_1D_exp(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = exp(in[tid]); } __global__ void elementwise_1D_1D_log(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = log(in[tid]); } __global__ void elementwise_1D_1D_sqrt(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = sqrt(in[tid]); } __global__ void elementwise_1D_1D_square(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] * in[tid]; } __global__ void elementwise_1D_1D_exp_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] * out_x[tid]; } __global__ void elementwise_1D_1D_log_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] / in_x[tid]; } __global__ void elementwise_1D_1D_sqrt_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] / out_x[tid] / 2; } __global__ void elementwise_1D_1D_square_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] * 2 * in_x[tid]; } __global__ void clipAt(float* in, float bound, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) { if (in[tid] > bound) in[tid] = bound; 
if (in[tid] < -bound) in[tid] = -bound; } } __global__ void mask4D(float* in, int* mask, int xstrides0, int xstrides1, int xstrides2, int xstrides3, int scalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < scalarCount; tid += stride) { int linearIndex = tid; int xindex0 = linearIndex / xstrides0; linearIndex = linearIndex - xstrides0 * xindex0; int xindex1 = linearIndex / xstrides1; linearIndex = linearIndex - xstrides1 * xindex1; int xindex2 = linearIndex / xstrides2; int xindex3 = linearIndex - xstrides2 * xindex2; if (xindex3 >= mask[xindex0]) in[tid] = 0; } } __global__ void mul_sub(float* in1, float* in2, float* out, int in1ScalarCount, int in2ScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < in1ScalarCount; tid += stride) { out[tid] = in1[tid] * in2[tid % in2ScalarCount]; } } __global__ void mul_sub_grad(float* in1_x, float* in1_d, float* in2_x, float* in2_d, float* out, int in1ScalarCount, int in2ScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < in1ScalarCount; tid += stride) { int index = tid % in2ScalarCount; in1_d[tid] += out[tid] * in2_x[index]; in2_d[tid] = in1_x[tid] * out[tid]; // this is the temp array, need to be reduced! } } #define CUDNN_CALL(f) { \ cudnnStatus_t stat = (f); \ if (stat != CUDNN_STATUS_SUCCESS) { \ fprintf(stderr, "cuDNN error occurred: %d (%s:%d)\n", \ stat, __FILE__, __LINE__); \ exit(stat); \ } \ } void Snippet(char *); std::random_device rd{}; std::mt19937 gen{rd()}; std::normal_distribution<> d{0, 0.01}; int main(int argc, char *argv[]) { if (argc != 2) { printf("usage: query <filename>\n"); return 0; } Snippet(argv[1]); return 0; } /***************************************** Emitting C Generated Code *******************************************/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdbool.h> void Snippet(char* x0) { // Backend setup. cublasHandle_t cublasHandle; CUBLAS_CALL(cublasCreate(&cublasHandle)); CUDA_CALL(cudaMalloc(&gpuMallocBase, HEAP_SIZE)); CUDA_CALL(cudaMemset(gpuMallocBase, 0, HEAP_SIZE)); gpuMallocAddr = gpuMallocBase; cudnnHandle_t cudnnHandle; CUDNN_CALL(cudnnCreate(&cudnnHandle)); srand(42); struct timeval begin_0, end_0, diff_0; gettimeofday(&begin_0, NULL); float* x7 = (float*)myMalloc(14432 * sizeof(float));; for(int x9=0; x9 < 14432; x9++) { float x10 = (float)rand()/RAND_MAX; float x11 = x10 - 0.5f; float x12 = x11 * 0.23068394f; x7[x9] = x12; } // Tensor 'toGPU' invocation. float* x17 = (float*)myGpuMalloc(14432 * sizeof(float)); CUDA_CALL(cudaMemcpy(x17, x7, 14432 * sizeof(float), cudaMemcpyHostToDevice)); float* x19 = (float*)myGpuMalloc(14432 * sizeof(float)); float* x20 = (float*)myGpuMalloc(32 * sizeof(float)); arrayFill<<<28, 512>>>(x20, 1.0f, 32); float* x22 = (float*)myGpuMalloc(32 * sizeof(float)); float* x23 = (float*)myGpuMalloc(32 * sizeof(float)); float* x24 = (float*)myGpuMalloc(32 * sizeof(float)); float* x25 = (float*)myGpuMalloc(32 * sizeof(float)); float* x26 = (float*)myGpuMalloc(32 * sizeof(float)); float* x27 = (float*)myMalloc(236544 * sizeof(float));; for(int x29=0; x29 < 236544; x29++) { float x30 = (float)rand()/RAND_MAX; float x31 = x30 - 0.5f; float x32 = x31 * 0.05698029f; x27[x29] = x32; } // Tensor 'toGPU' invocation. 
float* x37 = (float*)myGpuMalloc(236544 * sizeof(float)); CUDA_CALL(cudaMemcpy(x37, x27, 236544 * sizeof(float), cudaMemcpyHostToDevice)); float* x39 = (float*)myGpuMalloc(236544 * sizeof(float)); float* x40 = (float*)myGpuMalloc(32 * sizeof(float)); arrayFill<<<28, 512>>>(x40, 1.0f, 32); float* x42 = (float*)myGpuMalloc(32 * sizeof(float)); float* x43 = (float*)myGpuMalloc(32 * sizeof(float)); float* x44 = (float*)myGpuMalloc(32 * sizeof(float)); float* x45 = (float*)myGpuMalloc(32 * sizeof(float)); float* x46 = (float*)myGpuMalloc(32 * sizeof(float)); printf("initial rnn input size is %d \n",672); float* x48 = (float*)myMalloc(3477504 * sizeof(float));; for(int x50=0; x50 < 3477504; x50++) { float x51 = (float)rand()/RAND_MAX; float x52 = x51 - 0.5f; float x53 = x52 * 0.01f; x48[x50] = x53; } // Tensor 'toGPU' invocation. float* x58 = (float*)myGpuMalloc(3477504 * sizeof(float)); CUDA_CALL(cudaMemcpy(x58, x48, 3477504 * sizeof(float), cudaMemcpyHostToDevice)); float* x60 = (float*)myGpuMalloc(3477504 * sizeof(float)); int32_t x61 = 0; int32_t x62 = x61; float* x63 = x58+x62; float* x64 = x60+x62; x61 += 688128; int32_t x66 = x61; float* x67 = x58+x66; float* x68 = x60+x66; x61 += 1048576; int32_t x70 = x61; float* x71 = x58+x70; float* x72 = x60+x70; x61 += 1024; int32_t x74 = x61; float* x75 = x58+x74; float* x76 = x60+x74; x61 += 1024; int32_t x78 = x61; float* x79 = x58+x78; float* x80 = x60+x78; x61 += 688128; int32_t x82 = x61; float* x83 = x58+x82; float* x84 = x60+x82; x61 += 1048576; int32_t x86 = x61; float* x87 = x58+x86; float* x88 = x60+x86; x61 += 1024; int32_t x90 = x61; float* x91 = x58+x90; float* x92 = x60+x90; x61 += 1024; float* x94 = (float*)myMalloc(4198400 * sizeof(float));; for(int x96=0; x96 < 4198400; x96++) { float x97 = (float)rand()/RAND_MAX; float x98 = x97 - 0.5f; float x99 = x98 * 0.01f; x94[x96] = x99; } // Tensor 'toGPU' invocation. float* x104 = (float*)myGpuMalloc(4198400 * sizeof(float)); CUDA_CALL(cudaMemcpy(x104, x94, 4198400 * sizeof(float), cudaMemcpyHostToDevice)); float* x106 = (float*)myGpuMalloc(4198400 * sizeof(float)); int32_t x107 = 0; int32_t x108 = x107; float* x109 = x104+x108; float* x110 = x106+x108; x107 += 1048576; int32_t x112 = x107; float* x113 = x104+x112; float* x114 = x106+x112; x107 += 1048576; int32_t x116 = x107; float* x117 = x104+x116; float* x118 = x106+x116; x107 += 1024; int32_t x120 = x107; float* x121 = x104+x120; float* x122 = x106+x120; x107 += 1024; int32_t x124 = x107; float* x125 = x104+x124; float* x126 = x106+x124; x107 += 1048576; int32_t x128 = x107; float* x129 = x104+x128; float* x130 = x106+x128; x107 += 1048576; int32_t x132 = x107; float* x133 = x104+x132; float* x134 = x106+x132; x107 += 1024; int32_t x136 = x107; float* x137 = x104+x136; float* x138 = x106+x136; x107 += 1024; float* x140 = (float*)myMalloc(4198400 * sizeof(float));; for(int x141=0; x141 < 4198400; x141++) { float x142 = (float)rand()/RAND_MAX; float x143 = x142 - 0.5f; float x144 = x143 * 0.01f; x140[x141] = x144; } // Tensor 'toGPU' invocation. 
float* x149 = (float*)myGpuMalloc(4198400 * sizeof(float)); CUDA_CALL(cudaMemcpy(x149, x140, 4198400 * sizeof(float), cudaMemcpyHostToDevice)); float* x151 = (float*)myGpuMalloc(4198400 * sizeof(float)); int32_t x152 = 0; int32_t x153 = x152; float* x154 = x149+x153; float* x155 = x151+x153; x152 += 1048576; int32_t x157 = x152; float* x158 = x149+x157; float* x159 = x151+x157; x152 += 1048576; int32_t x161 = x152; float* x162 = x149+x161; float* x163 = x151+x161; x152 += 1024; int32_t x165 = x152; float* x166 = x149+x165; float* x167 = x151+x165; x152 += 1024; int32_t x169 = x152; float* x170 = x149+x169; float* x171 = x151+x169; x152 += 1048576; int32_t x173 = x152; float* x174 = x149+x173; float* x175 = x151+x173; x152 += 1048576; int32_t x177 = x152; float* x178 = x149+x177; float* x179 = x151+x177; x152 += 1024; int32_t x181 = x152; float* x182 = x149+x181; float* x183 = x151+x181; x152 += 1024; float* x185 = (float*)myGpuMalloc(1024 * sizeof(float)); arrayFill<<<28, 512>>>(x185, 1.0f, 1024); float* x187 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x188 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x189 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x190 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x191 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x192 = (float*)myMalloc(29696 * sizeof(float));; for(int x194=0; x194 < 29696; x194++) { float x195 = (float)rand()/RAND_MAX; float x196 = x195 - 0.5f; float x197 = x196 * 0.03125f; x192[x194] = x197; } // Tensor 'toGPU' invocation. float* x202 = (float*)myGpuMalloc(29696 * sizeof(float)); CUDA_CALL(cudaMemcpy(x202, x192, 29696 * sizeof(float), cudaMemcpyHostToDevice)); float* x204 = (float*)myGpuMalloc(29696 * sizeof(float)); float* x205 = (float*)myGpuMalloc(14432 * sizeof(float)); float* x206 = (float*)myGpuMalloc(236544 * sizeof(float)); float* x207 = (float*)myGpuMalloc(32 * sizeof(float)); float* x208 = (float*)myGpuMalloc(32 * sizeof(float)); float* x209 = (float*)myGpuMalloc(32 * sizeof(float)); float* x210 = (float*)myGpuMalloc(32 * sizeof(float)); float* x211 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x212 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x213 = (float*)myGpuMalloc(29696 * sizeof(float)); float* x214 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x215 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x216 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x217 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x218 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x219 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x220 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x221 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x222 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x223 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x224 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x225 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x226 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x227 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x228 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x229 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x230 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x231 = (float*)myGpuMalloc(688128 * sizeof(float)); float* x232 = (float*)myGpuMalloc(688128 * sizeof(float)); float* x233 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x234 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x235 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x236 = 
(float*)myGpuMalloc(1024 * sizeof(float)); float* x237 = (float*)myGpuMalloc(1048576 * sizeof(float)); int32_t x238 = open("/scratch-ml00/wang603/deepspeechData/deepspeech_train.bin",0); int64_t x239 = fsize(x238); printf("file size is %ld\n",x239); char* x241 = (char*)mmap(0, x239, PROT_READ | PROT_WRITE, MAP_FILE | MAP_PRIVATE, x238, 0); int64_t x242 = (long)x241; int64_t x243 = x242; int64_t x244 = x243; int* x245 = (int32_t*) x244; int64_t x246 = (int64_t)4; x243 += x246; int32_t x248 = x245[0]; int64_t x249 = x243; int* x250 = (int32_t*) x249; x243 += x246; int32_t x252 = x250[0]; printf("data size is %d batches, %d batch size\n",200,x248); int* x255 = (int32_t*)myMalloc(200 * sizeof(int32_t));; int* x256 = (int32_t*)myMalloc(200 * sizeof(int32_t));; float** x257 = (float**)myMalloc(200 * sizeof(float*));; float** x258 = (float**)myMalloc(200 * sizeof(float*));; int** x259 = (int**)myMalloc(200 * sizeof(int*));; int** x260 = (int**)myMalloc(200 * sizeof(int*));; // load data by batchs int32_t x286 = 4 * x248; int64_t x287 = (int64_t)x286; for(int x263=0; x263 < 200; x263++) { int64_t x264 = x243; int* x265 = (int32_t*) x264; x243 += x246; int32_t x267 = x265[0]; x255[x263] = x267; int64_t x269 = x243; int* x270 = (int32_t*) x269; x243 += x246; int32_t x272 = x270[0]; x256[x263] = x272; int32_t x274 = x255[x263]; int32_t x276 = x256[x263]; int64_t x278 = x243; float* x279 = (float*) x278; int32_t x275 = x248 * x274; int32_t x277 = x275 * x276; int32_t x280 = 4 * x277; int64_t x281 = (int64_t)x280; x243 += x281; x257[x263] = x279; int64_t x284 = x243; float* x285 = (float*) x284; x243 += x287; x258[x263] = x285; int64_t x290 = x243; int* x291 = (int32_t*) x290; x243 += x287; x259[x263] = x291; int* x294 = x259[x263]; int* x295 = x259[x263]; int32_t x296 = accumulate(x294, x295 + x248, 0); int64_t x297 = x243; int* x298 = (int32_t*) x297; int32_t x299 = 4 * x296; int64_t x300 = (int64_t)x299; x243 += x300; x260[x263] = x298; } gettimeofday(&end_0, NULL); timeval_subtract(&diff_0, &end_0, &begin_0);; int64_t x307 = ((diff_0.tv_sec * 1000000L) + (diff_0.tv_usec)); float x308 = (float)x307; float x309 = x308 / 1000000.0f; printf("Data reading (all prepare time) in %lf sec\n",x309); double* x311 = (double*)myMalloc(1 * sizeof(double));; double* x312 = (double*)myMalloc(1 * sizeof(double));; int64_t x313 = (long)mallocAddr; int64_t x314 = (long)gpuMallocAddr; // training loop starts here int32_t x358 = x248 * 32; int32_t x451 = 2048 / 2; int32_t x455 = x248 * x451; int32_t x452 = 2 * x451; int32_t x453 = x248 * x452; int32_t x655 = x248 * 20; int32_t x253 = x248 * 200; double x660 = (double)x253; int64_t x683 = (int64_t)x253; float x690 = (float)x253; for(int x317=0; x317 < 1; x317++) { struct timeval begin_1, end_1, diff_1; int32_t x319 = 0; int32_t x320 = x319; int32_t x321 = x320; float x322 = 0.0f; float x323 = x322; float x324 = x323; int32_t x325 = x317 + 1; printf("Start training epoch %d\n",x325); gettimeofday(&begin_1, NULL); for(int x328=0; x328 < 200; x328++) { int32_t x329 = x256[x328]; int32_t x330 = x255[x328]; float* x331 = x257[x328]; float* x334 = x258[x328]; int* x335 = x260[x328]; int* x336 = x259[x328]; x321 += x248; // Tensor 'toGPU' invocation. 
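// Per-batch pipeline: copy this batch's input features to the GPU, run the forward pass
// (two conv + batchnorm + hardtanh blocks, three bidirectional tanh RNN layers each
// followed by a sum over directions, a per-activation batchnorm, a linear projection to
// 29 classes, softmax, CTC loss), then backpropagate and apply the momentum updates at
// the end of the batch.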
int32_t x332 = x330 * x329; int32_t x333 = x248 * x332; float* x339 = (float*)myGpuMalloc(x333 * sizeof(float)); CUDA_CALL(cudaMemcpy(x339, x331, x333 * sizeof(float), cudaMemcpyHostToDevice)); float* x341 = (float*)myGpuMalloc(2 * sizeof(float)); float* x342 = (float*)myGpuMalloc(1 * sizeof(float)); float* x343 = (float*)myGpuMalloc(1 * sizeof(float)); // allocate memory to save the final loss in CPU Tensor float* x345 = (float*)myGpuMalloc(1 * sizeof(float)); int32_t x352 = x329 - 11; int32_t x353 = x352 / 2; int32_t x354 = x353 + 1; int32_t x349 = x330 - 41; int32_t x350 = x349 / 2; int32_t x351 = x350 + 1; int32_t x359 = x358 * x351; int32_t x360 = x359 * x354; float* x361 = (float*)myGpuMalloc(x360 * sizeof(float)); float* x362 = (float*)myMalloc(1 * sizeof(float));; x362[0] = 0.0f; float* x364 = (float*)myMalloc(1 * sizeof(float));; x364[0] = 1.0f; cudnnTensorDescriptor_t in_desc_0; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc_0)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc_0, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, x248, 1, x330, x329)); cudnnFilterDescriptor_t filt_desc_0; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc_0)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc_0, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 32, 1, 41, 11)); cudnnTensorDescriptor_t out_desc_0; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc_0)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc_0, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, x248, 32, x351, x354)); cudnnConvolutionDescriptor_t conv_desc_0; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc_0)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc_0, 0, 0, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc_0, CUDNN_TENSOR_OP_MATH));; // Algorithm. { cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc_0, filt_desc_0, conv_desc_0, out_desc_0, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc_0, filt_desc_0, conv_desc_0, out_desc_0, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
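// The algorithm chosen and the workspace sized/allocated above feed directly into the
// forward call below; this select-algorithm / query-workspace / allocate / execute
// sequence is repeated for every cuDNN convolution in this file.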
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x364, in_desc_0, x339, filt_desc_0, x17, conv_desc_0, algo, ws_data, ws_size, x362, out_desc_0, x361)); }; float* x368 = (float*)myGpuMalloc(x360 * sizeof(float)); int32_t x355 = x351 * x354; int32_t x356 = 32 * x355; int32_t x357 = x248 * x356; float* x369 = (float*)myGpuMalloc(x357 * sizeof(float)); float* x370 = (float*)myGpuMalloc(32 * sizeof(float)); float* x371 = (float*)myGpuMalloc(32 * sizeof(float)); float* x372 = (float*)myMalloc(1 * sizeof(float));; x372[0] = 0.0f; float* x374 = (float*)myMalloc(1 * sizeof(float));; x374[0] = 1.0f; cudnnTensorDescriptor_t in_desc_1; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc_1)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc_1, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, x248, 32, x351, x354)); cudnnTensorDescriptor_t out_desc_1; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc_1)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc_1, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, x248, 32, x351, x354)); cudnnTensorDescriptor_t sbmv_desc_1; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc_1)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc_1, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 32, 1, 1)); ; CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x374, x372, in_desc_1, x361, out_desc_1, x369, sbmv_desc_1, x20, x23, 0.1, x25, x26, 1.0E-5, x370, x371)); ; float* x378 = (float*)myGpuMalloc(x360 * sizeof(float)); hardTanh<<<28, 512>>>(x369, x369, 0.0, 20.0, true); int32_t x386 = x354 - 11; int32_t x387 = x386 / 1; int32_t x388 = x387 + 1; int32_t x383 = x351 - 21; int32_t x384 = x383 / 2; int32_t x385 = x384 + 1; int32_t x392 = x358 * x385; int32_t x393 = x392 * x388; float* x394 = (float*)myGpuMalloc(x393 * sizeof(float)); float* x395 = (float*)myMalloc(1 * sizeof(float));; x395[0] = 0.0f; float* x397 = (float*)myMalloc(1 * sizeof(float));; x397[0] = 1.0f; cudnnTensorDescriptor_t in_desc_2; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc_2)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc_2, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, x248, 32, x351, x354)); cudnnFilterDescriptor_t filt_desc_2; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc_2)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc_2, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 32, 32, 21, 11)); cudnnTensorDescriptor_t out_desc_2; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc_2)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc_2, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, x248, 32, x385, x388)); cudnnConvolutionDescriptor_t conv_desc_2; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc_2)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc_2, 0, 0, 2, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc_2, CUDNN_TENSOR_OP_MATH));; // Algorithm. { cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc_2, filt_desc_2, conv_desc_2, out_desc_2, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc_2, filt_desc_2, conv_desc_2, out_desc_2, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x397, in_desc_2, x369, filt_desc_2, x37, conv_desc_2, algo, ws_data, ws_size, x395, out_desc_2, x394)); }; float* x401 = (float*)myGpuMalloc(x393 * sizeof(float)); int32_t x389 = x385 * x388; int32_t x390 = 32 * x389; int32_t x391 = x248 * x390; float* x402 = (float*)myGpuMalloc(x391 * sizeof(float)); float* x403 = (float*)myGpuMalloc(32 * sizeof(float)); float* x404 = (float*)myGpuMalloc(32 * sizeof(float)); float* x405 = (float*)myMalloc(1 * sizeof(float));; x405[0] = 0.0f; float* x407 = (float*)myMalloc(1 * sizeof(float));; x407[0] = 1.0f; cudnnTensorDescriptor_t in_desc_3; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc_3)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc_3, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, x248, 32, x385, x388)); cudnnTensorDescriptor_t out_desc_3; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc_3)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc_3, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, x248, 32, x385, x388)); cudnnTensorDescriptor_t sbmv_desc_3; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc_3)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc_3, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 32, 1, 1)); ; CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x407, x405, in_desc_3, x394, out_desc_3, x402, sbmv_desc_3, x40, x43, 0.1, x45, x46, 1.0E-5, x403, x404)); ; float* x411 = (float*)myGpuMalloc(x393 * sizeof(float)); hardTanh<<<28, 512>>>(x402, x402, 0.0, 20.0, true); // after conv ops int32_t x414 = 32 * x385; int32_t x415 = x414 * x388; int32_t x416 = x248 * x415; float* x417 = (float*)myGpuMalloc(x416 * sizeof(float)); int* x420 = (int32_t*)myMalloc(4 * sizeof(int32_t));; int32_t x418 = x248 * x414; x420[2] = x418; x420[0] = x414; x420[1] = 1; x420[3] = 1; float* x425 = (float*)myMalloc(1 * sizeof(float));; x425[0] = 1.0f; float* x427 = (float*)myMalloc(0 * sizeof(float));; x427[0] = 0.0f; int32_t x429 = x420[0]; int32_t x430 = x420[1]; int32_t x431 = x420[2]; int32_t x432 = x420[3]; cudnnTensorDescriptor_t in_desc_4; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc_4)); CUDNN_CALL(cudnnSetTensor4dDescriptorEx( in_desc_4, CUDNN_DATA_FLOAT, x248, x414, x388, 1, x415, x388, 1, 1)); cudnnTensorDescriptor_t out_desc_4; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc_4)); CUDNN_CALL(cudnnSetTensor4dDescriptorEx( out_desc_4, CUDNN_DATA_FLOAT, x248, x414, x388, 1, x429, x430, x431, x432)); ; CUDNN_CALL(cudnnTransformTensor( cudnnHandle, x425, in_desc_4, x402, x427, out_desc_4, x417)); ; int32_t x435 = x388 * x248; int32_t x436 = x435 * x414; float* x437 = (float*)myGpuMalloc(x436 * sizeof(float)); // after resize and permute float* x439 = (float*)NULL; float* x440 = (float*)NULL; float* x441 = (float*)NULL; int32_t x444 = x435 * 2048; float* x445 = (float*)myGpuMalloc(x444 * sizeof(float)); float* x446 = (float*)NULL; int32_t x447 = 0; size_t dropoutStateSize_5; CUDNN_CALL(cudnnDropoutGetStatesSize(cudnnHandle, &dropoutStateSize_5)); void* dropoutStates_5 = NULL; cudnnDropoutDescriptor_t dropout_desc_5; CUDNN_CALL(cudnnCreateDropoutDescriptor(&dropout_desc_5)); CUDNN_CALL(cudnnSetDropoutDescriptor( dropout_desc_5, cudnnHandle, 0.0, dropoutStates_5, dropoutStateSize_5, time(NULL))); cudnnRNNDescriptor_t rnn_desc_5; CUDNN_CALL(cudnnCreateRNNDescriptor(&rnn_desc_5)); CUDNN_CALL(cudnnSetRNNDescriptor( cudnnHandle, rnn_desc_5, /*hiddenSize*/ 1024, /*numLayers*/ 1, dropout_desc_5, CUDNN_LINEAR_INPUT, CUDNN_BIDIRECTIONAL, CUDNN_RNN_TANH, CUDNN_RNN_ALGO_STANDARD, CUDNN_DATA_FLOAT)); 
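// rnn_desc_5 describes a single-layer bidirectional vanilla (tanh) RNN with hidden size
// 1024; its per-time-step output is 2*1024 = 2048 values, later reduced back to 1024 by
// summing the two directions (sum_optimization).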
CUDNN_CALL(cudnnSetRNNMatrixMathType(rnn_desc_5, CUDNN_TENSOR_OP_MATH)); int32_t seqLength_5 = x388; int32_t batchSize_5 = x248; int32_t inputSize_5 = x414; cudnnTensorDescriptor_t x_descs_5[seqLength_5]; cudnnTensorDescriptor_t x_desc_5; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc_5)); int x_dims_5[] = {batchSize_5, inputSize_5, 1}; int x_strides_5[] = {x_dims_5[1] * x_dims_5[2], x_dims_5[2], 1}; CUDNN_CALL(cudnnSetTensorNdDescriptor( x_desc_5, CUDNN_DATA_FLOAT, /*nbDims*/ 3, x_dims_5, x_strides_5)); for (int i = 0; i < seqLength_5; i++) { x_descs_5[i] = x_desc_5; } cudnnTensorDescriptor_t hx_desc_5; CUDNN_CALL(cudnnCreateTensorDescriptor(&hx_desc_5)); int hx_dims_5[] = {2, batchSize_5, 1024}; int hx_strides_5[] = {hx_dims_5[1] * hx_dims_5[2], hx_dims_5[2], 1}; CUDNN_CALL(cudnnSetTensorNdDescriptor( hx_desc_5, CUDNN_DATA_FLOAT, /*nbDims*/ 3, hx_dims_5, hx_strides_5)); size_t paramsSize_5; CUDNN_CALL(cudnnGetRNNParamsSize( cudnnHandle, rnn_desc_5, x_descs_5[0], &paramsSize_5, CUDNN_DATA_FLOAT)); #ifdef DEBUG assert(paramsSize_5 / sizeof(float) == 3477504 && "Expected parameter size mismatch"); #endif cudnnFilterDescriptor_t w_desc_5; CUDNN_CALL(cudnnCreateFilterDescriptor(&w_desc_5)); int w_dims_5[] = {int(paramsSize_5 / sizeof(float)), 1, 1}; CUDNN_CALL(cudnnSetFilterNdDescriptor( w_desc_5, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, /*nbDims*/ 3, w_dims_5)); cudnnTensorDescriptor_t y_descs_5[seqLength_5]; cudnnTensorDescriptor_t y_desc_5; CUDNN_CALL(cudnnCreateTensorDescriptor(&y_desc_5)); int y_dims_5[] = {batchSize_5, 2048, 1}; int y_strides_5[] = {y_dims_5[1] * y_dims_5[2], y_dims_5[2], 1}; CUDNN_CALL(cudnnSetTensorNdDescriptor( y_desc_5, CUDNN_DATA_FLOAT, /*nbDims*/ 3, y_dims_5, y_strides_5)); for (int i = 0; i < seqLength_5; i++) { y_descs_5[i] = y_desc_5; } size_t workspaceSize_5; CUDNN_CALL(cudnnGetRNNWorkspaceSize( cudnnHandle, rnn_desc_5, seqLength_5, x_descs_5, &workspaceSize_5)); void* workspace_5 = myGpuMalloc(workspaceSize_5); ; {// Reserve space used by `ForwardTraining` function. 
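// The reserve space allocated in this block must outlive the forward call: the same
// buffer (saved in x446/x447) is handed back to cudnnRNNBackwardData/Weights during
// backpropagation.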
size_t reserveSize; CUDNN_CALL(cudnnGetRNNTrainingReserveSize( cudnnHandle, rnn_desc_5, seqLength_5, x_descs_5, &reserveSize)); void* reserveSpace = myGpuMalloc(reserveSize); x446 = (float*)reserveSpace; x447 = (int)reserveSize; CUDNN_CALL(cudnnRNNForwardTraining( cudnnHandle, rnn_desc_5, seqLength_5, x_descs_5, x417, hx_desc_5,x439, hx_desc_5,x440, w_desc_5, x58, y_descs_5, x445, hx_desc_5, x441, hx_desc_5, NULL, workspace_5, workspaceSize_5, reserveSpace, reserveSize)); }; float* x450 = (float*)myGpuMalloc(x444 * sizeof(float)); int32_t x456 = x388 * x455; float* x457 = (float*)myGpuMalloc(x456 * sizeof(float)); // optimization for dimension sum if size is small int32_t x459 = x435 * x451; sum_optimization<<<28, 512>>>(x445, x453, x452, x451, 1, x457, x455, x451, 1, 2, x459, 2); ; float* x461 = (float*)myGpuMalloc(x459 * sizeof(float)); float* x462 = (float*)NULL; float* x463 = (float*)NULL; float* x464 = (float*)NULL; float* x465 = (float*)myGpuMalloc(x444 * sizeof(float)); float* x466 = (float*)NULL; int32_t x467 = 0; size_t dropoutStateSize_6; CUDNN_CALL(cudnnDropoutGetStatesSize(cudnnHandle, &dropoutStateSize_6)); void* dropoutStates_6 = NULL; cudnnDropoutDescriptor_t dropout_desc_6; CUDNN_CALL(cudnnCreateDropoutDescriptor(&dropout_desc_6)); CUDNN_CALL(cudnnSetDropoutDescriptor( dropout_desc_6, cudnnHandle, 0.0, dropoutStates_6, dropoutStateSize_6, time(NULL))); cudnnRNNDescriptor_t rnn_desc_6; CUDNN_CALL(cudnnCreateRNNDescriptor(&rnn_desc_6)); CUDNN_CALL(cudnnSetRNNDescriptor( cudnnHandle, rnn_desc_6, /*hiddenSize*/ 1024, /*numLayers*/ 1, dropout_desc_6, CUDNN_LINEAR_INPUT, CUDNN_BIDIRECTIONAL, CUDNN_RNN_TANH, CUDNN_RNN_ALGO_STANDARD, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetRNNMatrixMathType(rnn_desc_6, CUDNN_TENSOR_OP_MATH)); int32_t seqLength_6 = x388; int32_t batchSize_6 = x248; int32_t inputSize_6 = x451; cudnnTensorDescriptor_t x_descs_6[seqLength_6]; cudnnTensorDescriptor_t x_desc_6; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc_6)); int x_dims_6[] = {batchSize_6, inputSize_6, 1}; int x_strides_6[] = {x_dims_6[1] * x_dims_6[2], x_dims_6[2], 1}; CUDNN_CALL(cudnnSetTensorNdDescriptor( x_desc_6, CUDNN_DATA_FLOAT, /*nbDims*/ 3, x_dims_6, x_strides_6)); for (int i = 0; i < seqLength_6; i++) { x_descs_6[i] = x_desc_6; } cudnnTensorDescriptor_t hx_desc_6; CUDNN_CALL(cudnnCreateTensorDescriptor(&hx_desc_6)); int hx_dims_6[] = {2, batchSize_6, 1024}; int hx_strides_6[] = {hx_dims_6[1] * hx_dims_6[2], hx_dims_6[2], 1}; CUDNN_CALL(cudnnSetTensorNdDescriptor( hx_desc_6, CUDNN_DATA_FLOAT, /*nbDims*/ 3, hx_dims_6, hx_strides_6)); size_t paramsSize_6; CUDNN_CALL(cudnnGetRNNParamsSize( cudnnHandle, rnn_desc_6, x_descs_6[0], &paramsSize_6, CUDNN_DATA_FLOAT)); #ifdef DEBUG assert(paramsSize_6 / sizeof(float) == 4198400 && "Expected parameter size mismatch"); #endif cudnnFilterDescriptor_t w_desc_6; CUDNN_CALL(cudnnCreateFilterDescriptor(&w_desc_6)); int w_dims_6[] = {int(paramsSize_6 / sizeof(float)), 1, 1}; CUDNN_CALL(cudnnSetFilterNdDescriptor( w_desc_6, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, /*nbDims*/ 3, w_dims_6)); cudnnTensorDescriptor_t y_descs_6[seqLength_6]; cudnnTensorDescriptor_t y_desc_6; CUDNN_CALL(cudnnCreateTensorDescriptor(&y_desc_6)); int y_dims_6[] = {batchSize_6, 2048, 1}; int y_strides_6[] = {y_dims_6[1] * y_dims_6[2], y_dims_6[2], 1}; CUDNN_CALL(cudnnSetTensorNdDescriptor( y_desc_6, CUDNN_DATA_FLOAT, /*nbDims*/ 3, y_dims_6, y_strides_6)); for (int i = 0; i < seqLength_6; i++) { y_descs_6[i] = y_desc_6; } size_t workspaceSize_6; CUDNN_CALL(cudnnGetRNNWorkspaceSize( 
cudnnHandle, rnn_desc_6, seqLength_6, x_descs_6, &workspaceSize_6)); void* workspace_6 = myGpuMalloc(workspaceSize_6); ; {// Reserve space used by `ForwardTraining` function. size_t reserveSize; CUDNN_CALL(cudnnGetRNNTrainingReserveSize( cudnnHandle, rnn_desc_6, seqLength_6, x_descs_6, &reserveSize)); void* reserveSpace = myGpuMalloc(reserveSize); x466 = (float*)reserveSpace; x467 = (int)reserveSize; CUDNN_CALL(cudnnRNNForwardTraining( cudnnHandle, rnn_desc_6, seqLength_6, x_descs_6, x457, hx_desc_6,x462, hx_desc_6,x463, w_desc_6, x104, y_descs_6, x465, hx_desc_6, x464, hx_desc_6, NULL, workspace_6, workspaceSize_6, reserveSpace, reserveSize)); }; float* x470 = (float*)myGpuMalloc(x444 * sizeof(float)); float* x471 = (float*)myGpuMalloc(x456 * sizeof(float)); // optimization for dimension sum if size is small sum_optimization<<<28, 512>>>(x465, x453, x452, x451, 1, x471, x455, x451, 1, 2, x459, 2); ; float* x474 = (float*)myGpuMalloc(x459 * sizeof(float)); float* x475 = (float*)NULL; float* x476 = (float*)NULL; float* x477 = (float*)NULL; float* x478 = (float*)myGpuMalloc(x444 * sizeof(float)); float* x479 = (float*)NULL; int32_t x480 = 0; size_t dropoutStateSize_7; CUDNN_CALL(cudnnDropoutGetStatesSize(cudnnHandle, &dropoutStateSize_7)); void* dropoutStates_7 = NULL; cudnnDropoutDescriptor_t dropout_desc_7; CUDNN_CALL(cudnnCreateDropoutDescriptor(&dropout_desc_7)); CUDNN_CALL(cudnnSetDropoutDescriptor( dropout_desc_7, cudnnHandle, 0.0, dropoutStates_7, dropoutStateSize_7, time(NULL))); cudnnRNNDescriptor_t rnn_desc_7; CUDNN_CALL(cudnnCreateRNNDescriptor(&rnn_desc_7)); CUDNN_CALL(cudnnSetRNNDescriptor( cudnnHandle, rnn_desc_7, /*hiddenSize*/ 1024, /*numLayers*/ 1, dropout_desc_7, CUDNN_LINEAR_INPUT, CUDNN_BIDIRECTIONAL, CUDNN_RNN_TANH, CUDNN_RNN_ALGO_STANDARD, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetRNNMatrixMathType(rnn_desc_7, CUDNN_TENSOR_OP_MATH)); int32_t seqLength_7 = x388; int32_t batchSize_7 = x248; int32_t inputSize_7 = x451; cudnnTensorDescriptor_t x_descs_7[seqLength_7]; cudnnTensorDescriptor_t x_desc_7; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc_7)); int x_dims_7[] = {batchSize_7, inputSize_7, 1}; int x_strides_7[] = {x_dims_7[1] * x_dims_7[2], x_dims_7[2], 1}; CUDNN_CALL(cudnnSetTensorNdDescriptor( x_desc_7, CUDNN_DATA_FLOAT, /*nbDims*/ 3, x_dims_7, x_strides_7)); for (int i = 0; i < seqLength_7; i++) { x_descs_7[i] = x_desc_7; } cudnnTensorDescriptor_t hx_desc_7; CUDNN_CALL(cudnnCreateTensorDescriptor(&hx_desc_7)); int hx_dims_7[] = {2, batchSize_7, 1024}; int hx_strides_7[] = {hx_dims_7[1] * hx_dims_7[2], hx_dims_7[2], 1}; CUDNN_CALL(cudnnSetTensorNdDescriptor( hx_desc_7, CUDNN_DATA_FLOAT, /*nbDims*/ 3, hx_dims_7, hx_strides_7)); size_t paramsSize_7; CUDNN_CALL(cudnnGetRNNParamsSize( cudnnHandle, rnn_desc_7, x_descs_7[0], &paramsSize_7, CUDNN_DATA_FLOAT)); #ifdef DEBUG assert(paramsSize_7 / sizeof(float) == 4198400 && "Expected parameter size mismatch"); #endif cudnnFilterDescriptor_t w_desc_7; CUDNN_CALL(cudnnCreateFilterDescriptor(&w_desc_7)); int w_dims_7[] = {int(paramsSize_7 / sizeof(float)), 1, 1}; CUDNN_CALL(cudnnSetFilterNdDescriptor( w_desc_7, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, /*nbDims*/ 3, w_dims_7)); cudnnTensorDescriptor_t y_descs_7[seqLength_7]; cudnnTensorDescriptor_t y_desc_7; CUDNN_CALL(cudnnCreateTensorDescriptor(&y_desc_7)); int y_dims_7[] = {batchSize_7, 2048, 1}; int y_strides_7[] = {y_dims_7[1] * y_dims_7[2], y_dims_7[2], 1}; CUDNN_CALL(cudnnSetTensorNdDescriptor( y_desc_7, CUDNN_DATA_FLOAT, /*nbDims*/ 3, y_dims_7, y_strides_7)); for (int i = 0; i < 
seqLength_7; i++) { y_descs_7[i] = y_desc_7; } size_t workspaceSize_7; CUDNN_CALL(cudnnGetRNNWorkspaceSize( cudnnHandle, rnn_desc_7, seqLength_7, x_descs_7, &workspaceSize_7)); void* workspace_7 = myGpuMalloc(workspaceSize_7); ; {// Reserve space used by `ForwardTraining` function. size_t reserveSize; CUDNN_CALL(cudnnGetRNNTrainingReserveSize( cudnnHandle, rnn_desc_7, seqLength_7, x_descs_7, &reserveSize)); void* reserveSpace = myGpuMalloc(reserveSize); x479 = (float*)reserveSpace; x480 = (int)reserveSize; CUDNN_CALL(cudnnRNNForwardTraining( cudnnHandle, rnn_desc_7, seqLength_7, x_descs_7, x471, hx_desc_7,x475, hx_desc_7,x476, w_desc_7, x149, y_descs_7, x478, hx_desc_7, x477, hx_desc_7, NULL, workspace_7, workspaceSize_7, reserveSpace, reserveSize)); }; float* x483 = (float*)myGpuMalloc(x444 * sizeof(float)); float* x484 = (float*)myGpuMalloc(x456 * sizeof(float)); // optimization for dimension sum if size is small sum_optimization<<<28, 512>>>(x478, x453, x452, x451, 1, x484, x455, x451, 1, 2, x459, 2); ; float* x487 = (float*)myGpuMalloc(x459 * sizeof(float)); float* x490 = (float*)myGpuMalloc(x459 * sizeof(float)); float* x491 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x492 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x493 = (float*)myMalloc(1 * sizeof(float));; x493[0] = 0.0f; float* x495 = (float*)myMalloc(1 * sizeof(float));; x495[0] = 1.0f; cudnnTensorDescriptor_t in_desc_8; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc_8)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc_8, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, x435, x451, 1, 1)); cudnnTensorDescriptor_t sbmv_desc_8; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc_8)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc_8, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); ; CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_PER_ACTIVATION, x495, x493, in_desc_8, x484, in_desc_8, x490, sbmv_desc_8, x185, x188, 0.1, x190, x191, 1.0E-5, x491, x492)); ; float* x499 = (float*)myGpuMalloc(x459 * sizeof(float)); int32_t x500 = x435 * 29; float* x501 = (float*)myGpuMalloc(x500 * sizeof(float)); float* x502 = (float*)myMalloc(1 * sizeof(float));; x502[0] = 0.0f; float* x504 = (float*)myMalloc(1 * sizeof(float));; x504[0] = 1.0f; CUBLAS_CALL(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 29,x435,1024,x504,x202,29,x490,1024,x502,x501,29)); float* x507 = (float*)myGpuMalloc(x500 * sizeof(float)); float* x510 = (float*)myMalloc(1 * sizeof(float));; x510[0] = 0.0f; float* x512 = (float*)myMalloc(1 * sizeof(float));; x512[0] = 1.0f; float* x514 = (float*)myGpuMalloc(x500 * sizeof(float)); cudnnTensorDescriptor_t x_desc_9; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc_9)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc_9, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, x435, 29, 1, 1)); ; CUDNN_CALL(cudnnSoftmaxForward( cudnnHandle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL, x512, x_desc_9, x501, x510, x_desc_9, x514)); ; float* x517 = (float*)myGpuMalloc(x500 * sizeof(float)); // before CTC loss int* x519 = (int32_t*)myMalloc(x248 * sizeof(int32_t));; float x523 = (float)x388; for(int x521=0; x521 < x248; x521++) { float x522 = x334[x521]; float x524 = x522 * x523; int32_t x525 = (int)x524; x519[x521] = x525; } float* x530 = (float*)myGpuMalloc(x248 * sizeof(float)); { cudnnTensorDescriptor_t probs_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&probs_desc)); int probs_dims[] = {x388, x248, 29}; int probs_strides[] = {probs_dims[1] * probs_dims[2], probs_dims[2], 1}; 
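// CTC loss setup: probabilities are laid out as (seqLength x388, batch x248, 29 classes);
// x519 holds the per-sample input lengths computed above from the fractions in x334,
// while x335/x336 are the flattened target labels and the per-sample label lengths.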
CUDNN_CALL(cudnnSetTensorNdDescriptor( probs_desc, CUDNN_DATA_FLOAT, /*nbDims*/ 3, probs_dims, probs_strides)); cudnnTensorDescriptor_t grad_desc = probs_desc; cudnnCTCLossDescriptor_t ctc_desc; CUDNN_CALL(cudnnCreateCTCLossDescriptor(&ctc_desc)); CUDNN_CALL(cudnnSetCTCLossDescriptor(ctc_desc, CUDNN_DATA_FLOAT)); size_t wsSize; CUDNN_CALL(cudnnGetCTCLossWorkspaceSize( cudnnHandle, probs_desc, grad_desc, x335, x336, x519, CUDNN_CTC_LOSS_ALGO_DETERMINISTIC, ctc_desc, &wsSize)); void *ws = myGpuMalloc(wsSize); CUDNN_CALL(cudnnCTCLoss( cudnnHandle, probs_desc, x514, x335, x336, x519, x530, grad_desc, x517, CUDNN_CTC_LOSS_ALGO_DETERMINISTIC, ctc_desc, ws, wsSize)); }; float* x532 = (float*)myGpuMalloc(1 * sizeof(float)); float* x533 = (float*)myMalloc(1 * sizeof(float));; x533[0] = 0.0f; float* x535 = (float*)myMalloc(1 * sizeof(float));; x535[0] = 1.0f; { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, x248, 1, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1, 1, 1)); cudnnReduceTensorDescriptor_t reduce_desc; CUDNN_CALL(cudnnCreateReduceTensorDescriptor(&reduce_desc)); CUDNN_CALL(cudnnSetReduceTensorDescriptor( reduce_desc, CUDNN_REDUCE_TENSOR_AVG, CUDNN_DATA_FLOAT, CUDNN_PROPAGATE_NAN, CUDNN_REDUCE_TENSOR_NO_INDICES, CUDNN_32BIT_INDICES)); void *indices = nullptr; // Don't store indices. // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetReductionWorkspaceSize( cudnnHandle, reduce_desc, x_desc, out_desc, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnReduceTensor( cudnnHandle, reduce_desc, indices, 0, ws_data, ws_size, x535, x_desc, x530, x533, out_desc, x532)); }; // after CTC loss float* x539 = (float*)myGpuMalloc(1 * sizeof(float)); // make sure the size of loss is 1 arrayFill<<<28, 512>>>(x539, 1.0f, 1); // backend is lantern.TensorDslCudnn$BackendCudnn@23fbaf4a CUDA_CALL(cudaMemcpy(x345, x532, 1 * sizeof(float), cudaMemcpyDeviceToDevice)); float* x544 = (float*)myMalloc(1 * sizeof(float));; x544[0] = 1.0f; CUDNN_CALL(cudnnSoftmaxBackward( cudnnHandle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL, x544, x_desc_9, x514, x_desc_9, x517, x544, x_desc_9, x507)); ; float* x547 = (float*)myMalloc(1 * sizeof(float));; x547[0] = 0.0f; float* x549 = (float*)myMalloc(1 * sizeof(float));; x549[0] = 1.0f; // backprop of matrix-matrix-dot float* x552 = (float*)myMalloc(1 * sizeof(float));; x552[0] = 1.0f; CUBLAS_CALL(cublasSgemm(cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, x451,x435,29,x552,x202,29,x507,29,x552,x499,x451)); float* x555 = (float*)myMalloc(1 * sizeof(float));; x555[0] = 1.0f; CUBLAS_CALL(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, 29,x451,x435,x555,x507,29,x490,x451,x555,x204,29)); float* x558 = (float*)myMalloc(1 * sizeof(float));; x558[0] = 0.0f; float* x560 = (float*)myMalloc(1 * sizeof(float));; x560[0] = 1.0f; CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_PER_ACTIVATION, x560, x560, x560, x560, in_desc_8, x484, in_desc_8, x499, in_desc_8, x487, sbmv_desc_8, x185, x187,x189, 1.0E-5, x491, x492)); ; // backprop for sum on dim op int32_t x454 = x388 * x453; sum_grad<<<28, 512>>>(x483, x388, x248, 2, x451, x454, x487, x455, x451, 1, 2); ; float* x565 = (float*)NULL; float* x566 = (float*)NULL; CUDNN_CALL(cudnnRNNBackwardData( cudnnHandle, rnn_desc_7, seqLength_7, y_descs_7, x478, 
y_descs_7, x483, hx_desc_7, NULL, hx_desc_7, NULL, w_desc_7, x149, hx_desc_7, x565, hx_desc_7, x566, x_descs_7, x474, hx_desc_7, NULL, hx_desc_7, NULL, workspace_7, workspaceSize_7, x479, x480)); ; float* x568 = (float*)NULL; CUDNN_CALL(cudnnRNNBackwardWeights( cudnnHandle, rnn_desc_7, seqLength_7, x_descs_7, x471, hx_desc_7, x568, y_descs_7, x478, workspace_7, workspaceSize_7, w_desc_7, x151, x479, x480)); ; // backprop for sum on dim op sum_grad<<<28, 512>>>(x470, x388, x248, 2, x451, x454, x474, x455, x451, 1, 2); ; float* x572 = (float*)NULL; float* x573 = (float*)NULL; CUDNN_CALL(cudnnRNNBackwardData( cudnnHandle, rnn_desc_6, seqLength_6, y_descs_6, x465, y_descs_6, x470, hx_desc_6, NULL, hx_desc_6, NULL, w_desc_6, x104, hx_desc_6, x572, hx_desc_6, x573, x_descs_6, x461, hx_desc_6, NULL, hx_desc_6, NULL, workspace_6, workspaceSize_6, x466, x467)); ; float* x575 = (float*)NULL; CUDNN_CALL(cudnnRNNBackwardWeights( cudnnHandle, rnn_desc_6, seqLength_6, x_descs_6, x457, hx_desc_6, x575, y_descs_6, x465, workspace_6, workspaceSize_6, w_desc_6, x106, x466, x467)); ; // backprop for sum on dim op sum_grad<<<28, 512>>>(x450, x388, x248, 2, x451, x454, x461, x455, x451, 1, 2); ; float* x579 = (float*)NULL; float* x580 = (float*)NULL; CUDNN_CALL(cudnnRNNBackwardData( cudnnHandle, rnn_desc_5, seqLength_5, y_descs_5, x445, y_descs_5, x450, hx_desc_5, NULL, hx_desc_5, NULL, w_desc_5, x58, hx_desc_5, x579, hx_desc_5, x580, x_descs_5, x437, hx_desc_5, NULL, hx_desc_5, NULL, workspace_5, workspaceSize_5, x446, x447)); ; float* x582 = (float*)NULL; CUDNN_CALL(cudnnRNNBackwardWeights( cudnnHandle, rnn_desc_5, seqLength_5, x_descs_5, x417, hx_desc_5, x582, y_descs_5, x445, workspace_5, workspaceSize_5, w_desc_5, x60, x446, x447)); ; // backprop for permute WrappedArray(2, 0, 1) int* x585 = (int32_t*)myMalloc(4 * sizeof(int32_t));; x585[2] = x418; x585[0] = x414; x585[1] = 1; x585[3] = 1; float* x590 = (float*)myMalloc(1 * sizeof(float));; x590[0] = 1.0f; CUDNN_CALL(cudnnTransformTensor( cudnnHandle, x590, out_desc_4, x437, x590, in_desc_4, x411)); ; hardTanh_grad<<<28, 512>>>(x402, x411, x411, 0.0, 20.0, x391, true); float* x594 = (float*)myMalloc(1 * sizeof(float));; x594[0] = 0.0f; float* x596 = (float*)myMalloc(1 * sizeof(float));; x596[0] = 1.0f; CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x596, x596, x596, x596, in_desc_3, x394, out_desc_3, x411, in_desc_3, x401, sbmv_desc_3, x40, x42,x44, 1.0E-5, x403, x404)); ; // conv2D back-propagate float* x600 = (float*)myMalloc(1 * sizeof(float));; x600[0] = 1.0f; {// Algorithm. cudnnConvolutionBwdDataAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filt_desc_2, out_desc_2, conv_desc_2, in_desc_2, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filt_desc_2, out_desc_2, conv_desc_2, in_desc_2, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x600, filt_desc_2, x37, out_desc_2, x401, conv_desc_2, algo, ws_data, ws_size, x600, in_desc_2, x378)); }; float* x603 = (float*)myMalloc(1 * sizeof(float));; x603[0] = 1.0f; {// Algorithm. 
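// Backward pass of the second convolution continues here: the weight-gradient path
// mirrors the forward one -- select a backward-filter algorithm, size and allocate its
// workspace, then accumulate the filter gradients into x39.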
cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc_2, out_desc_2, conv_desc_2, filt_desc_2, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc_2, out_desc_2, conv_desc_2, filt_desc_2, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x603, in_desc_2, x369, out_desc_2, x401, conv_desc_2, algo, ws_data, ws_size, x603, filt_desc_2, x39)); }; hardTanh_grad<<<28, 512>>>(x369, x378, x378, 0.0, 20.0, x357, true); float* x607 = (float*)myMalloc(1 * sizeof(float));; x607[0] = 0.0f; float* x609 = (float*)myMalloc(1 * sizeof(float));; x609[0] = 1.0f; CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x609, x609, x609, x609, in_desc_1, x361, out_desc_1, x378, in_desc_1, x368, sbmv_desc_1, x20, x22,x24, 1.0E-5, x370, x371)); ; // conv2D back-propagate float* x613 = (float*)myMalloc(1 * sizeof(float));; x613[0] = 1.0f; {// Algorithm. cudnnConvolutionBwdFilterAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, in_desc_0, out_desc_0, conv_desc_0, filt_desc_0, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algo)); // algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, in_desc_0, out_desc_0, conv_desc_0, filt_desc_0, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x613, in_desc_0, x339, out_desc_0, x368, conv_desc_0, algo, ws_data, ws_size, x613, filt_desc_0, x19)); }; // Tensor 'toCPU' invocation. 
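// End of the batch: copy the scalar CTC loss back to the host, add it to the running
// total, then update every parameter tensor from its gradient and momentum buffer via
// momentum_update_1D_1D.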
float* x617 = (float*)myMalloc(1 * sizeof(float));; CUDA_CALL(cudaMemcpy(x617, x345, 1 * sizeof(float), cudaMemcpyDeviceToHost)); float x619 = x617[0]; x324 += x619; momentum_update_1D_1D<<<28, 512>>>(x17, x19, x205, 3.0E-8, 0.01, 400.0, true, 14432); momentum_update_1D_1D<<<28, 512>>>(x37, x39, x206, 3.0E-8, 0.01, 400.0, true, 236544); momentum_update_1D_1D<<<28, 512>>>(x40, x42, x207, 3.0E-8, 0.01, 400.0, true, 32); momentum_update_1D_1D<<<28, 512>>>(x43, x44, x208, 3.0E-8, 0.01, 400.0, true, 32); momentum_update_1D_1D<<<28, 512>>>(x23, x24, x209, 3.0E-8, 0.01, 400.0, true, 32); momentum_update_1D_1D<<<28, 512>>>(x20, x22, x210, 3.0E-8, 0.01, 400.0, true, 32); momentum_update_1D_1D<<<28, 512>>>(x185, x187, x211, 3.0E-8, 0.01, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x188, x189, x212, 3.0E-8, 0.01, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x202, x204, x213, 3.0E-8, 0.01, 400.0, true, 29696); momentum_update_1D_1D<<<28, 512>>>(x174, x175, x214, 3.0E-8, 0.01, 400.0, true, 1048576); momentum_update_1D_1D<<<28, 512>>>(x170, x171, x215, 3.0E-8, 0.01, 400.0, true, 1048576); momentum_update_1D_1D<<<28, 512>>>(x182, x183, x216, 3.0E-8, 0.01, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x178, x179, x217, 3.0E-8, 0.01, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x154, x155, x218, 3.0E-8, 0.01, 400.0, true, 1048576); momentum_update_1D_1D<<<28, 512>>>(x158, x159, x219, 3.0E-8, 0.01, 400.0, true, 1048576); momentum_update_1D_1D<<<28, 512>>>(x166, x167, x220, 3.0E-8, 0.01, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x162, x163, x221, 3.0E-8, 0.01, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x129, x130, x222, 3.0E-8, 0.01, 400.0, true, 1048576); momentum_update_1D_1D<<<28, 512>>>(x125, x126, x223, 3.0E-8, 0.01, 400.0, true, 1048576); momentum_update_1D_1D<<<28, 512>>>(x137, x138, x224, 3.0E-8, 0.01, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x109, x110, x225, 3.0E-8, 0.01, 400.0, true, 1048576); momentum_update_1D_1D<<<28, 512>>>(x133, x134, x226, 3.0E-8, 0.01, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x113, x114, x227, 3.0E-8, 0.01, 400.0, true, 1048576); momentum_update_1D_1D<<<28, 512>>>(x117, x118, x228, 3.0E-8, 0.01, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x121, x122, x229, 3.0E-8, 0.01, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x91, x92, x230, 3.0E-8, 0.01, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x79, x80, x231, 3.0E-8, 0.01, 400.0, true, 688128); momentum_update_1D_1D<<<28, 512>>>(x63, x64, x232, 3.0E-8, 0.01, 400.0, true, 688128); momentum_update_1D_1D<<<28, 512>>>(x87, x88, x233, 3.0E-8, 0.01, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x67, x68, x234, 3.0E-8, 0.01, 400.0, true, 1048576); momentum_update_1D_1D<<<28, 512>>>(x71, x72, x235, 3.0E-8, 0.01, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x75, x76, x236, 3.0E-8, 0.01, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x83, x84, x237, 3.0E-8, 0.01, 400.0, true, 1048576); int32_t x654 = x321; int32_t x656 = x654 % x655; bool x657 = x656 == 0; if (x657) { float x662 = x324; double x658 = (double)x654; double x659 = 100.0 * x658; double x661 = x659 / x660; float x663 = (float)x654; float x664 = x662 / x663; printf("Train epoch %d: [%d/%d (%.0f%%)]\tAverage Loss: %.6f\n",x317,x654,x253,x661,x664); fflush(stdout); } else { } int64_t x669 = (long)mallocAddr; int64_t x670 = x669 - x313; memset((void*)x313, 0, x670); mallocAddr = (void*)x313; int64_t x673 = (long)gpuMallocAddr; 
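// The GPU arena is rewound the same way just below: both mallocAddr and gpuMallocAddr
// return to the marks (x313/x314) taken before the training loop, so each batch reuses
// the same bump-allocated memory pool instead of growing the heap.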
int64_t x674 = x673 - x314; cudaMemset((void*)x314, 0, x674); gpuMallocAddr = (void*)x314; } gettimeofday(&end_1, NULL); timeval_subtract(&diff_1, &end_1, &begin_1);; int64_t x681 = ((diff_1.tv_sec * 1000000L) + (diff_1.tv_usec)); int64_t x682 = x681 / 1000LL; int64_t x684 = x681 / x683; printf("Training completed in %ldms (%ld us/images)\n",x682,x684); double x686 = (double)x681; double x687 = x686 / 1000000.0; x312[x317] = x687; float x689 = x324; float x691 = x689 / x690; double x692 = (double)x691; x311[x317] = x692; } gettimeofday(&end_0, NULL); timeval_subtract(&diff_0, &end_0, &begin_0);; int64_t x698 = ((diff_0.tv_sec * 1000000L) + (diff_0.tv_usec)); sort(x312, x312 + 1); double x704 = x312[0]; int64_t x705 = (long)fopen(x0, "w"); fprintf((FILE *)x705, "unit: %s\n", "1 epoch"); for(int x707=0; x707 < 1; x707++) { double x708 = x311[x707]; fprintf((FILE *)x705, "%lf\n", x708); } fprintf((FILE *)x705, "run time: %lf %lf\n", x309, x704); fclose((FILE*)x705); // Backend cleanup. CUBLAS_CALL(cublasDestroy(cublasHandle)); CUDA_CALL(cudaFree(gpuMallocBase)); CUDNN_CALL(cudnnDestroy(cudnnHandle)); } /***************************************** End of C Generated Code *******************************************/
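/*
 * momentum_update_1D_1D, arrayFill, hardTanh and the other helper kernels invoked above
 * are not shown in this excerpt. As a point of reference only, a grid-stride
 * SGD-with-momentum update written in the same style as the elementwise kernels at the
 * top of this file could look like the sketch below; the kernel name, parameter meanings
 * (lr, mu, gradClip, nesterov) and the clipping/gradient-zeroing behaviour are
 * illustrative assumptions, not the generated code's actual semantics.
 */
__global__ void momentum_update_sketch(float* x, float* d, float* m,
                                       float lr, float mu, float gradClip,
                                       bool nesterov, int size) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = gridDim.x * blockDim.x;
  for (; tid < size; tid += stride) {
    float g = d[tid];
    // clip the raw gradient to [-gradClip, gradClip]
    if (g >  gradClip) g =  gradClip;
    if (g < -gradClip) g = -gradClip;
    // classic momentum: v <- mu*v + g, then step the parameter
    m[tid] = mu * m[tid] + g;
    float step = nesterov ? (g + mu * m[tid]) : m[tid];
    x[tid] -= lr * step;
    // clear the gradient so the next batch accumulates from zero
    d[tid] = 0.0f;
  }
}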
#include <iostream> #include <thrust/sort.h> #ifdef _WIN32 typedef unsigned int uint32_t; //typedef unsigned short uint32_t; #endif using namespace std; #define PROFILE 1 #define USE_GRID 1 #define USE_BOX_PRUNING 0 #define kRadius 0.05f #define kMaxRadius (kRadius)// + 0.2f*kRadius) #define kInvCellEdge (0.5f/kMaxRadius) #if USE_GRID typedef uint32_t CellId; #else typedef float CellId; #endif struct GrainSystem { public: float2* mPositions; float2* mVelocities; float* mRadii; float2* mSortedPositions; float2* mSortedVelocities; float* mSortedRadii; float2* mNewVelocities; uint32_t* mCellStarts; uint32_t* mCellEnds; CellId* mCellIds; uint32_t* mIndices; uint32_t mNumGrains; GrainParams mParams; }; #if PROFILE struct CudaTimer { CudaTimer(const char* name, cudaEvent_t start, cudaEvent_t stop, float& timer) : mTimer(timer), mName(name), mStart(start), mStop(stop) { cudaEventRecord(mStart, 0); } ~CudaTimer() { cudaEventRecord(mStop, 0); cudaEventSynchronize(mStop); float elapsedTime; cudaEventElapsedTime(&elapsedTime, mStart, mStop); mTimer += elapsedTime; //cout << mName << " took: " << elapsedTime << endl; } float& mTimer; cudaEvent_t mStart; cudaEvent_t mStop; const char* mName; }; #else struct CudaTimer { CudaTimer(const char*, cudaEvent_t, cudaEvent_t, float& ) {} }; #endif void SortCellIndices(uint32_t* cellIds, uint32_t* particleIndices, uint32_t numGrains); void SortCellIndices(float* cellIds, uint32_t* particleIndices, uint32_t numGrains); __device__ inline float sqr(float x) { return x*x; } // calculate collision impulse __device__ inline float2 CollisionImpulse(float2 va, float2 vb, float ma, float mb, float2 n, float d, float baumgarte, float friction, float overlap) { // calculate relative velocity float2 vd = vb-va; // calculate relative normal velocity float vn = dot(vd, n); float2 j = make_float2(0.0f, 0.0f); //if (vn < 0.0f) vn = min(vn, 0.0f); { // calculate relative tangential velocity float2 vt = vd - n*vn; float rcpvt = rsqrtf(dot(vt, vt) + 0.001f); // position bias float bias = baumgarte*min(d+overlap, 0.0f); float2 jn = -(vn + bias)*n; float2 jt = max(friction*vn*rcpvt, -0.5f)*vt; // total mass float msum = ma + mb; // normal impulse j = (jn + jt)*mb/msum; } return j; } #if USE_GRID const uint32_t kGridDim = 128; // transform a world space coordinate into cell coordinate __device__ inline uint32_t GridCoord(float x, float invCellEdge) { // offset to handle negative numbers float l = x+100.0f; uint32_t c = (uint32_t)(floorf(l*invCellEdge)); return c; } /* __device__ inline uint32_t GridHash(int x, int y) { uint32_t cx = x & (kGridDim-1); uint32_t cy = y & (kGridDim-1); return cy*kGridDim + cx; } */ __device__ inline uint32_t GridHash(int x, int y) { const uint32_t p1 = 73856093; // some large primes const uint32_t p2 = 19349663; uint32_t n = x*p1 ^ y*p2; return n&(kGridDim*kGridDim-1); } __global__ void CreateCellIndices(const float2* positions, uint32_t* cellIds, uint32_t* particleIndices) { uint32_t i = blockIdx.x*blockDim.x + threadIdx.x; float2 p = positions[i]; cellIds[i] = GridHash(GridCoord(p.x, kInvCellEdge), GridCoord(p.y, kInvCellEdge)); particleIndices[i] = i; } __global__ void CreateGrid(const uint32_t* cellIds, uint32_t* cellStarts, uint32_t* cellEnds, uint32_t numGrains) { uint32_t i = blockIdx.x*blockDim.x + threadIdx.x; // scan the particle-cell array to find the start and end uint32_t c = cellIds[i]; if (i == 0) { cellStarts[c] = i; } else { uint32_t p = cellIds[i-1]; if (c != p) { cellStarts[c] = i; cellEnds[p] = i; } } if (i == numGrains-1) { 
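// the last particle also closes the final occupied cell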
cellEnds[c] = i+1; } } __device__ inline float2 CollideCell(int index, int cx, int cy, const uint32_t* cellStarts, const uint32_t* cellEnds, const uint32_t* indices, const float2* positions, const float2* velocities, const float* radii, float2 x, float2 v, float r, float baumgarte, float friction, float overlap) { float2 j = make_float2(0.0f, 0.0f); uint32_t cellIndex = GridHash(cx, cy); uint32_t cellStart = cellStarts[cellIndex]; uint32_t cellEnd = cellEnds[cellIndex]; for (int i=cellStart; i < cellEnd; ++i) { uint32_t particleIndex = i;//indices[i]; if (particleIndex != index) { // distance to sphere float2 t = x - positions[particleIndex]; float d = dot(t, t); float rsum = r + radii[particleIndex]; float mtd = d - sqr(rsum); if (mtd < 0.0f) { float2 n = make_float2(0.0f, 1.0f); if (d > 0.0f) { d = sqrtf(d); n = t / d; } j += CollisionImpulse(velocities[particleIndex], v, 1.0f, 1.0f, n, d-rsum, baumgarte, friction, overlap); } } } return j; } #endif #if USE_BOX_PRUNING __global__ void CreateCellIndices(const float2* positions, float* cellIds, uint32_t* particleIndices) { uint32_t i = blockIdx.x*blockDim.x + threadIdx.x; cellIds[i] = positions[i].x; particleIndices[i] = i; } #endif __global__ void ReorderParticles(const float2* positions, const float2* velocities, const float* radii, float2* sortedPositions, float2* sortedVelocities, float* sortedRadii, const uint32_t* indices) { uint32_t i = blockIdx.x*blockDim.x + threadIdx.x; int originalIndex = indices[i]; sortedPositions[i] = positions[originalIndex]; sortedVelocities[i] = velocities[originalIndex]; sortedRadii[i] = radii[originalIndex]; } __global__ void Collide(const float2* positions, const float2* velocities, const float* radii, const uint32_t* cellStarts, const uint32_t* cellEnds, const uint32_t* indices, float2* newVelocities, int numGrains, GrainParams params, float dt) { const int index = blockIdx.x*blockDim.x + threadIdx.x; const float2 x = positions[index]; const float2 v = velocities[index]; const float r = radii[index]; float2 vd = make_float2(0.0f, 0.0f); #if USE_GRID // collide particles int cx = GridCoord(x.x, kInvCellEdge); int cy = GridCoord(x.y, kInvCellEdge); for (int j=cy-1; j <= cy+1; ++j) { for (int i=cx-1; i <= cx+1; ++i) { vd += CollideCell(index, i, j, cellStarts, cellEnds, indices, positions, velocities, radii, x, v, r, params.mBaumgarte, params.mFriction, params.mOverlap); } } #endif #if USE_BOX_PRUNING // walk forward along the list of neighbouring particles int i=index+1; float maxCoord = x.x + 2.0f*kMaxRadius; float minCoord = x.x - 2.0f*kMaxRadius; while (i < numGrains) { if (positions[i].x > maxCoord) break; // distance to sphere float2 t = x - positions[i]; float d = dot(t, t); float rsum = r + radii[i]; float mtd = d - sqr(rsum); if (mtd < 0.0f) { float2 n = make_float2(0.0f, 1.0f); if (d > 0.0f) { d = sqrtf(d); n = t / d; } vd += CollisionImpulse(velocities[i], v, 1.0f, 1.0f, n, d-rsum, params.mBaumgarte, params.mFriction, params.mOverlap); } ++i; } // walk backward along the list of neighbouring particles i=index-1; while (i >= 0) { if (positions[i].x < minCoord) break; // distance to sphere float2 t = x - positions[i]; float d = dot(t, t); float rsum = r + radii[i]; float mtd = d - sqr(rsum); if (mtd < 0.0f) { float2 n = make_float2(0.0f, 1.0f); if (d > 0.0f) { d = sqrtf(d); n = t / d; } vd += CollisionImpulse(velocities[i], v, 1.0f, 1.0f, n, d-rsum, params.mBaumgarte, params.mFriction, params.mOverlap); } --i; } #endif // collide planes for (int i=0; i < params.mNumPlanes; ++i) { float3 p = 
params.mPlanes[i]; // distance to plane float d = x.x*p.x + x.y*p.y - p.z; float mtd = d - r; if (mtd < 0.0f) { vd += CollisionImpulse(make_float2(0.0f, 0.0f), v, 0.0f, 1.0f, make_float2(p.x, p.y), mtd, params.mBaumgarte, 0.9f, params.mOverlap); } } // write back velocity newVelocities[index] = v + vd; } __global__ void IntegrateForce(float2* velocities, float2 gravity, float damp, float dt) { int index = blockIdx.x*blockDim.x + threadIdx.x; velocities[index] += (gravity - damp*velocities[index])*dt; } __global__ void IntegrateVelocity(float2* positions, float2* velocities, const float2* newVelocities, float dt) { int index = blockIdx.x*blockDim.x + threadIdx.x; // x += v*dt velocities[index] = newVelocities[index]; positions[index] += velocities[index]*dt; //+ 0.5f*make_float2(0.0f, -9.8f)*dt*dt; } __global__ void PrintCellCounts(uint32_t* cellStarts, uint32_t* cellEnds) { int index = blockIdx.x*blockDim.x + threadIdx.x; printf("%d\n", cellEnds[index]-cellStarts[index]); } //------------------------------------------------------------------ GrainSystem* grainCreateSystem(int numGrains) { GrainSystem* s = new GrainSystem(); s->mNumGrains = numGrains; cudaMalloc(&s->mPositions, numGrains*sizeof(float2)); cudaMalloc(&s->mVelocities, numGrains*sizeof(float2)); cudaMalloc(&s->mNewVelocities, numGrains*sizeof(float2)); cudaMalloc(&s->mRadii, numGrains*sizeof(float)); cudaMalloc(&s->mSortedPositions, numGrains*sizeof(float2)); cudaMalloc(&s->mSortedVelocities, numGrains*sizeof(float2)); cudaMalloc(&s->mSortedRadii, numGrains*sizeof(float)); // grid #if USE_GRID cudaMalloc(&s->mCellStarts, kGridDim*kGridDim*sizeof(uint32_t)); cudaMalloc(&s->mCellEnds, kGridDim*kGridDim*sizeof(uint32_t)); #endif cudaMalloc(&s->mCellIds, numGrains*sizeof(uint32_t)); cudaMalloc(&s->mIndices, numGrains*sizeof(uint32_t)); return s; } void grainDestroySystem(GrainSystem* s) { cudaFree(s->mPositions); cudaFree(s->mVelocities); cudaFree(s->mNewVelocities); cudaFree(s->mRadii); cudaFree(s->mSortedPositions); cudaFree(s->mSortedVelocities); cudaFree(s->mSortedRadii); #if USE_GRID cudaFree(s->mCellStarts); cudaFree(s->mCellEnds); #endif cudaFree(s->mCellIds); cudaFree(s->mIndices); delete s; } void grainSetPositions(GrainSystem* s, float* p, int n) { cudaMemcpy(&s->mPositions[0], p, sizeof(float2)*n, cudaMemcpyHostToDevice); } void grainSetVelocities(GrainSystem* s, float* v, int n) { cudaMemcpy(&s->mVelocities[0], v, sizeof(float2)*n, cudaMemcpyHostToDevice); } void grainSetRadii(GrainSystem* s, float* r) { cudaMemcpy(&s->mRadii[0], r, sizeof(float)*s->mNumGrains, cudaMemcpyHostToDevice); } void grainGetPositions(GrainSystem* s, float* p) { cudaMemcpy(p, &s->mPositions[0], sizeof(float2)*s->mNumGrains, cudaMemcpyDeviceToHost); } void grainGetVelocities(GrainSystem* s, float* v) { cudaMemcpy(v, &s->mVelocities[0], sizeof(float2)*s->mNumGrains, cudaMemcpyDeviceToHost); } void grainGetRadii(GrainSystem* s, float* r) { cudaMemcpy(r, &s->mRadii[0], sizeof(float)*s->mNumGrains, cudaMemcpyDeviceToHost); } void grainSetParams(GrainSystem* s, GrainParams* params) { //cudaMemcpy(s->mParams, params, sizeof(GrainParams), cudaMemcpyHostToDevice); s->mParams = *params; } void grainUpdateSystem(GrainSystem* s, float dt, int iterations, GrainTimers* timers) { dt /= iterations; const int kNumThreadsPerBlock = 128; const int kNumBlocks = s->mNumGrains / kNumThreadsPerBlock; GrainParams params = s->mParams; params.mBaumgarte /= dt; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); for (int i=0; i < iterations; ++i) { 
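// One collision substep: hash every particle to a grid cell, sort particles by cell id,
// rebuild the cell start/end ranges, reorder particle data for coherent memory access,
// integrate external forces, resolve particle/particle and particle/plane collisions
// into new velocities, integrate positions, then swap the sorted buffers back to become
// the primary arrays.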
{ CudaTimer timer("CreateCellIndices", start, stop, timers->mCreateCellIndices); CreateCellIndices<<<kNumBlocks, kNumThreadsPerBlock>>>(s->mPositions, s->mCellIds, s->mIndices); } { CudaTimer timer("SortCellIndices", start, stop, timers->mSortCellIndices); SortCellIndices(s->mCellIds, s->mIndices, s->mNumGrains); } #if USE_GRID { CudaTimer timer("CreateGrid", start, stop, timers->mCreateGrid); cudaMemset(s->mCellStarts, 0, sizeof(uint32_t)*kGridDim*kGridDim); cudaMemset(s->mCellEnds, 0, sizeof(uint32_t)*kGridDim*kGridDim); CreateGrid<<<kNumBlocks, kNumThreadsPerBlock>>>(s->mCellIds, s->mCellStarts, s->mCellEnds, s->mNumGrains); } #endif { CudaTimer timer("ReorderParticles", start, stop, timers->mReorder); ReorderParticles<<<kNumBlocks, kNumThreadsPerBlock>>>(s->mPositions, s->mVelocities, s->mRadii, s->mSortedPositions, s->mSortedVelocities, s->mSortedRadii, s->mIndices); } //PrintCellCounts<<<kGridDim*kGridDim/kNumThreadsPerBlock, kNumThreadsPerBlock>>>(s->mCellStarts, s->mCellEnds); { float t; CudaTimer timer("Integrate Force", start, stop, t); IntegrateForce<<<kNumBlocks, kNumThreadsPerBlock>>>(s->mSortedVelocities, s->mParams.mGravity, s->mParams.mDamp, dt); } { CudaTimer timer("Collide", start, stop, timers->mCollide); Collide<<<kNumBlocks, kNumThreadsPerBlock>>>(s->mSortedPositions, s->mSortedVelocities, s->mSortedRadii, s->mCellStarts, s->mCellEnds, s->mIndices, s->mNewVelocities, s->mNumGrains, params, dt); } { CudaTimer timer("Integrate", start, stop, timers->mIntegrate); IntegrateVelocity<<<kNumBlocks, kNumThreadsPerBlock>>>(s->mSortedPositions, s->mSortedVelocities, s->mNewVelocities, dt); } swap(s->mSortedPositions, s->mPositions); swap(s->mSortedVelocities, s->mVelocities); swap(s->mSortedRadii, s->mRadii); } cudaEventDestroy(start); cudaEventDestroy(stop); }
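/*
 * SortCellIndices() is only declared in this translation unit; its definition is not
 * shown in this excerpt. Because <thrust/sort.h> is already included, a minimal sketch
 * of what such a key/value sort could look like is given below. The name
 * SortCellIndicesSketch is hypothetical (added purely for illustration), and the sketch
 * assumes <thrust/device_ptr.h> is also visible for thrust::device_pointer_cast.
 */
void SortCellIndicesSketch(uint32_t* cellIds, uint32_t* particleIndices, uint32_t numGrains)
{
    // sort particle indices by their cell id so particles in the same cell are contiguous
    thrust::sort_by_key(thrust::device_pointer_cast(cellIds),
                        thrust::device_pointer_cast(cellIds + numGrains),
                        thrust::device_pointer_cast(particleIndices));
}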
#include "common.h" #include "transformerKernels.h" /** @file Implemented the cuda kernel function and its launcher that required by transformer model. Currently, fp16 and fp32 versions are provided */ namespace lightseq { namespace cuda { __forceinline__ __device__ int8_t float2int8(float x, float quant_scale) { float i8_f = x * quant_scale; int32_t i8 = floorf(i8_f + 0.5); i8 = i8 < -127 ? -127 : (i8 > 127 ? 127 : i8); return int8_t(i8); } __forceinline__ __device__ int8_t posfloat2int8(float x, float quant_scale, float clip_max) { float i8_f = x * 2 * quant_scale - quant_scale * clip_max; int32_t i8 = floorf(i8_f + 0.5); i8 = i8 < -127 ? -127 : (i8 > 127 ? 127 : i8); return int8_t(i8); } __global__ void quantize_tensor_kernel(const float *input, int8_t *output, int batch_tokens, int hidden_size, float quant_scale, bool out_col32) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_tokens * hidden_size) return; int output_index; if (out_col32) { int row_id = i / hidden_size; int col_id = i % hidden_size; output_index = row_major2flat_col32(row_id, col_id, batch_tokens, hidden_size); } else { output_index = i; } output[output_index] = float2int8(input[i], quant_scale); } __global__ void quantize_tensor_kernel(const __half *input, int8_t *output, int batch_tokens, int hidden_size, float quant_scale, bool out_col32) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_tokens * hidden_size) return; int output_index; if (out_col32) { int row_id = i / hidden_size; int col_id = i % hidden_size; output_index = row_major2flat_col32(row_id, col_id, batch_tokens, hidden_size); } else { output_index = i; } output[output_index] = float2int8(__half2float(input[i]), quant_scale); } template <> void launch_quantize_tensor<float>(const float *input, int8_t *output, int batch_tokens, int hidden_size, float quant_scale, cudaStream_t &stream, bool out_col32) { int grid_dim = (batch_tokens * hidden_size) >> 10; quantize_tensor_kernel<<<grid_dim + 1, 1024, 0, stream>>>( input, output, batch_tokens, hidden_size, quant_scale, out_col32); } template <> void launch_quantize_tensor<__half>(const __half *input, int8_t *output, int batch_tokens, int hidden_size, float quant_scale, cudaStream_t &stream, bool out_col32) { int grid_dim = (batch_tokens * hidden_size) >> 10; quantize_tensor_kernel<<<grid_dim + 1, 1024, 0, stream>>>( input, output, batch_tokens, hidden_size, quant_scale, out_col32); } template <typename T> __global__ void dequantize_tensor_kernel(const int32_t *input, T *output, int batch_tokens, int hidden_size, float dequant_scale, bool in_col32); template <> __global__ void dequantize_tensor_kernel<float>(const int32_t *input, float *output, int batch_tokens, int hidden_size, float dequant_scale, bool in_col32) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_tokens * hidden_size) return; int input_index; if (in_col32) { int row_id = i / hidden_size; int col_id = i % hidden_size; input_index = row_major2flat_col32(row_id, col_id, batch_tokens, hidden_size); } else { input_index = i; } output[i] = input[input_index] * dequant_scale; } template <> __global__ void dequantize_tensor_kernel<__half>( const int32_t *input, __half *output, int batch_tokens, int hidden_size, float dequant_scale, bool in_col32) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_tokens * hidden_size) return; int in_index; if (in_col32) { int row_id = i / hidden_size; int col_id = i % hidden_size; in_index = row_major2flat_col32(row_id, col_id, batch_tokens, hidden_size); } else { 
in_index = i; } output[i] = __float2half(float(input[in_index]) * dequant_scale); } template <> void launch_dequantize_tensor<float>(const int32_t *input, float *output, int batch_tokens, int hidden_size, float dequant_scale, cudaStream_t &stream, bool in_col32) { int total_count = batch_tokens * hidden_size; int grid_dim = total_count >> 10; dequantize_tensor_kernel<<<grid_dim + 1, 1024, 0, stream>>>( input, output, batch_tokens, hidden_size, dequant_scale, in_col32); } template <> void launch_dequantize_tensor<__half>(const int32_t *input, __half *output, int batch_tokens, int hidden_size, float dequant_scale, cudaStream_t &stream, bool in_col32) { int total_count = batch_tokens * hidden_size; int grid_dim = total_count >> 10; dequantize_tensor_kernel<<<grid_dim + 1, 1024, 0, stream>>>( input, output, batch_tokens, hidden_size, dequant_scale, in_col32); } template <typename T> __global__ void ker_norm_layer_resual_i8O(T *input, int8_t *output, const T *scale, const T *bias, const T *residual_bias, const int hidden_size, float quant_scale, bool is_post_ln, bool out_col32) { int block_start = blockIdx.x * hidden_size; int start = block_start + threadIdx.x; int end = block_start + hidden_size; float val = 0.0; for (int i = start; i < end; i += blockDim.x) { val += input[i]; } // step 0. compute mean __shared__ float s_mean; float reduce_res = blockReduceSum<float>(val); if (threadIdx.x == 0) s_mean = reduce_res / float(hidden_size); __syncthreads(); // step 1. compute variance val = 0.0; for (int i = start; i < end; i += blockDim.x) { float tmp = input[i] - s_mean; val += tmp * tmp; } __shared__ float s_var; reduce_res = blockReduceSum(val); if (threadIdx.x == 0) s_var = rsqrtf(reduce_res / float(hidden_size) + epsilon); __syncthreads(); float output_f; // step 2. layer norm for (int i = start; i < end; i += blockDim.x) { val = input[i] - s_mean; output_f = val * s_var * __ldg(&scale[i - block_start]) + __ldg(&bias[i - block_start]); int8_t res = float2int8(output_f, quant_scale); if (out_col32) { int row_id = blockIdx.x; int col_id = i - block_start; int col32_index = row_major2flat_col32(row_id, col_id, gridDim.x, hidden_size); output[col32_index] = res; } else { output[i] = res; } if (is_post_ln) { input[i] = output_f + __ldg(&residual_bias[i - block_start]); } else { input[i] += __ldg(&residual_bias[i - block_start]); } } } template <> __global__ void ker_norm_layer_resual_i8O<__half>( __half *input, int8_t *output, const __half *scale, const __half *bias, const __half *residual_bias, const int half_hidden_size, float quant_scale, bool is_post_ln, bool out_col32) { int block_start = blockIdx.x * half_hidden_size; int start = block_start + threadIdx.x; int end = blockIdx.x * half_hidden_size + half_hidden_size; half2 *pinput = (half2 *)input; char2 *poutput = (char2 *)output; const half2 *pscale = (const half2 *)scale; const half2 *pbias = (const half2 *)bias; const half2 *presidual_bias = (const half2 *)residual_bias; float mean_dim = float(half_hidden_size) * 2.f; float val = 0.0; // step 0. compute mean for (int i = start; i < end; i += blockDim.x) { float2 local_f2 = safe_half2_to_float2(pinput[i]); val += local_f2.x + local_f2.y; } __shared__ float s_mean; float reduce_res = blockReduceSum<float>(val); if (threadIdx.x == 0) s_mean = reduce_res / mean_dim; __syncthreads(); // step 1. 
compute variance val = 0.0; for (int i = start; i < end; i += blockDim.x) { float2 local_f2 = safe_half2_to_float2(pinput[i]); float tmpx = local_f2.x - s_mean; float tmpy = local_f2.y - s_mean; val += tmpx * tmpx + tmpy * tmpy; } __shared__ float s_var; reduce_res = blockReduceSum(val); if (threadIdx.x == 0) s_var = rsqrtf(reduce_res / mean_dim + epsilon); __syncthreads(); char2 output_c2; // step 2. layer norm for (int i = start; i < end; i += blockDim.x) { float2 scale_val = __half22float2(__ldg(&pscale[i - block_start])); float2 bias_val = __half22float2(__ldg(&pbias[i - block_start])); float2 local_f2 = safe_half2_to_float2(pinput[i]); local_f2.x = (local_f2.x - s_mean) * s_var * scale_val.x + bias_val.x; local_f2.y = (local_f2.y - s_mean) * s_var * scale_val.y + bias_val.y; output_c2.x = float2int8(local_f2.x, quant_scale); output_c2.y = float2int8(local_f2.y, quant_scale); if (out_col32) { int row_id = blockIdx.x; int col_id = (i - block_start) * 2; int col32_index = row_major2flat_col32(row_id, col_id, gridDim.x, half_hidden_size * 2) >> 1; poutput[col32_index] = output_c2; } else { poutput[i] = output_c2; } if (!is_post_ln) { local_f2 = safe_half2_to_float2(pinput[i]); } float2 residual_bias_val = __half22float2(__ldg(&presidual_bias[i - block_start])); float2 new_input_f2; new_input_f2.x = local_f2.x + residual_bias_val.x; new_input_f2.y = local_f2.y + residual_bias_val.y; pinput[i] = __float22half2_rn(new_input_f2); } } template <typename T> void ker_norm_layer_resual_i8O_launcher(int token_num, int hidden_size, cudaStream_t stream, T *input, int8_t *output, const T *scale, const T *bias, const T *residual_bias, const int max_thread_per_block, float quant_scale, bool is_post_ln, bool out_col32) { ker_norm_layer_resual_i8O<T><<<token_num, max_thread_per_block, 0, stream>>>( input, output, scale, bias, residual_bias, hidden_size, quant_scale, is_post_ln, out_col32); } template <> void ker_norm_layer_resual_i8O_launcher<__half>( int token_num, int hidden_size, cudaStream_t stream, __half *input, int8_t *output, const __half *scale, const __half *bias, const __half *residual_bias, const int max_thread_per_block, float quant_scale, bool is_post_ln, bool out_col32) { ker_norm_layer_resual_i8O<__half> <<<token_num, max_thread_per_block, 0, stream>>>( input, output, scale, bias, residual_bias, hidden_size / 2, quant_scale, is_post_ln, out_col32); } template void ker_norm_layer_resual_i8O_launcher<float>( int token_num, int hidden_size, cudaStream_t stream, float *input, int8_t *output, const float *scale, const float *bias, const float *residual_bias, const int max_thread_per_block, float quant_scale, bool is_post_ln, bool out_col32); template void ker_norm_layer_resual_i8O_launcher<__half>( int token_num, int hidden_size, cudaStream_t stream, __half *input, int8_t *output, const __half *scale, const __half *bias, const __half *residual_bias, const int max_thread_per_block, float quant_scale, bool is_post_ln, bool out_col32); template <typename T> __global__ void ker_residual_bias_ln_i32I_i8O( const int32_t *input, const T *scale, const T *bias, const T *residual_bias, int8_t *output, T *residual, int hidden_size, float dequant_scale, float quant_scale, bool is_post_ln, bool in_col32, bool out_col32, const T *colsum) { extern __shared__ float s_row_out[]; int block_start = blockIdx.x * hidden_size; int start = block_start + threadIdx.x; int end = block_start + hidden_size; float val = 0.0; int input_index; for (int i = start; i < end; i += blockDim.x) { if (in_col32) { int row_id = 
blockIdx.x; int col_id = i - block_start; input_index = row_major2flat_col32(row_id, col_id, gridDim.x, hidden_size); } else { input_index = i; } float residual_out = __int2float_rn(input[input_index]) * dequant_scale + residual[i]; if (colsum != nullptr) residual_out += __ldg(&colsum[i - block_start]); s_row_out[i - block_start] = residual_out; val += residual_out; } // step 0. compute mean __shared__ float s_mean; float reduce_res = blockReduceSum<float>(val); if (threadIdx.x == 0) s_mean = reduce_res / __int2float_rn(hidden_size); __syncthreads(); // step 1. compute variance val = 0.0; for (int i = start; i < end; i += blockDim.x) { float tmp = s_row_out[i - block_start] - s_mean; val += tmp * tmp; } __shared__ float s_var; reduce_res = blockReduceSum(val); if (threadIdx.x == 0) s_var = rsqrtf(reduce_res / float(hidden_size) + epsilon); __syncthreads(); float output_f; // step 2. layer norm for (int i = start; i < end; i += blockDim.x) { val = s_row_out[i - block_start] - s_mean; output_f = val * s_var * __ldg(&scale[i - block_start]) + __ldg(&bias[i - block_start]); int8_t res = float2int8(output_f, quant_scale); int output_index; if (out_col32) { int row_id = blockIdx.x; int col_id = i - block_start; output_index = row_major2flat_col32(row_id, col_id, gridDim.x, hidden_size); } else { output_index = i; } output[output_index] = res; T res_bias_val = (residual_bias == nullptr) ? T{0.f} : __ldg(&residual_bias[i - block_start]); if (is_post_ln) { residual[i] = output_f + res_bias_val; } else { residual[i] = s_row_out[i - block_start] + res_bias_val; } } } template <> __global__ void ker_residual_bias_ln_i32I_i8O<half>( const int32_t *input, const half *scale, const half *bias, const half *residual_bias, int8_t *output, half *residual, int hidden_size, float dequant_scale, float quant_scale, bool is_post_ln, bool in_col32, bool out_col32, const half *colsum) { extern __shared__ float s_row_out[]; int block_start = blockIdx.x * hidden_size; int start = block_start + threadIdx.x; int end = block_start + hidden_size; float val = 0.0; int input_index; for (int i = start; i < end; i += blockDim.x) { if (in_col32) { int row_id = blockIdx.x; int col_id = i - block_start; input_index = row_major2flat_col32(row_id, col_id, gridDim.x, hidden_size); } else { input_index = i; } float residual_out = __int2float_rn(input[input_index]) * dequant_scale + safe_half_to_float(residual[i]); if (colsum != nullptr) residual_out += safe_half_to_float(__ldg(&colsum[i - block_start])); s_row_out[i - block_start] = residual_out; val += residual_out; } // step 0. compute mean __shared__ float s_mean; float reduce_res = blockReduceSum<float>(val); if (threadIdx.x == 0) s_mean = reduce_res / __int2float_rn(hidden_size); __syncthreads(); // step 1. compute variance val = 0.0; for (int i = start; i < end; i += blockDim.x) { float tmp = s_row_out[i - block_start] - s_mean; val += tmp * tmp; } __shared__ float s_var; reduce_res = blockReduceSum(val); if (threadIdx.x == 0) s_var = rsqrtf(reduce_res / __int2float_rn(hidden_size) + epsilon); __syncthreads(); float output_f; // step 2. 
layer norm for (int i = start; i < end; i += blockDim.x) { val = s_row_out[i - block_start] - s_mean; output_f = val * s_var * safe_half_to_float(__ldg(&scale[i - block_start])) + safe_half_to_float(__ldg(&bias[i - block_start])); int8_t res = float2int8(output_f, quant_scale); int output_index; if (out_col32) { int row_id = blockIdx.x; int col_id = i - block_start; output_index = row_major2flat_col32(row_id, col_id, gridDim.x, hidden_size); } else { output_index = i; } output[output_index] = res; half res_bias_val = (residual_bias == nullptr) ? __float2half(0.f) : __ldg(&residual_bias[i - block_start]); if (is_post_ln) { residual[i] = __float2half(output_f) + res_bias_val; } else { residual[i] = __float2half(s_row_out[i - block_start]) + res_bias_val; } } } template <typename T> void ker_residual_bias_ln_i32I_i8O_launcher( const int32_t *input, const T *scale, const T *bias, const T *residual_bias, int8_t *output, T *residual, int batch_tokens, int hidden_size, float dequant_scale, float quant_scale, int max_thread_per_block, cudaStream_t stream, bool is_post_ln, bool in_col32, bool out_col32, const T *colsum) { ker_residual_bias_ln_i32I_i8O<T><<<batch_tokens, max_thread_per_block, hidden_size * sizeof(float), stream>>>( input, scale, bias, residual_bias, output, residual, hidden_size, dequant_scale, quant_scale, is_post_ln, in_col32, out_col32, colsum); } template <> void ker_residual_bias_ln_i32I_i8O_launcher<half>( const int32_t *input, const half *scale, const half *bias, const half *residual_bias, int8_t *output, half *residual, int batch_tokens, int hidden_size, float dequant_scale, float quant_scale, int max_thread_per_block, cudaStream_t stream, bool is_post_ln, bool in_col32, bool out_col32, const half *colsum) { ker_residual_bias_ln_i32I_i8O<half><<<batch_tokens, max_thread_per_block, hidden_size * sizeof(float), stream>>>( input, scale, bias, residual_bias, output, residual, hidden_size, dequant_scale, quant_scale, is_post_ln, in_col32, out_col32, colsum); } template void ker_residual_bias_ln_i32I_i8O_launcher<float>( const int32_t *input, const float *scale, const float *bias, const float *residual_bias, int8_t *output, float *residual, int batch_tokens, int hidden_size, float dequant_scale, float quant_scale, int max_thread_per_block, cudaStream_t stream, bool is_post_ln, bool in_col32, bool out_col32, const float *colsum); template void ker_residual_bias_ln_i32I_i8O_launcher<half>( const int32_t *input, const half *scale, const half *bias, const half *residual_bias, int8_t *output, half *residual, int batch_tokens, int hidden_size, float dequant_scale, float quant_scale, int max_thread_per_block, cudaStream_t stream, bool is_post_ln, bool in_col32, bool out_col32, const half *colsum); template <typename T> __global__ void ker_residual_bias_ln_i8I_i8O( const int8_t *input, const T *scale, const T *bias, const T *residual_bias, int8_t *output, T *residual, int hidden_size, float dequant_scale, float quant_scale, bool is_post_ln, bool in_col32, bool out_col32, const T *colsum) { extern __shared__ float s_row_out[]; int block_start = blockIdx.x * hidden_size; int start = block_start + threadIdx.x; int end = block_start + hidden_size; float val = 0.0; int input_index; for (int i = start; i < end; i += blockDim.x) { if (in_col32) { int row_id = blockIdx.x; int col_id = i - block_start; input_index = row_major2flat_col32(row_id, col_id, gridDim.x, hidden_size); } else { input_index = i; } float residual_out = __int2float_rn(input[input_index]) * dequant_scale + residual[i]; if 
(colsum) residual_out += colsum[i - block_start]; s_row_out[i - block_start] = residual_out; val += residual_out; } // step 0. compute mean __shared__ float s_mean; float reduce_res = blockReduceSum<float>(val); if (threadIdx.x == 0) s_mean = reduce_res / __int2float_rn(hidden_size); __syncthreads(); // step 1. compute variance val = 0.0; for (int i = start; i < end; i += blockDim.x) { float tmp = s_row_out[i - block_start] - s_mean; val += tmp * tmp; } __shared__ float s_var; reduce_res = blockReduceSum(val); if (threadIdx.x == 0) s_var = rsqrtf(reduce_res / float(hidden_size) + epsilon); __syncthreads(); float output_f; // step 2. layer norm for (int i = start; i < end; i += blockDim.x) { val = s_row_out[i - block_start] - s_mean; output_f = val * s_var * __ldg(&scale[i - block_start]) + __ldg(&bias[i - block_start]); int8_t res = float2int8(output_f, quant_scale); int output_index; if (out_col32) { int row_id = blockIdx.x; int col_id = i - block_start; output_index = row_major2flat_col32(row_id, col_id, gridDim.x, hidden_size); } else { output_index = i; } output[output_index] = res; T res_bias_val = (residual_bias == nullptr) ? T{0.f} : __ldg(&residual_bias[i - block_start]); if (is_post_ln) { residual[i] = output_f + res_bias_val; } else { residual[i] = s_row_out[i - block_start] + res_bias_val; } } } template <> __global__ void ker_residual_bias_ln_i8I_i8O<half>( const int8_t *input, const half *scale, const half *bias, const half *residual_bias, int8_t *output, half *residual, int hidden_size, float dequant_scale, float quant_scale, bool is_post_ln, bool in_col32, bool out_col32, const half *colsum) { extern __shared__ float s_row_out[]; int block_start = blockIdx.x * hidden_size; int start = block_start + threadIdx.x; int end = block_start + hidden_size; float val = 0.0; int input_index; for (int i = start; i < end; i += blockDim.x) { if (in_col32) { int row_id = blockIdx.x; int col_id = i - block_start; input_index = row_major2flat_col32(row_id, col_id, gridDim.x, hidden_size); } else { input_index = i; } float residual_out = __int2float_rn(input[input_index]) * dequant_scale + safe_half_to_float(residual[i]); if (colsum) residual_out += safe_half_to_float(colsum[i - block_start]); s_row_out[i - block_start] = residual_out; val += residual_out; } // step 0. compute mean __shared__ float s_mean; float reduce_res = blockReduceSum<float>(val); if (threadIdx.x == 0) s_mean = reduce_res / __int2float_rn(hidden_size); __syncthreads(); // step 1. compute variance val = 0.0; for (int i = start; i < end; i += blockDim.x) { float tmp = s_row_out[i - block_start] - s_mean; val += tmp * tmp; } __shared__ float s_var; reduce_res = blockReduceSum(val); if (threadIdx.x == 0) s_var = rsqrtf(reduce_res / __int2float_rn(hidden_size) + epsilon); __syncthreads(); float output_f; // step 2. layer norm for (int i = start; i < end; i += blockDim.x) { val = s_row_out[i - block_start] - s_mean; output_f = val * s_var * safe_half_to_float(__ldg(&scale[i - block_start])) + safe_half_to_float(__ldg(&bias[i - block_start])); int8_t res = float2int8(output_f, quant_scale); int output_index; if (out_col32) { int row_id = blockIdx.x; int col_id = i - block_start; output_index = row_major2flat_col32(row_id, col_id, gridDim.x, hidden_size); } else { output_index = i; } output[output_index] = res; half res_bias_val = (residual_bias == nullptr) ? 
__float2half(0.f) : __ldg(&residual_bias[i - block_start]); if (is_post_ln) { residual[i] = __float2half(output_f) + res_bias_val; } else { residual[i] = __float2half(s_row_out[i - block_start]) + res_bias_val; } } } template <typename T> void ker_residual_bias_ln_i8I_i8O_launcher( const int8_t *input, const T *scale, const T *bias, const T *residual_bias, int8_t *output, T *residual, int batch_tokens, int hidden_size, float dequant_scale, float quant_scale, int max_thread_per_block, cudaStream_t stream, bool is_post_ln, bool in_col32, bool out_col32, const T *colsum) { ker_residual_bias_ln_i8I_i8O<T><<<batch_tokens, max_thread_per_block, hidden_size * sizeof(float), stream>>>( input, scale, bias, residual_bias, output, residual, hidden_size, dequant_scale, quant_scale, is_post_ln, in_col32, out_col32, colsum); } template <> void ker_residual_bias_ln_i8I_i8O_launcher<half>( const int8_t *input, const half *scale, const half *bias, const half *residual_bias, int8_t *output, half *residual, int batch_tokens, int hidden_size, float dequant_scale, float quant_scale, int max_thread_per_block, cudaStream_t stream, bool is_post_ln, bool in_col32, bool out_col32, const half *colsum) { ker_residual_bias_ln_i8I_i8O<half><<<batch_tokens, max_thread_per_block, hidden_size * sizeof(float), stream>>>( input, scale, bias, residual_bias, output, residual, hidden_size, dequant_scale, quant_scale, is_post_ln, in_col32, out_col32, colsum); } template void ker_residual_bias_ln_i8I_i8O_launcher<float>( const int8_t *input, const float *scale, const float *bias, const float *residual_bias, int8_t *output, float *residual, int batch_tokens, int hidden_size, float dequant_scale, float quant_scale, int max_thread_per_block, cudaStream_t stream, bool is_post_ln, bool in_col32, bool out_col32, const float *colsum); template void ker_residual_bias_ln_i8I_i8O_launcher<half>( const int8_t *input, const half *scale, const half *bias, const half *residual_bias, int8_t *output, half *residual, int batch_tokens, int hidden_size, float dequant_scale, float quant_scale, int max_thread_per_block, cudaStream_t stream, bool is_post_ln, bool in_col32, bool out_col32, const half *colsum); template <typename T> __global__ void ker_residual_bias_ln_i32I(const int32_t *input, const T *scale, const T *bias, const T *residual, T *output, int hidden_size, float dequant_scale, bool in_col32, const T *colsum) { extern __shared__ float s_row_out[]; int block_start = blockIdx.x * hidden_size; int start = block_start + threadIdx.x; int end = block_start + hidden_size; float val = 0.0; int input_index; for (int i = start; i < end; i += blockDim.x) { if (in_col32) { int row_id = blockIdx.x; int col_id = i - block_start; input_index = row_major2flat_col32(row_id, col_id, gridDim.x, hidden_size); } else { input_index = i; } float residual_out = __int2float_rn(input[input_index]) * dequant_scale + residual[i]; if (colsum) residual_out += colsum[i - block_start]; s_row_out[i - block_start] = residual_out; val += residual_out; } // step 0. compute mean __shared__ float s_mean; float reduce_res = blockReduceSum<float>(val); if (threadIdx.x == 0) s_mean = reduce_res / __int2float_rn(hidden_size); __syncthreads(); // step 1. compute variance val = 0.0; for (int i = start; i < end; i += blockDim.x) { float tmp = s_row_out[i - block_start] - s_mean; val += tmp * tmp; } __shared__ float s_var; reduce_res = blockReduceSum(val); if (threadIdx.x == 0) s_var = rsqrtf(reduce_res / float(hidden_size) + epsilon); __syncthreads(); float output_f; // step 2. 
layer norm for (int i = start; i < end; i += blockDim.x) { val = s_row_out[i - block_start] - s_mean; output_f = val * s_var * __ldg(&scale[i - block_start]) + __ldg(&bias[i - block_start]); int output_index = i; output[output_index] = output_f; } } template <> __global__ void ker_residual_bias_ln_i32I<half>( const int32_t *input, const half *scale, const half *bias, const half *residual, half *output, int hidden_size, float dequant_scale, bool in_col32, const half *colsum) { extern __shared__ float s_row_out[]; int block_start = blockIdx.x * hidden_size; int start = block_start + threadIdx.x; int end = block_start + hidden_size; float val = 0.0; int input_index; for (int i = start; i < end; i += blockDim.x) { if (in_col32) { int row_id = blockIdx.x; int col_id = i - block_start; input_index = row_major2flat_col32(row_id, col_id, gridDim.x, hidden_size); } else { input_index = i; } float residual_out = __int2float_rn(input[input_index]) * dequant_scale + safe_half_to_float(residual[i]); if (colsum) residual_out += safe_half_to_float(colsum[i - block_start]); s_row_out[i - block_start] = residual_out; val += residual_out; } // step 0. compute mean __shared__ float s_mean; float reduce_res = blockReduceSum<float>(val); if (threadIdx.x == 0) s_mean = reduce_res / __int2float_rn(hidden_size); __syncthreads(); // step 1. compute variance val = 0.0; for (int i = start; i < end; i += blockDim.x) { float tmp = s_row_out[i - block_start] - s_mean; val += tmp * tmp; } __shared__ float s_var; reduce_res = blockReduceSum(val); if (threadIdx.x == 0) s_var = rsqrtf(reduce_res / __int2float_rn(hidden_size) + epsilon); __syncthreads(); float output_f; // step 2. layer norm for (int i = start; i < end; i += blockDim.x) { val = s_row_out[i - block_start] - s_mean; output_f = val * s_var * safe_half_to_float(__ldg(&scale[i - block_start])) + safe_half_to_float(__ldg(&bias[i - block_start])); int output_index = i; output[output_index] = __float2half(output_f); } } template <typename T> void ker_residual_bias_ln_i32I_launcher(const int32_t *input, const T *scale, const T *bias, const T *residual, T *output, int batch_tokens, int hidden_size, float dequant_scale, int max_thread_per_block, cudaStream_t stream, bool in_col32, const T *colsum) { ker_residual_bias_ln_i32I<T> <<<batch_tokens, max_thread_per_block, hidden_size * sizeof(float), stream>>>(input, scale, bias, residual, output, hidden_size, dequant_scale, in_col32, colsum); } template <> void ker_residual_bias_ln_i32I_launcher<half>( const int32_t *input, const half *scale, const half *bias, const half *residual, half *output, int batch_tokens, int hidden_size, float dequant_scale, int max_thread_per_block, cudaStream_t stream, bool in_col32, const half *colsum) { ker_residual_bias_ln_i32I<half> <<<batch_tokens, max_thread_per_block, hidden_size * sizeof(float), stream>>>(input, scale, bias, residual, output, hidden_size, dequant_scale, in_col32, colsum); } template void ker_residual_bias_ln_i32I_launcher<float>( const int32_t *input, const float *scale, const float *bias, const float *residual, float *output, int batch_tokens, int hidden_size, float dequant_scale, int max_thread_per_block, cudaStream_t stream, bool in_col32, const float *colsum); template void ker_residual_bias_ln_i32I_launcher<half>( const int32_t *input, const half *scale, const half *bias, const half *residual, half *output, int batch_tokens, int hidden_size, float dequant_scale, int max_thread_per_block, cudaStream_t stream, bool in_col32, const half *colsum); template <typename T> 
__global__ void ker_bias_gelu_i8I_i8O(int8_t *input, int8_t *output, const T *bias, int feature_dim, float dequant_scale, float quant_scale, bool in_col32, bool out_col32) { int block_start = blockIdx.x * feature_dim; int start = block_start + threadIdx.x; int end = block_start + feature_dim; for (int i = start; i < end; i += blockDim.x) { int input_index; if (in_col32) { int row_id = blockIdx.x; int col_id = i - block_start; input_index = row_major2flat_col32(row_id, col_id, gridDim.x, feature_dim); } else { input_index = i; } float fout = gelu<float>(float(input[input_index]) * dequant_scale + __ldg(&bias[i - block_start])); int output_index; if (out_col32) { int row_id = blockIdx.x; int col_id = i - block_start; output_index = row_major2flat_col32(row_id, col_id, gridDim.x, feature_dim); } else { output_index = i; } output[output_index] = float2int8(fout, quant_scale); } } /* fp16 version */ template <> __global__ void ker_bias_gelu_i8I_i8O<__half>( int8_t *input, int8_t *output, const __half *bias, int feature_dim, float dequant_scale, float quant_scale, bool in_col32, bool out_col32) { int block_start = blockIdx.x * feature_dim; int start = block_start + threadIdx.x; int end = block_start + feature_dim; for (int i = start; i < end; i += blockDim.x) { int input_index; if (in_col32) { int row_id = blockIdx.x; int col_id = i - block_start; input_index = row_major2flat_col32(row_id, col_id, gridDim.x, feature_dim); } else { input_index = i; } float fout = gelu<float>(float(input[input_index]) * dequant_scale + __half2float(__ldg(&bias[i - block_start]))); int output_index; if (out_col32) { int row_id = blockIdx.x; int col_id = i - block_start; output_index = row_major2flat_col32(row_id, col_id, gridDim.x, feature_dim); } else { output_index = i; } output[output_index] = float2int8(fout, quant_scale); } } template <typename T> void ker_bias_gelu_i8I_i8O_launcher(int batch_token_num, cudaStream_t stream, int8_t *input, int8_t *output, const T *bias, int feature_dim, float dequant_scale, float quant_scale, bool in_col32, bool out_col32) { ker_bias_gelu_i8I_i8O<T><<<batch_token_num, 1024, 0, stream>>>( input, output, bias, feature_dim, dequant_scale, quant_scale, in_col32, out_col32); } template <> void ker_bias_gelu_i8I_i8O_launcher<__half>( int batch_token_num, cudaStream_t stream, int8_t *input, int8_t *output, const __half *bias, int feature_dim, float dequant_scale, float quant_scale, bool in_col32, bool out_col32) { ker_bias_gelu_i8I_i8O<__half><<<batch_token_num, 1024, 0, stream>>>( input, output, bias, feature_dim, dequant_scale, quant_scale, in_col32, out_col32); } template void ker_bias_gelu_i8I_i8O_launcher<float>( int batch_token_num, cudaStream_t stream, int8_t *input, int8_t *output, const float *bias, int feature_dim, float dequant_scale, float quant_scale, bool in_col32, bool out_col32); template void ker_bias_gelu_i8I_i8O_launcher<__half>( int batch_token_num, cudaStream_t stream, int8_t *input, int8_t *output, const __half *bias, int feature_dim, float dequant_scale, float quant_scale, bool in_col32, bool out_col32); template <typename T> __global__ void ker_bias_relu_i8I_i8O(int8_t *input, int8_t *output, const T *bias, int feature_dim, float dequant_scale, float quant_scale, float clip_max, bool in_col32, bool out_col32, bool narrow_clip) { int block_start = blockIdx.x * feature_dim; int start = block_start + threadIdx.x; int end = block_start + feature_dim; for (int i = start; i < end; i += blockDim.x) { int input_index; if (in_col32) { int row_id = blockIdx.x; int col_id 
= i - block_start; input_index = row_major2flat_col32(row_id, col_id, gridDim.x, feature_dim); } else { input_index = i; } float fout = fmaxf(float(input[input_index]) * dequant_scale + __ldg(&bias[i - block_start]), 0.f); int output_index; if (out_col32) { int row_id = blockIdx.x; int col_id = i - block_start; output_index = row_major2flat_col32(row_id, col_id, gridDim.x, feature_dim); } else { output_index = i; } if (narrow_clip) { output[output_index] = posfloat2int8(fout, quant_scale, clip_max); } else { output[output_index] = float2int8(fout, quant_scale); } } } /* fp16 version */ template <> __global__ void ker_bias_relu_i8I_i8O<__half>( int8_t *input, int8_t *output, const __half *bias, int feature_dim, float dequant_scale, float quant_scale, float clip_max, bool in_col32, bool out_col32, bool narrow_clip) { int block_start = blockIdx.x * feature_dim; int start = block_start + threadIdx.x; int end = block_start + feature_dim; for (int i = start; i < end; i += blockDim.x) { int input_index; if (in_col32) { int row_id = blockIdx.x; int col_id = i - block_start; input_index = row_major2flat_col32(row_id, col_id, gridDim.x, feature_dim); } else { input_index = i; } float fout = fmaxf(float(input[input_index]) * dequant_scale + __half2float(__ldg(&bias[i - block_start])), 0.f); int output_index; if (out_col32) { int row_id = blockIdx.x; int col_id = i - block_start; output_index = row_major2flat_col32(row_id, col_id, gridDim.x, feature_dim); } else { output_index = i; } if (narrow_clip) { output[output_index] = posfloat2int8(fout, quant_scale, clip_max); } else { output[output_index] = float2int8(fout, quant_scale); } } } template <typename T> void ker_bias_relu_i8I_i8O_launcher(int batch_token_num, cudaStream_t stream, int8_t *input, int8_t *output, const T *bias, int feature_dim, float dequant_scale, float quant_scale, float clip_max, bool in_col32, bool out_col32, bool narrow_clip) { ker_bias_relu_i8I_i8O<T><<<batch_token_num, 1024, 0, stream>>>( input, output, bias, feature_dim, dequant_scale, quant_scale, clip_max, in_col32, out_col32, narrow_clip); } template <> void ker_bias_relu_i8I_i8O_launcher<__half>( int batch_token_num, cudaStream_t stream, int8_t *input, int8_t *output, const __half *bias, int feature_dim, float dequant_scale, float quant_scale, float clip_max, bool in_col32, bool out_col32, bool narrow_clip) { ker_bias_relu_i8I_i8O<__half><<<batch_token_num, 1024, 0, stream>>>( input, output, bias, feature_dim, dequant_scale, quant_scale, clip_max, in_col32, out_col32, narrow_clip); } template void ker_bias_relu_i8I_i8O_launcher<float>( int batch_token_num, cudaStream_t stream, int8_t *input, int8_t *output, const float *bias, int feature_dim, float dequant_scale, float quant_scale, float clip_max, bool in_col32, bool out_col32, bool narrow_clip); template void ker_bias_relu_i8I_i8O_launcher<__half>( int batch_token_num, cudaStream_t stream, int8_t *input, int8_t *output, const __half *bias, int feature_dim, float dequant_scale, float quant_scale, float clip_max, bool in_col32, bool out_col32, bool narrow_clip); template <typename T> __global__ void ker_arrange_encself_qkv_i8I(const int8_t *ori_qkv, const T *qkv_bias, T *new_qkv, int max_batch_dim, int batch_seq_len, int dim_per_head, int head_num, float dequant_scale, bool in_col32) { int hidden_size = dim_per_head * head_num; int batch_id = blockIdx.x / batch_seq_len; int token_id = blockIdx.x % batch_seq_len; int qkv_offset = max_batch_dim * blockIdx.y; for (std::size_t i = threadIdx.x; i < hidden_size; i += blockDim.x) 
{ int head_id = i / dim_per_head; int dim_id = i % dim_per_head; int target_id = targetid_4dim(batch_id, head_id, token_id, dim_id, head_num, batch_seq_len, dim_per_head); int qkv_index; if (in_col32) { int row_id = blockIdx.x; int col_id = blockIdx.y * hidden_size + i; qkv_index = row_major2flat_col32(row_id, col_id, gridDim.x, gridDim.y * hidden_size); } else { qkv_index = (blockIdx.x * gridDim.y + blockIdx.y) * hidden_size + i; } new_qkv[qkv_offset + target_id] = float(ori_qkv[qkv_index]) * dequant_scale + __ldg(&qkv_bias[blockIdx.y * hidden_size + i]); } } template <> __global__ void ker_arrange_encself_qkv_i8I<__half>( const int8_t *ori_qkv, const __half *qkv_bias, __half *new_qkv, int max_batch_dim, int batch_seq_len, int dim_per_head, int head_num, float dequant_scale, bool in_col32) { int half_hidden_size = dim_per_head * head_num; int batch_id = blockIdx.x / batch_seq_len; int token_id = blockIdx.x % batch_seq_len; for (std::size_t i = threadIdx.x; i < half_hidden_size; i += blockDim.x) { int head_id = i / dim_per_head; int dim_id = i % dim_per_head; int qkv_offset = max_batch_dim * blockIdx.y; int target_id = targetid_4dim(batch_id, head_id, token_id, dim_id, head_num, batch_seq_len, dim_per_head); const char2 *p_ori_qkv = (const char2 *)ori_qkv; const half2 *p_bias = (const half2 *)qkv_bias; half2 *p_new_qkv = (half2 *)new_qkv; int qkv_index; if (in_col32) { int row_id = blockIdx.x; int col_id = (blockIdx.y * half_hidden_size + i) * 2; qkv_index = row_major2flat_col32(row_id, col_id, gridDim.x, gridDim.y * half_hidden_size * 2) >> 1; } else { qkv_index = (blockIdx.x * gridDim.y + blockIdx.y) * half_hidden_size + i; } char2 ori_qkv_i2 = p_ori_qkv[qkv_index]; half2 ori_qkv_h2; ori_qkv_h2.x = __float2half(float(ori_qkv_i2.x) * dequant_scale); ori_qkv_h2.y = __float2half(float(ori_qkv_i2.y) * dequant_scale); p_new_qkv[qkv_offset + target_id] = __hadd2(ori_qkv_h2, __ldg(&p_bias[blockIdx.y * half_hidden_size + i])); } } template <typename T> void ker_arrange_encself_qkv_i8I_launcher( int batch_token_num, int hidden_size, cudaStream_t stream, const int8_t *ori_qkv, const T *qkv_bias, T *new_qkv, int max_batch_dim, int batch_seq_len, int dim_per_head, int head_num, int max_thread_per_block, float dequant_scale, bool in_col32) { ker_arrange_encself_qkv_i8I<T> <<<dim3(batch_token_num, 3), max_thread_per_block, 0, stream>>>( ori_qkv, qkv_bias, new_qkv, max_batch_dim, batch_seq_len, dim_per_head, head_num, dequant_scale, in_col32); } template <> void ker_arrange_encself_qkv_i8I_launcher<__half>( int batch_token_num, int hidden_size, cudaStream_t stream, const int8_t *ori_qkv, const __half *qkv_bias, __half *new_qkv, int max_batch_dim, int batch_seq_len, int dim_per_head, int head_num, int max_thread_per_block, float dequant_scale, bool in_col32) { ker_arrange_encself_qkv_i8I<__half> <<<dim3(batch_token_num, 3), max_thread_per_block, 0, stream>>>( ori_qkv, qkv_bias, new_qkv, max_batch_dim / 2, batch_seq_len, dim_per_head / 2, head_num, dequant_scale, in_col32); } template void ker_arrange_encself_qkv_i8I_launcher<float>( int batch_token_num, int hidden_size, cudaStream_t stream, const int8_t *ori_qkv, const float *qkv_bias, float *new_qkv, int max_batch_dim, int batch_seq_len, int dim_per_head, int head_num, int max_thread_per_block, float dequant_scale, bool in_col32); template void ker_arrange_encself_qkv_i8I_launcher<__half>( int batch_token_num, int hidden_size, cudaStream_t stream, const int8_t *ori_qkv, const __half *qkv_bias, __half *new_qkv, int max_batch_dim, int batch_seq_len, int 
dim_per_head, int head_num, int max_thread_per_block, float dequant_scale, bool in_col32); template <typename T> __global__ void ker_arrange_encself_qkv_i8I_i8O( const int8_t *ori_qkv, const T *qkv_bias, int8_t *new_q, int8_t *new_k, int8_t *new_v, T *d_v, int batch_seq_len, int dim_per_head, int head_num, float dequant_scale, float quant_scale, bool in_col32) { int hidden_size = dim_per_head * head_num; int batch_id = blockIdx.x / batch_seq_len; int token_id = blockIdx.x % batch_seq_len; for (std::size_t i = threadIdx.x; i < hidden_size; i += blockDim.x) { int head_id = i / dim_per_head; int dim_id = i % dim_per_head; int target_id = targetid_4dim(batch_id, head_id, token_id, dim_id, head_num, batch_seq_len, dim_per_head); int qkv_index; if (in_col32) { int row_id = blockIdx.x; int col_id = blockIdx.y * hidden_size + i; qkv_index = row_major2flat_col32(row_id, col_id, gridDim.x, gridDim.y * hidden_size); } else { qkv_index = (blockIdx.x * gridDim.y + blockIdx.y) * hidden_size + i; } float val = float(ori_qkv[qkv_index]) * dequant_scale + __ldg(&qkv_bias[blockIdx.y * hidden_size + i]); int8_t quant_val = float2int8(val, quant_scale); if (blockIdx.y == 0) { new_q[target_id] = quant_val; } else if (blockIdx.y == 1) { new_k[target_id] = quant_val; } else { new_v[target_id] = quant_val; d_v[target_id] = float(quant_val) / quant_scale; } } } template <> __global__ void ker_arrange_encself_qkv_i8I_i8O<__half>( const int8_t *ori_qkv, const __half *qkv_bias, int8_t *new_q, int8_t *new_k, int8_t *new_v, __half *d_v, int batch_seq_len, int dim_per_head, int head_num, float dequant_scale, float quant_scale, bool in_col32) { int hidden_size = dim_per_head * head_num; int batch_id = blockIdx.x / batch_seq_len; int token_id = blockIdx.x % batch_seq_len; for (std::size_t i = threadIdx.x; i < hidden_size; i += blockDim.x) { int head_id = i / dim_per_head; int dim_id = i % dim_per_head; int target_id = targetid_4dim(batch_id, head_id, token_id, dim_id, head_num, batch_seq_len, dim_per_head); int qkv_index; if (in_col32) { int row_id = blockIdx.x; int col_id = blockIdx.y * hidden_size + i; qkv_index = row_major2flat_col32(row_id, col_id, gridDim.x, gridDim.y * hidden_size); } else { qkv_index = (blockIdx.x * gridDim.y + blockIdx.y) * hidden_size + i; } float val = float(ori_qkv[qkv_index]) * dequant_scale + __half2float(__ldg(&qkv_bias[blockIdx.y * hidden_size + i])); int8_t quant_val = float2int8(val, quant_scale); if (blockIdx.y == 0) { new_q[target_id] = quant_val; } else if (blockIdx.y == 1) { new_k[target_id] = quant_val; } else { new_v[target_id] = quant_val; d_v[target_id] = __float2half(float(quant_val) / quant_scale); } } } template <typename T> void ker_arrange_encself_qkv_i8I_i8O_launcher( int batch_token_num, int hidden_size, cudaStream_t stream, const int8_t *ori_qkv, const T *qkv_bias, int8_t *new_q, int8_t *new_k, int8_t *new_v, T *d_v, int batch_seq_len, int dim_per_head, int head_num, int max_thread_per_block, float dequant_scale, float quant_scale, bool in_col32) { ker_arrange_encself_qkv_i8I_i8O<T> <<<dim3(batch_token_num, 3), max_thread_per_block, 0, stream>>>( ori_qkv, qkv_bias, new_q, new_k, new_v, d_v, batch_seq_len, dim_per_head, head_num, dequant_scale, quant_scale, in_col32); } template <> void ker_arrange_encself_qkv_i8I_i8O_launcher<__half>( int batch_token_num, int hidden_size, cudaStream_t stream, const int8_t *ori_qkv, const __half *qkv_bias, int8_t *new_q, int8_t *new_k, int8_t *new_v, __half *d_v, int batch_seq_len, int dim_per_head, int head_num, int max_thread_per_block, 
float dequant_scale, float quant_scale, bool in_col32) { ker_arrange_encself_qkv_i8I_i8O<__half> <<<dim3(batch_token_num, 3), max_thread_per_block, 0, stream>>>( ori_qkv, qkv_bias, new_q, new_k, new_v, d_v, batch_seq_len, dim_per_head, head_num, dequant_scale, quant_scale, in_col32); } template void ker_arrange_encself_qkv_i8I_i8O_launcher<float>( int batch_token_num, int hidden_size, cudaStream_t stream, const int8_t *ori_qkv, const float *qkv_bias, int8_t *new_q, int8_t *new_k, int8_t *new_v, float *d_v, int batch_seq_len, int dim_per_head, int head_num, int max_thread_per_block, float dequant_scale, float quant_scale, bool in_col32); template void ker_arrange_encself_qkv_i8I_i8O_launcher<__half>( int batch_token_num, int hidden_size, cudaStream_t stream, const int8_t *ori_qkv, const __half *qkv_bias, int8_t *new_q, int8_t *new_k, int8_t *new_v, __half *d_v, int batch_seq_len, int dim_per_head, int head_num, int max_thread_per_block, float dequant_scale, float quant_scale, bool in_col32); template <typename T> __global__ void ker_arrange_atten_output_i8O(const T *ori_q, int8_t *new_q, int beam_size, int dim_per_head, int head_num, float quant_scale, bool out_col32) { int hidden_size = dim_per_head * head_num; int batch_id = blockIdx.x / beam_size; // note, for encoder, beam_id is token_id; for decoder, beam_id is beam_id int beam_id = blockIdx.x % beam_size; for (std::size_t i = threadIdx.x; i < hidden_size; i += blockDim.x) { int head_id = i / dim_per_head; int dim_id = i % dim_per_head; int out_index; if (out_col32) { int row_id = blockIdx.x; int col_id = i; out_index = row_major2flat_col32(row_id, col_id, gridDim.x, hidden_size); } else { out_index = blockIdx.x * hidden_size + i; } new_q[out_index] = float2int8(ori_q[targetid_4dim(batch_id, head_id, beam_id, dim_id, head_num, beam_size, dim_per_head)], quant_scale); } } template <> __global__ void ker_arrange_atten_output_i8O<__half>( const __half *ori_q, int8_t *new_q, int beam_size, int dim_per_head, int head_num, float quant_scale, bool out_col32) { int batch_id = blockIdx.x / beam_size; // note, for encoder, beam_id is token_id; for decoder, beam_id is beam_id int beam_id = blockIdx.x % beam_size; int half_hidden_size = dim_per_head * head_num; for (std::size_t i = threadIdx.x; i < half_hidden_size; i += blockDim.x) { int head_id = i / dim_per_head; int dim_id = i % dim_per_head; int out_index; if (out_col32) { int row_id = blockIdx.x; int col_id = i * 2; out_index = row_major2flat_col32(row_id, col_id, gridDim.x, half_hidden_size * 2) >> 1; } else { out_index = blockIdx.x * half_hidden_size + i; } const half2 *p_ori_q = (const half2 *)ori_q; half2 v_ori_q; char2 *p_new_q = (char2 *)new_q; char2 v_new_q; v_ori_q = p_ori_q[targetid_4dim(batch_id, head_id, beam_id, dim_id, head_num, beam_size, dim_per_head)]; v_new_q.x = float2int8(float(v_ori_q.x), quant_scale); v_new_q.y = float2int8(float(v_ori_q.y), quant_scale); p_new_q[out_index] = v_new_q; } } template <typename T> void ker_arrange_atten_output_i8O_launcher(int batch_token_num, int hidden_size, cudaStream_t stream, const T *ori_q, int8_t *new_q, int beam_size, int dim_per_head, int head_num, int max_thread_per_block, float quant_scale, bool out_col32) { ker_arrange_atten_output_i8O<T> <<<batch_token_num, max_thread_per_block, 0, stream>>>( ori_q, new_q, beam_size, dim_per_head, head_num, quant_scale, out_col32); } template <> void ker_arrange_atten_output_i8O_launcher<__half>( int batch_token_num, int hidden_size, cudaStream_t stream, const __half *ori_q, int8_t *new_q, int 
beam_size, int dim_per_head, int head_num, int max_thread_per_block, float quant_scale, bool out_col32) { ker_arrange_atten_output_i8O<__half> <<<batch_token_num, max_thread_per_block, 0, stream>>>( ori_q, new_q, beam_size, dim_per_head / 2, head_num, quant_scale, out_col32); } template void ker_arrange_atten_output_i8O_launcher<float>( int batch_token_num, int hidden_size, cudaStream_t stream, const float *ori_q, int8_t *new_q, int beam_size, int dim_per_head, int head_num, int max_thread_per_block, float quant_scale, bool out_col32); template void ker_arrange_atten_output_i8O_launcher<__half>( int batch_token_num, int hidden_size, cudaStream_t stream, const __half *ori_q, int8_t *new_q, int beam_size, int dim_per_head, int head_num, int max_thread_per_block, float quant_scale, bool out_col32); template <typename T> __global__ void ker_arrange_decself_qkv_i8I_i8O( const int8_t *ori_qkv, const T *qkv_bias, int8_t *new_q, int8_t *new_k, int8_t *new_v, int head_num, int dim_per_head, int max_step, int step_id, float dequant_scale, float quant_scale, bool in_col32) { int hidden_size = dim_per_head * head_num; for (int i = threadIdx.x; i < hidden_size; i += blockDim.x) { int qkv_index; if (in_col32) { int row_id = blockIdx.x; int col_id = blockIdx.y * hidden_size + i; qkv_index = row_major2flat_col32(row_id, col_id, gridDim.x, gridDim.y * hidden_size); } else { qkv_index = (blockIdx.x * gridDim.y + blockIdx.y) * hidden_size + i; } float val = float(ori_qkv[qkv_index]) * dequant_scale + __ldg(&qkv_bias[blockIdx.y * hidden_size + i]); int8_t quant_val = float2int8(val, quant_scale); int seq_id = blockIdx.x; // obvious, seq_id = batch_id * beam_size + beam_id if (blockIdx.y == 0) { // for query new_q[seq_id * hidden_size + i] = quant_val; return; } int head_id = i / dim_per_head; int dim_id = i % dim_per_head; int target_id = targetid_4dim(seq_id, head_id, step_id, dim_id, head_num, max_step, dim_per_head); if (blockIdx.y == 1) { // for key new_k[target_id] = quant_val; } else { // for value new_v[target_id] = quant_val; } } } template <> __global__ void ker_arrange_decself_qkv_i8I_i8O<__half>( const int8_t *ori_qkv, const __half *qkv_bias, int8_t *new_q, int8_t *new_k, int8_t *new_v, int head_num, int dim_per_head, int max_step, int step_id, float dequant_scale, float quant_scale, bool in_col32) { int hidden_size = dim_per_head * head_num; for (int i = threadIdx.x; i < hidden_size; i += blockDim.x) { int qkv_index; if (in_col32) { int row_id = blockIdx.x; int col_id = blockIdx.y * hidden_size + i; qkv_index = row_major2flat_col32(row_id, col_id, gridDim.x, gridDim.y * hidden_size); } else { qkv_index = (blockIdx.x * gridDim.y + blockIdx.y) * hidden_size + i; } float val = float(ori_qkv[qkv_index]) * dequant_scale + __half2float(__ldg(&qkv_bias[blockIdx.y * hidden_size + i])); int8_t quant_val = float2int8(val, quant_scale); int seq_id = blockIdx.x; // obvious, seq_id = batch_id * beam_size + beam_id if (blockIdx.y == 0) { // for query new_q[seq_id * hidden_size + i] = quant_val; return; } int head_id = i / dim_per_head; int dim_id = i % dim_per_head; int target_id = targetid_4dim(seq_id, head_id, step_id, dim_id, head_num, max_step, dim_per_head); if (blockIdx.y == 1) { // for key new_k[target_id] = quant_val; } else { // for value new_v[target_id] = quant_val; } } } template <typename T> void ker_arrange_decself_qkv_i8I_i8O_launcher( int step_token_num, int hidden_size, cudaStream_t stream, const int8_t *ori_qkv, const T *qkv_bias, int8_t *new_q, int8_t *new_k, int8_t *new_v, int head_num, int 
dim_per_head, int max_step, int step_id, int max_thread_per_block, float dequant_scale, float quant_scale, bool in_col32) { ker_arrange_decself_qkv_i8I_i8O<T> <<<dim3(step_token_num, 3), max_thread_per_block, 0, stream>>>( ori_qkv, qkv_bias, new_q, new_k, new_v, head_num, dim_per_head, max_step, step_id, dequant_scale, quant_scale, in_col32); } // template <> // void ker_arrange_decself_qkv_i8I_i8O_launcher<__half>( // int step_token_num, int hidden_size, cudaStream_t stream, // const int8_t *ori_qkv, const __half *qkv_bias, int8_t *new_q, int8_t // *new_k, int8_t *new_v, int head_num, int dim_per_head, int max_step, int // step_id, int max_thread_per_block, float dequant_scale, float // quant_scale, bool in_col32) { // ker_arrange_decself_qkv_i8I_i8O<__half> // <<<dim3(step_token_num, 3), max_thread_per_block, 0, stream>>>( // ori_qkv, qkv_bias, new_q, new_k, new_v, head_num, dim_per_head, // max_step, step_id, dequant_scale, quant_scale, in_col32); // } template void ker_arrange_decself_qkv_i8I_i8O_launcher<float>( int step_token_num, int hidden_size, cudaStream_t stream, const int8_t *ori_qkv, const float *qkv_bias, int8_t *new_q, int8_t *new_k, int8_t *new_v, int head_num, int dim_per_head, int max_step, int step_id, int max_thread_per_block, float dequant_scale, float quant_scale, bool in_col32); template void ker_arrange_decself_qkv_i8I_i8O_launcher<__half>( int step_token_num, int hidden_size, cudaStream_t stream, const int8_t *ori_qkv, const __half *qkv_bias, int8_t *new_q, int8_t *new_k, int8_t *new_v, int head_num, int dim_per_head, int max_step, int step_id, int max_thread_per_block, float dequant_scale, float quant_scale, bool in_col32); /** @brief: ker_fuse_softmax_new_value_i32I_i8O fused query-key correlation softmax and new_value for decoder self attention @thread gridDim.x = batch_size * beam_size * head_num blockDim.x = first multiple of WARP_SIZE greater than cur_step + 1 @param correlation: [batch_size, beam_size, head_num, cur_step + 1] */ __global__ void ker_fuse_softmax_new_value_i32I_i8O( const int32_t *logits, const int8_t *v, int8_t *new_v, int step_num, int max_step, int head_num, int dim_per_head, float attn_scale, float dequant_scale, float quant_scale, bool out_col32) { int idx = blockIdx.x * max_step + threadIdx.x; float val = threadIdx.x < step_num ? float(logits[idx]) * dequant_scale * dequant_scale * attn_scale : CUDA_FLOAT_INF_NEG; float max_val = blockReduceMax(val); __shared__ float smax; if (threadIdx.x == 0) smax = max_val; __syncthreads(); val = threadIdx.x < step_num ? 
expf(val - smax) : 0; float rsum = blockReduceSum(val); __shared__ float ssum; if (threadIdx.x == 0) ssum = rsum; __syncthreads(); extern __shared__ float block_new_value[]; float *step_probs = &block_new_value[dim_per_head]; if (threadIdx.x < step_num) step_probs[threadIdx.x] = val / ssum; __syncthreads(); for (int i = threadIdx.x, end = step_num * dim_per_head; i < end; i += blockDim.x) { int value_idx = blockIdx.x * max_step * dim_per_head + i; int step_idx = i / dim_per_head; int dim_idx = i % dim_per_head; if (step_idx == 0) { block_new_value[dim_idx] = 0; } atomicAdd(&block_new_value[dim_idx], float(v[value_idx]) * step_probs[step_idx] * dequant_scale); } __syncthreads(); for (int i = threadIdx.x, end = dim_per_head; i < end; i += blockDim.x) { int row = blockIdx.x / head_num; int head_idx = blockIdx.x % head_num; int row_size = gridDim.x / head_num; int col = head_idx * dim_per_head + i; int col_size = head_num * dim_per_head; int new_v_idx = row * col_size + col; if (out_col32) { new_v_idx = row_major2flat_col32(row, col, row_size, col_size); } new_v[new_v_idx] = float2int8(block_new_value[i], quant_scale); } } void ker_fuse_softmax_new_value_i32I_i8O_launcher( const int32_t *correlation, const int8_t *v, int8_t *new_v, int batch_head_num, int step_num, int max_step, int head_num, int dim_per_head, float attn_scale, float dequant_scale, float quant_scale, bool out_col32, cudaStream_t stream) { int block_dim = step_num; if (step_num < 1024) { block_dim = (step_num + 31) >> 5; block_dim *= 32; } ker_fuse_softmax_new_value_i32I_i8O<<< batch_head_num, block_dim, dim_per_head * sizeof(float) + step_num * sizeof(float), stream>>>( correlation, v, new_v, step_num, max_step, head_num, dim_per_head, attn_scale, dequant_scale, quant_scale, out_col32); } template <typename T> __global__ void ker_arrange_encdec_q_i8I(const int8_t *ori_q, const T *q_bias, T *new_q, int beam_size, int dim_per_head, int head_num, float dequant_scale, bool in_col32) { int hidden_size = dim_per_head * head_num; for (std::size_t i = threadIdx.x; i < hidden_size; i += blockDim.x) { int qkv_index; if (in_col32) { int row_id = blockIdx.x; int col_id = i; qkv_index = row_major2flat_col32(row_id, col_id, gridDim.x, hidden_size); } else { qkv_index = blockIdx.x * hidden_size + i; } T val = float(ori_q[qkv_index]) * dequant_scale + __ldg(&q_bias[i]); int batch_id = blockIdx.x / beam_size; int beam_id = blockIdx.x % beam_size; int head_id = i / dim_per_head; int dim_id = i % dim_per_head; new_q[targetid_4dim(batch_id, head_id, beam_id, dim_id, head_num, beam_size, dim_per_head)] = val; } } template <> __global__ void ker_arrange_encdec_q_i8I<__half>( const int8_t *ori_q, const __half *q_bias, __half *new_q, int beam_size, int dim_per_head, int head_num, float dequant_scale, bool in_col32) { int half_hidden_size = dim_per_head * head_num; const char2 *p_q = reinterpret_cast<const char2 *>(ori_q); for (std::size_t i = threadIdx.x; i < half_hidden_size; i += blockDim.x) { int qkv_index; if (in_col32) { int row_id = blockIdx.x; int col_id = i * 2; qkv_index = row_major2flat_col32(row_id, col_id, gridDim.x, half_hidden_size * 2) >> 1; } else { qkv_index = blockIdx.x * half_hidden_size + i; } char2 p_q_i2 = p_q[qkv_index]; half2 p_q_h2; p_q_h2.x = __float2half(float(p_q_i2.x) * dequant_scale); p_q_h2.y = __float2half(float(p_q_i2.y) * dequant_scale); const half2 *p_bias = (const half2 *)q_bias; half2 val = __hadd2(p_q_h2, __ldg(&p_bias[i])); int batch_id = blockIdx.x / beam_size; int beam_id = blockIdx.x % beam_size; int head_id 
= i / dim_per_head; int dim_id = i % dim_per_head; ((half2 *)new_q)[targetid_4dim(batch_id, head_id, beam_id, dim_id, head_num, beam_size, dim_per_head)] = val; } } template <typename T> void ker_arrange_encdec_q_i8I_launcher(int step_token_num, int hidden_size, cudaStream_t stream, const int8_t *ori_q, const T *q_bias, T *new_q, int beam_size, int dim_per_head, int head_num, int max_thread_per_block, float dequant_scale, bool in_col32) { ker_arrange_encdec_q_i8I<T> <<<step_token_num, max_thread_per_block, 0, stream>>>( ori_q, q_bias, new_q, beam_size, dim_per_head, head_num, dequant_scale, in_col32); } template <> void ker_arrange_encdec_q_i8I_launcher<__half>( int step_token_num, int hidden_size, cudaStream_t stream, const int8_t *ori_q, const __half *q_bias, __half *new_q, int beam_size, int dim_per_head, int head_num, int max_thread_per_block, float dequant_scale, bool in_col32) { ker_arrange_encdec_q_i8I<__half> <<<step_token_num, max_thread_per_block, 0, stream>>>( ori_q, q_bias, new_q, beam_size, dim_per_head / 2, head_num, dequant_scale, in_col32); } template void ker_arrange_encdec_q_i8I_launcher<float>( int step_token_num, int hidden_size, cudaStream_t stream, const int8_t *ori_q, const float *q_bias, float *new_q, int beam_size, int dim_per_head, int head_num, int max_thread_per_block, float dequant_scale, bool in_col32); template void ker_arrange_encdec_q_i8I_launcher<__half>( int step_token_num, int hidden_size, cudaStream_t stream, const int8_t *ori_q, const __half *q_bias, __half *new_q, int beam_size, int dim_per_head, int head_num, int max_thread_per_block, float dequant_scale, bool in_col32); template <typename T, int beam_size> __global__ void select_beam_rough_topk_i8I( const int8_t *logits, const T *logit_bias, const float *seq_probs, const float *seq_score, const int *alive_seq, float dequant_scale, int *can_idx, float *can_score, int *num_beam_can, int vocab_size, int max_step, float length_norm, int cur_step, float diverse_lambda, int end_id, bool in_col32) { if (cur_step != 0 && alive_seq[blockIdx.x * max_step + cur_step] == end_id) { // this is a finished beam if (threadIdx.x == 0) { num_beam_can[blockIdx.x + 1] = 1; // generate one candidate int pos = atomicAdd(num_beam_can, 1); // get a candidate pos if (diverse_lambda == 0) { can_score[pos] = seq_score[blockIdx.x]; // this beam's score will not be change } else { // add the beam id offset in score to sort in each beam int batch_id = blockIdx.x / beam_size; can_score[pos] = seq_score[blockIdx.x] + (blockIdx.x - batch_id) * min_log_probability; } can_idx[pos] = end_id + (blockIdx.x % beam_size) * vocab_size; // EOS } return; } /* step1: compute each thread's max_logit and sum_exp_logit, store in * rough_top_kth_logit, sum_exp_logit */ const int block_start = blockIdx.x * vocab_size; const int left_idx = block_start + threadIdx.x; const int right_idx = (blockIdx.x + 1) * vocab_size; float rough_top_kth_logit = CUDA_FLOAT_INF_NEG; float sum_exp_logit = 0; for (int i = left_idx; i < right_idx; i += blockDim.x) { int logits_idx; if (in_col32) { int row_id = blockIdx.x; int col_id = i - block_start; logits_idx = row_major2flat_col32(row_id, col_id, gridDim.x, vocab_size); } else { logits_idx = i; } float lgt = (float)logits[logits_idx] * dequant_scale + (float)__ldg(&logit_bias[i - block_start]); rough_top_kth_logit = fmaxf(rough_top_kth_logit, lgt); } float max_logit = blockReduceMax(rough_top_kth_logit); __shared__ float s_max_logit; if (threadIdx.x == 0) { s_max_logit = max_logit; } __syncthreads(); for (int i = 
left_idx; i < right_idx; i += blockDim.x) { int logits_idx; if (in_col32) { int row_id = blockIdx.x; int col_id = i - block_start; logits_idx = row_major2flat_col32(row_id, col_id, gridDim.x, vocab_size); } else { logits_idx = i; } float lgt = fmaxf((float)(logits[logits_idx]) * dequant_scale + (float)__ldg(&logit_bias[i - block_start]) - s_max_logit, logit_thresh_min); sum_exp_logit += expf(lgt); } /* step2: compute rough top-kth-logits and sum_exp_logit among the whole beam, saved into s_topk and s_log_prob_base */ __shared__ float s_log_prob_base; // prefix sequence log prob - log_sum_exp_logit __shared__ float s_topk; // rough top k-th value of logits __shared__ int num_cur_beam_can; // candidate number for this beam sum_exp_logit = blockReduceSum(sum_exp_logit); rough_top_kth_logit = blockRoughTopK<float, beam_size>(rough_top_kth_logit); if (threadIdx.x == 0) { s_log_prob_base = seq_probs[blockIdx.x] - logf(sum_exp_logit) - s_max_logit; s_topk = rough_top_kth_logit; num_cur_beam_can = 0; } /* step3 : select the candidate token with logits bigger than s_topk, compute the seq probability ended with them, save the probability, token_index, selected token number. */ int idx = left_idx; int batch_id = blockIdx.x / beam_size; int batch_start_pos = batch_id * beam_size * vocab_size; // int unk_vocab_id = vocab_size - 3; // last three element: unk, start, // eos __shared__ int l_n; // current iteration candidate number for (int iter = 0; iter < (vocab_size + blockDim.x - 1) / blockDim.x; iter++) { // zero the counter if (threadIdx.x == 0) l_n = 0; __syncthreads(); float lgt = CUDA_FLOAT_INF_NEG - 1.f; // min s_topk is CUDA_FLOAT_INF_NEG int pos; int vocab_id = idx - block_start; int logits_idx; if (in_col32) { int row_id = blockIdx.x; int col_id = vocab_id; logits_idx = row_major2flat_col32(row_id, col_id, gridDim.x, vocab_size); } else { logits_idx = idx; } // if ((vocab_id < vocab_size) && (vocab_id != unk_vocab_id)) { if (vocab_id < vocab_size) { lgt = (float)(logits[logits_idx]) * dequant_scale + (float)__ldg(&logit_bias[vocab_id]); if (lgt >= s_topk) // pos: relative pos inside this iteration pos = atomicAdd(&l_n, 1); } __syncthreads(); // leader increments the global counter if (threadIdx.x == 0) { atomicAdd(&num_cur_beam_can, l_n); l_n = atomicAdd(num_beam_can, l_n); } __syncthreads(); // threads with true predicates write their elements if ((lgt >= s_topk)) { pos += l_n; // increment local pos by global counter if (diverse_lambda == 0) { can_score[pos] = fmaxf((lgt + s_log_prob_base) * length_norm, min_log_probability + 1.f) + batch_id * min_log_probability; } else { can_score[pos] = fmaxf((lgt + s_log_prob_base) * length_norm, min_log_probability + 1.f) + blockIdx.x * min_log_probability; } can_idx[pos] = idx - batch_start_pos; } __syncthreads(); idx += blockDim.x; } if (threadIdx.x == 0) { num_beam_can[blockIdx.x + 1] = num_cur_beam_can; } } template <typename T> void select_beam_rough_topk_i8I_launcher( const int8_t *logits, const T *logit_bias, const float *seq_probs, const float *seq_score, const int *alive_seq, float dequant_scale, int *can_idx, float *can_score, int *num_beam_can, int vocab_size, int max_step, float length_norm, int cur_step, int step_token_num, int max_thread_per_block, cudaStream_t stream, int beam_size, float diverse_lambda, int end_id, bool in_col32) { if (beam_size == 1) select_beam_rough_topk_i8I<T, 1> <<<step_token_num, max_thread_per_block, 0, stream>>>( logits, logit_bias, seq_probs, seq_score, alive_seq, dequant_scale, can_idx, can_score, 
num_beam_can, vocab_size, max_step, length_norm, cur_step, diverse_lambda, end_id, in_col32); if (beam_size == 2) select_beam_rough_topk_i8I<T, 2> <<<step_token_num, max_thread_per_block, 0, stream>>>( logits, logit_bias, seq_probs, seq_score, alive_seq, dequant_scale, can_idx, can_score, num_beam_can, vocab_size, max_step, length_norm, cur_step, diverse_lambda, end_id, in_col32); if (beam_size == 4) select_beam_rough_topk_i8I<T, 4> <<<step_token_num, max_thread_per_block, 0, stream>>>( logits, logit_bias, seq_probs, seq_score, alive_seq, dequant_scale, can_idx, can_score, num_beam_can, vocab_size, max_step, length_norm, cur_step, diverse_lambda, end_id, in_col32); if (beam_size == 8) select_beam_rough_topk_i8I<T, 8> <<<step_token_num, max_thread_per_block, 0, stream>>>( logits, logit_bias, seq_probs, seq_score, alive_seq, dequant_scale, can_idx, can_score, num_beam_can, vocab_size, max_step, length_norm, cur_step, diverse_lambda, end_id, in_col32); if (beam_size == 16) select_beam_rough_topk_i8I<T, 16> <<<step_token_num, max_thread_per_block, 0, stream>>>( logits, logit_bias, seq_probs, seq_score, alive_seq, dequant_scale, can_idx, can_score, num_beam_can, vocab_size, max_step, length_norm, cur_step, diverse_lambda, end_id, in_col32); if (beam_size == 32) select_beam_rough_topk_i8I<T, 32> <<<step_token_num, max_thread_per_block, 0, stream>>>( logits, logit_bias, seq_probs, seq_score, alive_seq, dequant_scale, can_idx, can_score, num_beam_can, vocab_size, max_step, length_norm, cur_step, diverse_lambda, end_id, in_col32); } template void select_beam_rough_topk_i8I_launcher<float>( const int8_t *logits, const float *logit_bias, const float *seq_probs, const float *seq_score, const int *alive_seq, float dequant_scale, int *can_idx, float *can_score, int *num_beam_can, int vocab_size, int max_step, float length_norm, int cur_step, int step_token_num, int max_thread_per_block, cudaStream_t stream, int beam_size, float diverse_lambda, int end_id, bool in_col32); template void select_beam_rough_topk_i8I_launcher<__half>( const int8_t *logits, const __half *logit_bias, const float *seq_probs, const float *seq_score, const int *alive_seq, float dequant_scale, int *can_idx, float *can_score, int *num_beam_can, int vocab_size, int max_step, float length_norm, int cur_step, int step_token_num, int max_thread_per_block, cudaStream_t stream, int beam_size, float diverse_lambda, int end_id, bool in_col32); template <typename T, int k> __global__ void ker_topk_sample_i8I(const int8_t *logits, const T *logit_bias, int *old_input_ids, int *new_input_ids, const int vocab_size, const int max_step, const int batch_seq_len, int logits_seq_len, int *unfinished, curandState *curandstate, int eos_id, float dequant_scale, bool in_col32) { int last_token_idx_in_batch = blockIdx.x * max_step + batch_seq_len - 1; /* add EOS to end if last token is EOS */ if (batch_seq_len > 1 && old_input_ids[last_token_idx_in_batch] == eos_id) { if (threadIdx.x == 0) { old_input_ids[last_token_idx_in_batch + 1] = eos_id; } return; } int logits_token_idx_in_batch = blockIdx.x * logits_seq_len + logits_seq_len - 1; int left_logit_idx = logits_token_idx_in_batch * vocab_size + threadIdx.x; int right_logit_idx = (logits_token_idx_in_batch + 1) * vocab_size; /* step1. 
find max logit and rough Kth logit over the whole vocab */ __shared__ float s_max_logit, s_topk_logit; float rough_top_kth_logit = CUDA_FLOAT_INF_NEG; for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) { int logits_idx; if (in_col32) { int row_id = logits_token_idx_in_batch; int col_id = idx - logits_token_idx_in_batch * vocab_size; logits_idx = row_major2flat_col32(row_id, col_id, gridDim.x * logits_seq_len, vocab_size); } else { logits_idx = idx; } rough_top_kth_logit = fmaxf( rough_top_kth_logit, (float)(logits[logits_idx]) * dequant_scale + (float)__ldg(&logit_bias[idx - left_logit_idx + threadIdx.x])); } float max_logit = blockReduceMax(rough_top_kth_logit); rough_top_kth_logit = blockRoughTopK<float, k>(rough_top_kth_logit); if (threadIdx.x == 0) { s_topk_logit = rough_top_kth_logit; s_max_logit = max_logit; } __syncthreads(); __shared__ int s_tid; if (k != 1) { /* step2 hold one logit per thread which larger than Kth logit and sample * from them */ float topk_exp_sum, topk_exp = CUDA_FLOAT_INF_NEG; int topk_tid = vocab_size; // int test_num = 0; __shared__ float s_topk_exp_sum; for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) { int logits_idx; if (in_col32) { int row_id = logits_token_idx_in_batch; int col_id = idx - logits_token_idx_in_batch * vocab_size; logits_idx = row_major2flat_col32( row_id, col_id, gridDim.x * logits_seq_len, vocab_size); } else { logits_idx = idx; } float logit = (float)logits[logits_idx] * dequant_scale + (float)__ldg(&logit_bias[idx - left_logit_idx + threadIdx.x]); float logit_exp = expf(fmaxf(logit - s_max_logit, logit_thresh_min)); // if (logit >= s_topk_logit) test_num++; if (logit >= s_topk_logit && logit_exp > topk_exp) { topk_exp = logit_exp; topk_tid = idx - left_logit_idx + threadIdx.x; } } // test_num = blockReduceSum(test_num); // __shared__ int s_test_num; // if (threadIdx.x == 0) { // s_test_num = test_num; // if (s_test_num != 1) printf("sample from top %d\n", s_test_num); // // printf("sample from top %s", test_num); // } // __syncthreads(); if (topk_tid == vocab_size) topk_exp = 0; topk_exp_sum = blockReduceSum(topk_exp); if (threadIdx.x == 0) { s_topk_exp_sum = topk_exp_sum; } __syncthreads(); /* calculate cumulative probability */ float topk_prob = topk_exp / s_topk_exp_sum; float prefix_sum_prob; typedef cub::BlockScan<float, 1024> BlockScan; __shared__ typename BlockScan::TempStorage temp_storage; BlockScan(temp_storage).InclusiveSum(topk_prob, prefix_sum_prob); __shared__ float random_x; if (threadIdx.x == 0) { random_x = curand_uniform(curandstate + blockIdx.x); } __syncthreads(); if (threadIdx.x == 0) { s_tid = vocab_size; } __syncthreads(); int threadID = threadIdx.x; __shared__ int s_threadID; __shared__ float s_max_prob; if (random_x > prefix_sum_prob) threadID = blockDim.x; threadID = blockReduceMin(threadID); float max_prob = blockReduceMax(topk_prob); if (threadIdx.x == 0) { s_threadID = threadID; s_max_prob = max_prob; } __syncthreads(); if (threadIdx.x == s_threadID) { s_tid = topk_tid; } __syncthreads(); if (s_tid == vocab_size && topk_prob == s_max_prob) { s_tid = topk_tid; } __syncthreads(); } else { s_tid = vocab_size; for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) { int logits_idx; if (in_col32) { int row_id = logits_token_idx_in_batch; int col_id = idx - logits_token_idx_in_batch * vocab_size; logits_idx = row_major2flat_col32( row_id, col_id, gridDim.x * logits_seq_len, vocab_size); } else { logits_idx = idx; } float logit = 
(float)logits[logits_idx] * dequant_scale + (float)__ldg(&logit_bias[idx - left_logit_idx + threadIdx.x]); if (logit == s_max_logit) { s_tid = idx - left_logit_idx + threadIdx.x; } } __syncthreads(); } /* if new sampled tid is not EOS, set unfinish TRUE */ if (threadIdx.x == 0) { if (s_tid != eos_id) unfinished[0] = 1; } /* step3 write back new sampled ids */ if (threadIdx.x == 0) { old_input_ids[last_token_idx_in_batch + 1] = s_tid; } } template <typename T> void ker_topk_sample_i8I_launcher( int batch_size, int batch_seq_len, const int max_step, int logits_seq_len, int max_thread_per_block, cudaStream_t stream, const int8_t *logits, const T *logit_bias, int *old_input_ids, int *new_input_ids, const int vocab_size, const int k, int *unfinished, curandState *curandstate, int eos_id, float dequant_scale, bool in_col32) { if (k == 1) ker_topk_sample_i8I<T, 1><<<batch_size, max_thread_per_block, 0, stream>>>( logits, logit_bias, old_input_ids, new_input_ids, vocab_size, max_step, batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id, dequant_scale, in_col32); else if (k == 2) ker_topk_sample_i8I<T, 2><<<batch_size, max_thread_per_block, 0, stream>>>( logits, logit_bias, old_input_ids, new_input_ids, vocab_size, max_step, batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id, dequant_scale, in_col32); else if (k == 4) ker_topk_sample_i8I<T, 4><<<batch_size, max_thread_per_block, 0, stream>>>( logits, logit_bias, old_input_ids, new_input_ids, vocab_size, max_step, batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id, dequant_scale, in_col32); else if (k == 8) ker_topk_sample_i8I<T, 8><<<batch_size, max_thread_per_block, 0, stream>>>( logits, logit_bias, old_input_ids, new_input_ids, vocab_size, max_step, batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id, dequant_scale, in_col32); else if (k == 16) ker_topk_sample_i8I<T, 16><<<batch_size, max_thread_per_block, 0, stream>>>( logits, logit_bias, old_input_ids, new_input_ids, vocab_size, max_step, batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id, dequant_scale, in_col32); else if (k == 32) ker_topk_sample_i8I<T, 32><<<batch_size, max_thread_per_block, 0, stream>>>( logits, logit_bias, old_input_ids, new_input_ids, vocab_size, max_step, batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id, dequant_scale, in_col32); else { throw std::invalid_argument("topk argument should be in [1,2,4,8,16,32]"); } } template void ker_topk_sample_i8I_launcher<float>( int batch_size, int batch_seq_len, const int max_step, int logits_seq_len, int max_thread_per_block, cudaStream_t stream, const int8_t *logits, const float *logit_bias, int *old_input_ids, int *new_input_idx, const int vocab_size, const int k, int *unfinished, curandState *curandstate, int eos_id, float dequant_scale, bool in_col32); template void ker_topk_sample_i8I_launcher<__half>( int batch_size, int batch_seq_len, const int max_step, int logits_seq_len, int max_thread_per_block, cudaStream_t stream, const int8_t *logits, const __half *logit_bias, int *old_input_ids, int *new_input_idx, const int vocab_size, const int k, int *unfinished, curandState *curandstate, int eos_id, float dequant_scale, bool in_col32); template <typename T> __global__ void ker_topp_sample_i8I(const int8_t *logits, const T *logit_bias, int *old_input_ids, int *new_input_ids, const int vocab_size, const int max_step, const int batch_seq_len, int logits_seq_len, int *unfinished, float p, curandState *curandstate, int eos_id, float dequant_scale, bool in_col32) { 
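  /* Overview of the nucleus (top-p) sampling performed in this kernel:
   *   0. If the previous token of this batch element is already EOS, append
   *      EOS again and return early.
   *   1. Each thread scans its slice of the vocabulary for its maximum
   *      dequantized, biased logit; the per-thread maxima are sorted
   *      descending with cub::BlockRadixSort and their exponentials are
   *      prefix-summed to pick an approximate logit cut-off covering roughly
   *      the requested top-p probability mass.
   *   2. Each thread keeps at most one candidate token at or above the
   *      cut-off; a second prefix sum over the candidates' probabilities plus
   *      a single uniform random draw selects the sampled token id.
   *   3. The sampled id is appended to old_input_ids, and the unfinished flag
   *      is raised unless the id equals eos_id. */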
int token_idx_in_batch = blockIdx.x * max_step + batch_seq_len - 1; /* add EOS to end if last token is EOS */ if (batch_seq_len > 1 && old_input_ids[token_idx_in_batch] == eos_id) { if (threadIdx.x == 0) { old_input_ids[token_idx_in_batch + 1] = eos_id; } return; } int logits_token_idx_in_batch = blockIdx.x * logits_seq_len + logits_seq_len - 1; int left_logit_idx = logits_token_idx_in_batch * vocab_size + threadIdx.x; int right_logit_idx = (logits_token_idx_in_batch + 1) * vocab_size; /* step1. find max logit in each thread and sample from these probs with * nucleus sampling */ __shared__ float s_max_logit; float max_logit = CUDA_FLOAT_INF_NEG; for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) { int logits_idx; if (in_col32) { int row_id = logits_token_idx_in_batch; int col_id = idx - logits_token_idx_in_batch * vocab_size; logits_idx = row_major2flat_col32(row_id, col_id, gridDim.x * logits_seq_len, vocab_size); } else { logits_idx = idx; } max_logit = fmaxf(max_logit, (float)logits[logits_idx] * dequant_scale) + (float)__ldg(&logit_bias[idx - left_logit_idx + threadIdx.x]); } float max_logit_array[1]; max_logit_array[0] = max_logit; typedef cub::BlockRadixSort<float, 1024, 1> BlockRadixSort; __shared__ typename BlockRadixSort::TempStorage sort_temp_storage; BlockRadixSort(sort_temp_storage).SortDescending(max_logit_array); float presum_max_logit_exp; max_logit = max_logit_array[0]; float block_max_logit = blockReduceMax(max_logit); if (threadIdx.x == 0) { s_max_logit = block_max_logit; } __syncthreads(); float biased_logit_exp = expf(fmaxf(max_logit - s_max_logit, logit_thresh_min)); typedef cub::BlockScan<float, 1024> BlockScan; __shared__ typename BlockScan::TempStorage presum_temp_storage; BlockScan(presum_temp_storage) .InclusiveSum(biased_logit_exp, presum_max_logit_exp); float topp_exp_threshold; if (threadIdx.x == blockDim.x - 1) { topp_exp_threshold = p * presum_max_logit_exp; } __shared__ float s_presum_logit_exp_threshold; if (presum_max_logit_exp > topp_exp_threshold) { presum_max_logit_exp = CUDA_FLOAT_INF_NEG; } float logit_exp_threshold = blockReduceMax(presum_max_logit_exp); if (threadIdx.x == 0) { s_presum_logit_exp_threshold = logit_exp_threshold; } __syncthreads(); __shared__ float s_logit_threshold; if (presum_max_logit_exp == s_presum_logit_exp_threshold) { s_logit_threshold = max_logit; } __syncthreads(); /* step2 hold one logit per thread which larger than Kth logit and sample * from them */ float topk_exp_sum, topk_exp = CUDA_FLOAT_INF_NEG; int topk_tid = vocab_size; int test_num = 0; __shared__ float s_topk_exp_sum; for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) { int logits_idx; if (in_col32) { int row_id = logits_token_idx_in_batch; int col_id = idx - logits_token_idx_in_batch * vocab_size; logits_idx = row_major2flat_col32(row_id, col_id, gridDim.x * logits_seq_len, vocab_size); } else { logits_idx = idx; } float logit = (float)logits[logits_idx] * dequant_scale + (float)__ldg(&logit_bias[idx - left_logit_idx + threadIdx.x]); float logit_exp = expf(fmaxf(logit - s_max_logit, logit_thresh_min)); if (logit >= s_logit_threshold) test_num++; if (logit >= s_logit_threshold && logit_exp > topk_exp) { topk_exp = logit_exp; topk_tid = idx - left_logit_idx + threadIdx.x; } } test_num = blockReduceSum(test_num); if (topk_tid == vocab_size) topk_exp = 0; topk_exp_sum = blockReduceSum(topk_exp); if (threadIdx.x == 0) { s_topk_exp_sum = topk_exp_sum; } __syncthreads(); /* calculate cumulative probability */ float topk_prob = 
topk_exp / s_topk_exp_sum; float prefix_sum_prob; BlockScan(presum_temp_storage).InclusiveSum(topk_prob, prefix_sum_prob); __shared__ float random_x; if (threadIdx.x == 0) { random_x = curand_uniform(curandstate + blockIdx.x); } __syncthreads(); __shared__ int s_tid; if (threadIdx.x == 0) { s_tid = vocab_size; } __syncthreads(); int threadID = threadIdx.x; __shared__ int s_threadID; __shared__ float s_max_prob; if (random_x > prefix_sum_prob) threadID = blockDim.x; threadID = blockReduceMin(threadID); float max_prob = blockReduceMax(topk_prob); if (threadIdx.x == 0) { s_threadID = threadID; s_max_prob = max_prob; } __syncthreads(); if (threadIdx.x == s_threadID) { s_tid = topk_tid; } __syncthreads(); if (s_tid == vocab_size && topk_prob == s_max_prob) { s_tid = topk_tid; } __syncthreads(); /* if new sampled tid is not EOS, set unfinish TRUE */ if (threadIdx.x == 0) { if (s_tid != eos_id) unfinished[0] = 1; } /* step3 write back new sampled ids */ if (threadIdx.x == 0) { old_input_ids[token_idx_in_batch + 1] = s_tid; } } template <typename T> void ker_topp_sample_i8I_launcher( int batch_size, int batch_seq_len, const int max_step, int logits_seq_len, int max_thread_per_block, cudaStream_t stream, const int8_t *logits, const T *logit_bias, int *old_input_ids, int *new_input_ids, const int vocab_size, const float p, int *unfinished, curandState *curandstate, int eos_id, float dequant_scale, bool in_col32) { ker_topp_sample_i8I<T><<<batch_size, max_thread_per_block, 0, stream>>>( logits, logit_bias, old_input_ids, new_input_ids, vocab_size, max_step, batch_seq_len, logits_seq_len, unfinished, p, curandstate, eos_id, dequant_scale, in_col32); } template void ker_topp_sample_i8I_launcher<float>( int batch_size, int batch_seq_len, const int max_step, int logits_seq_len, int max_thread_per_block, cudaStream_t stream, const int8_t *logits, const float *logit_bias, int *old_input_ids, int *new_input_idx, const int vocab_size, const float p, int *unfinished, curandState *curandstate, int eos_id, float dequant_scale, bool in_col32); template void ker_topp_sample_i8I_launcher<__half>( int batch_size, int batch_seq_len, const int max_step, int logits_seq_len, int max_thread_per_block, cudaStream_t stream, const int8_t *logits, const __half *logit_bias, int *old_input_ids, int *new_input_idx, const int vocab_size, const float p, int *unfinished, curandState *curandstate, int eos_id, float dequant_scale, bool in_col32); } // namespace cuda } // namespace lightseq
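// ---------------------------------------------------------------------------
// Illustrative host-side sketch: one way the int8 top-k sampling launcher
// defined above might be driven for a single decoding step. Every size,
// constant, and buffer name below is a hypothetical placeholder (not taken
// from the original decoder); a real caller supplies the device buffers,
// quantization scale, and curand states it prepared itself.
// ---------------------------------------------------------------------------
static void example_topk_sample_step(
    const int8_t *d_logits,        // [batch_size, vocab_size] int8 logits of the last step
    const float *d_logit_bias,     // [vocab_size] bias added after dequantization
    int *d_input_ids,              // [batch_size, max_step] token buffer; sampled id appended in-place
    int *d_unfinished,             // single int flag, set to 1 if any sequence is still running
    curandState *d_curand_states,  // one state per batch element
    cudaStream_t stream) {
  // Hypothetical shapes/constants for illustration only.
  const int batch_size = 8;
  const int batch_seq_len = 16;        // tokens already generated per sequence
  const int max_step = 64;             // capacity of d_input_ids per sequence
  const int logits_seq_len = 1;        // only the last position's logits are given
  const int max_thread_per_block = 1024;
  const int vocab_size = 32000;
  const int k = 4;                     // must be one of {1, 2, 4, 8, 16, 32}
  const int eos_id = 2;
  const float dequant_scale = 0.05f;   // scale used when the logits were quantized
  const bool in_col32 = false;         // row-major int8 layout in this sketch

  lightseq::cuda::ker_topk_sample_i8I_launcher<float>(
      batch_size, batch_seq_len, max_step, logits_seq_len,
      max_thread_per_block, stream, d_logits, d_logit_bias,
      /*old_input_ids=*/d_input_ids,
      /*new_input_ids=*/d_input_ids,   // not written by the top-k kernel shown above
      vocab_size, k, d_unfinished, d_curand_states, eos_id, dequant_scale,
      in_col32);
}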
#include <nbla/array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/warp_by_grid.hpp> #include <nbla/cuda/utils/atomic_add.cuh> #include <nbla/cuda/utils/nd_index.cuh> #include <nbla/variable.hpp> namespace nbla { template <typename T, bool align_corners = false> __forceinline__ __device__ T unnormalize_grid_with(T s, const int S) { if (align_corners) { // [-1, 1] <--> [0, S - 1] return (s + T(1)) * (S - T(1)) / T(2); } else { // [-1, 1] <--> [-0.5, S - 0.5] = [0 - 0.5, S - 1 + 0.5] return ((s + T(1)) * S - T(1)) / T(2); } } template <typename T> __forceinline__ __device__ T get_src_findex_with_zero_pad(const T s, const int S) { return s; } template <typename T> __forceinline__ __device__ T get_src_findex_with_repeat_pad(const T s, const int S) { if (s < 0) { return 0; } else if (s > S - 1) { return S - 1; } else { return s; } } template <typename T> __forceinline__ __device__ T reflect(const T s, const int L, const int U) { auto len = (U - L); if (s < L) { auto d = L - s; auto nf = d / len; auto n = static_cast<int>(nf); auto r = d - n * len; if (n % 2 == 0) { return L + r; } else { return U - r; } } else if (s > U) { auto d = s - U; auto nf = d / len; auto n = static_cast<int>(nf); auto r = d - n * len; if (n % 2 == 0) { return U - r; } else { return L + r; } } else { return s; } } template <typename T, bool align_corners = false> __forceinline__ __device__ T get_src_findex_with_reflect_pad(T s, const int S) { if (align_corners) { return reflect(s, T(0), T(S - 1)); } else { // address the borders {-0.5, S - 0.5} condition by two multiplication auto sf = reflect(T(2) * s, T(-1), T(2) * T(S) - T(1)); sf = sf * T(0.5); sf = get_src_findex_with_repeat_pad(sf, S); return sf; } } template <typename T> __forceinline__ __device__ T get_grad_coef_with_zero_pad(const T s, const int S) { return T(1); } template <typename T> __forceinline__ __device__ T get_grad_coef_with_repeat_pad(const T s, const int S) { if (s <= 0) { return 0; } else if (s >= S - 1) { return 0; } else { return 1; } } template <typename T> __forceinline__ __device__ T reflect_coef(const T s, const int L, const int U) { auto len = (U - L); if (s < L) { auto d = L - s; auto nf = d / len; auto n = static_cast<int>(nf); if (n % 2 == 0) { return T(-1); } else { return T(1); } } else if (s > U) { auto d = s - U; auto nf = d / len; auto n = static_cast<int>(nf); if (n % 2 == 0) { return T(-1); } else { return T(1); } } else { return T(1); } } template <typename T, bool align_corners = false> __forceinline__ __device__ T get_grad_coef_with_reflect_pad(T s, const int S) { if (align_corners) { return reflect_coef(s, T(0), T(S - 1)); } else { // address the borders {-0.5, S - 0.5} condition by two multiplication auto coef = reflect_coef(T(2) * s, T(-1), T(2) * T(S) - T(1)); auto sf = reflect(T(2) * s, T(-1), T(2) * T(S) - T(1)); sf = sf * T(0.5); coef *= get_grad_coef_with_repeat_pad(sf, S); return coef; } } template <typename T, bool channel_last = false> __forceinline__ __device__ T get_pixel_value_2d(const T *input, int b, int c, int h, int w, const int H, const int W, const int2 istride, const int iisize) { if ((h >= 0 && h < H) && (w >= 0 && w < W)) { auto ind_index = channel_last ? 
make_int3(h, w, c) : make_int3(c, h, w); auto iidx = device_3d_to_flat(ind_index, istride); auto b_iidx = iidx + b * iisize; return input[b_iidx]; } else { return T(0); } } template <typename T, bool channel_last = false> __forceinline__ __device__ T get_pixel_value_3d(const T *input, int b, int c, int d, int h, int w, const int D, const int H, const int W, const int3 istride, const int iisize) { if ((d >= 0 && d < D) && (h >= 0 && h < H) && (w >= 0 && w < W)) { auto ind_index = channel_last ? make_int4(d, h, w, c) : make_int4(c, d, h, w); auto iidx = device_4d_to_flat(ind_index, istride); auto b_iidx = iidx + b * iisize; return input[b_iidx]; } else { return T(0); } } template <typename T, bool channel_last = false> __forceinline__ __device__ void backward_data_2d(T *igrad, const T ograd, const T p, const T q, int b, int c, int h, int w, const int H, const int W, const int2 istride, const int iisize) { if ((h >= 0 && h < H) && (w >= 0 && w < W)) { auto ind_index = channel_last ? make_int3(h, w, c) : make_int3(c, h, w); auto iidx = device_3d_to_flat(ind_index, istride); auto b_iidx = iidx + b * iisize; atomic_add(igrad + b_iidx, ograd * p * q); } } template <typename T, bool channel_last = false> __forceinline__ __device__ void backward_data_3d(T *igrad, const T ograd, const T p, const T q, const T r, int b, int c, int d, int h, int w, const int D, const int H, const int W, const int3 istride, const int iisize) { if ((d >= 0 && d < D) && (h >= 0 && h < H) && (w >= 0 && w < W)) { auto ind_index = channel_last ? make_int4(d, h, w, c) : make_int4(c, d, h, w); auto iidx = device_4d_to_flat(ind_index, istride); auto b_iidx = iidx + b * iisize; atomic_add(igrad + b_iidx, ograd * p * q * r); } } /* Forward implementations */ template <typename T, warp_by_grid::PADDING_MODE padding_mode = warp_by_grid::PADDING_MODE::zero, bool align_corners = false, bool channel_last = false> __global__ void kernel_warp_linear_forward_2d( const int oisize, const int iisize, const int gisize, T *output, const T *input, const T *grid, const int3 ishape, const int2 istride, const int2 gstride, const int2 ostride, const int B) { auto Hi = channel_last ? ishape.x : ishape.y; auto Wi = channel_last ? ishape.y : ishape.z; auto unnormalize_grid = align_corners ? unnormalize_grid_with<T, true> : unnormalize_grid_with<T, false>; auto get_src_findex_with_pad = [&](const T s, const Size_t S) { if (padding_mode == warp_by_grid::PADDING_MODE::zero) { return get_src_findex_with_zero_pad(s, S); } else if (padding_mode == warp_by_grid::PADDING_MODE::reflect) { return align_corners ? get_src_findex_with_reflect_pad<T, true>(s, S) : get_src_findex_with_reflect_pad<T, false>(s, S); } else if (padding_mode == warp_by_grid::PADDING_MODE::repeat) { return get_src_findex_with_repeat_pad(s, S); } else { return T(-1); } }; NBLA_CUDA_KERNEL_LOOP(oidx, oisize) { auto ond_index = device_flat_to_3d(oidx, ostride); auto c = channel_last ? ond_index.z : ond_index.x; auto h = channel_last ? ond_index.x : ond_index.y; auto w = channel_last ? 
ond_index.y : ond_index.z; auto gnd_index = make_int3(h, w, 0); auto gidx = device_3d_to_flat(gnd_index, gstride); for (auto b = 0; b < B; ++b) { auto b_gidx = gidx + b * gisize; auto xn = grid[b_gidx + 0]; auto yn = grid[b_gidx + 1]; auto xf0 = unnormalize_grid(xn, Wi); auto yf0 = unnormalize_grid(yn, Hi); auto xf = get_src_findex_with_pad(xf0, Wi); auto yf = get_src_findex_with_pad(yf0, Hi); auto xi0 = static_cast<int>(std::floor(xf)); auto yi0 = static_cast<int>(std::floor(yf)); auto xi1 = xi0 + 1; auto yi1 = yi0 + 1; auto px0 = xf - xi0; auto py0 = yf - yi0; auto px1 = T(1) - px0; auto py1 = T(1) - py0; auto v_y0x0 = get_pixel_value_2d<T, channel_last>( input, b, c, yi0, xi0, Hi, Wi, istride, iisize); auto v_y0x1 = get_pixel_value_2d<T, channel_last>( input, b, c, yi0, xi1, Hi, Wi, istride, iisize); auto v_y1x0 = get_pixel_value_2d<T, channel_last>( input, b, c, yi1, xi0, Hi, Wi, istride, iisize); auto v_y1x1 = get_pixel_value_2d<T, channel_last>( input, b, c, yi1, xi1, Hi, Wi, istride, iisize); auto b_oidx = oidx + b * oisize; auto val = (v_y0x0 * py1 * px1) + (v_y0x1 * py1 * px0) + (v_y1x0 * py0 * px1) + (v_y1x1 * py0 * px0); output[b_oidx] = val; } } } template <typename T, warp_by_grid::PADDING_MODE padding_mode = warp_by_grid::PADDING_MODE::zero, bool align_corners = false, bool channel_last = false> __global__ void kernel_warp_linear_forward_3d( const int oisize, const int iisize, const int gisize, T *output, const T *input, const T *grid, const int4 ishape, const int3 istride, const int3 gstride, const int3 ostride, const int B) { auto Di = channel_last ? ishape.x : ishape.y; auto Hi = channel_last ? ishape.y : ishape.z; auto Wi = channel_last ? ishape.z : ishape.w; auto unnormalize_grid = align_corners ? unnormalize_grid_with<T, true> : unnormalize_grid_with<T, false>; auto get_src_findex_with_pad = [&](const T s, const Size_t S) { if (padding_mode == warp_by_grid::PADDING_MODE::zero) { return get_src_findex_with_zero_pad(s, S); } else if (padding_mode == warp_by_grid::PADDING_MODE::reflect) { return align_corners ? get_src_findex_with_reflect_pad<T, true>(s, S) : get_src_findex_with_reflect_pad<T, false>(s, S); } else if (padding_mode == warp_by_grid::PADDING_MODE::repeat) { return get_src_findex_with_repeat_pad(s, S); } else { return T(-1); } }; NBLA_CUDA_KERNEL_LOOP(oidx, oisize) { auto ond_index = device_flat_to_4d(oidx, ostride); auto c = channel_last ? ond_index.w : ond_index.x; auto d = channel_last ? ond_index.x : ond_index.y; auto h = channel_last ? ond_index.y : ond_index.z; auto w = channel_last ? 
ond_index.z : ond_index.w; auto gnd_index = make_int4(d, h, w, 0); auto gidx = device_4d_to_flat(gnd_index, gstride); for (auto b = 0; b < B; ++b) { auto b_gidx = gidx + b * gisize; auto xn = grid[b_gidx + 0]; auto yn = grid[b_gidx + 1]; auto zn = grid[b_gidx + 2]; auto xf0 = unnormalize_grid(xn, Wi); auto yf0 = unnormalize_grid(yn, Hi); auto zf0 = unnormalize_grid(zn, Di); auto xf = get_src_findex_with_pad(xf0, Wi); auto yf = get_src_findex_with_pad(yf0, Hi); auto zf = get_src_findex_with_pad(zf0, Di); auto xi0 = static_cast<int>(std::floor(xf)); auto yi0 = static_cast<int>(std::floor(yf)); auto zi0 = static_cast<int>(std::floor(zf)); auto xi1 = xi0 + 1; auto yi1 = yi0 + 1; auto zi1 = zi0 + 1; auto px0 = xf - xi0; auto py0 = yf - yi0; auto pz0 = zf - zi0; auto px1 = T(1) - px0; auto py1 = T(1) - py0; auto pz1 = T(1) - pz0; auto v_z0y0x0 = get_pixel_value_3d<T, channel_last>( input, b, c, zi0, yi0, xi0, Di, Hi, Wi, istride, iisize); auto v_z0y0x1 = get_pixel_value_3d<T, channel_last>( input, b, c, zi0, yi0, xi1, Di, Hi, Wi, istride, iisize); auto v_z0y1x0 = get_pixel_value_3d<T, channel_last>( input, b, c, zi0, yi1, xi0, Di, Hi, Wi, istride, iisize); auto v_z0y1x1 = get_pixel_value_3d<T, channel_last>( input, b, c, zi0, yi1, xi1, Di, Hi, Wi, istride, iisize); auto v_z1y0x0 = get_pixel_value_3d<T, channel_last>( input, b, c, zi1, yi0, xi0, Di, Hi, Wi, istride, iisize); auto v_z1y0x1 = get_pixel_value_3d<T, channel_last>( input, b, c, zi1, yi0, xi1, Di, Hi, Wi, istride, iisize); auto v_z1y1x0 = get_pixel_value_3d<T, channel_last>( input, b, c, zi1, yi1, xi0, Di, Hi, Wi, istride, iisize); auto v_z1y1x1 = get_pixel_value_3d<T, channel_last>( input, b, c, zi1, yi1, xi1, Di, Hi, Wi, istride, iisize); auto val = v_z0y0x0 * pz1 * py1 * px1 + v_z0y0x1 * pz1 * py1 * px0 + v_z0y1x0 * pz1 * py0 * px1 + v_z0y1x1 * pz1 * py0 * px0 + v_z1y0x0 * pz0 * py1 * px1 + v_z1y0x1 * pz0 * py1 * px0 + v_z1y1x0 * pz0 * py0 * px1 + v_z1y1x1 * pz0 * py0 * px0; auto b_oidx = oidx + b * oisize; output[b_oidx] = val; } } } template <typename T, warp_by_grid::PADDING_MODE padding_mode = warp_by_grid::PADDING_MODE::zero, bool align_corners = false, bool channel_last = false> __global__ void kernel_warp_nearest_forward_2d( const int oisize, const int iisize, const int gisize, T *output, const T *input, const T *grid, const int3 ishape, const int2 istride, const int2 gstride, const int2 ostride, const int B) { auto Hi = channel_last ? ishape.x : ishape.y; auto Wi = channel_last ? ishape.y : ishape.z; auto unnormalize_grid = align_corners ? unnormalize_grid_with<T, true> : unnormalize_grid_with<T, false>; auto get_src_findex_with_pad = [&](const T s, const Size_t S) { if (padding_mode == warp_by_grid::PADDING_MODE::zero) { return get_src_findex_with_zero_pad(s, S); } else if (padding_mode == warp_by_grid::PADDING_MODE::reflect) { return align_corners ? get_src_findex_with_reflect_pad<T, true>(s, S) : get_src_findex_with_reflect_pad<T, false>(s, S); } else if (padding_mode == warp_by_grid::PADDING_MODE::repeat) { return get_src_findex_with_repeat_pad(s, S); } else { return T(-1); } }; NBLA_CUDA_KERNEL_LOOP(oidx, oisize) { auto ond_index = device_flat_to_3d(oidx, ostride); auto c = channel_last ? ond_index.z : ond_index.x; auto h = channel_last ? ond_index.x : ond_index.y; auto w = channel_last ? 
ond_index.y : ond_index.z; auto gnd_index = make_int3(h, w, 0); auto gidx = device_3d_to_flat(gnd_index, gstride); for (auto b = 0; b < B; ++b) { auto b_gidx = gidx + b * gisize; auto xn = grid[b_gidx + 0]; auto yn = grid[b_gidx + 1]; auto xf0 = unnormalize_grid(xn, Wi); auto yf0 = unnormalize_grid(yn, Hi); auto xf = get_src_findex_with_pad(xf0, Wi); auto yf = get_src_findex_with_pad(yf0, Hi); auto xi = static_cast<int>(std::floor(xf + T(0.5))); auto yi = static_cast<int>(std::floor(yf + T(0.5))); auto b_oidx = oidx + b * oisize; auto val = get_pixel_value_2d<T, channel_last>(input, b, c, yi, xi, Hi, Wi, istride, iisize); output[b_oidx] = val; } } } template <typename T, warp_by_grid::PADDING_MODE padding_mode = warp_by_grid::PADDING_MODE::zero, bool align_corners = false, bool channel_last = false> __global__ void kernel_warp_nearest_forward_3d( const int oisize, const int iisize, const int gisize, T *output, const T *input, const T *grid, const int4 ishape, const int3 istride, const int3 gstride, const int3 ostride, const int B) { auto Di = channel_last ? ishape.x : ishape.y; auto Hi = channel_last ? ishape.y : ishape.z; auto Wi = channel_last ? ishape.z : ishape.w; auto unnormalize_grid = align_corners ? unnormalize_grid_with<T, true> : unnormalize_grid_with<T, false>; auto get_src_findex_with_pad = [&](const T s, const Size_t S) { if (padding_mode == warp_by_grid::PADDING_MODE::zero) { return get_src_findex_with_zero_pad(s, S); } else if (padding_mode == warp_by_grid::PADDING_MODE::reflect) { return align_corners ? get_src_findex_with_reflect_pad<T, true>(s, S) : get_src_findex_with_reflect_pad<T, false>(s, S); } else if (padding_mode == warp_by_grid::PADDING_MODE::repeat) { return get_src_findex_with_repeat_pad(s, S); } else { return T(-1); } }; NBLA_CUDA_KERNEL_LOOP(oidx, oisize) { auto ond_index = device_flat_to_4d(oidx, ostride); auto c = channel_last ? ond_index.w : ond_index.x; auto d = channel_last ? ond_index.x : ond_index.y; auto h = channel_last ? ond_index.y : ond_index.z; auto w = channel_last ? ond_index.z : ond_index.w; auto gnd_index = make_int4(d, h, w, 0); auto gidx = device_4d_to_flat(gnd_index, gstride); for (auto b = 0; b < B; ++b) { auto b_gidx = gidx + b * gisize; auto xn = grid[b_gidx + 0]; auto yn = grid[b_gidx + 1]; auto zn = grid[b_gidx + 2]; auto xf0 = unnormalize_grid(xn, Wi); auto yf0 = unnormalize_grid(yn, Hi); auto zf0 = unnormalize_grid(zn, Di); auto xf = get_src_findex_with_pad(xf0, Wi); auto yf = get_src_findex_with_pad(yf0, Hi); auto zf = get_src_findex_with_pad(zf0, Di); auto xi = static_cast<int>(std::floor(xf + T(0.5))); auto yi = static_cast<int>(std::floor(yf + T(0.5))); auto zi = static_cast<int>(std::floor(zf + T(0.5))); auto b_oidx = oidx + b * oisize; auto val = get_pixel_value_3d<T, channel_last>( input, b, c, zi, yi, xi, Di, Hi, Wi, istride, iisize); output[b_oidx] = val; } } } /* Backward implementations wrt data. */ template <typename T, warp_by_grid::PADDING_MODE padding_mode = warp_by_grid::PADDING_MODE::zero, bool align_corners = false, bool channel_last = false> __global__ void kernel_warp_linear_backward_data_2d( const int oisize, const int iisize, const int gisize, T *igrad, const T *ograd, const T *grid, const int3 ishape, const int2 istride, const int2 gstride, const int2 ostride, const int B) { auto Hi = channel_last ? ishape.x : ishape.y; auto Wi = channel_last ? ishape.y : ishape.z; auto unnormalize_grid = align_corners ? 
unnormalize_grid_with<T, true> : unnormalize_grid_with<T, false>; auto get_src_findex_with_pad = [&](const T s, const Size_t S) { if (padding_mode == warp_by_grid::PADDING_MODE::zero) { return get_src_findex_with_zero_pad(s, S); } else if (padding_mode == warp_by_grid::PADDING_MODE::reflect) { return align_corners ? get_src_findex_with_reflect_pad<T, true>(s, S) : get_src_findex_with_reflect_pad<T, false>(s, S); } else if (padding_mode == warp_by_grid::PADDING_MODE::repeat) { return get_src_findex_with_repeat_pad(s, S); } else { return T(-1); } }; NBLA_CUDA_KERNEL_LOOP(oidx, oisize) { auto ond_index = device_flat_to_3d(oidx, ostride); auto c = channel_last ? ond_index.z : ond_index.x; auto h = channel_last ? ond_index.x : ond_index.y; auto w = channel_last ? ond_index.y : ond_index.z; auto gnd_index = make_int3(h, w, 0); auto gidx = device_3d_to_flat(gnd_index, gstride); for (auto b = 0; b < B; ++b) { auto b_gidx = gidx + b * gisize; auto xn = grid[b_gidx + 0]; auto yn = grid[b_gidx + 1]; auto xf0 = unnormalize_grid(xn, Wi); auto yf0 = unnormalize_grid(yn, Hi); auto xf = get_src_findex_with_pad(xf0, Wi); auto yf = get_src_findex_with_pad(yf0, Hi); auto xi0 = static_cast<int>(std::floor(xf)); auto yi0 = static_cast<int>(std::floor(yf)); auto xi1 = xi0 + 1; auto yi1 = yi0 + 1; auto px0 = xf - xi0; auto py0 = yf - yi0; auto px1 = T(1) - px0; auto py1 = T(1) - py0; auto b_oidx = oidx + b * oisize; auto grad = ograd[b_oidx]; backward_data_2d<T, channel_last>(igrad, grad, py1, px1, b, c, yi0, xi0, Hi, Wi, istride, iisize); backward_data_2d<T, channel_last>(igrad, grad, py1, px0, b, c, yi0, xi1, Hi, Wi, istride, iisize); backward_data_2d<T, channel_last>(igrad, grad, py0, px1, b, c, yi1, xi0, Hi, Wi, istride, iisize); backward_data_2d<T, channel_last>(igrad, grad, py0, px0, b, c, yi1, xi1, Hi, Wi, istride, iisize); } } } template <typename T, warp_by_grid::PADDING_MODE padding_mode = warp_by_grid::PADDING_MODE::zero, bool align_corners = false, bool channel_last = false> __global__ void kernel_warp_nearest_backward_data_2d( const int oisize, const int iisize, const int gisize, T *igrad, const T *ograd, const T *grid, const int3 ishape, const int2 istride, const int2 gstride, const int2 ostride, const int B) { auto Hi = channel_last ? ishape.x : ishape.y; auto Wi = channel_last ? ishape.y : ishape.z; auto unnormalize_grid = align_corners ? unnormalize_grid_with<T, true> : unnormalize_grid_with<T, false>; auto get_src_findex_with_pad = [&](const T s, const Size_t S) { if (padding_mode == warp_by_grid::PADDING_MODE::zero) { return get_src_findex_with_zero_pad(s, S); } else if (padding_mode == warp_by_grid::PADDING_MODE::reflect) { return align_corners ? get_src_findex_with_reflect_pad<T, true>(s, S) : get_src_findex_with_reflect_pad<T, false>(s, S); } else if (padding_mode == warp_by_grid::PADDING_MODE::repeat) { return get_src_findex_with_repeat_pad(s, S); } else { return T(-1); } }; NBLA_CUDA_KERNEL_LOOP(oidx, oisize) { auto ond_index = device_flat_to_3d(oidx, ostride); auto c = channel_last ? ond_index.z : ond_index.x; auto h = channel_last ? ond_index.x : ond_index.y; auto w = channel_last ? 
ond_index.y : ond_index.z; auto gnd_index = make_int3(h, w, 0); auto gidx = device_3d_to_flat(gnd_index, gstride); for (auto b = 0; b < B; ++b) { auto b_gidx = gidx + b * gisize; auto xn = grid[b_gidx + 0]; auto yn = grid[b_gidx + 1]; auto xf0 = unnormalize_grid(xn, Wi); auto yf0 = unnormalize_grid(yn, Hi); auto xf = get_src_findex_with_pad(xf0, Wi); auto yf = get_src_findex_with_pad(yf0, Hi); auto xi = static_cast<int>(std::floor(xf + T(0.5))); auto yi = static_cast<int>(std::floor(yf + T(0.5))); auto b_oidx = oidx + b * oisize; auto grad = ograd[b_oidx]; backward_data_2d<T, channel_last>(igrad, grad, T(1.0), T(1.0), b, c, yi, xi, Hi, Wi, istride, iisize); } } } template <typename T, warp_by_grid::PADDING_MODE padding_mode = warp_by_grid::PADDING_MODE::zero, bool align_corners = false, bool channel_last = false> __global__ void kernel_warp_linear_backward_data_3d( const int oisize, const int iisize, const int gisize, T *igrad, const T *ograd, const T *grid, const int4 ishape, const int3 istride, const int3 gstride, const int3 ostride, const int B) { auto Di = channel_last ? ishape.x : ishape.y; auto Hi = channel_last ? ishape.y : ishape.z; auto Wi = channel_last ? ishape.z : ishape.w; auto unnormalize_grid = align_corners ? unnormalize_grid_with<T, true> : unnormalize_grid_with<T, false>; auto get_src_findex_with_pad = [&](const T s, const Size_t S) { if (padding_mode == warp_by_grid::PADDING_MODE::zero) { return get_src_findex_with_zero_pad(s, S); } else if (padding_mode == warp_by_grid::PADDING_MODE::reflect) { return align_corners ? get_src_findex_with_reflect_pad<T, true>(s, S) : get_src_findex_with_reflect_pad<T, false>(s, S); } else if (padding_mode == warp_by_grid::PADDING_MODE::repeat) { return get_src_findex_with_repeat_pad(s, S); } else { return T(-1); } }; NBLA_CUDA_KERNEL_LOOP(oidx, oisize) { auto ond_index = device_flat_to_4d(oidx, ostride); auto c = channel_last ? ond_index.w : ond_index.x; auto d = channel_last ? ond_index.x : ond_index.y; auto h = channel_last ? ond_index.y : ond_index.z; auto w = channel_last ? 
ond_index.z : ond_index.w; auto gnd_index = make_int4(d, h, w, 0); auto gidx = device_4d_to_flat(gnd_index, gstride); for (auto b = 0; b < B; ++b) { auto b_gidx = gidx + b * gisize; auto xn = grid[b_gidx + 0]; auto yn = grid[b_gidx + 1]; auto zn = grid[b_gidx + 2]; auto xf0 = unnormalize_grid(xn, Wi); auto yf0 = unnormalize_grid(yn, Hi); auto zf0 = unnormalize_grid(zn, Di); auto xf = get_src_findex_with_pad(xf0, Wi); auto yf = get_src_findex_with_pad(yf0, Hi); auto zf = get_src_findex_with_pad(zf0, Di); auto xi0 = static_cast<int>(std::floor(xf)); auto yi0 = static_cast<int>(std::floor(yf)); auto zi0 = static_cast<int>(std::floor(zf)); auto xi1 = xi0 + 1; auto yi1 = yi0 + 1; auto zi1 = zi0 + 1; auto px0 = xf - xi0; auto py0 = yf - yi0; auto pz0 = zf - zi0; auto px1 = T(1) - px0; auto py1 = T(1) - py0; auto pz1 = T(1) - pz0; auto b_oidx = oidx + b * oisize; auto grad = ograd[b_oidx]; if (channel_last) { backward_data_3d<T, true>(igrad, grad, pz1, py1, px1, b, c, zi0, yi0, xi0, Di, Hi, Wi, istride, iisize); backward_data_3d<T, true>(igrad, grad, pz1, py1, px0, b, c, zi0, yi0, xi1, Di, Hi, Wi, istride, iisize); backward_data_3d<T, true>(igrad, grad, pz1, py0, px1, b, c, zi0, yi1, xi0, Di, Hi, Wi, istride, iisize); backward_data_3d<T, true>(igrad, grad, pz1, py0, px0, b, c, zi0, yi1, xi1, Di, Hi, Wi, istride, iisize); backward_data_3d<T, true>(igrad, grad, pz0, py1, px1, b, c, zi1, yi0, xi0, Di, Hi, Wi, istride, iisize); backward_data_3d<T, true>(igrad, grad, pz0, py1, px0, b, c, zi1, yi0, xi1, Di, Hi, Wi, istride, iisize); backward_data_3d<T, true>(igrad, grad, pz0, py0, px1, b, c, zi1, yi1, xi0, Di, Hi, Wi, istride, iisize); backward_data_3d<T, true>(igrad, grad, pz0, py0, px0, b, c, zi1, yi1, xi1, Di, Hi, Wi, istride, iisize); } else { backward_data_3d<T, false>(igrad, grad, pz1, py1, px1, b, c, zi0, yi0, xi0, Di, Hi, Wi, istride, iisize); backward_data_3d<T, false>(igrad, grad, pz1, py1, px0, b, c, zi0, yi0, xi1, Di, Hi, Wi, istride, iisize); backward_data_3d<T, false>(igrad, grad, pz1, py0, px1, b, c, zi0, yi1, xi0, Di, Hi, Wi, istride, iisize); backward_data_3d<T, false>(igrad, grad, pz1, py0, px0, b, c, zi0, yi1, xi1, Di, Hi, Wi, istride, iisize); backward_data_3d<T, false>(igrad, grad, pz0, py1, px1, b, c, zi1, yi0, xi0, Di, Hi, Wi, istride, iisize); backward_data_3d<T, false>(igrad, grad, pz0, py1, px0, b, c, zi1, yi0, xi1, Di, Hi, Wi, istride, iisize); backward_data_3d<T, false>(igrad, grad, pz0, py0, px1, b, c, zi1, yi1, xi0, Di, Hi, Wi, istride, iisize); backward_data_3d<T, false>(igrad, grad, pz0, py0, px0, b, c, zi1, yi1, xi1, Di, Hi, Wi, istride, iisize); } } } } template <typename T, warp_by_grid::PADDING_MODE padding_mode = warp_by_grid::PADDING_MODE::zero, bool align_corners = false, bool channel_last = false> __global__ void kernel_warp_nearest_backward_data_3d( const int oisize, const int iisize, const int gisize, T *igrad, const T *ograd, const T *grid, const int4 ishape, const int3 istride, const int3 gstride, const int3 ostride, const int B) { auto Di = channel_last ? ishape.x : ishape.y; auto Hi = channel_last ? ishape.y : ishape.z; auto Wi = channel_last ? ishape.z : ishape.w; auto unnormalize_grid = align_corners ? unnormalize_grid_with<T, true> : unnormalize_grid_with<T, false>; auto get_src_findex_with_pad = [&](const T s, const Size_t S) { if (padding_mode == warp_by_grid::PADDING_MODE::zero) { return get_src_findex_with_zero_pad(s, S); } else if (padding_mode == warp_by_grid::PADDING_MODE::reflect) { return align_corners ? 
get_src_findex_with_reflect_pad<T, true>(s, S) : get_src_findex_with_reflect_pad<T, false>(s, S); } else if (padding_mode == warp_by_grid::PADDING_MODE::repeat) { return get_src_findex_with_repeat_pad(s, S); } else { return T(-1); } }; NBLA_CUDA_KERNEL_LOOP(oidx, oisize) { auto ond_index = device_flat_to_4d(oidx, ostride); auto c = channel_last ? ond_index.w : ond_index.x; auto d = channel_last ? ond_index.x : ond_index.y; auto h = channel_last ? ond_index.y : ond_index.z; auto w = channel_last ? ond_index.z : ond_index.w; auto gnd_index = make_int4(d, h, w, 0); auto gidx = device_4d_to_flat(gnd_index, gstride); for (auto b = 0; b < B; ++b) { auto b_gidx = gidx + b * gisize; auto xn = grid[b_gidx + 0]; auto yn = grid[b_gidx + 1]; auto zn = grid[b_gidx + 2]; auto xf0 = unnormalize_grid(xn, Wi); auto yf0 = unnormalize_grid(yn, Hi); auto zf0 = unnormalize_grid(zn, Di); auto xf = get_src_findex_with_pad(xf0, Wi); auto yf = get_src_findex_with_pad(yf0, Hi); auto zf = get_src_findex_with_pad(zf0, Di); auto xi = static_cast<int>(std::floor(xf + T(0.5))); auto yi = static_cast<int>(std::floor(yf + T(0.5))); auto zi = static_cast<int>(std::floor(zf + T(0.5))); auto b_oidx = oidx + b * oisize; auto grad = ograd[b_oidx]; backward_data_3d<T, channel_last>(igrad, grad, T(1.0), T(1.0), T(1.0), b, c, zi, yi, xi, Di, Hi, Wi, istride, iisize); } } } /* Backward implementations wrt grid. */ template <typename T, warp_by_grid::PADDING_MODE padding_mode = warp_by_grid::PADDING_MODE::zero, bool align_corners = false, bool channel_last = false> __global__ void kernel_warp_linear_backward_grid_2d( const int oisize, const int iisize, const int gisize, T *ggrad, const T *ograd, const T *input, const T *grid, const int3 ishape, const int2 istride, const int2 gstride, const int2 ostride, const int B) { auto Hi = channel_last ? ishape.x : ishape.y; auto Wi = channel_last ? ishape.y : ishape.z; auto unnormalize_grid = align_corners ? unnormalize_grid_with<T, true> : unnormalize_grid_with<T, false>; auto get_src_findex_with_pad = [&](const T s, const Size_t S) { if (padding_mode == warp_by_grid::PADDING_MODE::zero) { return get_src_findex_with_zero_pad(s, S); } else if (padding_mode == warp_by_grid::PADDING_MODE::reflect) { return align_corners ? get_src_findex_with_reflect_pad<T, true>(s, S) : get_src_findex_with_reflect_pad<T, false>(s, S); } else if (padding_mode == warp_by_grid::PADDING_MODE::repeat) { return get_src_findex_with_repeat_pad(s, S); } else { return T(-1); } }; auto get_grad_coef_with_pad = [&](const T s, const Size_t S) { T coef; if (padding_mode == warp_by_grid::PADDING_MODE::zero) { coef = get_grad_coef_with_zero_pad(s, S); } else if (padding_mode == warp_by_grid::PADDING_MODE::reflect) { coef = align_corners ? get_grad_coef_with_reflect_pad<T, true>(s, S) : get_grad_coef_with_reflect_pad<T, false>(s, S); } else if (padding_mode == warp_by_grid::PADDING_MODE::repeat) { coef = get_grad_coef_with_repeat_pad(s, S); } else { return T(0); } return align_corners ? coef * T(S - 1) / T(2) : coef * T(S) / T(2); }; NBLA_CUDA_KERNEL_LOOP(oidx, oisize) { auto ond_index = device_flat_to_3d(oidx, ostride); auto c = channel_last ? ond_index.z : ond_index.x; auto h = channel_last ? ond_index.x : ond_index.y; auto w = channel_last ? 
ond_index.y : ond_index.z; auto gnd_index = make_int3(h, w, 0); auto gidx = device_3d_to_flat(gnd_index, gstride); for (auto b = 0; b < B; ++b) { auto b_gidx = gidx + b * gisize; auto xn = grid[b_gidx + 0]; auto yn = grid[b_gidx + 1]; auto xf0 = unnormalize_grid(xn, Wi); auto yf0 = unnormalize_grid(yn, Hi); auto xf = get_src_findex_with_pad(xf0, Wi); auto yf = get_src_findex_with_pad(yf0, Hi); auto xi0 = static_cast<int>(std::floor(xf)); auto yi0 = static_cast<int>(std::floor(yf)); auto xi1 = xi0 + 1; auto yi1 = yi0 + 1; auto px0 = xf - xi0; auto py0 = yf - yi0; auto px1 = T(1) - px0; auto py1 = T(1) - py0; auto v_y0x0 = get_pixel_value_2d<T, channel_last>( input, b, c, yi0, xi0, Hi, Wi, istride, iisize); auto v_y0x1 = get_pixel_value_2d<T, channel_last>( input, b, c, yi0, xi1, Hi, Wi, istride, iisize); auto v_y1x0 = get_pixel_value_2d<T, channel_last>( input, b, c, yi1, xi0, Hi, Wi, istride, iisize); auto v_y1x1 = get_pixel_value_2d<T, channel_last>( input, b, c, yi1, xi1, Hi, Wi, istride, iisize); auto b_oidx = oidx + b * oisize; auto grad = ograd[b_oidx]; // d_grid = d_output * local_grad{output/pad(x)} * local_grad{pad(x)/x} * // unnormalized_coef auto grad_x = grad * ((v_y0x1 - v_y0x0) * py1 + (v_y1x1 - v_y1x0) * py0); auto grad_y = grad * ((v_y1x0 - v_y0x0) * px1 + (v_y1x1 - v_y0x1) * px0); auto coef_x = get_grad_coef_with_pad(xf0, Wi); auto coef_y = get_grad_coef_with_pad(yf0, Hi); atomic_add(ggrad + b_gidx + 0, grad_x * coef_x); atomic_add(ggrad + b_gidx + 1, grad_y * coef_y); } } } template <typename T, warp_by_grid::PADDING_MODE padding_mode = warp_by_grid::PADDING_MODE::zero, bool align_corners = false, bool channel_last = false> __global__ void kernel_warp_linear_backward_grid_3d( const int oisize, const int iisize, const int gisize, T *ggrad, const T *ograd, const T *input, const T *grid, const int4 ishape, const int3 istride, const int3 gstride, const int3 ostride, const int B) { auto Di = channel_last ? ishape.x : ishape.y; auto Hi = channel_last ? ishape.y : ishape.z; auto Wi = channel_last ? ishape.z : ishape.w; auto unnormalize_grid = align_corners ? unnormalize_grid_with<T, true> : unnormalize_grid_with<T, false>; auto get_src_findex_with_pad = [&](const T s, const Size_t S) { if (padding_mode == warp_by_grid::PADDING_MODE::zero) { return get_src_findex_with_zero_pad(s, S); } else if (padding_mode == warp_by_grid::PADDING_MODE::reflect) { return align_corners ? get_src_findex_with_reflect_pad<T, true>(s, S) : get_src_findex_with_reflect_pad<T, false>(s, S); } else if (padding_mode == warp_by_grid::PADDING_MODE::repeat) { return get_src_findex_with_repeat_pad(s, S); } else { return T(-1); } }; auto get_grad_coef_with_pad = [&](const T s, const Size_t S) { T coef; if (padding_mode == warp_by_grid::PADDING_MODE::zero) { coef = get_grad_coef_with_zero_pad(s, S); } else if (padding_mode == warp_by_grid::PADDING_MODE::reflect) { coef = align_corners ? get_grad_coef_with_reflect_pad<T, true>(s, S) : get_grad_coef_with_reflect_pad<T, false>(s, S); } else if (padding_mode == warp_by_grid::PADDING_MODE::repeat) { coef = get_grad_coef_with_repeat_pad(s, S); } else { return T(0); } return align_corners ? coef * T(S - 1) / T(2) : coef * T(S) / T(2); }; NBLA_CUDA_KERNEL_LOOP(oidx, oisize) { auto ond_index = device_flat_to_4d(oidx, ostride); auto c = channel_last ? ond_index.w : ond_index.x; auto d = channel_last ? ond_index.x : ond_index.y; auto h = channel_last ? ond_index.y : ond_index.z; auto w = channel_last ? 
ond_index.z : ond_index.w; auto gnd_index = make_int4(d, h, w, 0); auto gidx = device_4d_to_flat(gnd_index, gstride); for (auto b = 0; b < B; ++b) { auto b_gidx = gidx + b * gisize; auto xn = grid[b_gidx + 0]; auto yn = grid[b_gidx + 1]; auto zn = grid[b_gidx + 2]; auto xf0 = unnormalize_grid(xn, Wi); auto yf0 = unnormalize_grid(yn, Hi); auto zf0 = unnormalize_grid(zn, Di); auto xf = get_src_findex_with_pad(xf0, Wi); auto yf = get_src_findex_with_pad(yf0, Hi); auto zf = get_src_findex_with_pad(zf0, Di); auto xi0 = static_cast<int>(std::floor(xf)); auto yi0 = static_cast<int>(std::floor(yf)); auto zi0 = static_cast<int>(std::floor(zf)); auto xi1 = xi0 + 1; auto yi1 = yi0 + 1; auto zi1 = zi0 + 1; auto px0 = xf - xi0; auto py0 = yf - yi0; auto pz0 = zf - zi0; auto px1 = T(1) - px0; auto py1 = T(1) - py0; auto pz1 = T(1) - pz0; auto v_z0y0x0 = get_pixel_value_3d<T, channel_last>( input, b, c, zi0, yi0, xi0, Di, Hi, Wi, istride, iisize); auto v_z0y0x1 = get_pixel_value_3d<T, channel_last>( input, b, c, zi0, yi0, xi1, Di, Hi, Wi, istride, iisize); auto v_z0y1x0 = get_pixel_value_3d<T, channel_last>( input, b, c, zi0, yi1, xi0, Di, Hi, Wi, istride, iisize); auto v_z0y1x1 = get_pixel_value_3d<T, channel_last>( input, b, c, zi0, yi1, xi1, Di, Hi, Wi, istride, iisize); auto v_z1y0x0 = get_pixel_value_3d<T, channel_last>( input, b, c, zi1, yi0, xi0, Di, Hi, Wi, istride, iisize); auto v_z1y0x1 = get_pixel_value_3d<T, channel_last>( input, b, c, zi1, yi0, xi1, Di, Hi, Wi, istride, iisize); auto v_z1y1x0 = get_pixel_value_3d<T, channel_last>( input, b, c, zi1, yi1, xi0, Di, Hi, Wi, istride, iisize); auto v_z1y1x1 = get_pixel_value_3d<T, channel_last>( input, b, c, zi1, yi1, xi1, Di, Hi, Wi, istride, iisize); auto b_oidx = oidx + b * oisize; auto grad = ograd[b_oidx]; // d_grid = d_output * local_grad{output/pad(x)} * local_grad{pad(x)/x} * // unnormalized_coef auto grad_x = grad * ((v_z0y0x1 - v_z0y0x0) * pz1 * py1 + (v_z0y1x1 - v_z0y1x0) * pz1 * py0 + (v_z1y0x1 - v_z1y0x0) * pz0 * py1 + (v_z1y1x1 - v_z1y1x0) * pz0 * py0); auto grad_y = grad * ((v_z0y1x0 - v_z0y0x0) * pz1 * px1 + (v_z0y1x1 - v_z0y0x1) * pz1 * px0 + (v_z1y1x0 - v_z1y0x0) * pz0 * px1 + (v_z1y1x1 - v_z1y0x1) * pz0 * px0); auto grad_z = grad * ((v_z1y0x0 - v_z0y0x0) * py1 * px1 + (v_z1y0x1 - v_z0y0x1) * py1 * px0 + (v_z1y1x0 - v_z0y1x0) * py0 * px1 + (v_z1y1x1 - v_z0y1x1) * py0 * px0); auto coef_x = get_grad_coef_with_pad(xf0, Wi); auto coef_y = get_grad_coef_with_pad(yf0, Hi); auto coef_z = get_grad_coef_with_pad(zf0, Di); atomic_add(ggrad + b_gidx + 0, grad_x * coef_x); atomic_add(ggrad + b_gidx + 1, grad_y * coef_y); atomic_add(ggrad + b_gidx + 2, grad_z * coef_z); } } } template <typename T> void WarpByGridCuda<T>::setup_impl(const Variables &inputs, const Variables &outputs) { WarpByGrid<T>::setup_impl(inputs, outputs); cuda_set_device(this->device_); } template <typename T> void WarpByGridCuda<T>::forward_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(this->device_); auto ndims = inputs[1]->shape().size(); auto channel_last = this->channel_last_; auto align_corners = this->align_corners_; auto padding_mode_t = this->padding_mode_t_; using PADDING_MODE = warp_by_grid::PADDING_MODE; auto zero = PADDING_MODE::zero; auto repeat = PADDING_MODE::repeat; auto reflect = PADDING_MODE::reflect; if (ndims == 4) { auto B = inputs[0]->shape()[0]; auto Ci = channel_last ? inputs[0]->shape()[3] : inputs[0]->shape()[1]; auto Hi = channel_last ? inputs[0]->shape()[1] : inputs[0]->shape()[2]; auto Wi = channel_last ? 
inputs[0]->shape()[2] : inputs[0]->shape()[3]; auto Ho = channel_last ? outputs[0]->shape()[1] : outputs[0]->shape()[2]; auto Wo = channel_last ? outputs[0]->shape()[2] : outputs[0]->shape()[3]; auto ishape = channel_last ? make_int3(Hi, Wi, Ci) : make_int3(Ci, Hi, Wi); auto istride = channel_last ? make_int2(Wi * Ci, Ci) : make_int2(Hi * Wi, Wi); auto gstride = make_int2(Wo * 2, 2); auto ostride = channel_last ? make_int2(Wo * Ci, Ci) : make_int2(Ho * Wo, Wo); auto output = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_); auto input = inputs[0]->get_data_pointer<Tcu>(this->ctx_); auto grid = inputs[1]->get_data_pointer<Tcu>(this->ctx_); auto oisize = Ci * Ho * Wo; auto iisize = Ci * Hi * Wi; auto gisize = Ho * Wo * 2; if (this->mode_ == "linear") { if (channel_last) { if (padding_mode_t == zero && align_corners) { auto kernel = kernel_warp_linear_forward_2d<Tcu, PADDING_MODE::zero, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && align_corners) { auto kernel = kernel_warp_linear_forward_2d<Tcu, PADDING_MODE::repeat, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && align_corners) { auto kernel = kernel_warp_linear_forward_2d<Tcu, PADDING_MODE::reflect, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == zero && !align_corners) { auto kernel = kernel_warp_linear_forward_2d<Tcu, PADDING_MODE::zero, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && !align_corners) { auto kernel = kernel_warp_linear_forward_2d<Tcu, PADDING_MODE::repeat, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && !align_corners) { auto kernel = kernel_warp_linear_forward_2d<Tcu, PADDING_MODE::reflect, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } } else { if (padding_mode_t == zero && align_corners) { auto kernel = kernel_warp_linear_forward_2d<Tcu, PADDING_MODE::zero, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && align_corners) { auto kernel = kernel_warp_linear_forward_2d<Tcu, PADDING_MODE::repeat, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && align_corners) { auto kernel = kernel_warp_linear_forward_2d<Tcu, PADDING_MODE::reflect, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == zero && !align_corners) { auto kernel = kernel_warp_linear_forward_2d<Tcu, PADDING_MODE::zero, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && !align_corners) { auto kernel = kernel_warp_linear_forward_2d<Tcu, PADDING_MODE::repeat, false, 
false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && !align_corners) { auto kernel = kernel_warp_linear_forward_2d<Tcu, PADDING_MODE::reflect, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } } } else if (this->mode_ == "nearest") { if (channel_last) { if (padding_mode_t == zero && align_corners) { auto kernel = kernel_warp_nearest_forward_2d<Tcu, PADDING_MODE::zero, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && align_corners) { auto kernel = kernel_warp_nearest_forward_2d<Tcu, PADDING_MODE::repeat, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && align_corners) { auto kernel = kernel_warp_nearest_forward_2d<Tcu, PADDING_MODE::reflect, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == zero && !align_corners) { auto kernel = kernel_warp_nearest_forward_2d<Tcu, PADDING_MODE::zero, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && !align_corners) { auto kernel = kernel_warp_nearest_forward_2d<Tcu, PADDING_MODE::repeat, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && !align_corners) { auto kernel = kernel_warp_nearest_forward_2d<Tcu, PADDING_MODE::reflect, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } } else { if (padding_mode_t == zero && align_corners) { auto kernel = kernel_warp_nearest_forward_2d<Tcu, PADDING_MODE::zero, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && align_corners) { auto kernel = kernel_warp_nearest_forward_2d<Tcu, PADDING_MODE::repeat, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && align_corners) { auto kernel = kernel_warp_nearest_forward_2d<Tcu, PADDING_MODE::reflect, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == zero && !align_corners) { auto kernel = kernel_warp_nearest_forward_2d<Tcu, PADDING_MODE::zero, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && !align_corners) { auto kernel = kernel_warp_nearest_forward_2d<Tcu, PADDING_MODE::repeat, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && !align_corners) { auto kernel = kernel_warp_nearest_forward_2d<Tcu, PADDING_MODE::reflect, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, 
oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } } } } else if (ndims == 5) { auto B = inputs[0]->shape()[0]; auto Ci = channel_last ? inputs[0]->shape()[4] : inputs[0]->shape()[1]; auto Di = channel_last ? inputs[0]->shape()[1] : inputs[0]->shape()[2]; auto Hi = channel_last ? inputs[0]->shape()[2] : inputs[0]->shape()[3]; auto Wi = channel_last ? inputs[0]->shape()[3] : inputs[0]->shape()[4]; auto Do = channel_last ? outputs[0]->shape()[1] : outputs[0]->shape()[2]; auto Ho = channel_last ? outputs[0]->shape()[2] : outputs[0]->shape()[3]; auto Wo = channel_last ? outputs[0]->shape()[3] : outputs[0]->shape()[4]; auto ishape = channel_last ? make_int4(Di, Hi, Wi, Ci) : make_int4(Ci, Di, Hi, Wi); auto istride = channel_last ? make_int3(Hi * Wi * Ci, Wi * Ci, Ci) : make_int3(Di * Hi * Wi, Hi * Wi, Wi); auto gstride = make_int3(Ho * Wo * 3, Wo * 3, 3); auto ostride = channel_last ? make_int3(Ho * Wo * Ci, Wo * Ci, Ci) : make_int3(Do * Ho * Wo, Ho * Wo, Wo); auto output = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_); auto input = inputs[0]->get_data_pointer<Tcu>(this->ctx_); auto grid = inputs[1]->get_data_pointer<Tcu>(this->ctx_); auto oisize = Ci * Do * Ho * Wo; auto iisize = Ci * Di * Hi * Wi; auto gisize = Do * Ho * Wo * 3; if (this->mode_ == "linear") { if (channel_last) { if (padding_mode_t == zero && align_corners) { auto kernel = kernel_warp_linear_forward_3d<Tcu, PADDING_MODE::zero, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && align_corners) { auto kernel = kernel_warp_linear_forward_3d<Tcu, PADDING_MODE::repeat, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && align_corners) { auto kernel = kernel_warp_linear_forward_3d<Tcu, PADDING_MODE::reflect, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == zero && !align_corners) { auto kernel = kernel_warp_linear_forward_3d<Tcu, PADDING_MODE::zero, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && !align_corners) { auto kernel = kernel_warp_linear_forward_3d<Tcu, PADDING_MODE::repeat, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && !align_corners) { auto kernel = kernel_warp_linear_forward_3d<Tcu, PADDING_MODE::reflect, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } } else { if (padding_mode_t == zero && align_corners) { auto kernel = kernel_warp_linear_forward_3d<Tcu, PADDING_MODE::zero, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && align_corners) { auto kernel = kernel_warp_linear_forward_3d<Tcu, PADDING_MODE::repeat, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && align_corners) { auto kernel = kernel_warp_linear_forward_3d<Tcu, 
PADDING_MODE::reflect, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == zero && !align_corners) { auto kernel = kernel_warp_linear_forward_3d<Tcu, PADDING_MODE::zero, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && !align_corners) { auto kernel = kernel_warp_linear_forward_3d<Tcu, PADDING_MODE::repeat, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && !align_corners) { auto kernel = kernel_warp_linear_forward_3d<Tcu, PADDING_MODE::reflect, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } } } else if (this->mode_ == "nearest") { if (channel_last) { if (padding_mode_t == zero && align_corners) { auto kernel = kernel_warp_nearest_forward_3d<Tcu, PADDING_MODE::zero, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && align_corners) { auto kernel = kernel_warp_nearest_forward_3d<Tcu, PADDING_MODE::repeat, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && align_corners) { auto kernel = kernel_warp_nearest_forward_3d<Tcu, PADDING_MODE::reflect, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == zero && !align_corners) { auto kernel = kernel_warp_nearest_forward_3d<Tcu, PADDING_MODE::zero, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && !align_corners) { auto kernel = kernel_warp_nearest_forward_3d<Tcu, PADDING_MODE::repeat, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && !align_corners) { auto kernel = kernel_warp_nearest_forward_3d<Tcu, PADDING_MODE::reflect, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } } else { if (padding_mode_t == zero && align_corners) { auto kernel = kernel_warp_nearest_forward_3d<Tcu, PADDING_MODE::zero, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && align_corners) { auto kernel = kernel_warp_nearest_forward_3d<Tcu, PADDING_MODE::repeat, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && align_corners) { auto kernel = kernel_warp_nearest_forward_3d<Tcu, PADDING_MODE::reflect, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == zero && !align_corners) { auto kernel = kernel_warp_nearest_forward_3d<Tcu, PADDING_MODE::zero, false, false>; 
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && !align_corners) { auto kernel = kernel_warp_nearest_forward_3d<Tcu, PADDING_MODE::repeat, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && !align_corners) { auto kernel = kernel_warp_nearest_forward_3d<Tcu, PADDING_MODE::reflect, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, output, input, grid, ishape, istride, gstride, ostride, B); } } } } } template <typename T> void WarpByGridCuda<T>::backward_impl(const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!(propagate_down[0] || propagate_down[1])) { return; } cuda_set_device(this->device_); auto ndims = inputs[1]->shape().size(); auto channel_last = this->channel_last_; auto align_corners = this->align_corners_; auto padding_mode_t = this->padding_mode_t_; using PADDING_MODE = warp_by_grid::PADDING_MODE; auto zero = PADDING_MODE::zero; auto repeat = PADDING_MODE::repeat; auto reflect = PADDING_MODE::reflect; // w.r.t. data if (propagate_down[0]) { if (ndims == 4) { auto B = inputs[0]->shape()[0]; auto Ci = channel_last ? inputs[0]->shape()[3] : inputs[0]->shape()[1]; auto Hi = channel_last ? inputs[0]->shape()[1] : inputs[0]->shape()[2]; auto Wi = channel_last ? inputs[0]->shape()[2] : inputs[0]->shape()[3]; auto Ho = channel_last ? outputs[0]->shape()[1] : outputs[0]->shape()[2]; auto Wo = channel_last ? outputs[0]->shape()[2] : outputs[0]->shape()[3]; auto ishape = channel_last ? make_int3(Hi, Wi, Ci) : make_int3(Ci, Hi, Wi); auto istride = channel_last ? make_int2(Wi * Ci, Ci) : make_int2(Hi * Wi, Wi); auto gstride = make_int2(Wo * 2, 2); auto ostride = channel_last ? 
make_int2(Wo * Ci, Ci) : make_int2(Ho * Wo, Wo); auto output = outputs[0]->get_data_pointer<Tcu>(this->ctx_); auto input = inputs[0]->get_data_pointer<Tcu>(this->ctx_); auto grid = inputs[1]->get_data_pointer<Tcu>(this->ctx_); auto ograd = outputs[0]->get_grad_pointer<Tcu>(this->ctx_); auto igrad = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_); auto ggrad = inputs[1]->cast_grad_and_get_pointer<Tcu>(this->ctx_); auto oisize = Ci * Ho * Wo; auto iisize = Ci * Hi * Wi; auto gisize = Ho * Wo * 2; if (this->mode_ == "linear") { if (channel_last) { if (padding_mode_t == zero && align_corners) { auto kernel = kernel_warp_linear_backward_data_2d<Tcu, PADDING_MODE::zero, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && align_corners) { auto kernel = kernel_warp_linear_backward_data_2d<Tcu, PADDING_MODE::repeat, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && align_corners) { auto kernel = kernel_warp_linear_backward_data_2d<Tcu, PADDING_MODE::reflect, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == zero && !align_corners) { auto kernel = kernel_warp_linear_backward_data_2d<Tcu, PADDING_MODE::zero, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && !align_corners) { auto kernel = kernel_warp_linear_backward_data_2d<Tcu, PADDING_MODE::repeat, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && !align_corners) { auto kernel = kernel_warp_linear_backward_data_2d<Tcu, PADDING_MODE::reflect, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } } else { if (padding_mode_t == zero && align_corners) { auto kernel = kernel_warp_linear_backward_data_2d<Tcu, PADDING_MODE::zero, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && align_corners) { auto kernel = kernel_warp_linear_backward_data_2d<Tcu, PADDING_MODE::repeat, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && align_corners) { auto kernel = kernel_warp_linear_backward_data_2d<Tcu, PADDING_MODE::reflect, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == zero && !align_corners) { auto kernel = kernel_warp_linear_backward_data_2d<Tcu, PADDING_MODE::zero, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && !align_corners) { auto kernel = kernel_warp_linear_backward_data_2d<Tcu, PADDING_MODE::repeat, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && 
!align_corners) { auto kernel = kernel_warp_linear_backward_data_2d<Tcu, PADDING_MODE::reflect, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } } } else if (this->mode_ == "nearest") { if (channel_last) { if (padding_mode_t == zero && align_corners) { auto kernel = kernel_warp_nearest_backward_data_2d<Tcu, PADDING_MODE::zero, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && align_corners) { auto kernel = kernel_warp_nearest_backward_data_2d<Tcu, PADDING_MODE::repeat, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && align_corners) { auto kernel = kernel_warp_nearest_backward_data_2d<Tcu, PADDING_MODE::reflect, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == zero && !align_corners) { auto kernel = kernel_warp_nearest_backward_data_2d<Tcu, PADDING_MODE::zero, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && !align_corners) { auto kernel = kernel_warp_nearest_backward_data_2d<Tcu, PADDING_MODE::repeat, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && !align_corners) { auto kernel = kernel_warp_nearest_backward_data_2d<Tcu, PADDING_MODE::reflect, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } } else { if (padding_mode_t == zero && align_corners) { auto kernel = kernel_warp_nearest_backward_data_2d<Tcu, PADDING_MODE::zero, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && align_corners) { auto kernel = kernel_warp_nearest_backward_data_2d<Tcu, PADDING_MODE::repeat, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && align_corners) { auto kernel = kernel_warp_nearest_backward_data_2d<Tcu, PADDING_MODE::reflect, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == zero && !align_corners) { auto kernel = kernel_warp_nearest_backward_data_2d<Tcu, PADDING_MODE::zero, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && !align_corners) { auto kernel = kernel_warp_nearest_backward_data_2d<Tcu, PADDING_MODE::repeat, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && !align_corners) { auto kernel = kernel_warp_nearest_backward_data_2d<Tcu, PADDING_MODE::reflect, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } } } } else if (ndims 
== 5) { auto B = inputs[0]->shape()[0]; auto Ci = channel_last ? inputs[0]->shape()[4] : inputs[0]->shape()[1]; auto Di = channel_last ? inputs[0]->shape()[1] : inputs[0]->shape()[2]; auto Hi = channel_last ? inputs[0]->shape()[2] : inputs[0]->shape()[3]; auto Wi = channel_last ? inputs[0]->shape()[3] : inputs[0]->shape()[4]; auto Do = channel_last ? outputs[0]->shape()[1] : outputs[0]->shape()[2]; auto Ho = channel_last ? outputs[0]->shape()[2] : outputs[0]->shape()[3]; auto Wo = channel_last ? outputs[0]->shape()[3] : outputs[0]->shape()[4]; auto ishape = channel_last ? make_int4(Di, Hi, Wi, Ci) : make_int4(Ci, Di, Hi, Wi); auto istride = channel_last ? make_int3(Hi * Wi * Ci, Wi * Ci, Ci) : make_int3(Di * Hi * Wi, Hi * Wi, Wi); auto gstride = make_int3(Ho * Wo * 3, Wo * 3, 3); auto ostride = channel_last ? make_int3(Ho * Wo * Ci, Wo * Ci, Ci) : make_int3(Do * Ho * Wo, Ho * Wo, Wo); auto output = outputs[0]->get_data_pointer<Tcu>(this->ctx_); auto input = inputs[0]->get_data_pointer<Tcu>(this->ctx_); auto grid = inputs[1]->get_data_pointer<Tcu>(this->ctx_); auto ograd = outputs[0]->get_grad_pointer<Tcu>(this->ctx_); auto igrad = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_); auto ggrad = inputs[1]->cast_grad_and_get_pointer<Tcu>(this->ctx_); auto oisize = Ci * Do * Ho * Wo; auto iisize = Ci * Di * Hi * Wi; auto gisize = Do * Ho * Wo * 3; if (this->mode_ == "linear") { if (channel_last) { if (padding_mode_t == zero && align_corners) { auto kernel = kernel_warp_linear_backward_data_3d<Tcu, PADDING_MODE::zero, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && align_corners) { auto kernel = kernel_warp_linear_backward_data_3d<Tcu, PADDING_MODE::repeat, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && align_corners) { auto kernel = kernel_warp_linear_backward_data_3d<Tcu, PADDING_MODE::reflect, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == zero && !align_corners) { auto kernel = kernel_warp_linear_backward_data_3d<Tcu, PADDING_MODE::zero, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && !align_corners) { auto kernel = kernel_warp_linear_backward_data_3d<Tcu, PADDING_MODE::repeat, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && !align_corners) { auto kernel = kernel_warp_linear_backward_data_3d<Tcu, PADDING_MODE::reflect, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } } else { if (padding_mode_t == zero && align_corners) { auto kernel = kernel_warp_linear_backward_data_3d<Tcu, PADDING_MODE::zero, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && align_corners) { auto kernel = kernel_warp_linear_backward_data_3d<Tcu, PADDING_MODE::repeat, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, 
ostride, B); } else if (padding_mode_t == reflect && align_corners) { auto kernel = kernel_warp_linear_backward_data_3d<Tcu, PADDING_MODE::reflect, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == zero && !align_corners) { auto kernel = kernel_warp_linear_backward_data_3d<Tcu, PADDING_MODE::zero, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && !align_corners) { auto kernel = kernel_warp_linear_backward_data_3d<Tcu, PADDING_MODE::repeat, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && !align_corners) { auto kernel = kernel_warp_linear_backward_data_3d<Tcu, PADDING_MODE::reflect, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } } } else if (this->mode_ == "nearest") { if (channel_last) { if (padding_mode_t == zero && align_corners) { auto kernel = kernel_warp_nearest_backward_data_3d<Tcu, PADDING_MODE::zero, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && align_corners) { auto kernel = kernel_warp_nearest_backward_data_3d<Tcu, PADDING_MODE::repeat, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && align_corners) { auto kernel = kernel_warp_nearest_backward_data_3d<Tcu, PADDING_MODE::reflect, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == zero && !align_corners) { auto kernel = kernel_warp_nearest_backward_data_3d<Tcu, PADDING_MODE::zero, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && !align_corners) { auto kernel = kernel_warp_nearest_backward_data_3d<Tcu, PADDING_MODE::repeat, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && !align_corners) { auto kernel = kernel_warp_nearest_backward_data_3d<Tcu, PADDING_MODE::reflect, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } } else { if (padding_mode_t == zero && align_corners) { auto kernel = kernel_warp_nearest_backward_data_3d<Tcu, PADDING_MODE::zero, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && align_corners) { auto kernel = kernel_warp_nearest_backward_data_3d<Tcu, PADDING_MODE::repeat, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && align_corners) { auto kernel = kernel_warp_nearest_backward_data_3d<Tcu, PADDING_MODE::reflect, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, 
istride, gstride, ostride, B); } else if (padding_mode_t == zero && !align_corners) { auto kernel = kernel_warp_nearest_backward_data_3d<Tcu, PADDING_MODE::zero, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && !align_corners) { auto kernel = kernel_warp_nearest_backward_data_3d<Tcu, PADDING_MODE::repeat, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && !align_corners) { auto kernel = kernel_warp_nearest_backward_data_3d<Tcu, PADDING_MODE::reflect, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, igrad, ograd, grid, ishape, istride, gstride, ostride, B); } } } } // w.r.t. grid if (propagate_down[1]) { if (ndims == 4) { auto B = inputs[0]->shape()[0]; auto Ci = channel_last ? inputs[0]->shape()[3] : inputs[0]->shape()[1]; auto Hi = channel_last ? inputs[0]->shape()[1] : inputs[0]->shape()[2]; auto Wi = channel_last ? inputs[0]->shape()[2] : inputs[0]->shape()[3]; auto Ho = channel_last ? outputs[0]->shape()[1] : outputs[0]->shape()[2]; auto Wo = channel_last ? outputs[0]->shape()[2] : outputs[0]->shape()[3]; auto ishape = channel_last ? make_int3(Hi, Wi, Ci) : make_int3(Ci, Hi, Wi); auto istride = channel_last ? make_int2(Wi * Ci, Ci) : make_int2(Hi * Wi, Wi); auto gstride = make_int2(Wo * 2, 2); auto ostride = channel_last ? make_int2(Wo * Ci, Ci) : make_int2(Ho * Wo, Wo); auto output = outputs[0]->get_data_pointer<Tcu>(this->ctx_); auto input = inputs[0]->get_data_pointer<Tcu>(this->ctx_); auto grid = inputs[1]->get_data_pointer<Tcu>(this->ctx_); auto ograd = outputs[0]->get_grad_pointer<Tcu>(this->ctx_); auto igrad = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_); auto ggrad = inputs[1]->cast_grad_and_get_pointer<Tcu>(this->ctx_); auto oisize = Ci * Ho * Wo; auto iisize = Ci * Hi * Wi; auto gisize = Ho * Wo * 2; if (this->mode_ == "linear") { if (channel_last) { if (padding_mode_t == zero && align_corners) { auto kernel = kernel_warp_linear_backward_grid_2d<Tcu, PADDING_MODE::zero, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, ggrad, ograd, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && align_corners) { auto kernel = kernel_warp_linear_backward_grid_2d<Tcu, PADDING_MODE::repeat, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, ggrad, ograd, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && align_corners) { auto kernel = kernel_warp_linear_backward_grid_2d< Tcu, PADDING_MODE::reflect, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, ggrad, ograd, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == zero && !align_corners) { auto kernel = kernel_warp_linear_backward_grid_2d<Tcu, PADDING_MODE::zero, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, ggrad, ograd, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && !align_corners) { auto kernel = kernel_warp_linear_backward_grid_2d<Tcu, PADDING_MODE::repeat, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, ggrad, ograd, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && !align_corners) { auto kernel = 
kernel_warp_linear_backward_grid_2d< Tcu, PADDING_MODE::reflect, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, ggrad, ograd, input, grid, ishape, istride, gstride, ostride, B); } } else { if (padding_mode_t == zero && align_corners) { auto kernel = kernel_warp_linear_backward_grid_2d<Tcu, PADDING_MODE::zero, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, ggrad, ograd, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && align_corners) { auto kernel = kernel_warp_linear_backward_grid_2d<Tcu, PADDING_MODE::repeat, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, ggrad, ograd, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && align_corners) { auto kernel = kernel_warp_linear_backward_grid_2d< Tcu, PADDING_MODE::reflect, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, ggrad, ograd, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == zero && !align_corners) { auto kernel = kernel_warp_linear_backward_grid_2d<Tcu, PADDING_MODE::zero, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, ggrad, ograd, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && !align_corners) { auto kernel = kernel_warp_linear_backward_grid_2d<Tcu, PADDING_MODE::repeat, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, ggrad, ograd, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && !align_corners) { auto kernel = kernel_warp_linear_backward_grid_2d< Tcu, PADDING_MODE::reflect, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, ggrad, ograd, input, grid, ishape, istride, gstride, ostride, B); } } } else if (this->mode_ == "nearest") { NBLA_ERROR( error_code::not_implemented, "Backward wrt the grid is not supported in the nearest mode. " "Use the `linear` mode."); } } else if (ndims == 5) { auto B = inputs[0]->shape()[0]; auto Ci = channel_last ? inputs[0]->shape()[4] : inputs[0]->shape()[1]; auto Di = channel_last ? inputs[0]->shape()[1] : inputs[0]->shape()[2]; auto Hi = channel_last ? inputs[0]->shape()[2] : inputs[0]->shape()[3]; auto Wi = channel_last ? inputs[0]->shape()[3] : inputs[0]->shape()[4]; auto Do = channel_last ? outputs[0]->shape()[1] : outputs[0]->shape()[2]; auto Ho = channel_last ? outputs[0]->shape()[2] : outputs[0]->shape()[3]; auto Wo = channel_last ? outputs[0]->shape()[3] : outputs[0]->shape()[4]; auto ishape = channel_last ? make_int4(Di, Hi, Wi, Ci) : make_int4(Ci, Di, Hi, Wi); auto istride = channel_last ? make_int3(Hi * Wi * Ci, Wi * Ci, Ci) : make_int3(Di * Hi * Wi, Hi * Wi, Wi); auto gstride = make_int3(Ho * Wo * 3, Wo * 3, 3); auto ostride = channel_last ? 
make_int3(Ho * Wo * Ci, Wo * Ci, Ci) : make_int3(Do * Ho * Wo, Ho * Wo, Wo); auto output = outputs[0]->get_data_pointer<Tcu>(this->ctx_); auto input = inputs[0]->get_data_pointer<Tcu>(this->ctx_); auto grid = inputs[1]->get_data_pointer<Tcu>(this->ctx_); auto ograd = outputs[0]->get_grad_pointer<Tcu>(this->ctx_); auto igrad = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_); auto ggrad = inputs[1]->cast_grad_and_get_pointer<Tcu>(this->ctx_); auto oisize = Ci * Do * Ho * Wo; auto iisize = Ci * Di * Hi * Wi; auto gisize = Do * Ho * Wo * 3; if (this->mode_ == "linear") { if (channel_last) { if (padding_mode_t == zero && align_corners) { auto kernel = kernel_warp_linear_backward_grid_3d<Tcu, PADDING_MODE::zero, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, ggrad, ograd, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && align_corners) { auto kernel = kernel_warp_linear_backward_grid_3d<Tcu, PADDING_MODE::repeat, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, ggrad, ograd, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && align_corners) { auto kernel = kernel_warp_linear_backward_grid_3d< Tcu, PADDING_MODE::reflect, true, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, ggrad, ograd, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == zero && !align_corners) { auto kernel = kernel_warp_linear_backward_grid_3d<Tcu, PADDING_MODE::zero, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, ggrad, ograd, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && !align_corners) { auto kernel = kernel_warp_linear_backward_grid_3d<Tcu, PADDING_MODE::repeat, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, ggrad, ograd, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && !align_corners) { auto kernel = kernel_warp_linear_backward_grid_3d< Tcu, PADDING_MODE::reflect, false, true>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, ggrad, ograd, input, grid, ishape, istride, gstride, ostride, B); } } else { if (padding_mode_t == zero && align_corners) { auto kernel = kernel_warp_linear_backward_grid_3d<Tcu, PADDING_MODE::zero, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, ggrad, ograd, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && align_corners) { auto kernel = kernel_warp_linear_backward_grid_3d<Tcu, PADDING_MODE::repeat, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, ggrad, ograd, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && align_corners) { auto kernel = kernel_warp_linear_backward_grid_3d< Tcu, PADDING_MODE::reflect, true, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, ggrad, ograd, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == zero && !align_corners) { auto kernel = kernel_warp_linear_backward_grid_3d<Tcu, PADDING_MODE::zero, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, ggrad, ograd, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == repeat && !align_corners) { auto kernel = kernel_warp_linear_backward_grid_3d<Tcu, PADDING_MODE::repeat, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, 
iisize, gisize, ggrad, ograd, input, grid, ishape, istride, gstride, ostride, B); } else if (padding_mode_t == reflect && !align_corners) { auto kernel = kernel_warp_linear_backward_grid_3d< Tcu, PADDING_MODE::reflect, false, false>; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, oisize, iisize, gisize, ggrad, ograd, input, grid, ishape, istride, gstride, ostride, B); } } } else if (this->mode_ == "nearest") { NBLA_ERROR( error_code::not_implemented, "Backward wrt the grid is not supported in the nearest mode. " "Use the `linear` mode."); } } } } } }
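/*
 * Illustrative sketch (not part of nnabla): the 3D backward-grid kernel above
 * accumulates d_output * d(output)/d(grid) into ggrad via atomic_add. The 2D
 * (bilinear) analogue below shows the same derivative structure on the host
 * with plain floats. The names GridGrad2D and bilinear_grid_grad are
 * hypothetical; v00..v11 are the four neighboring pixel values, px/py the
 * fractional offsets of the unnormalized sample point, and ograd the incoming
 * output gradient.
 */
struct GridGrad2D { float gx, gy; };

inline GridGrad2D bilinear_grid_grad(float v00, float v01, float v10, float v11,
                                     float px, float py, float ograd) {
  // output = v00*(1-py)*(1-px) + v01*(1-py)*px + v10*py*(1-px) + v11*py*px
  // Differentiating w.r.t. the unnormalized x and y sample coordinates and
  // scaling by the upstream gradient gives the values that the kernel above
  // accumulates (before multiplying by the coefficient returned by
  // get_grad_coef_with_pad).
  const float gx = ograd * ((v01 - v00) * (1.f - py) + (v11 - v10) * py);
  const float gy = ograd * ((v10 - v00) * (1.f - px) + (v11 - v01) * px);
  return {gx, gy};
}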
/** Fast integer multiplication */ #define FMUL(x,y) (__mul24(x,y)) //#define FMUL(x,y) ((x)*(y)) // X block count which will be processed by one thread block #define GPUJPEG_DCT_BLOCK_COUNT_X 4 // Y block count which will be processed by one thread block #define GPUJPEG_DCT_BLOCK_COUNT_Y 4 // Thread block width #define GPUJPEG_DCT_THREAD_BLOCK_WIDTH (GPUJPEG_BLOCK_SIZE * GPUJPEG_DCT_BLOCK_COUNT_X) // Thread block height #define GPUJPEG_DCT_THREAD_BLOCK_HEIGHT (GPUJPEG_BLOCK_SIZE * GPUJPEG_DCT_BLOCK_COUNT_Y) // Stride of shared memory buffer (short kernel) #define GPUJPEG_DCT_THREAD_BLOCK_STRIDE (GPUJPEG_DCT_THREAD_BLOCK_WIDTH + 4) #define IMAD(a, b, c) ( ((a) * (b)) + (c) ) #define IMUL(a, b) ((a) * (b)) #define SIN_1_4 0x5A82 #define COS_1_4 0x5A82 #define SIN_1_8 0x30FC #define COS_1_8 0x7642 #define OSIN_1_16 0x063E #define OSIN_3_16 0x11C7 #define OSIN_5_16 0x1A9B #define OSIN_7_16 0x1F63 #define OCOS_1_16 0x1F63 #define OCOS_3_16 0x1A9B #define OCOS_5_16 0x11C7 #define OCOS_7_16 0x063E /** * Package of 2 shorts into 1 int - designed to perform i/o by integers to avoid bank conflicts */ union PackedInteger { struct __align__(8) { int16_t hShort1; int16_t hShort2; }; int32_t hInt; }; /** * Converts fixed point value to short value */ __device__ inline int16_t unfixh(int x) { return (int16_t)((x + 0x8000) >> 16); } /** * Converts fixed point value to short value */ __device__ inline int unfixo(int x) { return (x + 0x1000) >> 13; } /** * Performs in-place IDCT of vector of 8 elements (used to access columns in shared memory). * * @param SrcDst [IN/OUT] - Pointer to the first element of vector * @param Stride [IN] - Value to add to ptr to access other elements * @return None */ __device__ void gpujpeg_idct_gpu_kernel_inplace(int16_t* SrcDst, int Stride) { int in0, in1, in2, in3, in4, in5, in6, in7; int tmp10, tmp11, tmp12, tmp13; int tmp20, tmp21, tmp22, tmp23; int tmp30, tmp31; int tmp40, tmp41, tmp42, tmp43; int tmp50, tmp51, tmp52, tmp53; int16_t *DstPtr = SrcDst; in0 = *DstPtr; DstPtr += Stride; in1 = *DstPtr; DstPtr += Stride; in2 = *DstPtr; DstPtr += Stride; in3 = *DstPtr; DstPtr += Stride; in4 = *DstPtr; DstPtr += Stride; in5 = *DstPtr; DstPtr += Stride; in6 = *DstPtr; DstPtr += Stride; in7 = *DstPtr; tmp10 = FMUL(in0 + in4, COS_1_4); tmp11 = FMUL(in0 - in4, COS_1_4); tmp12 = FMUL(in2, SIN_1_8) - FMUL(in6, COS_1_8); tmp13 = FMUL(in6, SIN_1_8) + FMUL(in2, COS_1_8); tmp20 = tmp10 + tmp13; tmp21 = tmp11 + tmp12; tmp22 = tmp11 - tmp12; tmp23 = tmp10 - tmp13; tmp30 = unfixo(FMUL(in3 + in5, COS_1_4)); tmp31 = unfixo(FMUL(in3 - in5, COS_1_4)); in1 <<= 2; in7 <<= 2; tmp40 = in1 + tmp30; tmp41 = in7 + tmp31; tmp42 = in1 - tmp30; tmp43 = in7 - tmp31; tmp50 = FMUL(tmp40, OCOS_1_16) + FMUL(tmp41, OSIN_1_16); tmp51 = FMUL(tmp40, OSIN_1_16) - FMUL(tmp41, OCOS_1_16); tmp52 = FMUL(tmp42, OCOS_5_16) + FMUL(tmp43, OSIN_5_16); tmp53 = FMUL(tmp42, OSIN_5_16) - FMUL(tmp43, OCOS_5_16); DstPtr = SrcDst; *DstPtr = unfixh(tmp20 + tmp50); DstPtr += Stride; *DstPtr = unfixh(tmp21 + tmp53); DstPtr += Stride; *DstPtr = unfixh(tmp22 + tmp52); DstPtr += Stride; *DstPtr = unfixh(tmp23 + tmp51); DstPtr += Stride; *DstPtr = unfixh(tmp23 - tmp51); DstPtr += Stride; *DstPtr = unfixh(tmp22 - tmp52); DstPtr += Stride; *DstPtr = unfixh(tmp21 - tmp53); DstPtr += Stride; *DstPtr = unfixh(tmp20 - tmp50); } /** * Performs in-place IDCT of vector of 8 elements (used to access rows in shared memory). 
* * @param V8 [IN/OUT] - Pointer to the first two elements of vector * @return None */ __device__ void gpujpeg_idct_gpu_kernel_inplace(uint32_t* V8) { int in0, in1, in2, in3, in4, in5, in6, in7; int tmp10, tmp11, tmp12, tmp13; int tmp20, tmp21, tmp22, tmp23; int tmp30, tmp31; int tmp40, tmp41, tmp42, tmp43; int tmp50, tmp51, tmp52, tmp53; PackedInteger sh0, sh1, sh2, sh3; sh0.hInt = V8[0]; sh1.hInt = V8[1]; sh2.hInt = V8[2]; sh3.hInt = V8[3]; in0 = sh0.hShort1; in1 = sh0.hShort2; in2 = sh1.hShort1; in3 = sh1.hShort2; in4 = sh2.hShort1; in5 = sh2.hShort2; in6 = sh3.hShort1; in7 = sh3.hShort2; tmp10 = FMUL(in0 + in4, COS_1_4); tmp11 = FMUL(in0 - in4, COS_1_4); tmp12 = FMUL(in2, SIN_1_8) - FMUL(in6, COS_1_8); tmp13 = FMUL(in6, SIN_1_8) + FMUL(in2, COS_1_8); tmp20 = tmp10 + tmp13; tmp21 = tmp11 + tmp12; tmp22 = tmp11 - tmp12; tmp23 = tmp10 - tmp13; tmp30 = unfixo(FMUL(in3 + in5, COS_1_4)); tmp31 = unfixo(FMUL(in3 - in5, COS_1_4)); in1 <<= 2; in7 <<= 2; tmp40 = in1 + tmp30; tmp41 = in7 + tmp31; tmp42 = in1 - tmp30; tmp43 = in7 - tmp31; tmp50 = FMUL(tmp40, OCOS_1_16) + FMUL(tmp41, OSIN_1_16); tmp51 = FMUL(tmp40, OSIN_1_16) - FMUL(tmp41, OCOS_1_16); tmp52 = FMUL(tmp42, OCOS_5_16) + FMUL(tmp43, OSIN_5_16); tmp53 = FMUL(tmp42, OSIN_5_16) - FMUL(tmp43, OCOS_5_16); sh0.hShort1 = unfixh(tmp20 + tmp50); sh0.hShort2 = unfixh(tmp21 + tmp53); sh1.hShort1 = unfixh(tmp22 + tmp52); sh1.hShort2 = unfixh(tmp23 + tmp51); sh2.hShort1 = unfixh(tmp23 - tmp51); sh2.hShort2 = unfixh(tmp22 - tmp52); sh3.hShort1 = unfixh(tmp21 - tmp53); sh3.hShort2 = unfixh(tmp20 - tmp50); V8[0] = sh0.hInt; V8[1] = sh1.hInt; V8[2] = sh2.hInt; V8[3] = sh3.hInt; } /** * 1D 8point DCT, with optional level shift (must be premultiplied). * Based on based on Arai, Agui, and Nakajima's DCT algorithm. (Trans. IEICE E-71(11):1095) * Implementation inspired by Independent JPEG Group JPEG implementation, file jfdctflt.c, * but optimized for CUDA (cheap floating point MAD instructions). */ template <typename T> __device__ static inline void gpujpeg_dct_gpu(const T in0, const T in1, const T in2, const T in3, const T in4, const T in5, const T in6, const T in7, T & out0, T & out1, T & out2, T & out3, T & out4, T & out5, T & out6, T & out7, const float level_shift_8 = 0.0f) { const float diff0 = in0 + in7; const float diff1 = in1 + in6; const float diff2 = in2 + in5; const float diff3 = in3 + in4; const float diff4 = in3 - in4; const float diff5 = in2 - in5; const float diff6 = in1 - in6; const float diff7 = in0 - in7; const float even0 = diff0 + diff3; const float even1 = diff1 + diff2; const float even2 = diff1 - diff2; const float even3 = diff0 - diff3; const float even_diff = even2 + even3; const float odd0 = diff4 + diff5; const float odd1 = diff5 + diff6; const float odd2 = diff6 + diff7; const float odd_diff5 = (odd0 - odd2) * 0.382683433f; const float odd_diff4 = 1.306562965f * odd2 + odd_diff5; const float odd_diff3 = diff7 - odd1 * 0.707106781f; const float odd_diff2 = 0.541196100f * odd0 + odd_diff5; const float odd_diff1 = diff7 + odd1 * 0.707106781f; out0 = even0 + even1 + level_shift_8; out1 = odd_diff1 + odd_diff4; out2 = even3 + even_diff * 0.707106781f; out3 = odd_diff3 - odd_diff2; out4 = even0 - even1; out5 = odd_diff3 + odd_diff2; out6 = even3 - even_diff * 0.707106781f; out7 = odd_diff1 - odd_diff4; } /** Constant memory copy of transposed quantization table pre-divided with DCT output weights. 
*/ __constant__ float gpujpeg_dct_gpu_quantization_table_const[64]; /** * Performs 8x8 block-wise Forward Discrete Cosine Transform of the given * image plane and outputs result to the array of coefficients. Short implementation. * This kernel is designed to process image by blocks of blocks8x8 that * utilize maximum warps capacity, assuming that it is enough of 8 threads * per block8x8. * * @param source [IN] - Source coefficients * @param source_stride [IN] - Stride of source * @param output [OUT] - Source coefficients * @param output_stride [OUT] - Stride of source * @param quant_table [IN] - Quantization table, pre-divided with DCT output scales * @return None */ template <int WARP_COUNT> __global__ void gpujpeg_dct_gpu_kernel(int block_count_x, int block_count_y, uint8_t* source, const unsigned int source_stride, int16_t* output, int output_stride, const float * const quant_table) { // each warp processes 4 8x8 blocks (horizontally neighboring) const int block_idx_x = threadIdx.x >> 3; const int block_idx_y = threadIdx.y; // offset of threadblocks's blocks in the image (along both axes) const int block_offset_x = blockIdx.x * 4; const int block_offset_y = blockIdx.y * WARP_COUNT; // stop if thread's block is out of image const bool processing = block_offset_x + block_idx_x < block_count_x && block_offset_y + block_idx_y < block_count_y; if(!processing) { return; } // index of row/column processed by this thread within its 8x8 block const int dct_idx = threadIdx.x & 7; // data type of transformed coefficients typedef float dct_t; // dimensions of shared buffer (compile time constants) enum { // 4 8x8 blocks, padded to odd number of 4byte banks SHARED_STRIDE = ((32 * sizeof(dct_t)) | 4) / sizeof(dct_t), // number of shared buffer items needed for 1 warp SHARED_SIZE_WARP = SHARED_STRIDE * 8, // total number of items in shared buffer SHARED_SIZE_TOTAL = SHARED_SIZE_WARP * WARP_COUNT }; // buffer for transpositions of all blocks __shared__ dct_t s_transposition_all[SHARED_SIZE_TOTAL]; // pointer to begin of transposition buffer for thread's block dct_t * const s_transposition = s_transposition_all + block_idx_y * SHARED_SIZE_WARP + block_idx_x * 8; // input coefficients pointer (each thread loads 1 column of 8 coefficients from its 8x8 block) const int in_x = (block_offset_x + block_idx_x) * 8 + dct_idx; const int in_y = (block_offset_y + block_idx_y) * 8; const int in_offset = in_x + in_y * source_stride; const uint8_t * in = source + in_offset; // load all 8 coefficients of thread's column, but do NOT apply level shift now - will be applied as part of DCT dct_t src0 = *in; in += source_stride; dct_t src1 = *in; in += source_stride; dct_t src2 = *in; in += source_stride; dct_t src3 = *in; in += source_stride; dct_t src4 = *in; in += source_stride; dct_t src5 = *in; in += source_stride; dct_t src6 = *in; in += source_stride; dct_t src7 = *in; // destination pointer into shared transpose buffer (each thread saves one column) dct_t * const s_dest = s_transposition + dct_idx; // transform the column (vertically) and save it into the transpose buffer gpujpeg_dct_gpu(src0, src1, src2, src3, src4, src5, src6, src7, s_dest[SHARED_STRIDE * 0], s_dest[SHARED_STRIDE * 1], s_dest[SHARED_STRIDE * 2], s_dest[SHARED_STRIDE * 3], s_dest[SHARED_STRIDE * 4], s_dest[SHARED_STRIDE * 5], s_dest[SHARED_STRIDE * 6], s_dest[SHARED_STRIDE * 7], -1024.0f // = 8 * -128 ... 
level shift sum for all 8 coefficients ); // read coefficients back - each thread reads one row (no need to sync - only threads within same warp work on each block) // ... and transform the row horizontally volatile dct_t * s_src = s_transposition + SHARED_STRIDE * dct_idx; dct_t dct0, dct1, dct2, dct3, dct4, dct5, dct6, dct7; gpujpeg_dct_gpu(s_src[0], s_src[1], s_src[2], s_src[3], s_src[4], s_src[5], s_src[6], s_src[7], dct0, dct1, dct2, dct3, dct4, dct5, dct6, dct7); // apply quantization to the row of coefficients (quantization table is actually transposed in global memory for coalesced memory acceses) #if __CUDA_ARCH__ < 200 const float * const quantization_row = gpujpeg_dct_gpu_quantization_table_const + dct_idx; // Quantization table in constant memory for CCs < 2.0 #else const float * const quantization_row = quant_table + dct_idx; // Cached global memory reads for CCs >= 2.0 #endif const int out0 = rintf(dct0 * quantization_row[0 * 8]); const int out1 = rintf(dct1 * quantization_row[1 * 8]); const int out2 = rintf(dct2 * quantization_row[2 * 8]); const int out3 = rintf(dct3 * quantization_row[3 * 8]); const int out4 = rintf(dct4 * quantization_row[4 * 8]); const int out5 = rintf(dct5 * quantization_row[5 * 8]); const int out6 = rintf(dct6 * quantization_row[6 * 8]); const int out7 = rintf(dct7 * quantization_row[7 * 8]); // using single write, save output row packed into 16 bytes const int out_x = (block_offset_x + block_idx_x) * 64; // 64 coefficients per one transformed and quantized block const int out_y = (block_offset_y + block_idx_y) * output_stride; ((uint4*)(output + out_x + out_y))[dct_idx] = make_uint4( (out0 & 0xFFFF) + (out1 << 16), (out2 & 0xFFFF) + (out3 << 16), (out4 & 0xFFFF) + (out5 << 16), // ... & 0xFFFF keeps only lower 16 bits - useful for negative numbers, which have 1s in upper bits (out6 & 0xFFFF) + (out7 << 16) ); } /** Quantization table */ __constant__ uint16_t gpujpeg_idct_gpu_quantization_table[64]; /** * Performs 8x8 block-wise Inverse Discrete Cosine Transform of the given * image plane and outputs result to the array of coefficients. Short implementation. * This kernel is designed to process image by blocks of blocks8x8 that * utilize maximum warps capacity, assuming that it is enough of 8 threads * per block8x8. 
* * @param source [IN] - Source coefficients * @param source_stride [IN] - Stride of source * @param output [OUT] - Source coefficients * @param output_stride [OUT] - Stride of source * @param table [IN] - Quantization table * @return None */ __global__ void gpujpeg_idct_gpu_kernel(int block_count_x, int block_count_y, int16_t* source, int source_stride, uint8_t* output, int output_stride, uint16_t* quantization_table) { // For pre-fermi GPUs, quantization table in constant memory is faster #if __CUDA_ARCH__ < 200 quantization_table = gpujpeg_idct_gpu_quantization_table; #endif // Shared data __shared__ int16_t block[GPUJPEG_DCT_THREAD_BLOCK_HEIGHT * GPUJPEG_DCT_THREAD_BLOCK_STRIDE]; // Block position int block_x = IMAD(blockIdx.x, GPUJPEG_DCT_BLOCK_COUNT_X, threadIdx.y); int block_y = IMAD(blockIdx.y, GPUJPEG_DCT_BLOCK_COUNT_Y, threadIdx.z); // Thread position in thread block int thread_x = IMAD(threadIdx.y, GPUJPEG_BLOCK_SIZE, threadIdx.x); int thread_y = IMUL(threadIdx.z, GPUJPEG_BLOCK_SIZE); int thread_x_permutated = (thread_x & 0xFFFFFFE0) | (((thread_x << 1) | ((thread_x >> 4) & 0x1)) & 0x1F); // Determine position into shared buffer int16_t* block_ptr = block + IMAD(thread_y, GPUJPEG_DCT_THREAD_BLOCK_STRIDE, thread_x); // Determine position in source buffer and apply it int source_x = IMAD(block_x, GPUJPEG_BLOCK_SQUARED_SIZE, threadIdx.x * 2); int source_y = block_y; source += IMAD(source_y, source_stride, source_x); // Load data to shared memory, only half of threads in each cell performs data moving (each thread moves 2 shorts) if ( block_x < block_count_x && block_y < block_count_y ) { int16_t* block_load_ptr = block_ptr + threadIdx.x; // Shortcut for "IMAD(..., threadIdx.x * 2)" if ( threadIdx.x < (GPUJPEG_BLOCK_SIZE / 2) ) { #pragma unroll for(int i = 0; i < GPUJPEG_BLOCK_SIZE; i++) ((int*)block_load_ptr)[i * (GPUJPEG_DCT_THREAD_BLOCK_STRIDE / 2)] = ((int*)source)[i * (GPUJPEG_BLOCK_SIZE / 2)]; } } __syncthreads(); // Quantization for(int i = 0; i < GPUJPEG_BLOCK_SIZE; i++) { int16_t quantization = quantization_table[i * GPUJPEG_BLOCK_SIZE + threadIdx.x]; int16_t coefficient = block_ptr[i * GPUJPEG_DCT_THREAD_BLOCK_STRIDE]; coefficient = coefficient * quantization; block_ptr[i * GPUJPEG_DCT_THREAD_BLOCK_STRIDE] = coefficient; } // Perform IDCT __syncthreads(); gpujpeg_idct_gpu_kernel_inplace(block + thread_y * GPUJPEG_DCT_THREAD_BLOCK_STRIDE + thread_x_permutated, GPUJPEG_DCT_THREAD_BLOCK_STRIDE); __syncthreads(); gpujpeg_idct_gpu_kernel_inplace((uint32_t*)(block + (thread_y + threadIdx.x) * GPUJPEG_DCT_THREAD_BLOCK_STRIDE + threadIdx.y * GPUJPEG_BLOCK_SIZE)); __syncthreads(); // Determine position in output buffer and apply it int output_x = IMAD(blockIdx.x, GPUJPEG_DCT_THREAD_BLOCK_WIDTH, thread_x); int output_y = IMAD(blockIdx.y, GPUJPEG_DCT_THREAD_BLOCK_HEIGHT, thread_y); output += IMAD(output_y, output_stride, output_x); // For pre-fermi GPUs, storing to global memory by 4 bytes is faster #if __CUDA_ARCH__ < 200 __shared__ uint8_t block_byte[GPUJPEG_DCT_THREAD_BLOCK_HEIGHT * GPUJPEG_DCT_THREAD_BLOCK_STRIDE]; uint8_t* block_byte_ptr = block_byte + IMAD(thread_y, GPUJPEG_DCT_THREAD_BLOCK_STRIDE, thread_x); uint8_t* __output = output; int __output_stride = output_stride; output = block_byte_ptr; output_stride = GPUJPEG_DCT_THREAD_BLOCK_STRIDE; #endif // Store data to global memory if ( block_x < block_count_x && block_y < block_count_y ) { #pragma unroll for(int i = 0; i < GPUJPEG_BLOCK_SIZE; i++) { int16_t coefficient = block_ptr[i * GPUJPEG_DCT_THREAD_BLOCK_STRIDE]; 
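// Undo the -128 level shift used by JPEG and clamp the reconstructed sample
// to the 8-bit [0, 255] range before storing it as uint8_t.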
coefficient += 128; if ( coefficient > 255 ) coefficient = 255; if ( coefficient < 0 ) coefficient = 0; output[i * output_stride] = (uint8_t)coefficient; } // For pre-fermi GPUs, storing to global memory by 4 bytes is faster #if __CUDA_ARCH__ < 200 if ( threadIdx.x % 4 == 0 ) { #pragma unroll for(int i = 0; i < GPUJPEG_BLOCK_SIZE; i++) ((uint32_t*)__output)[i * (__output_stride / 4)] = ((uint32_t*)block_byte_ptr)[i * (GPUJPEG_DCT_THREAD_BLOCK_STRIDE / 4)]; } #endif } } /** Documented at declaration */ int gpujpeg_dct_gpu(struct gpujpeg_encoder* encoder) { // Get coder struct gpujpeg_coder* coder = &encoder->coder; // Encode each component for ( int comp = 0; comp < coder->param_image.comp_count; comp++ ) { // Get component struct gpujpeg_component* component = &coder->component[comp]; // Get quantization table enum gpujpeg_component_type type = (comp == 0) ? GPUJPEG_COMPONENT_LUMINANCE : GPUJPEG_COMPONENT_CHROMINANCE; const float* const d_quantization_table = encoder->table_quantization[type].d_table_forward; // copy the quantization table into constant memory for devices of CC < 2.0 if( encoder->coder.cuda_cc_major < 2 ) { cudaMemcpyToSymbol( gpujpeg_dct_gpu_quantization_table_const, d_quantization_table, sizeof(gpujpeg_dct_gpu_quantization_table_const), 0, cudaMemcpyDeviceToDevice ); cudaThreadSynchronize(); gpujpeg_cuda_check_error("Quantization table memcpy failed", return -1); } int roi_width = component->data_width; int roi_height = component->data_height; assert(GPUJPEG_BLOCK_SIZE == 8); int block_count_x = roi_width / GPUJPEG_BLOCK_SIZE; int block_count_y = roi_height / GPUJPEG_BLOCK_SIZE; enum { WARP_COUNT = 4 }; // Perform block-wise DCT processing dim3 dct_grid( gpujpeg_div_and_round_up(block_count_x, 4), gpujpeg_div_and_round_up(block_count_y, WARP_COUNT), 1 ); dim3 dct_block(4 * 8, WARP_COUNT); gpujpeg_dct_gpu_kernel<WARP_COUNT><<<dct_grid, dct_block>>>( block_count_x, block_count_y, component->d_data, component->data_width, component->d_data_quantized, component->data_width * GPUJPEG_BLOCK_SIZE, d_quantization_table ); cudaThreadSynchronize(); gpujpeg_cuda_check_error("Forward DCT failed", return -1); } return 0; } /** Documented at declaration */ int gpujpeg_idct_gpu(struct gpujpeg_decoder* decoder) { // Get coder struct gpujpeg_coder* coder = &decoder->coder; // Encode each component for ( int comp = 0; comp < coder->param_image.comp_count; comp++ ) { // Get component struct gpujpeg_component* component = &coder->component[comp]; // Determine table type enum gpujpeg_component_type type = (comp == 0) ? 
GPUJPEG_COMPONENT_LUMINANCE : GPUJPEG_COMPONENT_CHROMINANCE; int roi_width = component->data_width; int roi_height = component->data_height; assert(GPUJPEG_BLOCK_SIZE == 8); int block_count_x = roi_width / GPUJPEG_BLOCK_SIZE; int block_count_y = roi_height / GPUJPEG_BLOCK_SIZE; // Get quantization table uint16_t* d_quantization_table = decoder->table_quantization[type].d_table; // Copy quantization table to constant memory cudaMemcpyToSymbol( gpujpeg_idct_gpu_quantization_table, d_quantization_table, 64 * sizeof(uint16_t), 0, cudaMemcpyDeviceToDevice ); gpujpeg_cuda_check_error("Copy IDCT quantization table to constant memory", return -1); // Perform block-wise IDCT processing dim3 dct_grid( gpujpeg_div_and_round_up(block_count_x, GPUJPEG_DCT_BLOCK_COUNT_X), gpujpeg_div_and_round_up(block_count_y, GPUJPEG_DCT_BLOCK_COUNT_Y), 1 ); dim3 dct_block( GPUJPEG_BLOCK_SIZE, GPUJPEG_DCT_BLOCK_COUNT_X, GPUJPEG_DCT_BLOCK_COUNT_Y ); gpujpeg_idct_gpu_kernel<<<dct_grid, dct_block>>>( block_count_x, block_count_y, component->d_data_quantized, component->data_width * GPUJPEG_BLOCK_SIZE, component->d_data, component->data_width, d_quantization_table ); cudaThreadSynchronize(); gpujpeg_cuda_check_error("Inverse Integer DCT failed", return -1); } return 0; }
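/*
 * Sketch (hypothetical helpers, not part of GPUJPEG): the forward-DCT kernel
 * above stores each quantized row with a single uint4 write, packing two
 * signed 16-bit coefficients per 32-bit word. Masking the first value with
 * 0xFFFF keeps only its low 16 bits, so the sign-extension bits of a negative
 * coefficient do not spill into the upper half-word occupied by the second
 * value.
 */
static inline uint32_t pack_two_int16(int first, int second)
{
    // Low 16 bits hold 'first', upper 16 bits hold 'second'.
    return (static_cast<uint32_t>(first) & 0xFFFFu) | (static_cast<uint32_t>(second) << 16);
}

// Host-side check of the round trip:
static inline int16_t unpack_low16(uint32_t packed)  { return static_cast<int16_t>(packed & 0xFFFFu); }
static inline int16_t unpack_high16(uint32_t packed) { return static_cast<int16_t>(packed >> 16); }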
#include <algorithm> namespace faiss { namespace gpu { // // IVF list metadata aupdate // // Updates the device-size array of list start pointers for codes and indices __global__ void runUpdateListPointers( Tensor<int, 1, true> listIds, Tensor<int, 1, true> newListLength, Tensor<void*, 1, true> newCodePointers, Tensor<void*, 1, true> newIndexPointers, int* listLengths, void** listCodes, void** listIndices) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < listIds.getSize(0)) { int listId = listIds[i]; listLengths[listId] = newListLength[i]; listCodes[listId] = newCodePointers[i]; listIndices[listId] = newIndexPointers[i]; } } void runUpdateListPointers( Tensor<int, 1, true>& listIds, Tensor<int, 1, true>& newListLength, Tensor<void*, 1, true>& newCodePointers, Tensor<void*, 1, true>& newIndexPointers, thrust::device_vector<int>& listLengths, thrust::device_vector<void*>& listCodes, thrust::device_vector<void*>& listIndices, cudaStream_t stream) { int numThreads = std::min(listIds.getSize(0), getMaxThreadsCurrentDevice()); int numBlocks = utils::divUp(listIds.getSize(0), numThreads); dim3 grid(numBlocks); dim3 block(numThreads); runUpdateListPointers<<<grid, block, 0, stream>>>( listIds, newListLength, newCodePointers, newIndexPointers, listLengths.data().get(), listCodes.data().get(), listIndices.data().get()); CUDA_TEST_ERROR(); } // Appends new indices for vectors being added to the IVF indices lists __global__ void ivfIndicesAppend( Tensor<int, 1, true> listIds, Tensor<int, 1, true> listOffset, Tensor<Index::idx_t, 1, true> indices, IndicesOptions opt, void** listIndices) { int vec = blockIdx.x * blockDim.x + threadIdx.x; if (vec >= listIds.getSize(0)) { return; } int listId = listIds[vec]; int offset = listOffset[vec]; // Add vector could be invalid (contains NaNs etc) if (listId == -1 || offset == -1) { return; } auto index = indices[vec]; if (opt == INDICES_32_BIT) { // FIXME: there could be overflow here, but where should we check this? 
((int*)listIndices[listId])[offset] = (int)index; } else if (opt == INDICES_64_BIT) { ((Index::idx_t*)listIndices[listId])[offset] = index; } } void runIVFIndicesAppend( Tensor<int, 1, true>& listIds, Tensor<int, 1, true>& listOffset, Tensor<Index::idx_t, 1, true>& indices, IndicesOptions opt, thrust::device_vector<void*>& listIndices, cudaStream_t stream) { FAISS_ASSERT( opt == INDICES_CPU || opt == INDICES_IVF || opt == INDICES_32_BIT || opt == INDICES_64_BIT); if (opt != INDICES_CPU && opt != INDICES_IVF) { int num = listIds.getSize(0); int threads = std::min(num, getMaxThreadsCurrentDevice()); int blocks = utils::divUp(num, threads); ivfIndicesAppend<<<blocks, threads, 0, stream>>>( listIds, listOffset, indices, opt, listIndices.data().get()); CUDA_TEST_ERROR(); } } // // IVF non-interleaved append // template <typename Codec> __global__ void ivfFlatAppend( Tensor<int, 1, true> listIds, Tensor<int, 1, true> listOffset, Tensor<float, 2, true> vecs, void** listData, Codec codec) { int vec = blockIdx.x; int listId = listIds[vec]; int offset = listOffset[vec]; // Add vector could be invalid (contains NaNs etc) if (listId == -1 || offset == -1) { return; } // Handle whole encoding (only thread 0 will handle the remainder) int limit = utils::divDown(vecs.getSize(1), Codec::kDimPerIter); int i; for (i = threadIdx.x; i < limit; i += blockDim.x) { int realDim = i * Codec::kDimPerIter; float toEncode[Codec::kDimPerIter]; #pragma unroll for (int j = 0; j < Codec::kDimPerIter; ++j) { toEncode[j] = vecs[vec][realDim + j]; } codec.encode(listData[listId], offset, i, toEncode); } // Handle remainder with a single thread, if any if (Codec::kDimPerIter > 1) { int realDim = limit * Codec::kDimPerIter; // Was there any remainder? if (realDim < vecs.getSize(1)) { if (threadIdx.x == 0) { float toEncode[Codec::kDimPerIter]; // How many remaining that we need to encode int remaining = vecs.getSize(1) - realDim; #pragma unroll for (int j = 0; j < Codec::kDimPerIter; ++j) { int idx = realDim + j; toEncode[j] = idx < vecs.getSize(1) ? 
vecs[vec][idx] : 0.0f; } codec.encodePartial( listData[listId], offset, i, remaining, toEncode); } } } } void runIVFFlatAppend( Tensor<int, 1, true>& listIds, Tensor<int, 1, true>& listOffset, Tensor<float, 2, true>& vecs, GpuScalarQuantizer* scalarQ, thrust::device_vector<void*>& listData, cudaStream_t stream) { int dim = vecs.getSize(1); int maxThreads = getMaxThreadsCurrentDevice(); // Each block will handle appending a single vector #define RUN_APPEND \ do { \ dim3 grid(vecs.getSize(0)); \ dim3 block(std::min(dim / codec.kDimPerIter, maxThreads)); \ ivfFlatAppend<<<grid, block, 0, stream>>>( \ listIds, listOffset, vecs, listData.data().get(), codec); \ } while (0) if (!scalarQ) { CodecFloat codec(dim * sizeof(float)); RUN_APPEND; } else { switch (scalarQ->qtype) { case ScalarQuantizer::QuantizerType::QT_8bit: { Codec<ScalarQuantizer::QuantizerType::QT_8bit, 1> codec( scalarQ->code_size, scalarQ->gpuTrained.data(), scalarQ->gpuTrained.data() + dim); RUN_APPEND; } break; case ScalarQuantizer::QuantizerType::QT_8bit_uniform: { Codec<ScalarQuantizer::QuantizerType::QT_8bit_uniform, 1> codec( scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]); RUN_APPEND; } break; case ScalarQuantizer::QuantizerType::QT_fp16: { Codec<ScalarQuantizer::QuantizerType::QT_fp16, 1> codec( scalarQ->code_size); RUN_APPEND; } break; case ScalarQuantizer::QuantizerType::QT_8bit_direct: { Codec<ScalarQuantizer::QuantizerType::QT_8bit_direct, 1> codec( scalarQ->code_size); RUN_APPEND; } break; case ScalarQuantizer::QuantizerType::QT_4bit: { Codec<ScalarQuantizer::QuantizerType::QT_4bit, 1> codec( scalarQ->code_size, scalarQ->gpuTrained.data(), scalarQ->gpuTrained.data() + dim); RUN_APPEND; } break; case ScalarQuantizer::QuantizerType::QT_4bit_uniform: { Codec<ScalarQuantizer::QuantizerType::QT_4bit_uniform, 1> codec( scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]); RUN_APPEND; } break; default: // unimplemented, should be handled at a higher level FAISS_ASSERT(false); } } CUDA_TEST_ERROR(); #undef RUN_APPEND } __global__ void ivfpqAppend( Tensor<int, 1, true> listIds, Tensor<int, 1, true> listOffset, Tensor<uint8_t, 2, true> encodings, void** listCodes) { int encodingToAdd = blockIdx.x * blockDim.x + threadIdx.x; if (encodingToAdd >= listIds.getSize(0)) { return; } int listId = listIds[encodingToAdd]; int vectorNumInList = listOffset[encodingToAdd]; // Add vector could be invalid (contains NaNs etc) if (listId == -1 || vectorNumInList == -1) { return; } auto encoding = encodings[encodingToAdd]; // Layout with dimensions innermost uint8_t* codeStart = ((uint8_t*)listCodes[listId]) + vectorNumInList * encodings.getSize(1); // FIXME: stride with threads instead of single thread for (int i = 0; i < encodings.getSize(1); ++i) { codeStart[i] = encoding[i]; } } void runIVFPQAppend( Tensor<int, 1, true>& listIds, Tensor<int, 1, true>& listOffset, Tensor<uint8_t, 2, true>& encodings, thrust::device_vector<void*>& listCodes, cudaStream_t stream) { int threads = std::min(listIds.getSize(0), getMaxThreadsCurrentDevice()); int blocks = utils::divUp(listIds.getSize(0), threads); ivfpqAppend<<<threads, blocks, 0, stream>>>( listIds, listOffset, encodings, listCodes.data().get()); CUDA_TEST_ERROR(); } // // IVF interleaved append // // Scalar encode a vector to Codec::EncodeT word-sized values; previously this // was fused into a single append kernel but was refactored so that Flat, SQ and // PQ all use the same arbitrary bitwidth append kernel template <typename Codec> __global__ void sqEncode( Tensor<float, 
2, true> vecs, Tensor<typename Codec::EncodeT, 2, true> encodedVecs, Codec codec) { int vec = blockIdx.x; for (int d = threadIdx.x; d < vecs.getSize(1); d += blockDim.x) { encodedVecs[vec][d] = codec.encodeNew(d, vecs[vec][d]); } } template <typename Codec> void runSQEncode( Tensor<float, 2, true>& vecs, Tensor<typename Codec::EncodeT, 2, true>& encodedVecs, Codec codec, cudaStream_t stream) { int threads = std::min(vecs.getSize(1), getMaxThreadsCurrentDevice()); int blocks = vecs.getSize(0); sqEncode<<<blocks, threads, 0, stream>>>(vecs, encodedVecs, codec); } // Handles appending encoded vectors (one per EncodeT word) packed into // EncodeBits interleaved by 32 vectors. // This is used by Flat, SQ and PQ code for the interleaved format. template <typename EncodeT, int EncodeBits> __global__ void ivfInterleavedAppend( // the IDs (offset in listData) of the unique lists // being added to Tensor<int, 1, true> uniqueLists, // For each of the list IDs in uniqueLists, the start // offset in vectorsByUniqueList for the vectors that // we are adding to that list Tensor<int, 1, true> uniqueListVectorStart, // IDs in vecs of the vectors being added to each // unique list // The vectors (offset in vecs) added to // uniqueLists[i] is: // {vBUL[uLVS[i]], ..., vBUL[uLVS[i+1] - 1]} Tensor<int, 1, true> vectorsByUniqueList, // For each of the list IDs in uniqueLists, the start // offset (by vector) within that list where we begin // appending Tensor<int, 1, true> uniqueListStartOffset, // The EncodeT-sized encoded vectors Tensor<EncodeT, 2, true> encodedVecs, // The set of addresses for each of the lists void** listData) { // FIXME: some issue with getLaneId() and CUDA 10.1 and P4 GPUs? int laneId = threadIdx.x % kWarpSize; int warpId = threadIdx.x / kWarpSize; int warpsPerBlock = blockDim.x / kWarpSize; // Each block is dedicated to a separate list int listId = uniqueLists[blockIdx.x]; // The vecs we add to the list are at indices [vBUL[vecIdStart], // vBUL[vecIdEnd]) int vecIdStart = uniqueListVectorStart[blockIdx.x]; // uLVS is explicitly terminated for us with one more than the number of // blocks that we have int vecIdEnd = uniqueListVectorStart[blockIdx.x + 1]; // How many vectors we are adding to this list int numVecsAdding = vecIdEnd - vecIdStart; // The first vector we are updating within the list auto listVecStart = uniqueListStartOffset[blockIdx.x]; // These are the actual vec IDs that we are adding (in vecs) int* listVecIds = vectorsByUniqueList[vecIdStart].data(); // All data is written by groups of 32 vectors (to mirror the warp). // listVecStart could be in the middle of this, or even, for sub-byte // encodings, mean that the first vector piece of data that we need to // update is in the high part of a byte. // // WarpPackedBits allows writing of arbitrary bit packed data in groups of // 32, but we ensure that it only operates on the group of 32 vectors. In // order to do this we need to actually start updating vectors at the next // lower multiple of 32 from listVecStart. 
int alignedListVecStart = utils::roundDown(listVecStart, 32); // Each block of 32 vectors fully encodes into this many bytes constexpr int bytesPerVectorBlockDim = EncodeBits * 32 / 8; constexpr int wordsPerVectorBlockDim = bytesPerVectorBlockDim / sizeof(EncodeT); int wordsPerVectorBlock = wordsPerVectorBlockDim * encodedVecs.getSize(1); EncodeT* listStart = ((EncodeT*)listData[listId]); // Each warp within the block handles a different chunk of 32 int warpVec = alignedListVecStart + warpId * 32; // The warp data starts here EncodeT* warpData = listStart + (warpVec / 32) * wordsPerVectorBlock; // Each warp encodes a single block for (; warpVec < listVecStart + numVecsAdding; // but block stride warpVec += blockDim.x, // the new warp data base strides by how many vector blocks we are // encoding, which is one per warp warpData += warpsPerBlock * wordsPerVectorBlock) { // This lane is adding this vec (if it is within bounds) int laneVec = warpVec + laneId; // Which vector does this correspond to in the set of vectors that we // need to add? If this is < 0, then this particular thread is not // encoding / appending a new vector int laneVecAdding = laneVec - listVecStart; // We are actually adding a new vector if this is within range bool valid = laneVecAdding >= 0 && laneVecAdding < numVecsAdding; // Now, which actual vector in vecs is this? int vecId = valid ? listVecIds[laneVecAdding] : 0; // Each warp that has some vector data available needs to write out the // vector components EncodeT* data = warpData; for (int dim = 0; dim < encodedVecs.getSize(1); ++dim) { EncodeT enc = valid ? encodedVecs[vecId][dim] : (EncodeT)0; WarpPackedBits<EncodeT, EncodeBits>::write( laneId, enc, valid, data); data += wordsPerVectorBlockDim; } } } void runIVFFlatInterleavedAppend( Tensor<int, 1, true>& listIds, Tensor<int, 1, true>& listOffset, Tensor<int, 1, true>& uniqueLists, Tensor<int, 1, true>& vectorsByUniqueList, Tensor<int, 1, true>& uniqueListVectorStart, Tensor<int, 1, true>& uniqueListStartOffset, Tensor<float, 2, true>& vecs, GpuScalarQuantizer* scalarQ, thrust::device_vector<void*>& listData, GpuResources* res, cudaStream_t stream) { int dim = vecs.getSize(1); #define RUN_APPEND(ENCODE_T, ENCODE_BITS, DATA) \ do { \ dim3 grid(uniqueLists.getSize(0)); \ dim3 block(128); \ ivfInterleavedAppend<ENCODE_T, ENCODE_BITS> \ <<<grid, block, 0, stream>>>( \ uniqueLists, \ uniqueListVectorStart, \ vectorsByUniqueList, \ uniqueListStartOffset, \ DATA, \ listData.data().get()); \ } while (0) if (!scalarQ) { // No encoding is needed, we just append directly RUN_APPEND(float, 32, vecs); return; } // only implemented at the moment FAISS_ASSERT(scalarQ->bits == 16 || scalarQ->bits <= 8); if (scalarQ->bits == 16) { FAISS_ASSERT(scalarQ->qtype == ScalarQuantizer::QuantizerType::QT_fp16); using CodecT = Codec<ScalarQuantizer::QuantizerType::QT_fp16, 1>; CodecT codec(scalarQ->qtype); DeviceTensor<half, 2, true> encodedVecs( res, makeTempAlloc(AllocType::Other, stream), {vecs.getSize(0), vecs.getSize(1)}); runSQEncode(vecs, encodedVecs, codec, stream); RUN_APPEND(CodecT::EncodeT, CodecT::kEncodeBits, encodedVecs); } else if (scalarQ->bits <= 8) { DeviceTensor<uint8_t, 2, true> encodedVecs( res, makeTempAlloc(AllocType::Other, stream), {vecs.getSize(0), vecs.getSize(1)}); switch (scalarQ->qtype) { case ScalarQuantizer::QuantizerType::QT_8bit: { using CodecT = Codec<ScalarQuantizer::QuantizerType::QT_8bit, 1>; CodecT codec( scalarQ->code_size, scalarQ->gpuTrained.data(), scalarQ->gpuTrained.data() + dim); 
runSQEncode(vecs, encodedVecs, codec, stream); RUN_APPEND(CodecT::EncodeT, CodecT::kEncodeBits, encodedVecs); } break; case ScalarQuantizer::QuantizerType::QT_8bit_uniform: { using CodecT = Codec<ScalarQuantizer::QuantizerType::QT_8bit_uniform, 1>; CodecT codec( scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]); runSQEncode(vecs, encodedVecs, codec, stream); RUN_APPEND(CodecT::EncodeT, CodecT::kEncodeBits, encodedVecs); } break; case ScalarQuantizer::QuantizerType::QT_8bit_direct: { using CodecT = Codec<ScalarQuantizer::QuantizerType::QT_8bit_direct, 1>; CodecT codec(scalarQ->code_size); runSQEncode(vecs, encodedVecs, codec, stream); RUN_APPEND(CodecT::EncodeT, CodecT::kEncodeBits, encodedVecs); } break; case ScalarQuantizer::QuantizerType::QT_6bit: { using CodecT = Codec<ScalarQuantizer::QuantizerType::QT_6bit, 1>; CodecT codec( scalarQ->code_size, scalarQ->gpuTrained.data(), scalarQ->gpuTrained.data() + dim); runSQEncode(vecs, encodedVecs, codec, stream); RUN_APPEND(CodecT::EncodeT, CodecT::kEncodeBits, encodedVecs); } break; case ScalarQuantizer::QuantizerType::QT_4bit: { using CodecT = Codec<ScalarQuantizer::QuantizerType::QT_4bit, 1>; CodecT codec( scalarQ->code_size, scalarQ->gpuTrained.data(), scalarQ->gpuTrained.data() + dim); runSQEncode(vecs, encodedVecs, codec, stream); RUN_APPEND(CodecT::EncodeT, CodecT::kEncodeBits, encodedVecs); } break; case ScalarQuantizer::QuantizerType::QT_4bit_uniform: { using CodecT = Codec<ScalarQuantizer::QuantizerType::QT_4bit_uniform, 1>; CodecT codec( scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]); runSQEncode(vecs, encodedVecs, codec, stream); RUN_APPEND(CodecT::EncodeT, CodecT::kEncodeBits, encodedVecs); } break; default: // unimplemented, should be handled at a higher level FAISS_ASSERT(false); } } #undef RUN_APPEND CUDA_TEST_ERROR(); } void runIVFPQInterleavedAppend( Tensor<int, 1, true>& listIds, Tensor<int, 1, true>& listOffset, Tensor<int, 1, true>& uniqueLists, Tensor<int, 1, true>& vectorsByUniqueList, Tensor<int, 1, true>& uniqueListVectorStart, Tensor<int, 1, true>& uniqueListStartOffset, int bitsPerCode, Tensor<uint8_t, 2, true>& encodings, thrust::device_vector<void*>& listCodes, cudaStream_t stream) { // limitation for now FAISS_ASSERT(bitsPerCode <= 8); #define RUN_APPEND(ENCODE_T, ENCODE_BITS) \ do { \ dim3 grid(uniqueLists.getSize(0)); \ dim3 block(128); \ \ ivfInterleavedAppend<ENCODE_T, ENCODE_BITS> \ <<<grid, block, 0, stream>>>( \ uniqueLists, \ uniqueListVectorStart, \ vectorsByUniqueList, \ uniqueListStartOffset, \ encodings, \ listCodes.data().get()); \ } while (0) switch (bitsPerCode) { case 4: { RUN_APPEND(uint8_t, 4); break; } case 5: { RUN_APPEND(uint8_t, 5); break; } case 6: { RUN_APPEND(uint8_t, 6); break; } case 8: { RUN_APPEND(uint8_t, 8); break; } default: // unhandled FAISS_ASSERT(false); break; } #undef RUN_APPEND CUDA_TEST_ERROR(); } } // namespace gpu } // namespace faiss
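// ---------------------------------------------------------------------------
// Host-side sketch (not part of faiss): the interleaved-layout arithmetic used
// by ivfInterleavedAppend above. Vectors are packed in groups of 32; for a
// given EncodeT/EncodeBits pair, one dimension of one 32-vector group spans
// bytesPerVectorBlockDim bytes, and a whole group spans wordsPerVectorBlock
// words. The helper name and the uint8_t/8-bit example values are assumptions;
// sub-byte packing within a word is handled by WarpPackedBits in the real code.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cstdint>

// Word offset (in EncodeT units) of dimension `dim` of vector `vec` inside an
// interleaved list, mirroring the indexing in ivfInterleavedAppend.
template <typename EncodeT, int EncodeBits>
size_t interleavedWordOffset(int vec, int dim, int numDims)
{
    constexpr int bytesPerVectorBlockDim = EncodeBits * 32 / 8;
    constexpr int wordsPerVectorBlockDim = bytesPerVectorBlockDim / sizeof(EncodeT);
    const int wordsPerVectorBlock = wordsPerVectorBlockDim * numDims;

    const int group = vec / 32;     // which 32-vector group
    const int lane  = vec % 32;     // position within the group (one per warp lane)

    // Full-word encodings (8/16/32 bits) place lane `lane` of dimension `dim`
    // at a fixed word within the group; for sub-byte encodings this gives the
    // containing word and WarpPackedBits selects the bit field inside it.
    return (size_t)group * wordsPerVectorBlock
         + (size_t)dim * wordsPerVectorBlockDim
         + (size_t)lane * EncodeBits / (8 * sizeof(EncodeT));
}

int main()
{
    // 8-bit scalar-quantized codes, 4 dimensions per vector (illustrative).
    printf("vec 37, dim 2 -> word %zu\n",
           interleavedWordOffset<uint8_t, 8>(37, 2, 4));
    return 0;
}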
* \file * AgentRadixSortDownsweep implements a stateful abstraction of CUDA thread blocks for participating in device-wide radix sort downsweep . */ #pragma once #include <stdint.h> #include "../thread/thread_load.cuh" #include "../block/block_load.cuh" #include "../block/block_store.cuh" #include "../block/block_radix_rank.cuh" #include "../block/block_exchange.cuh" #include "../util_type.cuh" #include "../iterator/cache_modified_input_iterator.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /****************************************************************************** * Tuning policy types ******************************************************************************/ /** * Radix ranking algorithm */ enum RadixRankAlgorithm { RADIX_RANK_BASIC, RADIX_RANK_MEMOIZE, RADIX_RANK_MATCH }; /** * Parameterizable tuning policy type for AgentRadixSortDownsweep */ template < int _BLOCK_THREADS, ///< Threads per thread block int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) BlockLoadAlgorithm _LOAD_ALGORITHM, ///< The BlockLoad algorithm to use CacheLoadModifier _LOAD_MODIFIER, ///< Cache load modifier for reading keys (and values) RadixRankAlgorithm _RANK_ALGORITHM, ///< The radix ranking algorithm to use BlockScanAlgorithm _SCAN_ALGORITHM, ///< The block scan algorithm to use int _RADIX_BITS> ///< The number of radix bits, i.e., log2(bins) struct AgentRadixSortDownsweepPolicy { enum { BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) RADIX_BITS = _RADIX_BITS, ///< The number of radix bits, i.e., log2(bins) }; static const BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; ///< The BlockLoad algorithm to use static const CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; ///< Cache load modifier for reading keys (and values) static const RadixRankAlgorithm RANK_ALGORITHM = _RANK_ALGORITHM; ///< The radix ranking algorithm to use static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; ///< The BlockScan algorithm to use }; /****************************************************************************** * Thread block abstractions ******************************************************************************/ /** * \brief AgentRadixSortDownsweep implements a stateful abstraction of CUDA thread blocks for participating in device-wide radix sort downsweep . 
*/ template < typename AgentRadixSortDownsweepPolicy, ///< Parameterized AgentRadixSortDownsweepPolicy tuning policy type bool IS_DESCENDING, ///< Whether or not the sorted-order is high-to-low typename KeyT, ///< KeyT type typename ValueT, ///< ValueT type typename OffsetT> ///< Signed integer type for global offsets struct AgentRadixSortDownsweep { //--------------------------------------------------------------------- // Type definitions and constants //--------------------------------------------------------------------- // Appropriate unsigned-bits representation of KeyT typedef typename Traits<KeyT>::UnsignedBits UnsignedBits; static const UnsignedBits LOWEST_KEY = Traits<KeyT>::LOWEST_KEY; static const UnsignedBits MAX_KEY = Traits<KeyT>::MAX_KEY; static const BlockLoadAlgorithm LOAD_ALGORITHM = AgentRadixSortDownsweepPolicy::LOAD_ALGORITHM; static const CacheLoadModifier LOAD_MODIFIER = AgentRadixSortDownsweepPolicy::LOAD_MODIFIER; static const RadixRankAlgorithm RANK_ALGORITHM = AgentRadixSortDownsweepPolicy::RANK_ALGORITHM; static const BlockScanAlgorithm SCAN_ALGORITHM = AgentRadixSortDownsweepPolicy::SCAN_ALGORITHM; enum { BLOCK_THREADS = AgentRadixSortDownsweepPolicy::BLOCK_THREADS, ITEMS_PER_THREAD = AgentRadixSortDownsweepPolicy::ITEMS_PER_THREAD, RADIX_BITS = AgentRadixSortDownsweepPolicy::RADIX_BITS, TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, RADIX_DIGITS = 1 << RADIX_BITS, KEYS_ONLY = Equals<ValueT, NullType>::VALUE, }; // Input iterator wrapper type (for applying cache modifier)s typedef CacheModifiedInputIterator<LOAD_MODIFIER, UnsignedBits, OffsetT> KeysItr; typedef CacheModifiedInputIterator<LOAD_MODIFIER, ValueT, OffsetT> ValuesItr; // Radix ranking type to use typedef typename If<(RANK_ALGORITHM == RADIX_RANK_BASIC), BlockRadixRank<BLOCK_THREADS, RADIX_BITS, IS_DESCENDING, false, SCAN_ALGORITHM>, typename If<(RANK_ALGORITHM == RADIX_RANK_MEMOIZE), BlockRadixRank<BLOCK_THREADS, RADIX_BITS, IS_DESCENDING, true, SCAN_ALGORITHM>, BlockRadixRankMatch<BLOCK_THREADS, RADIX_BITS, IS_DESCENDING, SCAN_ALGORITHM> >::Type >::Type BlockRadixRankT; enum { /// Number of bin-starting offsets tracked per thread BINS_TRACKED_PER_THREAD = BlockRadixRankT::BINS_TRACKED_PER_THREAD }; // BlockLoad type (keys) typedef BlockLoad< UnsignedBits, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM> BlockLoadKeysT; // BlockLoad type (values) typedef BlockLoad< ValueT, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM> BlockLoadValuesT; // Value exchange array type typedef ValueT ValueExchangeT[TILE_ITEMS]; /** * Shared memory storage layout */ union __align__(16) _TempStorage { typename BlockLoadKeysT::TempStorage load_keys; typename BlockLoadValuesT::TempStorage load_values; typename BlockRadixRankT::TempStorage radix_rank; struct { UnsignedBits exchange_keys[TILE_ITEMS]; OffsetT relative_bin_offsets[RADIX_DIGITS]; }; Uninitialized<ValueExchangeT> exchange_values; OffsetT exclusive_digit_prefix[RADIX_DIGITS]; }; /// Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; //--------------------------------------------------------------------- // Thread fields //--------------------------------------------------------------------- // Shared storage for this CTA _TempStorage &temp_storage; // Input and output device pointers KeysItr d_keys_in; ValuesItr d_values_in; UnsignedBits *d_keys_out; ValueT *d_values_out; // The global scatter base offset for each digit (valid in the first RADIX_DIGITS threads) OffsetT bin_offset[BINS_TRACKED_PER_THREAD]; // The 
least-significant bit position of the current digit to extract int current_bit; // Number of bits in current digit int num_bits; // Whether to short-cirucit int short_circuit; //--------------------------------------------------------------------- // Utility methods //--------------------------------------------------------------------- /** * Scatter ranked keys through shared memory, then to device-accessible memory */ template <bool FULL_TILE> __device__ __forceinline__ void ScatterKeys( UnsignedBits (&twiddled_keys)[ITEMS_PER_THREAD], OffsetT (&relative_bin_offsets)[ITEMS_PER_THREAD], int (&ranks)[ITEMS_PER_THREAD], OffsetT valid_items) { #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { temp_storage.exchange_keys[ranks[ITEM]] = twiddled_keys[ITEM]; } CTA_SYNC(); #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { UnsignedBits key = temp_storage.exchange_keys[threadIdx.x + (ITEM * BLOCK_THREADS)]; UnsignedBits digit = BFE(key, current_bit, num_bits); relative_bin_offsets[ITEM] = temp_storage.relative_bin_offsets[digit]; // Un-twiddle key = Traits<KeyT>::TwiddleOut(key); if (FULL_TILE || (static_cast<OffsetT>(threadIdx.x + (ITEM * BLOCK_THREADS)) < valid_items)) { d_keys_out[relative_bin_offsets[ITEM] + threadIdx.x + (ITEM * BLOCK_THREADS)] = key; } } } /** * Scatter ranked values through shared memory, then to device-accessible memory */ template <bool FULL_TILE> __device__ __forceinline__ void ScatterValues( ValueT (&values)[ITEMS_PER_THREAD], OffsetT (&relative_bin_offsets)[ITEMS_PER_THREAD], int (&ranks)[ITEMS_PER_THREAD], OffsetT valid_items) { CTA_SYNC(); ValueExchangeT &exchange_values = temp_storage.exchange_values.Alias(); #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { exchange_values[ranks[ITEM]] = values[ITEM]; } CTA_SYNC(); #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { ValueT value = exchange_values[threadIdx.x + (ITEM * BLOCK_THREADS)]; if (FULL_TILE || (static_cast<OffsetT>(threadIdx.x + (ITEM * BLOCK_THREADS)) < valid_items)) { d_values_out[relative_bin_offsets[ITEM] + threadIdx.x + (ITEM * BLOCK_THREADS)] = value; } } } /** * Load a tile of keys (specialized for full tile, any ranking algorithm) */ template <int _RANK_ALGORITHM> __device__ __forceinline__ void LoadKeys( UnsignedBits (&keys)[ITEMS_PER_THREAD], OffsetT block_offset, OffsetT valid_items, UnsignedBits oob_item, Int2Type<true> is_full_tile, Int2Type<_RANK_ALGORITHM> rank_algorithm) { BlockLoadKeysT(temp_storage.load_keys).Load( d_keys_in + block_offset, keys); CTA_SYNC(); } /** * Load a tile of keys (specialized for partial tile, any ranking algorithm) */ template <int _RANK_ALGORITHM> __device__ __forceinline__ void LoadKeys( UnsignedBits (&keys)[ITEMS_PER_THREAD], OffsetT block_offset, OffsetT valid_items, UnsignedBits oob_item, Int2Type<false> is_full_tile, Int2Type<_RANK_ALGORITHM> rank_algorithm) { // Register pressure work-around: moving valid_items through shfl prevents compiler // from reusing guards/addressing from prior guarded loads valid_items = ShuffleIndex(valid_items, 0, CUB_PTX_WARP_THREADS, 0xffffffff); BlockLoadKeysT(temp_storage.load_keys).Load( d_keys_in + block_offset, keys, valid_items, oob_item); CTA_SYNC(); } /** * Load a tile of keys (specialized for full tile, match ranking algorithm) */ __device__ __forceinline__ void LoadKeys( UnsignedBits (&keys)[ITEMS_PER_THREAD], OffsetT block_offset, OffsetT valid_items, UnsignedBits oob_item, Int2Type<true> is_full_tile, Int2Type<RADIX_RANK_MATCH> rank_algorithm) { 
LoadDirectWarpStriped(threadIdx.x, d_keys_in + block_offset, keys); } /** * Load a tile of keys (specialized for partial tile, match ranking algorithm) */ __device__ __forceinline__ void LoadKeys( UnsignedBits (&keys)[ITEMS_PER_THREAD], OffsetT block_offset, OffsetT valid_items, UnsignedBits oob_item, Int2Type<false> is_full_tile, Int2Type<RADIX_RANK_MATCH> rank_algorithm) { // Register pressure work-around: moving valid_items through shfl prevents compiler // from reusing guards/addressing from prior guarded loads valid_items = ShuffleIndex(valid_items, 0, CUB_PTX_WARP_THREADS, 0xffffffff); LoadDirectWarpStriped(threadIdx.x, d_keys_in + block_offset, keys, valid_items, oob_item); } /** * Load a tile of values (specialized for full tile, any ranking algorithm) */ template <int _RANK_ALGORITHM> __device__ __forceinline__ void LoadValues( ValueT (&values)[ITEMS_PER_THREAD], OffsetT block_offset, OffsetT valid_items, Int2Type<true> is_full_tile, Int2Type<_RANK_ALGORITHM> rank_algorithm) { BlockLoadValuesT(temp_storage.load_values).Load( d_values_in + block_offset, values); CTA_SYNC(); } /** * Load a tile of values (specialized for partial tile, any ranking algorithm) */ template <int _RANK_ALGORITHM> __device__ __forceinline__ void LoadValues( ValueT (&values)[ITEMS_PER_THREAD], OffsetT block_offset, OffsetT valid_items, Int2Type<false> is_full_tile, Int2Type<_RANK_ALGORITHM> rank_algorithm) { // Register pressure work-around: moving valid_items through shfl prevents compiler // from reusing guards/addressing from prior guarded loads valid_items = ShuffleIndex(valid_items, 0, CUB_PTX_WARP_THREADS, 0xffffffff); BlockLoadValuesT(temp_storage.load_values).Load( d_values_in + block_offset, values, valid_items); CTA_SYNC(); } /** * Load a tile of items (specialized for full tile, match ranking algorithm) */ __device__ __forceinline__ void LoadValues( ValueT (&values)[ITEMS_PER_THREAD], OffsetT block_offset, OffsetT valid_items, Int2Type<true> is_full_tile, Int2Type<RADIX_RANK_MATCH> rank_algorithm) { LoadDirectWarpStriped(threadIdx.x, d_values_in + block_offset, values); } /** * Load a tile of items (specialized for partial tile, match ranking algorithm) */ __device__ __forceinline__ void LoadValues( ValueT (&values)[ITEMS_PER_THREAD], OffsetT block_offset, OffsetT valid_items, Int2Type<false> is_full_tile, Int2Type<RADIX_RANK_MATCH> rank_algorithm) { // Register pressure work-around: moving valid_items through shfl prevents compiler // from reusing guards/addressing from prior guarded loads valid_items = ShuffleIndex(valid_items, 0, CUB_PTX_WARP_THREADS, 0xffffffff); LoadDirectWarpStriped(threadIdx.x, d_values_in + block_offset, values, valid_items); } /** * Truck along associated values */ template <bool FULL_TILE> __device__ __forceinline__ void GatherScatterValues( OffsetT (&relative_bin_offsets)[ITEMS_PER_THREAD], int (&ranks)[ITEMS_PER_THREAD], OffsetT block_offset, OffsetT valid_items, Int2Type<false> /*is_keys_only*/) { ValueT values[ITEMS_PER_THREAD]; CTA_SYNC(); LoadValues( values, block_offset, valid_items, Int2Type<FULL_TILE>(), Int2Type<RANK_ALGORITHM>()); ScatterValues<FULL_TILE>( values, relative_bin_offsets, ranks, valid_items); } /** * Truck along associated values (specialized for key-only sorting) */ template <bool FULL_TILE> __device__ __forceinline__ void GatherScatterValues( OffsetT (&/*relative_bin_offsets*/)[ITEMS_PER_THREAD], int (&/*ranks*/)[ITEMS_PER_THREAD], OffsetT /*block_offset*/, OffsetT /*valid_items*/, Int2Type<true> /*is_keys_only*/) {} /** * Process tile */ 
template <bool FULL_TILE> __device__ __forceinline__ void ProcessTile( OffsetT block_offset, const OffsetT &valid_items = TILE_ITEMS) { UnsignedBits keys[ITEMS_PER_THREAD]; int ranks[ITEMS_PER_THREAD]; OffsetT relative_bin_offsets[ITEMS_PER_THREAD]; // Assign default (min/max) value to all keys UnsignedBits default_key = (IS_DESCENDING) ? LOWEST_KEY : MAX_KEY; // Load tile of keys LoadKeys( keys, block_offset, valid_items, default_key, Int2Type<FULL_TILE>(), Int2Type<RANK_ALGORITHM>()); // Twiddle key bits if necessary #pragma unroll for (int KEY = 0; KEY < ITEMS_PER_THREAD; KEY++) { keys[KEY] = Traits<KeyT>::TwiddleIn(keys[KEY]); } // Rank the twiddled keys int exclusive_digit_prefix[BINS_TRACKED_PER_THREAD]; BlockRadixRankT(temp_storage.radix_rank).RankKeys( keys, ranks, current_bit, num_bits, exclusive_digit_prefix); CTA_SYNC(); // Share exclusive digit prefix #pragma unroll for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track) { int bin_idx = (threadIdx.x * BINS_TRACKED_PER_THREAD) + track; if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS)) { // Store exclusive prefix temp_storage.exclusive_digit_prefix[bin_idx] = exclusive_digit_prefix[track]; } } CTA_SYNC(); // Get inclusive digit prefix int inclusive_digit_prefix[BINS_TRACKED_PER_THREAD]; #pragma unroll for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track) { int bin_idx = (threadIdx.x * BINS_TRACKED_PER_THREAD) + track; if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS)) { if (IS_DESCENDING) { // Get inclusive digit prefix from exclusive prefix (higher bins come first) inclusive_digit_prefix[track] = (bin_idx == 0) ? (BLOCK_THREADS * ITEMS_PER_THREAD) : temp_storage.exclusive_digit_prefix[bin_idx - 1]; } else { // Get inclusive digit prefix from exclusive prefix (lower bins come first) inclusive_digit_prefix[track] = (bin_idx == RADIX_DIGITS - 1) ? 
(BLOCK_THREADS * ITEMS_PER_THREAD) : temp_storage.exclusive_digit_prefix[bin_idx + 1]; } } } CTA_SYNC(); // Update global scatter base offsets for each digit #pragma unroll for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track) { int bin_idx = (threadIdx.x * BINS_TRACKED_PER_THREAD) + track; if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS)) { bin_offset[track] -= exclusive_digit_prefix[track]; temp_storage.relative_bin_offsets[bin_idx] = bin_offset[track]; bin_offset[track] += inclusive_digit_prefix[track]; } } CTA_SYNC(); // Scatter keys ScatterKeys<FULL_TILE>(keys, relative_bin_offsets, ranks, valid_items); // Gather/scatter values GatherScatterValues<FULL_TILE>(relative_bin_offsets , ranks, block_offset, valid_items, Int2Type<KEYS_ONLY>()); } //--------------------------------------------------------------------- // Copy shortcut //--------------------------------------------------------------------- /** * Copy tiles within the range of input */ template < typename InputIteratorT, typename T> __device__ __forceinline__ void Copy( InputIteratorT d_in, T *d_out, OffsetT block_offset, OffsetT block_end) { // Simply copy the input while (block_offset + TILE_ITEMS <= block_end) { T items[ITEMS_PER_THREAD]; LoadDirectStriped<BLOCK_THREADS>(threadIdx.x, d_in + block_offset, items); CTA_SYNC(); StoreDirectStriped<BLOCK_THREADS>(threadIdx.x, d_out + block_offset, items); block_offset += TILE_ITEMS; } // Clean up last partial tile with guarded-I/O if (block_offset < block_end) { OffsetT valid_items = block_end - block_offset; T items[ITEMS_PER_THREAD]; LoadDirectStriped<BLOCK_THREADS>(threadIdx.x, d_in + block_offset, items, valid_items); CTA_SYNC(); StoreDirectStriped<BLOCK_THREADS>(threadIdx.x, d_out + block_offset, items, valid_items); } } /** * Copy tiles within the range of input (specialized for NullType) */ template <typename InputIteratorT> __device__ __forceinline__ void Copy( InputIteratorT /*d_in*/, NullType * /*d_out*/, OffsetT /*block_offset*/, OffsetT /*block_end*/) {} //--------------------------------------------------------------------- // Interface //--------------------------------------------------------------------- /** * Constructor */ __device__ __forceinline__ AgentRadixSortDownsweep( TempStorage &temp_storage, OffsetT (&bin_offset)[BINS_TRACKED_PER_THREAD], OffsetT num_items, const KeyT *d_keys_in, KeyT *d_keys_out, const ValueT *d_values_in, ValueT *d_values_out, int current_bit, int num_bits) : temp_storage(temp_storage.Alias()), d_keys_in(reinterpret_cast<const UnsignedBits*>(d_keys_in)), d_values_in(d_values_in), d_keys_out(reinterpret_cast<UnsignedBits*>(d_keys_out)), d_values_out(d_values_out), current_bit(current_bit), num_bits(num_bits), short_circuit(1) { #pragma unroll for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track) { this->bin_offset[track] = bin_offset[track]; int bin_idx = (threadIdx.x * BINS_TRACKED_PER_THREAD) + track; if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS)) { // Short circuit if the histogram has only bin counts of only zeros or problem-size short_circuit = short_circuit && ((bin_offset[track] == 0) || (bin_offset[track] == num_items)); } } short_circuit = CTA_SYNC_AND(short_circuit); } /** * Constructor */ __device__ __forceinline__ AgentRadixSortDownsweep( TempStorage &temp_storage, OffsetT num_items, OffsetT *d_spine, const KeyT *d_keys_in, KeyT *d_keys_out, const ValueT *d_values_in, ValueT *d_values_out, int current_bit, int num_bits) : temp_storage(temp_storage.Alias()), 
d_keys_in(reinterpret_cast<const UnsignedBits*>(d_keys_in)), d_values_in(d_values_in), d_keys_out(reinterpret_cast<UnsignedBits*>(d_keys_out)), d_values_out(d_values_out), current_bit(current_bit), num_bits(num_bits), short_circuit(1) { #pragma unroll for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track) { int bin_idx = (threadIdx.x * BINS_TRACKED_PER_THREAD) + track; // Load digit bin offsets (each of the first RADIX_DIGITS threads will load an offset for that digit) if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS)) { if (IS_DESCENDING) bin_idx = RADIX_DIGITS - bin_idx - 1; // Short circuit if the first block's histogram has only bin counts of only zeros or problem-size OffsetT first_block_bin_offset = d_spine[gridDim.x * bin_idx]; short_circuit = short_circuit && ((first_block_bin_offset == 0) || (first_block_bin_offset == num_items)); // Load my block's bin offset for my bin bin_offset[track] = d_spine[(gridDim.x * bin_idx) + blockIdx.x]; } } short_circuit = CTA_SYNC_AND(short_circuit); } /** * Distribute keys from a segment of input tiles. */ __device__ __forceinline__ void ProcessRegion( OffsetT block_offset, OffsetT block_end) { if (short_circuit) { // Copy keys Copy(d_keys_in, d_keys_out, block_offset, block_end); // Copy values Copy(d_values_in, d_values_out, block_offset, block_end); } else { // Process full tiles of tile_items #pragma unroll 1 while (block_offset + TILE_ITEMS <= block_end) { ProcessTile<true>(block_offset); block_offset += TILE_ITEMS; CTA_SYNC(); } // Clean up last partial tile with guarded-I/O if (block_offset < block_end) { ProcessTile<false>(block_offset, block_end - block_offset); } } } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
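// ---------------------------------------------------------------------------
// Host-side sketch (not part of CUB): the per-digit bookkeeping performed at
// the end of ProcessTile() above. Starting from each digit's global base
// offset and the tile's exclusive/inclusive digit prefixes, the relative bin
// offset added to every ranked key is bin_offset - exclusive_prefix, and the
// running bin_offset then advances past this tile's keys. All values below
// are made up for illustration.
// ---------------------------------------------------------------------------
#include <cstdio>

int main()
{
    const int RADIX_DIGITS = 4;

    // Global scatter base per digit (as loaded from the spine), assumed.
    int bin_offset[RADIX_DIGITS] = {0, 100, 250, 400};

    // This tile's digit histogram expressed as exclusive/inclusive prefixes, assumed.
    int exclusive[RADIX_DIGITS]  = {0, 30, 70, 90};
    int inclusive[RADIX_DIGITS]  = {30, 70, 90, 128};   // tile of 128 keys

    for (int d = 0; d < RADIX_DIGITS; d++)
    {
        // Same three statements as in ProcessTile():
        bin_offset[d] -= exclusive[d];              // rebase so a key's rank maps directly
        int relative_bin_offset = bin_offset[d];    // added to each ranked key's position
        bin_offset[d] += inclusive[d];              // advance past this tile's digit count

        printf("digit %d: relative offset %d, next base %d\n",
               d, relative_bin_offset, bin_offset[d]);
    }
    return 0;
}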
#include "LearningFilter.h" #include "Matrix.h" #include "LinearFilter.h" #include <iostream> #include <fstream> #include <cmath> using namespace std; #include "ErrorCode.h" // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块的尺寸。 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // Kernel 函数:_matToImg(类型转换) // 将 MatrixCuda 数据写入到 ImageCuda 类之中 static __global__ void _matToImg( ImageCuda img, // 目标 ImageCuda 类 MatrixCuda matrixImg // 目标 MatrixCuda 类 ); // Kernel 函数:_imgToMat(类型转换) // 将 MatrixCuda 数据写入到 ImageCuda 类之中 static __global__ void _imgToMat( ImageCuda img, // 目标 ImageCuda 类 MatrixCuda matrixImg // 目标 MatrixCuda 类 ); // Kernel 函数:_subtract(Matrix 减法) // 矩阵 img、p 之间做减法,结果放在矩阵 out 中 static __global__ void _subtract( MatrixCuda img, // 被减数 MatrixCuda p, // 减数 MatrixCuda out // 结果 ); // Kernel 函数:_add(Matrix 加法) // 矩阵 img、p 之间做加法,结果放在矩阵 out 中 static __global__ void _add( MatrixCuda img, // 被加数 MatrixCuda p, // 加数 MatrixCuda out // 结果 ); // Kernel 函数:_multiply(Matrix 乘法) // 矩阵 img、p 之间做乘法,结果放在矩阵 out 中 static __global__ void _multiply( MatrixCuda img, // 被乘数 MatrixCuda p, // 乘数 MatrixCuda out // 结果 ); // Kernel 函数:_divide(Matrix 除法) // 矩阵 img、p 之间做除法,结果放在矩阵 out 中 static __global__ void _divide( MatrixCuda img, // 被除数 MatrixCuda p, // 除数 MatrixCuda out // 结果 ); // Kernel 函数:_addWeighted // 矩阵 img 的每个元素增加 eps 大小的值 static __global__ void _addWeighted( MatrixCuda img, // 目标矩阵 float eps, // 调整值 MatrixCuda out // 输出值 ); // Kernel 函数:_linearFilter(归一化滤波) // 根据率比半径进行滤波计算 static __global__ void _linearFilter( MatrixCuda in, // 输入图像 MatrixCuda out, // 输出图像 float ra // 滤波半径 ); // Kernel 函数:_beTop(像素置白) // 通过操作使图片的像素全都变成255 static __global__ void _beTop( MatrixCuda img // 进行操作的图片 ); static __global__ void _imgToMat(ImageCuda img, MatrixCuda matrixImg){ int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= img.imgMeta.width || r >= img.imgMeta.height) return; int inidx = r * img.pitchBytes + c; int indexM = r * matrixImg.pitchWords + c; matrixImg.matMeta.matData[indexM] = (float)(img.imgMeta.imgData)[inidx]; for(int i = 1; i <= 3; i++){ if (++r >= img.imgMeta.height) return; inidx = r * img.pitchBytes + c; indexM = r * matrixImg.pitchWords + c; matrixImg.matMeta.matData[indexM] = (float)(img.imgMeta.imgData)[inidx]; } } // 从矩阵写回到图像 static __global__ void _matToImg(ImageCuda img, MatrixCuda matrixImg){ int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= img.imgMeta.width || r >= img.imgMeta.height) return; int inidx = r * img.pitchBytes + c; int indexM = r * matrixImg.pitchWords + c; (img.imgMeta.imgData)[inidx] = (int)matrixImg.matMeta.matData[indexM]; for(int i = 1; i <= 3; i++){ if (++r >= img.imgMeta.height) return; inidx = r * img.pitchBytes + c; indexM = r * matrixImg.pitchWords + c; (img.imgMeta.imgData)[inidx] = (int)matrixImg.matMeta.matData[indexM]; } } static __global__ void _subtract(MatrixCuda img, MatrixCuda p, MatrixCuda out){ // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并行度 // 缩减的策略,令一个线程处理 4 个输出像素,这四个像素位于统一列的相邻 4 行 // 上,因此,对于 r 需要进行乘 4 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= img.matMeta.width || r >= img.matMeta.height) return; // 计算第一个输入坐标点对应的图像数据数组下标。 int inidx1 = r * img.pitchWords + c; // 
计算第一个输入坐标点对应的图像数据数组下标。 int inidx2 = r * p.pitchWords + c; // 计算第一个输出坐标点对应的图像数据数组下标。 int outidx = r * out.pitchWords + c; // 关于float的大小判断需要重新写,写成减法形式 out.matMeta.matData[outidx] = img.matMeta.matData[inidx1] - p.matMeta.matData[inidx2]; if(out.matMeta.matData[outidx] < 0){ out.matMeta.matData[outidx] = 0; } // 处理剩下的三个像素点。 for (int i =0; i < 3; i++) { // 这三个像素点,每个像素点都在前一个的下一行,而 x 分量保持不变。因 // 此,需要检查这个像素点是否越界。检查只针对 y 分量即可,x 分量在各 // 点之间没有变化,故不用检查。 if (++r >= out.matMeta.height) return; inidx1 = r * img.pitchWords + c; inidx2 = r * p.pitchWords + c; outidx = r * out.pitchWords + c; out.matMeta.matData[outidx] = img.matMeta.matData[inidx1] - p.matMeta.matData[inidx2]; if(out.matMeta.matData[outidx] < 0){ out.matMeta.matData[outidx] = 0; } } } static __global__ void _add(MatrixCuda img, MatrixCuda p, MatrixCuda out){ // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并行度 // 缩减的策略,令一个线程处理 4 个输出像素,这四个像素位于统一列的相邻 4 行 // 上,因此,对于 r 需要进行乘 4 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= img.matMeta.width || r >= img.matMeta.height) return; // 计算第一个输入坐标点对应的图像数据数组下标。 int inidx1 = r * img.pitchWords + c; // 计算第一个输入坐标点对应的图像数据数组下标。 int inidx2 = r * p.pitchWords + c; // 计算第一个输出坐标点对应的图像数据数组下标。 int outidx = r * out.pitchWords + c; // 关于float的大小判断需要重新写,写成减法形式 out.matMeta.matData[outidx] = img.matMeta.matData[inidx1] + p.matMeta.matData[inidx2]; if(out.matMeta.matData[outidx] > 255){ out.matMeta.matData[outidx] = 255; } // 处理剩下的三个像素点。 for (int i =0; i < 3; i++) { // 这三个像素点,每个像素点都在前一个的下一行,而 x 分量保持不变。因 // 此,需要检查这个像素点是否越界。检查只针对 y 分量即可,x 分量在各 // 点之间没有变化,故不用检查。 if (++r >= out.matMeta.height) return; inidx1 = r * img.pitchWords + c; inidx2 = r * p.pitchWords + c; outidx = r * out.pitchWords + c; out.matMeta.matData[outidx] = img.matMeta.matData[inidx1] + p.matMeta.matData[inidx2]; if(out.matMeta.matData[outidx] > 255){ out.matMeta.matData[outidx] = 255; } } } static __global__ void _multiply(MatrixCuda img, MatrixCuda p, MatrixCuda out){ // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并行度 // 缩减的策略,令一个线程处理 4 个输出像素,这四个像素位于统一列的相邻 4 行 // 上,因此,对于 r 需要进行乘 4 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= img.matMeta.width || r >= img.matMeta.height) return; // 计算第一个输入坐标点对应的图像数据数组下标。 int inidx1 = r * img.pitchWords + c; // 计算第一个输入坐标点对应的图像数据数组下标。 int inidx2 = r * p.pitchWords + c; // 计算第一个输出坐标点对应的图像数据数组下标。 int outidx = r * out.pitchWords + c; float ratio1, ratio2, ratioO; ratio1 = img.matMeta.matData[inidx1] / 255; ratio2 = p.matMeta.matData[inidx2] / 255; // 关于float的大小判断需要重新写,写成减法形式 ratioO = ratio1 * ratio2; out.matMeta.matData[outidx] = 255 * ratioO; // 处理剩下的三个像素点。 for (int i =0; i < 3; i++) { // 这三个像素点,每个像素点都在前一个的下一行,而 x 分量保持不变。因 // 此,需要检查这个像素点是否越界。检查只针对 y 分量即可,x 分量在各 // 点之间没有变化,故不用检查。 if (++r >= out.matMeta.height) return; inidx1 = r * img.pitchWords + c; inidx2 = r * p.pitchWords + c; outidx = r * out.pitchWords + c; ratio1 = img.matMeta.matData[inidx1] / 255; ratio2 = p.matMeta.matData[inidx2] / 255; ratioO = ratio1 * ratio2; out.matMeta.matData[outidx] = 255 * ratioO; } } static __global__ void _divide(MatrixCuda img, MatrixCuda p, MatrixCuda out){ // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并行度 // 缩减的策略,令一个线程处理 4 个输出像素,这四个像素位于统一列的相邻 4 行 // 上,因此,对于 r 
需要进行乘 4 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= img.matMeta.width || r >= img.matMeta.height) return; // 计算第一个输入坐标点对应的图像数据数组下标。 int inidx1 = r * img.pitchWords + c; // 计算第一个输入坐标点对应的图像数据数组下标。 int inidx2 = r * p.pitchWords + c; // 计算第一个输出坐标点对应的图像数据数组下标。 int outidx = r * out.pitchWords + c; float ratio1, ratio2, ratioO; ratio1 = img.matMeta.matData[inidx1] / 255; ratio2 = p.matMeta.matData[inidx2] / 255; // 关于float的大小判断需要重新写,写成减法形式 if(ratio2 != 0.0) ratioO = ratio1 * ratio2; else ratioO = 0.0; out.matMeta.matData[outidx] = 255 * ratioO; // 处理剩下的三个像素点。 for (int i =0; i < 3; i++) { // 这三个像素点,每个像素点都在前一个的下一行,而 x 分量保持不变。因 // 此,需要检查这个像素点是否越界。检查只针对 y 分量即可,x 分量在各 // 点之间没有变化,故不用检查。 if (++r >= out.matMeta.height) return; inidx1 = r * img.pitchWords + c; inidx2 = r * p.pitchWords + c; outidx = r * out.pitchWords + c; ratio1 = img.matMeta.matData[inidx1] / 255; ratio2 = p.matMeta.matData[inidx2] / 255; if(ratio2 != 0.0) ratioO = ratio1 * ratio2; else ratioO = 0.0; out.matMeta.matData[outidx] = 255 * ratioO; } } static __global__ void _addWeighted(MatrixCuda img, float eps, MatrixCuda out){ // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并行度 // 缩减的策略,令一个线程处理 4 个输出像素,这四个像素位于统一列的相邻 4 行 // 上,因此,对于 r 需要进行乘 4 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= img.matMeta.width || r >= img.matMeta.height) return; // 计算第一个输入坐标点对应的图像数据数组下标。 int inidx1 = r * img.pitchWords + c; // 计算第一个输出坐标点对应的图像数据数组下标。 int outidx = r * out.pitchWords + c; out.matMeta.matData[outidx] = out.matMeta.matData[inidx1] + eps; if(out.matMeta.matData[outidx] > 255) out.matMeta.matData[outidx] = 255; // 处理剩下的三个像素点。 for (int i =0; i < 3; i++) { // 这三个像素点,每个像素点都在前一个的下一行,而 x 分量保持不变。因 // 此,需要检查这个像素点是否越界。检查只针对 y 分量即可,x 分量在各 // 点之间没有变化,故不用检查。 if (++r >= out.matMeta.height) return; inidx1 = r * img.pitchWords + c; outidx = r * out.pitchWords + c; out.matMeta.matData[outidx] = out.matMeta.matData[inidx1] + eps; if(out.matMeta.matData[outidx] > 255) out.matMeta.matData[outidx] = 255; } } static __global__ void _linearFilter(MatrixCuda in, MatrixCuda out, float ra){ // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并行度 // 缩减的策略,令一个线程处理 4 个输出像素,这四个像素位于统一列的相邻 4 行 // 上,因此,对于 r 需要进行乘 4 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= in.matMeta.width || r >= in.matMeta.height) return; // 计算第一个输入坐标点对应的图像数据数组下标。 //int inidx1 = r * in.pitchWords + c; // 计算第一个输出坐标点对应的图像数据数组下标。 int outidx = r * out.pitchWords + c; int zhouweiSum = 0; int zhouweizuobiao = 0; int count = 0; int cNow = 0, rNow = 0; int cEnd = c + ra / 2, rEnd = r + ra / 2; if(ra / 2 >= c){ cNow = 0; } else{ cNow = c - ra / 2; } if(ra / 2 >= r){ rNow = 0; } else{ rNow = r - ra / 2; } count = (cEnd - cNow + 1) * (rEnd - rNow + 1); for(int i = cNow; i != cEnd; i++){ for( int j=rNow; j != rEnd ;j++ ){ zhouweizuobiao = j * in.pitchWords + i; zhouweiSum += in.matMeta.matData[zhouweizuobiao]; } } out.matMeta.matData[outidx] = zhouweiSum / count; // 处理剩下的三个像素点。 for (int i =0; i < 3; i++) { // 这三个像素点,每个像素点都在前一个的下一行,而 x 分量保持不变。因 // 此,需要检查这个像素点是否越界。检查只针对 y 分量即可,x 分量在各 // 点之间没有变化,故不用检查。 // 算法可以改进点:把上一次的计算结果应用上 if (++r >= out.matMeta.height) return; 
//inidx1 = r * in.pitchWords + c; outidx = r * out.pitchWords + c; zhouweiSum = 0; zhouweizuobiao = 0; count = 0; if(ra / 2 >= c){ cNow = 0; } else{ cNow = c - ra / 2; } if(ra / 2 >= r){ rNow = 0; } else{ rNow = r - ra / 2; } count = (cEnd - cNow + 1) * (rEnd - rNow + 1); for(int i = cNow; i != cEnd; i++){ for( int j=rNow; j != rEnd ;j++ ){ zhouweizuobiao = j * in.pitchWords + i; zhouweiSum += in.matMeta.matData[zhouweizuobiao]; } } out.matMeta.matData[outidx] = zhouweiSum / count; } } static __global__ void _beTop(MatrixCuda img){ // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并行度 // 缩减的策略,令一个线程处理 4 个输出像素,这四个像素位于统一列的相邻 4 行 // 上,因此,对于 r 需要进行乘 4 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= img.matMeta.width || r >= img.matMeta.height) return; // 计算第一个输入坐标点对应的图像数据数组下标。 int inidx1 = r * img.pitchWords + c; // 颜色致白 img.matMeta.matData[inidx1] = 255; // 处理剩下的三个像素点。 for (int i =0; i < 3; i++) { // 这三个像素点,每个像素点都在前一个的下一行,而 x 分量保持不变。因 // 此,需要检查这个像素点是否越界。检查只针对 y 分量即可,x 分量在各 // 点之间没有变化,故不用检查。 if (++r >= img.matMeta.height) return; inidx1 = r * img.pitchWords + c; img.matMeta.matData[inidx1] = 255; } } __host__ int LearningFilter::learningFilter(Image *inimg1, Image *inimg2, Image *outimg) { // 检查输入图像是否为 NULL,如果为 NULL 直接报错返回。 if (inimg1 == NULL || inimg2==NULL || outimg==NULL) return NULL_POINTER; // 这一段代码进行图像的预处理工作。图像的预处理主要完成在 Device 内存上为 // 输入和输出图像准备内存空间,以便盛放数据。 int errcode; // 局部变量,错误码 // 将输入图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(inimg1); if (errcode != NO_ERROR) return errcode; errcode = ImageBasicOp::copyToCurrentDevice(inimg2); if (errcode != NO_ERROR) return errcode; // 将输出图像拷贝入 Device 内存。 errcode = ImageBasicOp::copyToCurrentDevice(outimg); if (errcode != NO_ERROR) { // 如果输出图像无数据(故上面的拷贝函数会失败),则会创建一个和输入图 // 像的 ROI 子图像尺寸相同的图像。 errcode = ImageBasicOp::makeAtCurrentDevice( outimg, inimg1->roiX2 - inimg1->roiX1, inimg1->roiY2 - inimg1->roiY1); // 如果创建图像也操作失败,则说明操作彻底失败,报错退出。 if (errcode != NO_ERROR) return errcode; } // 提取输入图像的 ROI 子图像。 ImageCuda insubimgCud1; errcode = ImageBasicOp::roiSubImage(inimg1, &insubimgCud1); if (errcode != NO_ERROR) return errcode; ImageCuda insubimgCud2; errcode = ImageBasicOp::roiSubImage(inimg2, &insubimgCud2); if (errcode != NO_ERROR) return errcode; // 提取输出图像的 ROI 子图像。 ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) return errcode; // 根据子图像的大小对长,宽进行调整,选择长度小的长,宽进行子图像的统一 if (insubimgCud1.imgMeta.width > outsubimgCud.imgMeta.width) insubimgCud1.imgMeta.width = outsubimgCud.imgMeta.width; else outsubimgCud.imgMeta.width = insubimgCud1.imgMeta.width; if (insubimgCud1.imgMeta.height > outsubimgCud.imgMeta.height){ insubimgCud1.imgMeta.height = outsubimgCud.imgMeta.height; insubimgCud2.imgMeta.height = outsubimgCud.imgMeta.height; } else outsubimgCud.imgMeta.height = insubimgCud1.imgMeta.height; // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) / (blocksize.y * 4); int width = insubimgCud1.imgMeta.width; int height = insubimgCud1.imgMeta.height; // Matrix *matrixImg1, *matrixImg2, *matrixOut; // 特征值矩阵 // 创建特征值 matrix 指针 errcode = MatrixBasicOp::newMatrix(&matrixImg1); if (errcode != NO_ERROR) return CUDA_ERROR; errcode = 
MatrixBasicOp::newMatrix(&matrixImg2); if (errcode != NO_ERROR) return CUDA_ERROR; errcode = MatrixBasicOp::newMatrix(&matrixOut); if (errcode != NO_ERROR) return CUDA_ERROR; // 在设备端申请 matrix 空间 errcode = MatrixBasicOp::makeAtCurrentDevice(matrixImg1, width, height); if (errcode != NO_ERROR) return CUDA_ERROR; errcode = MatrixBasicOp::makeAtCurrentDevice(matrixImg2, width, height); if (errcode != NO_ERROR) return CUDA_ERROR; errcode = MatrixBasicOp::makeAtCurrentDevice(matrixOut, width, height); if (errcode != NO_ERROR) return CUDA_ERROR; // 创建 MatrixCuda 指针 MatrixCuda *matrixImg1cuda, *matrixImg2cuda, *matrixOutcuda; // 特征值 // 设备端矩阵 // 通过预定义的宏将 Matrix 指针转化为 MatrixCuda 类型的指针 matrixImg1cuda = MATRIX_CUDA(matrixImg1); matrixImg2cuda = MATRIX_CUDA(matrixImg2); matrixOutcuda = MATRIX_CUDA(matrixOut); // 矩阵赋值 _imgToMat<<<gridsize, blocksize>>>(insubimgCud1, *matrixImg1cuda); _imgToMat<<<gridsize, blocksize>>>(insubimgCud2, *matrixImg2cuda); _imgToMat<<<gridsize, blocksize>>>(outsubimgCud, *matrixOutcuda); // 所有变量声明: Matrix *tBI, *tBp, *Ip, *tBIp, *mean_I, *mean_p, *mean_mean_Ip, *mean_Ip, *cov_Ip, *tBII, *mean_II, *mean_I_mean_I, *var_I, *a, *b, *var_Ieps, *var_Ieps2, *tt, *mean_a, *mean_b, *tBa, *tBb, *ttt, *N; //MatrixCuda *tBICuda, *tBpCuda, *IpCuda, *tBIpCuda, *mean_ICuda, *mean_pCuda, *mean_mean_IpCuda, *mean_IpCuda, *cov_IpCuda, //*tBIICuda, *mean_IICuda, *mean_I_mean_ICuda, *var_ICuda, *aCuda, *bCuda, *var_IepsCuda, *var_Ieps2Cuda, *ttCuda, *mean_aCuda, //*mean_bCuda, *tBaCuda, *tBbCuda, *tttCuda; MatrixCuda *NCuda; // 初始化变量 errcode = MatrixBasicOp::newMatrix(&N); errcode = MatrixBasicOp::makeAtCurrentDevice(N, width, height); if (errcode != NO_ERROR) return CUDA_ERROR; errcode = MatrixBasicOp::newMatrix(&tBI); errcode = MatrixBasicOp::makeAtCurrentDevice(tBI, width, height); if (errcode != NO_ERROR) return CUDA_ERROR; errcode = MatrixBasicOp::newMatrix(&tBp); errcode = MatrixBasicOp::makeAtCurrentDevice(tBp, width, height); if (errcode != NO_ERROR) return CUDA_ERROR; errcode = MatrixBasicOp::newMatrix(&Ip); errcode = MatrixBasicOp::makeAtCurrentDevice(Ip, width, height); if (errcode != NO_ERROR) return CUDA_ERROR; errcode = MatrixBasicOp::newMatrix(&tBIp); errcode = MatrixBasicOp::makeAtCurrentDevice(tBIp, width, height); if (errcode != NO_ERROR) return CUDA_ERROR; errcode = MatrixBasicOp::newMatrix(&mean_I); errcode = MatrixBasicOp::makeAtCurrentDevice(mean_I, width, height); if (errcode != NO_ERROR) return CUDA_ERROR; errcode = MatrixBasicOp::newMatrix(&mean_p); errcode = MatrixBasicOp::makeAtCurrentDevice(mean_p, width, height); if (errcode != NO_ERROR) return CUDA_ERROR; errcode = MatrixBasicOp::newMatrix(&mean_mean_Ip); errcode = MatrixBasicOp::makeAtCurrentDevice(mean_mean_Ip, width, height); if (errcode != NO_ERROR) return CUDA_ERROR; errcode = MatrixBasicOp::newMatrix(&mean_Ip); errcode = MatrixBasicOp::makeAtCurrentDevice(mean_Ip, width, height); if (errcode != NO_ERROR) return CUDA_ERROR; errcode = MatrixBasicOp::newMatrix(&cov_Ip); errcode = MatrixBasicOp::makeAtCurrentDevice(cov_Ip, width, height); if (errcode != NO_ERROR) return CUDA_ERROR; errcode = MatrixBasicOp::newMatrix(&tBII); errcode = MatrixBasicOp::makeAtCurrentDevice(tBII, width, height); if (errcode != NO_ERROR) return CUDA_ERROR; errcode = MatrixBasicOp::newMatrix(&mean_II); errcode = MatrixBasicOp::makeAtCurrentDevice(mean_II, width, height); if (errcode != NO_ERROR) return CUDA_ERROR; errcode = MatrixBasicOp::newMatrix(&mean_I_mean_I); errcode = MatrixBasicOp::makeAtCurrentDevice(mean_I_mean_I, width, height); 
if (errcode != NO_ERROR) return CUDA_ERROR; errcode = MatrixBasicOp::newMatrix(&var_I); errcode = MatrixBasicOp::makeAtCurrentDevice(var_I, width, height); if (errcode != NO_ERROR) return CUDA_ERROR; errcode = MatrixBasicOp::newMatrix(&a); errcode = MatrixBasicOp::makeAtCurrentDevice(a, width, height); if (errcode != NO_ERROR) return CUDA_ERROR; errcode = MatrixBasicOp::newMatrix(&b); errcode = MatrixBasicOp::makeAtCurrentDevice(b, width, height); if (errcode != NO_ERROR) return CUDA_ERROR; errcode = MatrixBasicOp::newMatrix(&var_Ieps); errcode = MatrixBasicOp::makeAtCurrentDevice(var_Ieps, width, height); if (errcode != NO_ERROR) return CUDA_ERROR; errcode = MatrixBasicOp::newMatrix(&var_Ieps2); errcode = MatrixBasicOp::makeAtCurrentDevice(var_Ieps2, width, height); if (errcode != NO_ERROR) return CUDA_ERROR; errcode = MatrixBasicOp::newMatrix(&tt); errcode = MatrixBasicOp::makeAtCurrentDevice(tt, width, height); if (errcode != NO_ERROR) return CUDA_ERROR; errcode = MatrixBasicOp::newMatrix(&mean_a); errcode = MatrixBasicOp::makeAtCurrentDevice(mean_a, width, height); if (errcode != NO_ERROR) return CUDA_ERROR; errcode = MatrixBasicOp::newMatrix(&mean_b); errcode = MatrixBasicOp::makeAtCurrentDevice(mean_b, width, height); if (errcode != NO_ERROR) return CUDA_ERROR; errcode = MatrixBasicOp::newMatrix(&tBa); errcode = MatrixBasicOp::makeAtCurrentDevice(tBa, width, height); if (errcode != NO_ERROR) return CUDA_ERROR; errcode = MatrixBasicOp::newMatrix(&tBb); errcode = MatrixBasicOp::makeAtCurrentDevice(tBb, width, height); if (errcode != NO_ERROR) return CUDA_ERROR; errcode = MatrixBasicOp::newMatrix(&ttt); errcode = MatrixBasicOp::makeAtCurrentDevice(ttt, width, height); if (errcode != NO_ERROR) return CUDA_ERROR; //tBICuda = MATRIX_CUDA(tBI); //tBpCuda = MATRIX_CUDA(tBp); //IpCuda = MATRIX_CUDA(Ip); //tBIpCuda = MATRIX_CUDA(tBIp); //mean_ICuda = MATRIX_CUDA(mean_I); //mean_pCuda = MATRIX_CUDA(mean_p); //mean_mean_IpCuda = MATRIX_CUDA(mean_mean_Ip); //mean_IpCuda = MATRIX_CUDA(mean_Ip); //cov_IpCuda = MATRIX_CUDA(cov_Ip); //tBIICuda = MATRIX_CUDA(tBII); //mean_IICuda = MATRIX_CUDA(mean_II); //mean_I_mean_ICuda = MATRIX_CUDA(mean_I_mean_I); //var_ICuda = MATRIX_CUDA(var_I); //aCuda = MATRIX_CUDA(a); //bCuda = MATRIX_CUDA(b); //var_IepsCuda = MATRIX_CUDA(var_Ieps); //var_Ieps2Cuda = MATRIX_CUDA(var_Ieps2); //ttCuda = MATRIX_CUDA(tt); //mean_aCuda = MATRIX_CUDA(mean_a); //mean_bCuda = MATRIX_CUDA(mean_b); //tBaCuda = MATRIX_CUDA(tBa); //tBbCuda = MATRIX_CUDA(tBb); //tttCuda = MATRIX_CUDA(ttt); NCuda = MATRIX_CUDA(N); // 初始化 N // cv::Mat N = cv::Mat::ones (hei,wid,CV_32FC1) _beTop<<<gridsize, blocksize>>>(*NCuda); // cv::boxFilter(N,N,N.depth(),cv::Size(r,r),cv::Point(-1,-1),true,cv::BORDER_REFLECT);//boxfiter;N-->the size of each local patch _linearFilter<<<gridsize, blocksize>>>(*NCuda, *NCuda, r); // _subtract<<<gridsize, blocksize>>> _add<<<gridsize, blocksize>>> // _multiply<<<gridsize, blocksize>>> _divide<<<gridsize, blocksize>>> // _addWeighted<<<gridsize, blocksize>>> _linearFilter<<<gridsize, blocksize>>> // /* End define variables*/ // // 输入,输出, 像素位数, 内核尺寸, 锚点, 数值是否正规化 , // cv::boxFilter(image,tBI,image.depth(),cv::Size(r, r),cv::Point(-1,-1),true,cv::BORDER_REFLECT);//box fitering _linearFilter<<<gridsize, blocksize>>>(*matrixImg1cuda, *matrixOutcuda, r); // cv::boxFilter(p,tBp,p.depth(),cv::Size(r, r),cv::Point(-1,-1),true,cv::BORDER_REFLECT); /* _linearFilter<<<gridsize, blocksize>>>(*matrixImg2cuda, *tBpCuda, r); // cv::multiply(image,p,Ip); _multiply<<<gridsize, 
blocksize>>>(*matrixImg1cuda, *matrixImg2cuda, *IpCuda); // cv::boxFilter(Ip,tBIp,Ip.depth(),cv::Size(r, r),cv::Point(-1,-1),true,cv::BORDER_REFLECT); _linearFilter<<<gridsize, blocksize>>>(*IpCuda, *tBIpCuda, r); // cv::divide(tBI,N,mean_I); _divide<<<gridsize, blocksize>>>(*tBICuda, *NCuda, *mean_ICuda); // cv::divide(tBp,N,mean_p); _divide<<<gridsize, blocksize>>>(*tBpCuda, *NCuda, *mean_pCuda); // cv::divide(tBIp,N,mean_Ip); // mean_Ip = tBIp/N (前面数组中的每个个元素除以后面数组中的每个元素) _divide<<<gridsize, blocksize>>>(*tBIpCuda, *NCuda, *mean_IpCuda); // cv::multiply(mean_I,mean_p,mean_mean_Ip); _multiply<<<gridsize, blocksize>>>(*mean_ICuda, *mean_pCuda, *mean_mean_IpCuda); // cv::subtract(mean_Ip,mean_mean_Ip,cov_Ip); //this is the covariance of (image, p) in each local patch _subtract<<<gridsize, blocksize>>>(*mean_IpCuda, *mean_mean_IpCuda, *cov_IpCuda); // cv::multiply(image,image,tBII); //Ip = image*p; _multiply<<<gridsize, blocksize>>>(*matrixImg1cuda, *matrixImg1cuda, *tBIICuda); // cv::boxFilter(tBII,tBII,tBII.depth(),cv::Size(r, r),cv::Point(-1,-1),true,cv::BORDER_REFLECT); _linearFilter<<<gridsize, blocksize>>>(*tBIICuda, *tBIICuda, r); // cv::divide(tBII,N,mean_II); _divide<<<gridsize, blocksize>>>(*tBIICuda, *NCuda, *mean_IICuda); // cv::multiply(mean_I,mean_I,mean_I_mean_I); _multiply<<<gridsize, blocksize>>>(*mean_ICuda, *mean_ICuda, *mean_I_mean_ICuda); // cv::subtract(mean_II,mean_I_mean_I,var_I); _subtract<<<gridsize, blocksize>>>(*mean_IICuda, *mean_I_mean_ICuda, *var_ICuda); // cv::addWeighted(var_I, 1 ,var_I, 0,eps,var_Ieps); // _addWeighted(MatrixCuda img, float eps, MatrixCuda out) _addWeighted<<<gridsize, blocksize>>>(*var_ICuda, eps, *var_ICuda); // cv::divide(cov_Ip,var_Ieps,a); _divide<<<gridsize, blocksize>>>(*cov_IpCuda, *var_IepsCuda, *aCuda); // cv::multiply(a,mean_I,tt); _multiply<<<gridsize, blocksize>>>(*aCuda, *mean_ICuda, *ttCuda); // cv::subtract(mean_p,tt,b); _subtract<<<gridsize, blocksize>>>(*mean_pCuda, *ttCuda, *bCuda); // cv::boxFilter(a,tBa,a.depth(),cv::Size(r, r),cv::Point(-1,-1),true,cv::BORDER_REFLECT); _linearFilter<<<gridsize, blocksize>>>(*aCuda, *tBaCuda, r); // cv::boxFilter(b,tBb,b.depth(),cv::Size(r, r),cv::Point(-1,-1),true,cv::BORDER_REFLECT); _linearFilter<<<gridsize, blocksize>>>(*bCuda, *tBbCuda, r); // cv::divide(tBa,N,mean_a); _divide<<<gridsize, blocksize>>>(*tBaCuda, *NCuda, *mean_aCuda); // cv::divide(tBb,N,mean_b); _divide<<<gridsize, blocksize>>>(*tBbCuda, *NCuda, *mean_bCuda); // cv::multiply(mean_a,image,ttt); _multiply<<<gridsize, blocksize>>>(*mean_aCuda, *matrixImg1cuda, *tttCuda); // cv::add(ttt, mean_b, Out); _add<<<gridsize, blocksize>>>(*tttCuda, *mean_bCuda, *matrixOutcuda); */ _matToImg<<<gridsize, blocksize>>>(outsubimgCud, *matrixOutcuda); // 若调用 CUDA 出错返回错误代码 if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; // 处理完毕,退出。 return NO_ERROR; } void __learningFilter_Dummy() { MatrixCuda *tmp = new MatrixCuda; _subtract<<<1,1>>>(*tmp, *tmp, *tmp); _add<<<1,1>>>(*tmp, *tmp, *tmp); _multiply<<<1,1>>>(*tmp, *tmp, *tmp); _divide<<<1,1>>>(*tmp, *tmp, *tmp); _addWeighted<<<1,1>>>(*tmp, 0.0f, *tmp); }
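// ------------------------------------------------------------------------
// Illustrative note: the commented-out launch sequence above follows the
// standard guided-filter recurrence (cf. He et al., "Guided Image Filtering"):
// mean_I = boxfilter(I)/N, mean_p = boxfilter(p)/N,
// cov_Ip = mean_Ip - mean_I*mean_p, var_I = mean_II - mean_I*mean_I,
// a = cov_Ip/(var_I + eps), b = mean_p - a*mean_I, out = mean_a*I + mean_b.
// The sketch below only shows what one of those element-wise steps (the a/b
// update) looks like as a CUDA kernel. It is a minimal standalone example
// over flat row-major float buffers; the kernel name, signature and memory
// layout are assumptions for illustration, not this project's MatrixCuda API,
// whose _divide/_subtract/_multiply kernels are defined elsewhere.
__global__ void _guidedFilterABSketch(const float *cov_Ip, const float *var_I,
                                      const float *mean_I, const float *mean_p,
                                      float *a, float *b,
                                      int width, int height, float eps)
{
    // One thread per pixel; a launch would reuse a gridsize/blocksize derived
    // from width and height as in the host code above.
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height)
        return;

    int idx = y * width + x;
    float ai = cov_Ip[idx] / (var_I[idx] + eps);   // a = cov_Ip / (var_I + eps)
    a[idx]   = ai;
    b[idx]   = mean_p[idx] - ai * mean_I[idx];     // b = mean_p - a * mean_I
}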
#include "mask/seamFinder.hpp" #include "backend/common/imageOps.hpp" #include "backend/cuda/deviceBuffer.hpp" #include "backend/cuda/deviceStream.hpp" #include "cuda/util.hpp" #include "core/rect.hpp" #include "gpu/vectorTypes.hpp" #include "gpu/memcpy.hpp" #include "mask/mergerMaskConstant.hpp" namespace VideoStitch { namespace MergerMask { #define SEAM_FINDER_KERNEL_SIZE_X 16 #define SEAM_FINDER_KERNEL_SIZE_Y 16 const __constant__ int border_dir_rows[8] = {-1, 0, 0, 1, -1, -1, 1, 1}; const __constant__ int border_dir_cols[8] = {0, -1, 1, 0, -1, 1, -1, 1}; __device__ bool isInRange(const int2 coord, const int2 size) { if (coord.x < 0 || coord.y < 0 || coord.x >= size.x || coord.y >= size.y) { return false; } return true; } __device__ bool isValidPixel(const int2 coord, const int2 size, const uint32_t* const buffer) { if (!isInRange(coord, size)) { return false; } if (buffer[coord.y * size.x + coord.x] == INVALID_VALUE) { return false; } return true; } __device__ bool isBorder(const int wrapWidth, const int directionCount, const int2 coord, const int2 size, const unsigned char id, const unsigned char* const __restrict__ inputsMap) { if (!isInRange(coord, size)) { return false; } if ((inputsMap[coord.y * size.x + coord.x] & id) == 0) { return false; } if (coord.y == 0 || coord.y == size.y - 1) { return true; } // For detecting borders, need to use 8-connected neighbors, // or else, it will cause a lot of errors detecting joint points of the 2 borders for (int i = 0; i < directionCount; i++) { const int2 newCoord = make_int2((coord.x + border_dir_rows[i] + wrapWidth) % wrapWidth, coord.y + border_dir_cols[i]); if (newCoord.x >= 0 && newCoord.x < size.x && newCoord.y >= 0 && newCoord.y < size.y) { // If one of the surrounding pixel is invalid, the current pixel can be consider as border if ((inputsMap[newCoord.y * size.x + newCoord.x] & id) == 0) { return true; } } } return false; } __global__ void bordersBufferKernel(const int directionCount, const int wrapWidth, const int2 offset, const int2 size, const unsigned char* const __restrict__ inputsMap, unsigned char* const __restrict__ bordersBuffer) { unsigned x0 = blockIdx.x * blockDim.x + threadIdx.x; unsigned y0 = blockIdx.y * blockDim.y + threadIdx.y; if (x0 < size.x && y0 < size.y) { uint32_t index = y0 * size.x + x0; bordersBuffer[index] = 0; const int x = x0 + offset.x; const int y = y0 + offset.y; if (isBorder(wrapWidth, directionCount, make_int2(x, y), size, 1 << 0, inputsMap)) { bordersBuffer[index] |= (1 << 0); } if (isBorder(wrapWidth, directionCount, make_int2(x, y), size, 1 << 1, inputsMap)) { bordersBuffer[index] |= (1 << 1); } } } Status SeamFinder::findBordersBuffer(const Core::Rect rect, const GPU::Buffer<const unsigned char> mapBuffer, GPU::Buffer<unsigned char> bordersBuffer, const int directionCount) { dim3 dimBlock(SEAM_FINDER_KERNEL_SIZE_X, SEAM_FINDER_KERNEL_SIZE_Y, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv((int)rect.getWidth(), dimBlock.x), (unsigned)Cuda::ceilDiv((int)rect.getHeight(), dimBlock.y), 1); bordersBufferKernel<<<dimGrid, dimBlock, 0, stream.get()>>>( directionCount, wrapWidth, make_int2((int)rect.left(), (int)rect.top()), make_int2((int)rect.getWidth(), (int)rect.getHeight()), mapBuffer.get(), bordersBuffer.get()); return CUDA_STATUS; } __global__ void validMaskKernel(const int2 size, const uint32_t* const __restrict__ inputBuffer, unsigned char* const __restrict__ maskBuffer) { unsigned x = blockIdx.x * blockDim.x + threadIdx.x; unsigned y = blockIdx.y * blockDim.y + threadIdx.y; if (x < size.x && y < 
size.y) { uint32_t index = y * size.x + x; if (isValidPixel(make_int2(x, y), size, inputBuffer)) { maskBuffer[index] = 1; } else { maskBuffer[index] = 0; } } } Status SeamFinder::findValidMask(const Core::Rect rect, const GPU::Buffer<const uint32_t> inputBuffer, std::vector<unsigned char>& mask, GPU::Stream stream) { dim3 dimBlock(SEAM_FINDER_KERNEL_SIZE_X, SEAM_FINDER_KERNEL_SIZE_Y, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(rect.getWidth(), dimBlock.x), (unsigned)Cuda::ceilDiv(rect.getHeight(), dimBlock.y), SEAM_DIRECTION); validMaskKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(make_int2((int)rect.getWidth(), (int)rect.getHeight()), inputBuffer.get(), workMask.borrow().get()); mask.resize(inputBuffer.numElements()); if (mask.size() > 0) { return GPU::memcpyBlocking(mask.data(), workMask.borrow_const(), workMask.borrow_const().byteSize()); } else { return CUDA_STATUS; } } // The cost function is computed as describe in // Summa et al., Panorama Weaving: Fast and Flexible Seam Processing, Siggraph 2012 // Fig. 4b __global__ void costsBufferKernel(const int kernelSize, const int warpWidth, const int2 offset0, const int2 size0, const uint32_t* const __restrict__ input0Buffer, const int2 offset1, const int2 size1, const uint32_t* const __restrict__ input1Buffer, const int2 offset, const int2 size, float* const __restrict__ costsBuffer) { unsigned x = blockIdx.x * blockDim.x + threadIdx.x; unsigned y = blockIdx.y * blockDim.y + threadIdx.y; unsigned z = blockIdx.z * blockDim.z + threadIdx.z; if (x < size.x && y < size.y && z < SEAM_DIRECTION) { uint32_t index = SEAM_DIRECTION * (y * size.x + x) + z; costsBuffer[index] = MAX_COST; int count = 0; const int perpendicular_dir = perpendicular_dirs[z]; for (int i = -(kernelSize - 1); i <= kernelSize; i++) { const int xi = x + seam_dir_advance[z] * seam_dir_rows[z] + i * seam_dir_rows[perpendicular_dir]; const int yi = y + seam_dir_advance[z] * seam_dir_cols[z] + i * seam_dir_cols[perpendicular_dir]; if (xi >= 0 && xi < size.x && yi >= 0 && yi < size.y) { const int x0 = (offset.x + xi - offset0.x + warpWidth) % warpWidth; const int y0 = offset.y + yi - offset0.y; const int x1 = (offset.x + xi - offset1.x + warpWidth) % warpWidth; const int y1 = offset.y + yi - offset1.y; if (isValidPixel(make_int2(x0, y0), size0, input0Buffer) && isValidPixel(make_int2(x1, y1), size1, input1Buffer)) { const uint32_t color0 = input0Buffer[y0 * size0.x + x0]; const uint32_t color1 = input1Buffer[y1 * size1.x + x1]; const float lWeight = 1.0f; const float aWeight = 1.0f; const float bWeight = 1.0f; const float labDifference = (lWeight * abs((float(Image::RGBA::r(color0)) - float(Image::RGBA::r(color1))) / 255.0f) + aWeight * abs((float(Image::RGBA::g(color0)) - float(Image::RGBA::g(color1))) / 255.0f) + bWeight * abs((float(Image::RGBA::b(color0)) - float(Image::RGBA::b(color1))) / 255.0f)) / (lWeight + aWeight + bWeight); count++; const float gradientDifference = abs((float(Image::RGBA::a(color0)) - Image::RGBA::a(color1)) / 255.0f); float gradientWeight = 0.6f; const float sad = (labDifference + gradientWeight * gradientDifference) / (1.0f + gradientWeight); if (costsBuffer[index] == MAX_COST) { costsBuffer[index] = 0.0f; } costsBuffer[index] += sad; } } } if (count) { if (count < kernelSize * 2) { costsBuffer[index] += (kernelSize * 2 - count) * PENALTY_COST; count += kernelSize * 2 - count; } costsBuffer[index] /= count; } costsBuffer[index] = costsBuffer[index] + MIN_PENALTY_COST; if (x <= 1 || y <= 1 || x >= size.x - 2 || y >= size.y - 2) { costsBuffer[index] += 
MAX_COST; } } } Status SeamFinder::prepareSeamCostBuffer(const Core::Rect rect, GPU::Buffer<float> costsBuffer) { dim3 dimBlock(SEAM_FINDER_KERNEL_SIZE_X, SEAM_FINDER_KERNEL_SIZE_Y, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv((int)rect.getWidth(), dimBlock.x), (unsigned)Cuda::ceilDiv((int)rect.getHeight(), dimBlock.y), SEAM_DIRECTION); costsBufferKernel<<<dimGrid, dimBlock, 0, stream.get()>>>( kernelSize, wrapWidth, make_int2((int)rect0.left(), (int)rect0.top()), make_int2((int)rect0.getWidth(), (int)rect0.getHeight()), input0Buffer.get(), make_int2((int)rect1.left(), (int)rect1.top()), make_int2((int)rect1.getWidth(), (int)rect1.getHeight()), input1Buffer.get(), make_int2((int)rect.left(), (int)rect.top()), make_int2((int)rect.getWidth(), (int)rect.getHeight()), costsBuffer.get()); return CUDA_STATUS; } __global__ void inputsMapKernel(const int warpWidth, const videoreaderid_t id0, const int2 offset0, const int2 size0, const uint32_t* const __restrict__ input0Buffer, const videoreaderid_t id1, const int2 offset1, const int2 size1, const uint32_t* const __restrict__ input1Buffer, const int2 offset, const int2 size, unsigned char* const __restrict__ inputsMap) { unsigned x = blockIdx.x * blockDim.x + threadIdx.x; unsigned y = blockIdx.y * blockDim.y + threadIdx.y; if (x < size.x && y < size.y) { const int index = y * size.x + x; inputsMap[index] = 0; const int x0 = (offset.x + x - offset0.x + warpWidth) % warpWidth; const int y0 = offset.y + y - offset0.y; if (isValidPixel(make_int2(x0, y0), size0, input0Buffer)) { inputsMap[index] += 1 << id0; } const int x1 = (offset.x + x - offset1.x + warpWidth) % warpWidth; const int y1 = offset.y + y - offset1.y; if (isValidPixel(make_int2(x1, y1), size1, input1Buffer)) { inputsMap[index] += 1 << id1; } } } Status SeamFinder::findInputsMap() { dim3 dimBlock(SEAM_FINDER_KERNEL_SIZE_X, SEAM_FINDER_KERNEL_SIZE_Y, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv((int)rect.getWidth(), dimBlock.x), (unsigned)Cuda::ceilDiv((int)rect.getHeight(), dimBlock.y), SEAM_DIRECTION); inputsMapKernel<<<dimGrid, dimBlock, 0, stream.get()>>>( wrapWidth, id0, make_int2((int)rect0.left(), (int)rect0.top()), make_int2((int)rect0.getWidth(), (int)rect0.getHeight()), input0Buffer.get(), id1, make_int2((int)rect1.left(), (int)rect1.top()), make_int2((int)rect1.getWidth(), (int)rect1.getHeight()), input1Buffer.get(), make_int2((int)rect.left(), (int)rect.top()), make_int2((int)rect.getWidth(), (int)rect.getHeight()), inputsMapBuffer.borrow().get()); return CUDA_STATUS; } __global__ void blendImagesKernel(const int warpWidth, const videoreaderid_t id0, const int2 offset0, const int2 size0, const uint32_t* const __restrict__ input0Buffer, const videoreaderid_t id1, const int2 offset1, const int2 size1, const uint32_t* const __restrict__ input1Buffer, const int2 offset, const int2 size, const unsigned char* const __restrict__ inputsMap, uint32_t* __restrict__ outputBuffer) { unsigned x = blockIdx.x * blockDim.x + threadIdx.x; unsigned y = blockIdx.y * blockDim.y + threadIdx.y; if (x < size.x && y < size.y) { const int index = y * size.x + x; outputBuffer[index] = 0; const uint32_t pixelMap = inputsMap[index]; const int x1 = (offset.x + x - offset1.x + warpWidth) % warpWidth; const int y1 = offset.y + y - offset1.y; if (x1 >= 0 && x1 < size1.x && y1 >= 0 && y1 < size1.y && ((pixelMap & (1 << id1)) == (1 << id1))) { outputBuffer[index] = input1Buffer[y1 * size1.x + x1]; } const int x0 = (offset.x + x - offset0.x + warpWidth) % warpWidth; const int y0 = offset.y + y - offset0.y; if (x0 >= 0 && 
x0 < size0.x && y0 >= 0 && y0 < size0.y && ((pixelMap & (1 << id0)) == (1 << id0))) { outputBuffer[index] = input0Buffer[y0 * size0.x + x0]; } } } Status SeamFinder::blendImages(GPU::Buffer<uint32_t> outputBuffer) { dim3 dimBlock(SEAM_FINDER_KERNEL_SIZE_X, SEAM_FINDER_KERNEL_SIZE_Y, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv((int)rect.getWidth(), dimBlock.x), (unsigned)Cuda::ceilDiv((int)rect.getHeight(), dimBlock.y), SEAM_DIRECTION); blendImagesKernel<<<dimGrid, dimBlock, 0, stream.get()>>>( wrapWidth, id0, make_int2((int)rect0.left(), (int)rect0.top()), make_int2((int)rect0.getWidth(), (int)rect0.getHeight()), input0Buffer.get(), id1, make_int2((int)rect1.left(), (int)rect1.top()), make_int2((int)rect1.getWidth(), (int)rect1.getHeight()), input1Buffer.get(), make_int2((int)rect.left(), (int)rect.top()), make_int2((int)rect.getWidth(), (int)rect.getHeight()), outputsMapBuffer.borrow_const().get(), outputBuffer.get()); return CUDA_STATUS; } __global__ void findFeatheringMaskKernel(const int2 size, const uint32_t id, const unsigned char* const __restrict__ inputBuffer, uint32_t* __restrict__ outputBuffer) { unsigned x = blockIdx.x * blockDim.x + threadIdx.x; unsigned y = blockIdx.y * blockDim.y + threadIdx.y; if (x < size.x && y < size.y) { const int index = y * size.x + x; if (inputBuffer[index] == id) { outputBuffer[index] = 0; } else { outputBuffer[index] = 1; } } } Status SeamFinder::findFeatheringMask(const int2 size, const GPU::Buffer<const unsigned char> inputBuffer, GPU::Buffer<uint32_t> outputBuffer, GPU::Stream stream) { dim3 dimBlock(SEAM_FINDER_KERNEL_SIZE_X, SEAM_FINDER_KERNEL_SIZE_Y, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size.x, dimBlock.x), (unsigned)Cuda::ceilDiv((int)size.y, dimBlock.y), 1); const uint32_t id = (1 << id0) + (1 << id1); findFeatheringMaskKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(size, id, inputBuffer.get(), outputBuffer.get()); return CUDA_STATUS; } } // namespace MergerMask } // namespace VideoStitch
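// ------------------------------------------------------------------------
// Illustrative note: every kernel in the file above remaps an output-rect
// pixel into each input's rect with a horizontal wrap,
//   x0 = (offset.x + x - offset0.x + wrapWidth) % wrapWidth,
// so seams may cross the 0/wrapWidth boundary of the equirectangular panorama
// (some kernels spell the parameter "warpWidth", but the launchers pass the
// panorama wrapWidth). The standalone host-side sketch below replays that
// index arithmetic and the ceilDiv-style grid sizing on toy numbers; the
// helper names are illustrative only and not part of VideoStitch's API.
#include <cassert>

__host__ __device__ inline int wrapRemapXSketch(int xOut, int outLeft, int inLeft, int wrapWidth)
{
    // Global panorama column of the output pixel, shifted into the input rect;
    // the +wrapWidth keeps the modulo non-negative when inLeft exceeds outLeft + xOut.
    return (outLeft + xOut - inLeft + wrapWidth) % wrapWidth;
}

__host__ __device__ inline unsigned ceilDivSketch(int value, unsigned divisor)
{
    return (unsigned)((value + (int)divisor - 1) / (int)divisor);
}

int main()
{
    const int wrapWidth = 4096;
    // Output rect near the right edge, input rect starting at column 0:
    // global column 4100 wraps past the seam to column 4 of the input.
    assert(wrapRemapXSketch(100, /*outLeft=*/4000, /*inLeft=*/0, wrapWidth) == 4);
    // Input rect far to the right of the output rect: +wrapWidth avoids a negative modulo.
    assert(wrapRemapXSketch(10, /*outLeft=*/0, /*inLeft=*/4000, wrapWidth) == 106);

    // Grid sizing as done by the launchers: one 16x16 block per tile of the rect.
    assert(ceilDivSketch(1000, 16) == 63);
    assert(ceilDivSketch(600, 16) == 38);
    return 0;
}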
namespace fastertransformer { #define TOPK_FP16_STORAGE 0 template <typename T, int MAX_K, int THREADBLOCK_SIZE> __launch_bounds__(THREADBLOCK_SIZE) __global__ void beam_topK_kernel(const T* log_probs, int* topk_tmp_id_buf, T* topk_tmp_val_buf, const int vocab_size, T diversity_rate) { typedef cub::BlockReduce<TopK<T, MAX_K>, THREADBLOCK_SIZE> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; int thread_id = threadIdx.x; int block_id = blockIdx.x; TopK<T, MAX_K> partial; for (int i = 0; i < MAX_K; ++i) { partial.p[i] = -1; partial.u[i] = -FLT_MAX; } for (int elem_id = thread_id; elem_id < vocab_size; elem_id += THREADBLOCK_SIZE) { int index = elem_id + block_id * vocab_size; partial.insert((T)log_probs[index], index); } TopK<T, MAX_K> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_op<T, MAX_K>); if (thread_id == 0) { int index = block_id * MAX_K; for (int i = 0; i < MAX_K; ++i) { topk_tmp_id_buf[index + i] = total.p[i]; topk_tmp_val_buf[index + i] = total.u[i] + diversity_rate * (T)i; } } } template <typename T, int MAX_K, int THREADBLOCK_SIZE> __launch_bounds__(THREADBLOCK_SIZE) __global__ void batch_topK_kernel(int* topk_tmp_id_buf, T* topk_tmp_val_buf, int* id_buf) { int thread_id = threadIdx.x; int block_id = blockIdx.x; TopK<T, MAX_K> partial; if (thread_id == 0) { for (int i = 0; i < MAX_K; ++i) { partial.p[i] = -1; partial.u[i] = -FLT_MAX; } int index = block_id * MAX_K * MAX_K; for (int i = 0; i < MAX_K * MAX_K; i++) { partial.insert((T)topk_tmp_val_buf[index + i], topk_tmp_id_buf[index + i]); } index = block_id * MAX_K; for (int i = 0; i < MAX_K; i++) { id_buf[index + i] = partial.p[i]; } } } template <typename T, int MAX_K, int THREADBLOCK_SIZE> __launch_bounds__(THREADBLOCK_SIZE) __global__ void batch_topK_kernel(const int* __restrict topk_tmp_id_buf, const T* __restrict topk_tmp_val_buf, int* __restrict id_buf, T* __restrict val_buf) { int thread_id = threadIdx.x; int block_id = blockIdx.x; TopK<T, MAX_K> partial; if (thread_id == 0) { for (int i = 0; i < MAX_K; ++i) { partial.p[i] = -1; partial.u[i] = -FLT_MAX; } int index = block_id * MAX_K * MAX_K; for (int i = 0; i < MAX_K * MAX_K; i++) { partial.insert((T)topk_tmp_val_buf[index + i], topk_tmp_id_buf[index + i]); } index = block_id * MAX_K; for (int i = 0; i < MAX_K; i++) { id_buf[index + i] = partial.p[i]; val_buf[index + i] = partial.u[i]; } } } template <typename T, int MAX_K, int THREADBLOCK_SIZE> __launch_bounds__(THREADBLOCK_SIZE) __global__ void batch_topk_kernel(const int* __restrict x, const T* __restrict y, int* __restrict z, float* __restrict v, int V, int K, T diversity_rate) { int thread_id = threadIdx.x; int vector_id = blockIdx.x; // reposition x, y to data for the current vector x += vector_id * V; y += vector_id * V; typedef cub::BlockReduce<TopK<T, MAX_K>, THREADBLOCK_SIZE> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; TopK<T, MAX_K> partial; for (int i = 0; i < MAX_K; ++i) { partial.p[i] = -1; partial.u[i] = -FLT_MAX; } for (int elem_id = thread_id; elem_id < V; elem_id += THREADBLOCK_SIZE) { int i = elem_id % K; T elem = y[elem_id] + diversity_rate * (T)i; int elem_idx = elem_id; // x[elem_id]; partial.insert(elem, elem_idx); } TopK<T, MAX_K> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_op<T, MAX_K>); if (thread_id == 0) { z += vector_id * K; v += vector_id * K; for (int i = 0; i < MAX_K; ++i) { if (i < K) { z[i] = x[total.p[i]]; v[i] = (float)y[total.p[i]]; } } } } struct __align__(8) MD { float m; float d; }; __device__ 
__forceinline__ MD reduce_md_op(MD a, MD b) { bool a_bigger = (a.m > b.m); MD bigger_m = a_bigger ? a : b; MD smaller_m = a_bigger ? b : a; MD res; res.d = bigger_m.d + smaller_m.d * __expf(smaller_m.m - bigger_m.m); res.m = bigger_m.m; return res; } template <typename T, int MAX_K> struct TopKMD { MD md; TopK<T, MAX_K> topk; }; template <typename T, int MAX_K> __device__ __forceinline__ TopKMD<T, MAX_K> reduce_topk_md_op( const TopKMD<T, MAX_K>& a, const TopKMD<T, MAX_K>& b) { TopKMD<T, MAX_K> res; res.md = reduce_md_op(a.md, b.md); res.topk = reduce_topk_op(a.topk, b.topk); return res; } template <typename T, int ITEMS_PER_THREAD, int MAX_K, int THREADBLOCK_SIZE, bool ALIVE = false> __launch_bounds__(THREADBLOCK_SIZE) __global__ void beam_online_softmax_topk_kernel(const T* __restrict x, const T* __restrict b, const float* __restrict c, const bool* __restrict finished, int* __restrict z, T* __restrict v, int V, int K, int E) { int thread_id = threadIdx.x; int vector_id = blockIdx.x; const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16) ? HALF_FLT_MAX : FLT_MAX; // reposition y to data for the current vector x += vector_id * V; typedef cub::BlockReduce<TopKMD<float, MAX_K>, THREADBLOCK_SIZE> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; TopKMD<float, MAX_K> partial; bool finish = ALIVE ? false : finished[vector_id]; for (int i = 0; i < MAX_K; ++i) { partial.topk.p[i] = -1; partial.topk.u[i] = -MAX_T_VAL; } partial.md.m = -MAX_T_VAL; partial.md.d = 0.0F; if (finish) { for (int elem_id = thread_id; elem_id < V; elem_id += THREADBLOCK_SIZE) { float elem = (elem_id == E) ? MAX_T_VAL : -MAX_T_VAL; MD new_elem{elem, 1.0F}; partial.md = reduce_md_op(partial.md, new_elem); partial.topk.insert(elem, elem_id); } } else { for (int elem_id = thread_id; elem_id < V; elem_id += THREADBLOCK_SIZE) { float elem = (float)x[elem_id] + b[elem_id]; MD new_elem{elem, 1.0F}; partial.md = reduce_md_op(partial.md, new_elem); partial.topk.insert(elem, elem_id); } } TopKMD<float, MAX_K> total = BlockReduce(temp_storage) .Reduce(partial, reduce_topk_md_op<float, MAX_K>); if (thread_id == 0) { z += vector_id * K; v += vector_id * K; // c += vector_id; // cum_log_probs puts the results of alive beams after finish beams, // thus we add the offset. c += ALIVE ? (vector_id / (K / 2) * K + vector_id % (K / 2) + K / 2) : vector_id; // float d_total_inverse = __fdividef(1.0F, total.md.d); float d_total_log = logf(total.md.d); for (int i = 0; i < MAX_K; ++i) { // float val = __expf(total.topk.u[i] - total.md.m) * d_total_inverse; float val = total.topk.u[i] - total.md.m - d_total_log; if (i < K) { z[i] = total.topk.p[i] + vector_id * V; // faster transformer needs absolute id v[i] = (float)val + (float)c[0]; } } } } template <typename T, int ITEMS_PER_THREAD, int MAX_K, int THREADBLOCK_SIZE, bool ALIVE = false> __launch_bounds__(THREADBLOCK_SIZE, 1) __global__ void beam_online_softmax_topk_stage1_kernel(const T* __restrict x, const T* __restrict b, const bool* __restrict finished, float* __restrict t, int V, int K, int E) { int thread_id = threadIdx.x; int vector_id = blockIdx.x; const int PACKED_TOP_KMD_SIZE = 2 * MAX_K + 2; const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16) ? HALF_FLT_MAX : FLT_MAX; // one will have multiple sections per V const int v_local = (V + gridDim.y - 1) / gridDim.y; const int section_start = v_local * blockIdx.y; int section_end = section_start + v_local; section_end = (section_end > V) ? 
V : section_end; // reposition x to data for the current vector x += vector_id * V; #if TOPK_FP16_STORAGE == 1 typedef cub::BlockReduce<TopKMD<__half, MAX_K>, THREADBLOCK_SIZE> BlockReduce; #else typedef cub::BlockReduce<TopKMD<T, MAX_K>, THREADBLOCK_SIZE> BlockReduce; #endif __shared__ typename BlockReduce::TempStorage temp_storage; __shared__ float buf_s[PACKED_TOP_KMD_SIZE]; // save intermediate result #if TOPK_FP16_STORAGE == 1 TopKMD<__half, MAX_K> partial; #else TopKMD<T, MAX_K> partial; #endif // bool finish = finished[vector_id]; bool finish = ALIVE ? false : finished[vector_id]; for (int i = 0; i < MAX_K; ++i) { partial.topk.p[i] = -1; partial.topk.u[i] = -MAX_T_VAL; } partial.md.m = -MAX_T_VAL; partial.md.d = 0.0F; if (finish) { #pragma unroll 1 for (int elem_id = section_start + thread_id; elem_id < section_end; elem_id += THREADBLOCK_SIZE) { float elem = (elem_id == E) ? MAX_T_VAL : -MAX_T_VAL; MD new_elem{elem, 1.0F}; partial.md = reduce_md_op(partial.md, new_elem); partial.topk.insert(elem, elem_id); } } else { #pragma unroll 1 for (int elem_id = section_start + thread_id; elem_id < section_end; elem_id += THREADBLOCK_SIZE) { T bias = b == nullptr ? (T)0.0f : b[elem_id]; // gpt-2 does not use bias T elem = x[elem_id] + bias; MD new_elem{elem, 1.0F}; partial.md = reduce_md_op(partial.md, new_elem); partial.topk.insert(elem, elem_id); } } #if TOPK_FP16_STORAGE == 1 TopKMD<__half, MAX_K> total = BlockReduce(temp_storage) .Reduce(partial, reduce_topk_md_op<__half, MAX_K>); #else TopKMD<T, MAX_K> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_md_op<T, MAX_K>); #endif if (thread_id == 0) { for (int i = 0; i < K; i++) { reinterpret_cast<int*>(buf_s)[i] = total.topk.p[i] + vector_id * V; // faster transformer needs absolute id buf_s[MAX_K + i] = total.topk.u[i]; } buf_s[2 * MAX_K] = total.md.d; buf_s[2 * MAX_K + 1] = total.md.m; } __syncthreads(); if (threadIdx.x < PACKED_TOP_KMD_SIZE) { t[blockIdx.x * PACKED_TOP_KMD_SIZE * gridDim.y + blockIdx.y * PACKED_TOP_KMD_SIZE + threadIdx.x] = buf_s[threadIdx.x]; } } template <typename T, int MAX_K, int THREADBLOCK_SIZE, bool ALIVE = false> __launch_bounds__(THREADBLOCK_SIZE) __global__ void beam_online_softmax_topk_stage2_kernel(const float* __restrict x, const float* __restrict c, int* __restrict z, T* __restrict v, int K, int parts_per_beam) { const int vector_id = blockIdx.x; const int thread_id = threadIdx.x; const int PACKED_TOP_KMD_SIZE = 2 * MAX_K + 2; const bool IS_FP16 = std::is_same<T, half>::value; const T MAX_T_VAL = (IS_FP16) ? 
HALF_FLT_MAX : FLT_MAX; extern __shared__ char buf_s_[]; // intermediate result float* buf_s = reinterpret_cast<float*>(buf_s_); //__shared__ float buf_s[PACKED_TOP_KMD_SIZE * THREADBLOCK_SIZE]; // // intermediate result typedef cub::BlockReduce<TopKMD<T, MAX_K>, THREADBLOCK_SIZE> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; x += vector_id * PACKED_TOP_KMD_SIZE * parts_per_beam; TopKMD<T, MAX_K> partial; for (int i = 0; i < MAX_K; ++i) { partial.topk.p[i] = -1; partial.topk.u[i] = -MAX_T_VAL; } partial.md.m = -MAX_T_VAL; partial.md.d = 0.0F; // load and unpack into registers through smem for (int idx = thread_id; idx < PACKED_TOP_KMD_SIZE * parts_per_beam; idx += THREADBLOCK_SIZE) { buf_s[idx] = x[idx]; } __syncthreads(); if (threadIdx.x < parts_per_beam) { float* b_s = buf_s + thread_id * PACKED_TOP_KMD_SIZE; for (int i = 0; i < K; i++) { partial.topk.p[i] = reinterpret_cast<int*>(b_s)[i]; partial.topk.u[i] = b_s[MAX_K + i]; } partial.md.d = b_s[2 * MAX_K]; partial.md.m = b_s[2 * MAX_K + 1]; } __syncthreads(); TopKMD<T, MAX_K> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_md_op<T, MAX_K>); if (thread_id == 0) { z += vector_id * K; v += vector_id * K; // c += vector_id; // cum_log_probs puts the results of alive beams after finish beams, // thus we add the offset. c += ALIVE ? (vector_id / (K / 2) * K + vector_id % (K / 2) + K / 2) : vector_id; float d_total_log = logf(total.md.d); for (int i = 0; i < MAX_K; ++i) { float val = (float)total.topk.u[i] - total.md.m - d_total_log; if (i < K) { z[i] = total.topk.p[i]; v[i] = (float)val + (float)c[0]; } } } } template <typename T> void topK_kernelLauncher(T* log_probs, int* topk_tmp_id_buf, T* topk_tmp_val_buf, int* ids, DecodingBeamsearchArguments args, cudaStream_t stream) { const int batch_size = args.batch_size_; const int beam_width = args.beam_width_; const int vocab_size = args.vocab_size_padded_; const int diversity_rate = args.beam_search_diversity_rate_; const int block_size = SMALL_TOP_K_SOFTMAX_THREADBLOCK_SIZE; switch (beam_width) { case 1: beam_topK_kernel< T, 1, block_size><<<batch_size * beam_width, block_size, 0, stream>>>( log_probs, topk_tmp_id_buf, topk_tmp_val_buf, vocab_size, diversity_rate); batch_topK_kernel<T, 1, block_size><<<batch_size, block_size, 0, stream>>>( topk_tmp_id_buf, topk_tmp_val_buf, ids); break; case 2: beam_topK_kernel< T, 2, block_size><<<batch_size * beam_width, block_size, 0, stream>>>( log_probs, topk_tmp_id_buf, topk_tmp_val_buf, vocab_size, diversity_rate); batch_topK_kernel<T, 2, block_size><<<batch_size, block_size, 0, stream>>>( topk_tmp_id_buf, topk_tmp_val_buf, ids); break; case 3: beam_topK_kernel< T, 3, block_size><<<batch_size * beam_width, block_size, 0, stream>>>( log_probs, topk_tmp_id_buf, topk_tmp_val_buf, vocab_size, diversity_rate); batch_topK_kernel<T, 3, block_size><<<batch_size, block_size, 0, stream>>>( topk_tmp_id_buf, topk_tmp_val_buf, ids); break; case 4: beam_topK_kernel< T, 4, block_size><<<batch_size * beam_width, block_size, 0, stream>>>( log_probs, topk_tmp_id_buf, topk_tmp_val_buf, vocab_size, diversity_rate); batch_topK_kernel<T, 4, block_size><<<batch_size, block_size, 0, stream>>>( topk_tmp_id_buf, topk_tmp_val_buf, ids); break; case 6: beam_topK_kernel< T, 6, block_size><<<batch_size * beam_width, block_size, 0, stream>>>( log_probs, topk_tmp_id_buf, topk_tmp_val_buf, vocab_size, diversity_rate); batch_topK_kernel<T, 6, block_size><<<batch_size, block_size, 0, stream>>>( topk_tmp_id_buf, topk_tmp_val_buf, ids); break; case 
8: beam_topK_kernel< T, 8, block_size><<<batch_size * beam_width, block_size, 0, stream>>>( log_probs, topk_tmp_id_buf, topk_tmp_val_buf, vocab_size, diversity_rate); batch_topK_kernel<T, 8, block_size><<<batch_size, block_size, 0, stream>>>( topk_tmp_id_buf, topk_tmp_val_buf, ids); break; case 32: beam_topK_kernel< T, 32, block_size><<<batch_size * beam_width, block_size, 0, stream>>>( log_probs, topk_tmp_id_buf, topk_tmp_val_buf, vocab_size, diversity_rate); batch_topK_kernel<T, 32, block_size><<<batch_size, block_size, 0, stream>>>( topk_tmp_id_buf, topk_tmp_val_buf, ids); break; default: printf("[ERROR] Topk kernel does not support beamwidth = %d \n", beam_width); exit(0); break; } } template <typename T, int MAX_K, bool ALIVE = false> void beam_online_softmax_topk_stage2_kernelLauncher(const float* temp_storage, const float* cum_log_probs, int* ids, T* vals, int batch_size, int beam_width, int parts_per_beam, cudaStream_t stream) { // might rewrite beam_online_softmax_topk_stage2_kernel no to depend on // constant block size // in oreder to reduce compilation time int smem_stage2_size = parts_per_beam * (2 * MAX_K + 2) * sizeof(float); if (parts_per_beam <= 32) { beam_online_softmax_topk_stage2_kernel< T, MAX_K, 32, ALIVE><<<batch_size * beam_width, 32, smem_stage2_size, stream>>>( temp_storage, cum_log_probs, ids, vals, ALIVE ? beam_width * 2 : beam_width, parts_per_beam); return; } if (parts_per_beam <= 64) { beam_online_softmax_topk_stage2_kernel< T, MAX_K, 64, ALIVE><<<batch_size * beam_width, 64, smem_stage2_size, stream>>>( temp_storage, cum_log_probs, ids, vals, ALIVE ? beam_width * 2 : beam_width, parts_per_beam); return; } if (parts_per_beam <= 128) { beam_online_softmax_topk_stage2_kernel< T, MAX_K, 128, ALIVE><<<batch_size * beam_width, 128, smem_stage2_size, stream>>>( temp_storage, cum_log_probs, ids, vals, ALIVE ? beam_width * 2 : beam_width, parts_per_beam); return; } assert(0); } template <typename T, int MAX_K> void topK_softMax_kernelLauncher(const T* log_probs, const T* bias, const bool* finished, float* cum_log_probs, int* ids, void* temp_storage, const int temp_storage_size, const int batch_size, const int beam_width, const int vocab_size, const int end_id, T diversity_rate, cudaStream_t stream) { const int items_per_thread = 1; const int block_sz = (MAX_K < 16) ? (MAX_K < 8) ? SMALL_TOP_K_SOFTMAX_THREADBLOCK_SIZE : 128 : 64; // const int block_sz = SMALL_TOP_K_SOFTMAX_THREADBLOCK_SIZE; assert(temp_storage_size % 2 == 0); assert(temp_storage_size >= 2 * batch_size * beam_width * beam_width); const int topk_buf_offset = ceil(batch_size * beam_width * beam_width / 4.) 
* 4; int* topk_tmp_id_buf = reinterpret_cast<int*>(temp_storage); T* topk_tmp_val_buf = reinterpret_cast<T*>(topk_tmp_id_buf + topk_buf_offset); float* tmp_buffer = reinterpret_cast<float*>(topk_tmp_val_buf + topk_buf_offset); #ifdef DO_SPLIT_SMALL_TOP_K_SOFTMAX int voc_parts = 4; if (batch_size * beam_width < 256) { // Volta has 80 SMs, so we aim for three waves voc_parts = (240 + batch_size * beam_width - 1) / (batch_size * beam_width); voc_parts = std::min(128, voc_parts); // we implment up to 128 } dim3 grid(batch_size * beam_width, voc_parts); cudaFuncSetAttribute(beam_online_softmax_topk_stage1_kernel<T, items_per_thread, MAX_K, block_sz>, cudaFuncAttributePreferredSharedMemoryCarveout, cudaSharedmemCarveoutMaxL1); beam_online_softmax_topk_stage1_kernel< T, items_per_thread, MAX_K, block_sz><<<grid, block_sz, 0, stream>>>( log_probs, bias, finished, tmp_buffer, vocab_size, beam_width, end_id); #endif if (beam_width > 1) { #ifdef DO_SPLIT_SMALL_TOP_K_SOFTMAX beam_online_softmax_topk_stage2_kernelLauncher<T, MAX_K>(tmp_buffer, cum_log_probs, topk_tmp_id_buf, topk_tmp_val_buf, batch_size, beam_width, voc_parts, stream); #else beam_online_softmax_topk_kernel< T, items_per_thread, MAX_K, block_sz><<<batch_size * beam_width, block_sz, 0, stream>>>( log_probs, bias, cum_log_probs, finished, topk_tmp_id_buf, topk_tmp_val_buf, vocab_size, beam_width, end_id); #endif #if 0 // wrong result with diversity_rate != 0.f batch_topK_kernel<T, MAX_K, 32><<<batch_size, 32, 0, stream>>> (topk_tmp_id_buf, topk_tmp_val_buf, ids, cum_log_probs); #else batch_topk_kernel<T, MAX_K, 32><<<batch_size, 32, 0, stream>>>( topk_tmp_id_buf, topk_tmp_val_buf, ids, cum_log_probs, beam_width * beam_width, beam_width, diversity_rate); #endif } else { #ifdef DO_SPLIT_SMALL_TOP_K_SOFTMAX beam_online_softmax_topk_stage2_kernelLauncher<float, MAX_K>(tmp_buffer, cum_log_probs, ids, cum_log_probs, batch_size, beam_width, voc_parts, stream); #else beam_online_softmax_topk_kernel< T, items_per_thread, MAX_K, block_sz><<<batch_size * beam_width, block_sz, 0, stream>>>( log_probs, bias, cum_log_probs, finished, ids, cum_log_probs, vocab_size, beam_width, end_id); #endif } } template <typename T> void topK_softMax(const T* log_probs, const T* bias, const bool* finished, float* cum_log_probs, int* ids, void* temp_storage, DecodingBeamsearchArguments args, cudaStream_t stream) { const int temp_storage_size = args.temp_storage_size_; const int batch_size = args.batch_size_; const int beam_width = args.beam_width_; const int vocab_size = args.vocab_size_padded_; const int end_id = args.end_id_; const T diversity_rate = args.beam_search_diversity_rate_; switch (beam_width) { case 1: topK_softMax_kernelLauncher<T, 1>(log_probs, bias, finished, cum_log_probs, ids, temp_storage, temp_storage_size, batch_size, beam_width, vocab_size, end_id, diversity_rate, stream); break; case 2: topK_softMax_kernelLauncher<T, 2>(log_probs, bias, finished, cum_log_probs, ids, temp_storage, temp_storage_size, batch_size, beam_width, vocab_size, end_id, diversity_rate, stream); break; case 3: topK_softMax_kernelLauncher<T, 3>(log_probs, bias, finished, cum_log_probs, ids, temp_storage, temp_storage_size, batch_size, beam_width, vocab_size, end_id, diversity_rate, stream); break; case 4: topK_softMax_kernelLauncher<T, 4>(log_probs, bias, finished, cum_log_probs, ids, temp_storage, temp_storage_size, batch_size, beam_width, vocab_size, end_id, diversity_rate, stream); break; case 8: topK_softMax_kernelLauncher<T, 8>(log_probs, bias, finished, cum_log_probs, 
ids, temp_storage, temp_storage_size, batch_size, beam_width, vocab_size, end_id, diversity_rate, stream); break; case 16: topK_softMax_kernelLauncher<T, 16>(log_probs, bias, finished, cum_log_probs, ids, temp_storage, temp_storage_size, batch_size, beam_width, vocab_size, end_id, diversity_rate, stream); break; case 32: topK_softMax_kernelLauncher<T, 32>(log_probs, bias, finished, cum_log_probs, ids, temp_storage, temp_storage_size, batch_size, beam_width, vocab_size, end_id, diversity_rate, stream); break; default: printf("[ERROR] Topk kernel does not support beamwidth = %d \n", beam_width); exit(0); break; } } template void topK_kernelLauncher<float>(float* log_probs, int* topk_tmp_id_buf, float* topk_tmp_val_buf, int* ids, DecodingBeamsearchArguments args, cudaStream_t stream); template void topK_kernelLauncher<half>(half* log_probs, int* topk_tmp_id_buf, half* topk_tmp_val_buf, int* ids, DecodingBeamsearchArguments args, cudaStream_t stream); template void topK_softMax<float>(const float* log_probs, const float* bias, const bool* finished, float* cum_log_probs, int* ids, void* tmp_storage, DecodingBeamsearchArguments args, cudaStream_t stream); template void topK_softMax<half>(const half* log_probs, const half* bias, const bool* finished, float* cum_log_probs, int* ids, void* tmp_storage, DecodingBeamsearchArguments args, cudaStream_t stream); template <typename T, int MAX_K> struct TopKFinish { T u[MAX_K]; int idx[MAX_K]; int len[MAX_K]; __device__ __forceinline__ void insert(T elem, int pidx, int step) { if (elem > u[MAX_K - 1]) { u[MAX_K - 1] = elem; idx[MAX_K - 1] = pidx; len[MAX_K - 1] = step; for (int k = MAX_K - 2; k >= 0; --k) { if (u[k + 1] > u[k]) { T u2 = u[k]; u[k] = u[k + 1]; u[k + 1] = u2; int tmp = idx[k]; idx[k] = idx[k + 1]; idx[k + 1] = tmp; tmp = len[k]; len[k] = len[k + 1]; len[k + 1] = tmp; } } } } }; template <int MAX_K> struct TopKStop { float u[MAX_K]; int word_id[MAX_K]; int idx[MAX_K]; int len[MAX_K]; __device__ __forceinline__ void insert(float elem, int ids, int pidx, int step) { if (elem > u[MAX_K - 1]) { u[MAX_K - 1] = elem; word_id[MAX_K - 1] = ids; idx[MAX_K - 1] = pidx; len[MAX_K - 1] = step; for (int k = MAX_K - 2; k >= 0; --k) { if (u[k + 1] > u[k]) { float u2 = u[k]; u[k] = u[k + 1]; u[k + 1] = u2; int tmp1 = word_id[k]; word_id[k] = word_id[k + 1]; word_id[k + 1] = tmp1; int tmp2 = idx[k]; idx[k] = idx[k + 1]; idx[k + 1] = tmp2; int tmp3 = len[k]; len[k] = len[k + 1]; len[k + 1] = tmp3; } } } } }; template <typename T, int MAX_K, int THREADBLOCK_SIZE> __launch_bounds__(THREADBLOCK_SIZE) __global__ void batch_topk_update_kernel(const int* __restrict x, const T* __restrict y, bool* finished, bool* alive_finished, int* sequence_length, int* word_ids, int* parent_ids, int* output_word_ids, int* output_parent_ids, float* output_cum_log_probs, const int batch_size, const int beam_width, const int vocab_size, const int end_id, const int step, const int max_out_len, int V, int K, T diversity_rate, float length_penalty, float max_length_penalty, const int finished_candidate_num, const bool early_stopping = false) { int thread_id = threadIdx.x; int vector_id = blockIdx.x; const bool IS_FP16 = std::is_same<T, half>::value; // to be consistent with MAX_T_VAL in init_kernel, which should also be same // with other topk kernel, however it does not const T MAX_T_VAL = (IS_FP16) ? 
HALF_FLT_MAX : 1e20f; // reposition x, y to data for the current vector x += vector_id * V; y += vector_id * V; typedef cub::BlockReduce<TopK<T, MAX_K>, THREADBLOCK_SIZE> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; TopK<T, MAX_K> partial; for (int i = 0; i < MAX_K; ++i) { partial.p[i] = -1; partial.u[i] = -MAX_T_VAL; } for (int elem_id = thread_id; elem_id < V; elem_id += THREADBLOCK_SIZE) { int i = elem_id % K; T elem = y[elem_id] + diversity_rate * (T)i; int elem_idx = elem_id; // x[elem_id]; partial.insert(elem, elem_idx); } TopK<T, MAX_K> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_op<T, MAX_K>); // grow finish and grow alive if (thread_id == 0) { word_ids += vector_id * beam_width; parent_ids += vector_id * beam_width; output_word_ids += vector_id * K; // K == MAX_K == beam_width*2 output_parent_ids += vector_id * K; output_cum_log_probs += vector_id * K; sequence_length += vector_id * K; finished += vector_id * K; alive_finished += vector_id * beam_width; // load the finish queue to grow // TODO(guosheng): Use vectorized load or do this BlockReduce to use // multi-threads without extra sync int finish_num = 0; TopKFinish<T, MAX_K / 2> finish_candidate; for (int i = 0; i < MAX_K / 2; ++i) { if (step == 1) { // step number starts from 1 rather than 0 finish_candidate.u[i] = -MAX_T_VAL; finish_candidate.idx[i] = -1; finish_candidate.len[i] = 0; } else { finish_candidate.u[i] = output_cum_log_probs[i]; finish_candidate.idx[i] = i; finish_candidate.len[i] = sequence_length[i]; if (finished[i]) finish_num++; } } int alive_num = 0; for (int i = 0; i < MAX_K; ++i) { // K == MAX_K == beam_width*2 if (i < K) { // beam_online_softmax_topk_kernel produces absolute id, which can make // update_KV_cache_kernel use gather instead of gather_nd int abs_id = x[total.p[i]]; float cum_log_prob = (float)y[total.p[i]]; // There are two queues, one for the alive and another for the finish. // `beam_id` stands for parents in the alive, and it uses absolute id // represented as `batch_idx * beam_width + beam_idx`. int beam_id = abs_id / vocab_size; int beam_id_in_output = vector_id * K + (beam_id % beam_width) + beam_width; int word_id = abs_id % vocab_size; if (i < finished_candidate_num && word_id == end_id) { // grow finish // The alive candidates are put after finish candidates in the // finish queue, thus parent index should plus with the // offset(beam_width). finish_candidate.insert( cum_log_prob / length_penalty, beam_id_in_output, step); if (finish_num != MAX_K / 2) finish_num++; } else if (alive_num < beam_width && word_id != end_id) { // grow alive parent_ids[alive_num] = beam_id; word_ids[alive_num] = word_id; // Also put alive candidates after finish candidates, since output // must include both the finish and alive to trace full path output_word_ids[MAX_K / 2 + alive_num] = word_id; output_parent_ids[MAX_K / 2 + alive_num] = beam_id_in_output; output_cum_log_probs[MAX_K / 2 + alive_num] = cum_log_prob; sequence_length[MAX_K / 2 + alive_num] = step; finished[MAX_K / 2 + alive_num] = 0; alive_finished[alive_num] = 0; alive_num++; } } } for (int i = 0; i < MAX_K / 2; ++i) { output_word_ids[i] = end_id; output_cum_log_probs[i] = static_cast<float>(finish_candidate.u[i]); output_parent_ids[i] = finish_candidate.idx[i]; sequence_length[i] = finish_candidate.len[i]; // finished[i] = 1; finished[i] = finish_candidate.u[i] > (-MAX_T_VAL + (T)10.0f) ? 1 : 0; } // early finish float lowest_finish = finish_num == 0 ? 
-1e20f : output_cum_log_probs[finish_num - 1]; // The best possible score of the most likely alive sequence float lower_bound = (float)output_cum_log_probs[MAX_K / 2] / max_length_penalty; // output must include both the finish and alive to trace full path if (finished_candidate_num == MAX_K / 2) { if (finish_num == finished_candidate_num && (lowest_finish > lower_bound || early_stopping)) { // when finishing // If early stop, also mark the alive beams finished. for (int i = MAX_K / 2; i < MAX_K; ++i) { finished[i] = 1; alive_finished[i - MAX_K / 2] = 1; } } else if (step == max_out_len) { TopKStop<MAX_K / 2> finish_stop; for (int i = 0; i < MAX_K / 2; ++i) { finish_stop.word_id[i] = -1; finish_stop.u[i] = -1e20f; finish_stop.idx[i] = -1; finish_stop.len[i] = 0; } for (int i = 0; i < finish_num; ++i) { finish_stop.insert(output_cum_log_probs[i], end_id, output_parent_ids[i], sequence_length[i]); } for (int i = MAX_K / 2; i < MAX_K; ++i) { finish_stop.insert(output_cum_log_probs[i] / length_penalty, word_ids[i - MAX_K / 2], output_parent_ids[i], step); } for (int i = 0; i < MAX_K / 2; ++i) { output_word_ids[i] = finish_stop.word_id[i]; output_cum_log_probs[i] = finish_stop.u[i]; output_parent_ids[i] = finish_stop.idx[i]; sequence_length[i] = finish_stop.len[i]; finished[i] = 1; } // If early stop, also mark the alive beams finished. for (int i = MAX_K / 2; i < MAX_K; ++i) { finished[i] = 1; alive_finished[i - MAX_K / 2] = 1; } } } else { if (step == max_out_len || lowest_finish > lower_bound) { // when finishing for (int i = 0; finish_num < MAX_K / 2; ++finish_num, ++i) { output_word_ids[finish_num] = word_ids[i]; output_cum_log_probs[finish_num] = (float)output_cum_log_probs[i + beam_width] / length_penalty; output_parent_ids[finish_num] = output_parent_ids[i + beam_width]; sequence_length[finish_num] = step; finished[finish_num] = 1; } // If early stop, also mark the alive beams finished. for (int i = MAX_K / 2; i < MAX_K; ++i) { finished[i] = 1; alive_finished[i - MAX_K / 2] = 1; } } } } } template <typename T, int MAX_K> void topK_softMax_update_kernelLauncher(const T* log_probs, const T* bias, bool* finished, bool* alive_finished, int* sequence_length, int* word_ids, int* parent_ids, int* output_word_ids, int* output_parent_ids, float* cum_log_probs, void* temp_storage, const int temp_storage_size, const int batch_size, const int beam_width, const int vocab_size, const int end_id, const int step, const int max_out_len, T diversity_rate, const float alpha, const int finished_candidate_num, const bool early_stopping, cudaStream_t stream) { const int items_per_thread = 1; const int block_sz = (MAX_K < 16) ? (MAX_K < 8) ? SMALL_TOP_K_SOFTMAX_THREADBLOCK_SIZE : 128 : 64; // const int block_sz = SMALL_TOP_K_SOFTMAX_THREADBLOCK_SIZE; assert(temp_storage_size % 2 == 0); // select top beam_width*2 for topk_tmp_id_buf and topk_tmp_val_buf assert(temp_storage_size >= 2 * batch_size * beam_width * beam_width * 2); const int topk_buf_offset = ceil(batch_size * beam_width * beam_width / 4.) 
* 4 * 2; int* topk_tmp_id_buf = reinterpret_cast<int*>(temp_storage); T* topk_tmp_val_buf = reinterpret_cast<T*>(topk_tmp_id_buf + topk_buf_offset); float* tmp_buffer = reinterpret_cast<float*>(topk_tmp_val_buf + topk_buf_offset); #ifdef DO_SPLIT_SMALL_TOP_K_SOFTMAX int voc_parts = 4; if (batch_size * beam_width < 256) { // Volta has 80 SMs, so we aim for three waves voc_parts = (240 + batch_size * beam_width - 1) / (batch_size * beam_width); voc_parts = std::min(128, voc_parts); // we implment up to 128 } dim3 grid(batch_size * beam_width, voc_parts); cudaFuncSetAttribute(beam_online_softmax_topk_stage1_kernel<T, items_per_thread, MAX_K, block_sz>, cudaFuncAttributePreferredSharedMemoryCarveout, cudaSharedmemCarveoutMaxL1); beam_online_softmax_topk_stage1_kernel<T, items_per_thread, MAX_K, block_sz, true><<<grid, block_sz, 0, stream>>>( log_probs, bias, finished, tmp_buffer, vocab_size, beam_width * 2, end_id); beam_online_softmax_topk_stage2_kernelLauncher<T, MAX_K, true>( tmp_buffer, cum_log_probs, topk_tmp_id_buf, topk_tmp_val_buf, batch_size, beam_width, voc_parts, stream); // double beam_width in launcher #else beam_online_softmax_topk_kernel< T, items_per_thread, MAX_K, block_sz, true><<<batch_size * beam_width, block_sz, 0, stream>>>(log_probs, bias, cum_log_probs, finished, topk_tmp_id_buf, topk_tmp_val_buf, vocab_size, beam_width * 2, end_id); #endif float length_penalty = (finished_candidate_num == beam_width) ? std::pow((5. + step - 1) / 6., alpha) : std::pow((5. + step + 1) / 6., alpha); float max_length_penalty = (finished_candidate_num == beam_width) ? length_penalty : std::pow((5. + max_out_len + 1) / 6., alpha); batch_topk_update_kernel<T, MAX_K, 32><<<batch_size, 32, 0, stream>>>( topk_tmp_id_buf, topk_tmp_val_buf, finished, alive_finished, sequence_length, word_ids, parent_ids, output_word_ids, output_parent_ids, cum_log_probs, batch_size, beam_width, vocab_size, end_id, step, max_out_len, beam_width * beam_width * 2, beam_width * 2, diversity_rate, length_penalty, max_length_penalty, finished_candidate_num, early_stopping); } template <typename T> void topK_softMax_update( const T* log_probs, const T* bias, // NOTE: bias is float in V3.1 bool* finished, bool* alive_finished, int* sequence_length, int* word_ids, int* parent_ids, // for update cache, only include alive beams int* output_word_ids, int* output_parent_ids, // for gather tree, include both alive and finish // beams float* output_cum_log_probs, // NOTE: cum_log_probs is T in V3.1 void* temp_storage, const int step, DecodingBeamsearchArguments args, cudaStream_t stream) { const int temp_storage_size = args.temp_storage_size_; const int batch_size = args.batch_size_; const int beam_width = args.beam_width_; const int vocab_size = args.vocab_size_padded_; const int end_id = args.end_id_; const T diversity_rate = args.beam_search_diversity_rate_; const int max_out_len = args.seq_len_; const float alpha = args.alpha_; const int finished_candidate_num = args.finished_candidate_num_; const bool early_stopping = args.early_stopping_; switch (beam_width) { case 1: topK_softMax_update_kernelLauncher<T, 2>(log_probs, bias, finished, alive_finished, sequence_length, word_ids, parent_ids, output_word_ids, output_parent_ids, output_cum_log_probs, temp_storage, temp_storage_size, batch_size, beam_width, vocab_size, end_id, step, max_out_len, diversity_rate, alpha, finished_candidate_num, early_stopping, stream); break; case 2: topK_softMax_update_kernelLauncher<T, 4>(log_probs, bias, finished, alive_finished, 
sequence_length, word_ids, parent_ids, output_word_ids, output_parent_ids, output_cum_log_probs, temp_storage, temp_storage_size, batch_size, beam_width, vocab_size, end_id, step, max_out_len, diversity_rate, alpha, finished_candidate_num, early_stopping, stream); break; case 3: topK_softMax_update_kernelLauncher<T, 6>(log_probs, bias, finished, alive_finished, sequence_length, word_ids, parent_ids, output_word_ids, output_parent_ids, output_cum_log_probs, temp_storage, temp_storage_size, batch_size, beam_width, vocab_size, end_id, step, max_out_len, diversity_rate, alpha, finished_candidate_num, early_stopping, stream); break; case 4: topK_softMax_update_kernelLauncher<T, 8>(log_probs, bias, finished, alive_finished, sequence_length, word_ids, parent_ids, output_word_ids, output_parent_ids, output_cum_log_probs, temp_storage, temp_storage_size, batch_size, beam_width, vocab_size, end_id, step, max_out_len, diversity_rate, alpha, finished_candidate_num, early_stopping, stream); break; case 8: topK_softMax_update_kernelLauncher<T, 16>(log_probs, bias, finished, alive_finished, sequence_length, word_ids, parent_ids, output_word_ids, output_parent_ids, output_cum_log_probs, temp_storage, temp_storage_size, batch_size, beam_width, vocab_size, end_id, step, max_out_len, diversity_rate, alpha, finished_candidate_num, early_stopping, stream); break; case 16: topK_softMax_update_kernelLauncher<T, 32>(log_probs, bias, finished, alive_finished, sequence_length, word_ids, parent_ids, output_word_ids, output_parent_ids, output_cum_log_probs, temp_storage, temp_storage_size, batch_size, beam_width, vocab_size, end_id, step, max_out_len, diversity_rate, alpha, finished_candidate_num, early_stopping, stream); break; case 32: topK_softMax_update_kernelLauncher<T, 64>(log_probs, bias, finished, alive_finished, sequence_length, word_ids, parent_ids, output_word_ids, output_parent_ids, output_cum_log_probs, temp_storage, temp_storage_size, batch_size, beam_width, vocab_size, end_id, step, max_out_len, diversity_rate, alpha, finished_candidate_num, early_stopping, stream); break; default: printf("[ERROR] Topk kernel does not support beamwidth = %d \n", beam_width); exit(0); break; } } template void topK_softMax_update<float>( const float* log_probs, const float* bias, bool* finished, bool* alive_finished, int* sequence_length, int* word_ids, int* parent_ids, // for update cache, only include alive beams int* output_word_ids, int* output_parent_ids, // for gather tree, include both alive and finish // beams float* output_cum_log_probs, void* temp_storage, const int step, DecodingBeamsearchArguments args, cudaStream_t stream); template void topK_softMax_update<half>( const half* log_probs, const half* bias, bool* finished, bool* alive_finished, int* sequence_length, int* word_ids, int* parent_ids, // for update cache, only include alive beams int* output_word_ids, int* output_parent_ids, // for gather tree, include both alive and finish // beams float* output_cum_log_probs, void* temp_storage, const int step, DecodingBeamsearchArguments args, cudaStream_t stream); } // end of namespace fastertransformer
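// ------------------------------------------------------------------------
// Illustrative note: the (m, d) pair reduced by reduce_md_op above is the
// running maximum and the running sum of exp(x - m), so after the block
// reduction the kernels can emit log softmax(x_i) = x_i - m - log(d)
// (the "total.topk.u[i] - total.md.m - d_total_log" line) without ever
// materialising the full softmax. The standalone host-side sketch below
// replays that streaming merge on a toy vector and checks it against a
// direct log-sum-exp; it is purely illustrative and not FasterTransformer code.
#include <algorithm>
#include <cassert>
#include <cmath>
#include <vector>

struct MDSketch { float m; float d; };   // running max and running sum of exp(x - m)

static MDSketch mergeMDSketch(MDSketch a, MDSketch b)   // same recurrence as reduce_md_op
{
    MDSketch larger  = (a.m > b.m) ? a : b;
    MDSketch smaller = (a.m > b.m) ? b : a;
    return { larger.m, larger.d + smaller.d * std::exp(smaller.m - larger.m) };
}

int main()
{
    const std::vector<float> x = { 1.5f, -0.3f, 4.0f, 2.2f };

    // Streaming pass: fold one element at a time, as each thread does per vocab slice.
    MDSketch run = { -1e20f, 0.0f };
    for (float xi : x) run = mergeMDSketch(run, MDSketch{ xi, 1.0f });

    // Direct two-pass log-sum-exp for comparison.
    float m = -1e20f;
    for (float xi : x) m = std::max(m, xi);
    float d = 0.0f;
    for (float xi : x) d += std::exp(xi - m);

    // log softmax(x[0]) computed both ways: x - m - log(d).
    const float streaming = x[0] - run.m - std::log(run.d);
    const float direct    = x[0] - m - std::log(d);
    assert(std::fabs(streaming - direct) < 1e-5f);
    return 0;
}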
#include "ImageDrawer.h" // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块的尺寸。 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // 宏:DEF_BLOCK_1D // 定义了默认的 1D Block 尺寸。 #define DEF_BLOCK_1D 512 // Kernel 函数:_brushAllImageKer(涂满整幅图像) // 将整幅图像使用同一种颜色涂满。 static __global__ void // Kernel 函数无返回值 _brushAllImageKer( ImageCuda img, // 待涂刷的图像 unsigned char clr // 颜色值 ); // Kernel 函数:_drawLinesKer(绘制直线) // 根据坐标点集中给定的直线参数,在图像上绘制直线。如果 color 小于 0,则使用坐 // 标点集中的附属数据做为亮度值(颜色值);如果 color 大于等于 0,则直接使用 // color 所指定的颜色。 static __global__ void // Kernel 函数无返回值 _drawLinesKer( ImageCuda img, // 待绘制直线的图像 CoordiSetCuda cst, // 用坐标点集表示的直线参数 int color // 绘图使用的颜色值 ); // Kernel 函数:_drawLinesKer(绘制直线) // 根据坐标点集中给定的椭圆参数,在图像上绘制椭圆。如果 color 小于 0,则使用坐 // 标点集中的附属数据做为亮度值(颜色值);如果 color 大于等于 0,则直接使用 // color 所指定的颜色。 static __global__ void // Kernel 函数无返回值 _drawEllipseKer( ImageCuda img, // 待绘制直线的椭圆 CoordiSetCuda cst, // 用坐标点集表示的椭圆参数 int color // 绘图使用的颜色值 ); // Kernel 函数:_brushAllImageKer(涂满整幅图像) static __global__ void _brushAllImageKer(ImageCuda img, unsigned char clr) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并行度 // 缩减的策略,令一个线程处理 4 个输出像素,这四个像素位于统一列的相邻 4 行 // 上,因此,对于 r 需要进行乘 4 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= img.imgMeta.width || r >= img.imgMeta.height) return; // 计算第一个像素点对应的图像数据数组下标。 int idx = r * img.pitchBytes + c; // 为第一个像素点赋值。 img.imgMeta.imgData[idx] = clr; // 处理剩下的三个像素点。 for (int i = 1; i < 4; i++) { // 这三个像素点,每个像素点都在前一个的下一行,而 x 分量保持不变。因 // 此,需要检查这个像素点是否越界。检查只针对 y 分量即可,x 分量在各 // 点之间没有变化,故不用检查。 if (++r >= img.imgMeta.height) return; // 根据上一个像素点,计算当前像素点的对应的输出图像的下标。由于只有 y // 分量增加 1,所以下标只需要加上一个 pitch 即可,不需要在进行乘法计 // 算。 idx += img.pitchBytes; // 为当前像素点赋值。 img.imgMeta.imgData[idx] = clr; } } // Host 成员方法:brushAllImage(涂满整幅图像) __host__ int ImageDrawer::brushAllImage(Image *img) { // 检查输入图像是否为 NULL,如果为 NULL 直接报错返回。 if (img == NULL) return NULL_POINTER; // 如果算法 CLASS 的背景色为透明色,则不需要进行任何处理,直接返回即可。 if (this->brushColor == IDRAW_TRANSPARENT) return NO_ERROR; // 这一段代码进行图像的预处理工作。图像的预处理主要完成在 Device 内存上为 // 输入和输出图像准备内存空间,以便盛放数据。 int errcode; // 局部变量,错误码 // 将输入图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(img); if (errcode != NO_ERROR) return errcode; // 提取输入图像的 ROI 子图像。 ImageCuda subimgCud; errcode = ImageBasicOp::roiSubImage(img, &subimgCud); if (errcode != NO_ERROR) return errcode; // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (subimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (subimgCud.imgMeta.height + blocksize.y * 4 - 1) / (blocksize.y * 4); // 调用核函数完成计算。 _brushAllImageKer<<<gridsize, blocksize>>>(subimgCud, this->brushColor); // 若调用 CUDA 出错返回错误代码 if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; // 处理完毕,退出。 return NO_ERROR; } static __global__ void _drawLinesKer(ImageCuda img, CoordiSetCuda cst, int color) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并行度 // 缩减的策略,令一个线程处理 4 个输出像素,这四个像素位于统一列的相邻 4 行 // 上,因此,对于 r 需要进行乘 4 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 计算当前线程所要处理的直线的编号。 int lineidx = blockIdx.z; // 判断当前处理的直线是否已经越界。 if (2 * lineidx + 1 >= cst.tplMeta.count) return; // 共享内存声明。使用共享内存预先读出来一些数据,这样可以在一定程度上加快 // Kernel 的执行速度。 __shared__ int lineptShd[5]; // 直线的两个端点。 int *curcolorShd = &lineptShd[4]; // 绘制当前直线所用的颜色。 // 
初始化直线的两个端点。使用当前 Block 的前四个 Thread 读取两个坐标,一共 // 四个整形数据。 if (threadIdx.x < 4 && threadIdx.y == 0) { lineptShd[threadIdx.x] = cst.tplMeta.tplData[4 * lineidx + threadIdx.x]; } // 初始化当前直线的颜色。 if (threadIdx.x == 0 && threadIdx.y == 0) { // 如果颜色值小于 0,说明当前使用了动态颜色模式,此时从坐标点集的附属数 // 据中读取颜色值。否则,直接使用参数的颜色值作为颜色值。 if (color < 0) curcolorShd[0] = ((int)(fabs(cst.attachedData[2 * lineidx]) * 255)) % 256; else curcolorShd[0] = color % 256; } // 同步 Block 内的所有 Thread,使得上面的初始化过程对所有的 Thread 可见。 __syncthreads(); // 如果当前处理的坐标不在图像所表示的范围内,则直接退出。 if (c >= img.imgMeta.width || r >= img.imgMeta.height) return; // 计算当前 Thread 对应的图像下标。 int idx = r * img.pitchBytes + c; // 将 Shared Memory 中的数据读取到寄存器中,以使得接下来的计算速度更快。 int pt0x = lineptShd[0]; int pt0y = lineptShd[1]; int pt1x = lineptShd[2]; int pt1y = lineptShd[3]; unsigned char curcolor = curcolorShd[0]; // 针对不同的情况,处理直线。主要的思路就是判断当前点是否在直线上,如果在直 // 线上,则将当前点的像素值赋值为前景色值;否则什么都不做。显然,相同的 // Block 处理的是相同的直线,因此对于该 if-else 语句会进入相同的分支,所 // 以,这个分支语句不会导致分歧执行,也不会产生额外的性能下降。 if (pt0x == pt1x) { // 当直线平行于 y 轴,则判断当前点是否跟直线端点具有同样的 x 坐标,如果 // 是,并且 y 坐标在端点的范围内,则绘制点,否则什么都不做。 // 对于当前点和端点具有不同 x 坐标的像素点,则直接退出。 if (c != pt0x) return; // 计算两个端点之间的 y 坐标的范围。 int miny = min(pt0y, pt1y); int maxy = max(pt0y, pt1y); // 对于 y 坐标范围已经不在图像像素范围内的情况,则直接退出。 if (maxy < 0 || miny >= img.imgMeta.height) return; // 检查当前处理的第一个点是否在 y 坐标范围内,如果在,则在该点绘制颜色 // 点。 if (r >= miny && r <= maxy) img.imgMeta.imgData[idx] = curcolor; // 处理剩余的三个点。 for (int i = 1; i < 4; i++) { // 检查这些点是否越界。 if (++r >= img.imgMeta.height) return; // 调整下标值,根据上下两点之间的位置关系,可以从前一下标值推算出下 // 一个下标值。 idx += img.pitchBytes; // 检查当前处理的剩余三个点是否在 y 坐标范围内,如果在,则在该点绘 // 制颜色点。 if (r >= miny && r <= maxy) img.imgMeta.imgData[idx] = curcolor; } } else if (pt0y == pt1y) { // 当直线平行于 x 轴,则判断当前点是否跟直线端点具有同样的 y 坐标,如果 // 是,并且 x 坐标在端点的范围内,则绘制点,否则什么都不做。 // 计算两个端点之间的 x 坐标的范围。 int minx = min(pt0x, pt1x); int maxx = max(pt0x, pt1x); // 对于当前点的 x 坐标不在两个端点的范围内的像素点,则直接退出。 if (c < minx || c > maxx) return; // 对于当前点的 y 坐标等于端点的 y 坐标,则在该点绘制颜色值。由于在该点 // 绘制了颜色后,可以断定其后就不会再有点可以绘制了,因此绘制后直接返 // 回。 if (r == pt0y) { img.imgMeta.imgData[idx] = curcolor; return; } // 处理剩余的三个点。 for (int i = 1; i < 4; i++) { // 检查这些点是否越界。 if (++r >= img.imgMeta.height) return; // 调整下标值,根据上下两点之间的位置关系,可以从前一下标值推算出下 // 一个下标值。 idx += img.pitchBytes; // 对于当前点的 y 坐标等于端点的 y 坐标,则在该点绘制颜色值。由于在 // 该点绘制了颜色后,可以断定其后就不会再有点可以绘制了,因此绘制后 // 直接返回。 if (r == pt0y) { img.imgMeta.imgData[idx] = curcolor; return; } } } else { // 对于其他情况,可以直接按照直线方程进行判断。 // 计算两个端点之间的 x 坐标的范围。 int minx = min(pt0x, pt1x); int maxx = max(pt0x, pt1x); // 计算两个端点之间的 y 坐标的范围。 int miny = min(pt0y, pt1y); int maxy = max(pt0y, pt1y); // 对于当前点的 x 坐标不在不在两个端点的范围内的像素点,则直接退出。 if (c < minx || c > maxx) return; // 计算直线关于 x 轴的斜率,预先计算斜率可以在后面的计算过程中重复利 // 用,以减少计算。 float dydx = (float)(pt1y - pt0y) / (pt1x - pt0x); // 计算直线方程。这里如果斜率的绝对值大于 1 时(坡度大于 45 度),按照 // 关于 y 的方程进行计算,这样函数值变化和图像栅格坐标的变化是在同数量 // 级的。 float fx, detfx; if (fabs(dydx) <= 1.0f ) { fx = dydx * (c - pt0x) + pt0y - r; detfx = -1.0f; } else { dydx = 1.0f / dydx; fx = dydx * (r - pt0y) + pt0x - c; detfx = dydx; } // 如果点落在直线上(即方程等于 0)则绘制该点的颜色值。这里判断 0.5 是 // 考虑到图像的栅格坐标。 if (fabs(fx) <= 0.5f && r >= miny && r <= maxy) img.imgMeta.imgData[idx] = curcolor; // 处理剩余的三个点。 for (int i = 1; i < 4; i++) { // 检查这些点是否越界。 if (++r >= img.imgMeta.height) return; // 调整下标值和函数值,根据上下两点之间的位置关系,可以从前一下标值 // 推算出下一个下标值。 fx += detfx; idx += img.pitchBytes; // 如果点落在直线上(即方程等于 0)则绘制该点的颜色值。这里判断 // 0.5 是考虑到图像的栅格坐标。 if (fabs(fx) <= 0.5f && r >= miny && r <= maxy) img.imgMeta.imgData[idx] = curcolor; } } } // Host 成员方法:drawLines(绘制直线) __host__ int 
ImageDrawer::drawLines(Image *img, CoordiSet *cst) { // 判断参数中的图像和坐标点集是否为 NULL。 if (img == NULL || cst == NULL) return NULL_POINTER; // 如果坐标点击中含有少于 2 个点则无法完成之间绘制,因此报错退出。 if (cst->count < 2) return INVALID_DATA; // 计算绘制索要使用的颜色。 int curcolor = this->lineColor; if (this->colorMode == IDRAW_CM_STATIC_COLOR) { // 如果是静态着色,且颜色值为透明色,则直接退出。 if (curcolor == IDRAW_TRANSPARENT) return NO_ERROR; } else if (this->colorMode == IDRAW_CM_DYNAMIC_COLOR) { // 对于动态绘图模式,则将颜色值赋值成一个负数,这样 Kernel 函数就能知道 // 当前使用的是动态着色模式。 curcolor = -1; } // 这一段代码进行图像的预处理工作。图像的预处理主要完成在 Device 内存上为 // 输入和输出图像准备内存空间,以便盛放数据。 int errcode; // 局部变量,错误码 // 将输入图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(img); if (errcode != NO_ERROR) return errcode; // 提取输入图像的 ROI 子图像。 ImageCuda subimgCud; errcode = ImageBasicOp::roiSubImage(img, &subimgCud); if (errcode != NO_ERROR) return errcode; // 将坐标点集拷贝到 Device 内存中。 errcode = CoordiSetBasicOp::copyToCurrentDevice(cst); if (errcode != NO_ERROR) return errcode; // 取出坐标点集对应的 CoordiSetCuda 型数据。 CoordiSetCuda *cstCud = COORDISET_CUDA(cst); // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。这里使用 Block 的 z 维 // 度表示不同的直线。 dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; blocksize.z = 1; gridsize.x = (subimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (subimgCud.imgMeta.height + blocksize.y * 4 - 1) / (blocksize.y * 4); gridsize.z = cst->count / 2; // 调用 Kernel 完成计算。 _drawLinesKer<<<gridsize, blocksize>>>(subimgCud, *cstCud, curcolor); // 若调用 CUDA 出错返回错误代码 if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; // 处理完毕,退出。 return NO_ERROR; } // Kernel 函数:_extendLineCstKer(扩展坐标点集以绘制连续直线) static __global__ void _extendLineCstKer(CoordiSetCuda incst, CoordiSetCuda outcst) { // 计算输出坐标集的下标,本函数为每个输出坐标点配备一个 Thread。 int outidx = blockIdx.x * blockDim.x + threadIdx.x; // 如果输出点对应的是越界数据,则直接退出。 if (outidx >= outcst.tplMeta.count) return; // 计算输入下标。根据输出数组中各点对应于输入数组中的各点的关系,有【0】 // 【1】【1】【2】【2】【3】【3】等等,据此推演出输出坐标点和输入坐标点之间 // 的下标关系。 int inidx = (outidx + 1) / 2; // 如果计算出来的输入下标是越界的,则对计算出来的输入下标取模(之所以单独提 // 出来进行取模操作,是应为,取模操作比较耗时,且多数情况下不会出现越界的情 // 况)。 if (inidx >= incst.tplMeta.count) inidx %= incst.tplMeta.count; // 将对应的坐标点坐标和附属数据从输入坐标点集中拷贝到输出坐标点集中。 outcst.tplMeta.tplData[2 * outidx] = incst.tplMeta.tplData[2 * inidx]; outcst.tplMeta.tplData[2 * outidx + 1] = incst.tplMeta.tplData[2 * inidx + 1]; outcst.attachedData[outidx] = incst.attachedData[inidx]; } // 宏:FAIL_DRAWTRACE_FREE // 当下面的函数失败退出时,负责清理内存,防止内存泄漏。 #define FAIL_DRAWTRACE_FREE do { \ if (extcst != NULL) \ CoordiSetBasicOp::deleteCoordiSet(extcst); \ } while (0) // Host 成员方法:drawTrace(绘制连续直线) __host__ int ImageDrawer::drawTrace(Image *img, CoordiSet *cst, bool circled) { // 检查参数中的指针是否为 NULL。 if (img == NULL || cst == NULL) return NULL_POINTER; // 如果 cst 的坐标点少于 3 个,则只能绘制 0 条或 1 条线,因此可直接调用绘制 // 直线算法处理。 if (cst->count < 3) return this->drawLines(img, cst); // 局部变量,错误码 int errcode; // 申请扩展后的坐标点集,由于该函数最终要调用 drawLines 完成绘图工作,因此 // 首先需要根据输入的坐标点集生成所需要的扩展型坐标点集。 CoordiSet *extcst = NULL; errcode = CoordiSetBasicOp::newCoordiSet(&extcst); if (errcode != NO_ERROR) { FAIL_DRAWTRACE_FREE; return errcode; } // 计算扩展坐标点集中点的数量,并申请相应大小的内存空间。对于环形绘图相对于 // 蛇形绘图会多一条直线。 int extcstcnt = (circled ? 
2 * cst->count : 2 * (cst->count - 1)); errcode = CoordiSetBasicOp::makeAtCurrentDevice(extcst, extcstcnt); if (errcode != NO_ERROR) { FAIL_DRAWTRACE_FREE; return errcode; } // 将输入的坐标点集拷贝当当前 Device。 errcode = CoordiSetBasicOp::copyToCurrentDevice(cst); if (errcode != NO_ERROR) { FAIL_DRAWTRACE_FREE; return errcode; } // 获取两个坐标点集的 CUDA 型数据指针。 CoordiSetCuda *cstCud = COORDISET_CUDA(cst); CoordiSetCuda *extcstCud = COORDISET_CUDA(extcst); // 计算启动 Kernel 所需要的 Block 尺寸和数量。 size_t blocksize = DEF_BLOCK_1D; size_t gridsize = (extcstcnt + DEF_BLOCK_1D - 1) / DEF_BLOCK_1D; // 启动 Kernel 完成扩展坐标集的计算操作。 _extendLineCstKer<<<gridsize, blocksize>>>(*cstCud, *extcstCud); // 检查 Kernel 执行是否正确。 if (cudaGetLastError() != cudaSuccess) { FAIL_DRAWTRACE_FREE; return errcode; } // 使用扩展坐标集绘制连续直线。 errcode = this->drawLines(img, extcst); if (errcode != NO_ERROR) { FAIL_DRAWTRACE_FREE; return errcode; } // 处理完毕,清除临时使用的扩展坐标点集。 CoordiSetBasicOp::deleteCoordiSet(extcst); // 处理完毕,退出。 return NO_ERROR; } static __global__ void _drawEllipseKer(ImageCuda img, CoordiSetCuda cst, int color) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并行度 // 缩减的策略,令一个线程处理 4 个输出像素,这四个像素位于统一列的相邻 4 行 // 上,因此,对于 r 需要进行乘 4 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 共享内存声明。使用共享内存预先读出来一些数据,这样可以在一定程度上加快 // Kernel 的执行速度。 __shared__ int ellipseptShd[5]; // 椭圆的外界矩形的左上顶点和右下 // 顶点的坐标 __shared__ float ellipsecenterptShd[2]; // 椭圆的中心坐标 __shared__ float ellipseradiuShd[2]; // 椭圆的半径 rx,ry int *curcolorShd = &ellipseptShd[4]; // 绘制当前椭圆所用的颜色。 // 初始化椭圆的外界矩形的 2 个顶点。使用前 4 个 Thread 读取 2 个坐标,一共 // 4 个整型数据。 if (threadIdx.x < 4 && threadIdx.y == 0) { ellipseptShd[threadIdx.x] = cst.tplMeta.tplData[threadIdx.x]; } // 使用第 5 个 Thread 初始化当前椭圆的颜色。 if (threadIdx.x == 4 && threadIdx.y == 0) { // 如果颜色值小于 0,说明当前使用了动态颜色模式,此时从坐标点集的附属数 // 据中读取颜色值。否则,直接使用参数的颜色值作为颜色值。 if (color < 0) curcolorShd[0] = ((int)(fabs(cst.attachedData[0]) * 255)) % 256; else curcolorShd[0] = color % 256; } // 同步 Block 内的所有 Thread,使得上面的初始化过程对所有的 Thread 可见。 __syncthreads(); // 使用前 2 个 Thread 初始化椭圆的中心坐标 if (threadIdx.x < 2 && threadIdx.y == 0) { // 初始化椭圆的中心坐标 ellipsecenterptShd[threadIdx.x] = (float)(ellipseptShd [threadIdx.x] + ellipseptShd [threadIdx.x + 2]) / 2; } // 使用第 3 个和第 4 个 Thread 初始化椭圆的半径 if (threadIdx.x >=2 && threadIdx.x <4 && threadIdx.y == 0) { ellipseradiuShd[threadIdx.x - 2] = (float)(ellipseptShd [threadIdx.x] - ellipseptShd [threadIdx.x - 2]) / 2; } // 同步 Block 内的所有 Thread,使得上面的初始化过程对所有的 Thread 可见。 __syncthreads(); // 如果当前处理的坐标不在图像所表示的范围内,则直接退出。 if (c >= img.imgMeta.width || r >= img.imgMeta.height) return; // 计算当前 Thread 对应的图像下标。 int idx = r * img.pitchBytes + c; // 将 Shared Memory 中的数据读取到寄存器中,以使得接下来的计算速度更快。 int pt0x = ellipseptShd[0]; int pt0y = ellipseptShd[1]; int pt1x = ellipseptShd[2]; int pt1y = ellipseptShd[3]; float rx = ellipseradiuShd[0]; float ry = ellipseradiuShd[1]; float xc = ellipsecenterptShd[0]; float yc = ellipsecenterptShd[1]; unsigned char curcolor = curcolorShd[0]; // 计算椭圆的 x 坐标的范围。 int minx = min(pt0x, pt1x); int maxx = max(pt0x, pt1x); // 计算椭圆的 y 坐标的范围。 int miny = min(pt0y, pt1y); int maxy = max(pt0y, pt1y); // 针对不同的情况,处理椭圆。主要的思路就是判断当前点是否在椭圆上,如果在 // 椭圆上,则将当前点的像素值赋值为前景色值;否则什么都不做。 if (minx == maxx || miny == maxy) { // 当输入点集在一条直线上时,退出程序。 return; } else { // 对于其他情况,可以直接按照椭圆方程进行判断。 // 对于当前点的 x,y 坐标,若不在椭圆的左上四分之一范围内,则直接退出。 if (c < minx || c > xc || r < miny || r > yc) return; // 计算椭圆方程。判断点(c,r)的斜率是否大于 1 ,若大于 1 则计算 // 点(c,r + 1)的方程的值,否则计算点(c + 1,r)的方程的值。 float 
fx1,fx2; int r1,c1; if (ry * ry * (c - xc) >= rx * rx * (r - yc)) { r1 = r + 1; c1 = c; } else { r1 = r; c1 = c + 1; } fx1 = (float)ry * ry * (c - xc)* (c - xc) + rx * rx * (r - yc)* (r - yc) - rx * rx * ry * ry; fx2 = (float)ry * ry * (c1 - xc)* (c1 - xc) + rx * rx * (r1 - yc)* (r1 - yc) - rx * rx * ry * ry; // 如果相邻两点分别落在椭圆内和椭圆外(即方程的值一正一负)则绘制距离 // 椭圆较近的点(即方程值的绝对值较小的点)及该点关于直线 x = xc, // y = yc 及关于点(xc,yc)对称的 3 个点的颜色值。 if(fx1 >= 0 && fx2 < 0) { if(fabs(fx1) < fabs(fx2)) { img.imgMeta.imgData[idx] = curcolor; // 关于直线 x = xc 对称的点 img.imgMeta.imgData[r * img.pitchBytes + (int)(2 * xc) - c] = curcolor; // 关于直线 y = yc 对称的点 img.imgMeta.imgData[((int)(2 * yc) - r) * img.pitchBytes + c] = curcolor; // 关于点(xc,yc)对称的点 img.imgMeta.imgData[(int)((2 * yc - r) * img.pitchBytes + 2 * xc - c)] = curcolor; } else { img.imgMeta.imgData[r1 * img.pitchBytes + c1] = curcolor; // 关于直线 x = xc 对称的点 img.imgMeta.imgData[r1 * img.pitchBytes + ((int)(2 * xc) - c1)] = curcolor; // 关于直线 y = yc 对称的点 img.imgMeta.imgData[(int)((2 * yc) - r1) * img.pitchBytes + c1] = curcolor; // 关于点(xc,yc)对称的点 img.imgMeta.imgData[(int)((2 * yc - r1) *img.pitchBytes + 2 * xc - c1)] = curcolor; } } // 处理剩余的三个点。 for (int i = 1; i < 4; i++) { // 检查这些点是否越界。 if (++r > yc) return; // 计算椭圆方程。判断点(c,r)的斜率是否大于 1 ,若大于 1 则计算 // 点(c,r + 1)的方程的值,否则计算点(c + 1,r)的方程的值。 if(ry * ry * (c - xc) >= rx * rx * (r - yc)) { r1 = r + 1; c1 = c; } else { r1 = r; c1 = c + 1; } fx1 = (float)ry * ry * (c - xc)* (c - xc) + rx * rx * (r - yc)* (r - yc) - rx * rx * ry * ry; fx2 = (float)ry * ry * (c1 - xc)* (c1 - xc) + rx * rx * (r1 - yc)* (r1 - yc) - rx * rx * ry * ry; idx += img.pitchBytes; // 如果相邻两点分别落在椭圆内和椭圆外(即方程的值一正一负)则绘制 // 距离椭圆较近的点(即方程值的绝对值较小的点)及该点关于直线 // x = xc,y = yc 及关于点(xc,yc)对称的 3 个点的颜色值。 if (fx1 >= 0 && fx2 < 0) { if(fabs(fx1) < fabs(fx2)) { img.imgMeta.imgData[idx] = curcolor; // 关于直线 x = xc 对称的点 img.imgMeta.imgData[r * img.pitchBytes + (int)(2 * xc) - c] =curcolor; // 关于直线 y = yc 对称的点 img.imgMeta.imgData[((int)(2 * yc) - r) * img.pitchBytes + c] = curcolor; // 关于点(xc,yc)对称的点 img.imgMeta.imgData[(int)((2 * yc - r) * img.pitchBytes + 2 * xc - c)] = curcolor; } else { img.imgMeta.imgData[r1 * img.pitchBytes + c1] = curcolor; // 关于直线 x = xc 对称的点 img.imgMeta.imgData[r1 * img.pitchBytes + (int)(2 * xc) - c1] = curcolor; // 关于直线 y = yc 对称的点 img.imgMeta.imgData[((int)(2 * yc) - r1) * img.pitchBytes + c1] = curcolor; // 关于点(xc,yc)对称的点 img.imgMeta.imgData[(int)((2 * yc - r1) * img.pitchBytes + 2 * xc - c1)] = curcolor; } } } } } // Host 成员方法:drawEllipse(绘制椭圆) __host__ int ImageDrawer::drawEllipse(Image *img, CoordiSet *cst) { // 判断参数中的图像和坐标点集是否为 NULL。 if (img == NULL || cst == NULL) return NULL_POINTER; // 如果坐标点击中含有少于 2 个点则无法完成椭圆绘制,因此报错退出。 if (cst->count < 2) return INVALID_DATA; // 计算绘制所要使用的颜色。 int curcolor = this->lineColor; if (this->colorMode == IDRAW_CM_STATIC_COLOR) { // 如果是静态着色,且颜色值为透明色,则直接退出。 if (curcolor == IDRAW_TRANSPARENT) return NO_ERROR; } else if (this->colorMode == IDRAW_CM_DYNAMIC_COLOR) { // 对于动态绘图模式,则将颜色值赋值成一个负数,这样 Kernel 函数就能知道 // 当前使用的是动态着色模式。 curcolor = -1; } // 这一段代码进行图像的预处理工作。图像的预处理主要完成在 Device 内存上为 // 输入和输出图像准备内存空间,以便盛放数据。 int errcode; // 局部变量,错误码 // 将输入图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(img); if (errcode != NO_ERROR) return errcode; // 提取输入图像的 ROI 子图像。 ImageCuda subimgCud; errcode = ImageBasicOp::roiSubImage(img, &subimgCud); if (errcode != NO_ERROR) return errcode; // 将坐标点集拷贝到 Device 内存中。 errcode = CoordiSetBasicOp::copyToCurrentDevice(cst); if (errcode != NO_ERROR) return errcode; // 取出坐标点集对应的 CoordiSetCuda 型数据。 
CoordiSetCuda *cstCud = COORDISET_CUDA(cst);

    // Compute the thread-block size and the number of blocks used to launch
    // the kernel.
    dim3 blocksize, gridsize;
    blocksize.x = DEF_BLOCK_X;
    blocksize.y = DEF_BLOCK_Y;
    gridsize.x = (subimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x;
    gridsize.y = (subimgCud.imgMeta.height + blocksize.y * 4 - 1) /
                 (blocksize.y * 4);

    // Launch the kernel to carry out the computation.
    _drawEllipseKer<<<gridsize, blocksize>>>(subimgCud, *cstCud, curcolor);

    // If the CUDA call failed, return the CUDA error code.
    if (cudaGetLastError() != cudaSuccess)
        return CUDA_ERROR;

    // Processing finished; return.
    return NO_ERROR;
}
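// A small host-side reference of the per-pixel point-on-line test applied by
// _drawLinesKer, which can be used to check the kernel output on the CPU.
// This is a hedged sketch: the function name pointOnSegmentRef is illustrative
// only, and fabsf is assumed to be available (the kernels above already use
// fabs). The arithmetic mirrors the kernel: solve the line equation along the
// shallower axis and accept pixels that lie within half a raster cell (0.5).
static inline bool pointOnSegmentRef(int c, int r,
                                     int pt0x, int pt0y, int pt1x, int pt1y)
{
    int minx = (pt0x < pt1x) ? pt0x : pt1x;
    int maxx = (pt0x < pt1x) ? pt1x : pt0x;
    int miny = (pt0y < pt1y) ? pt0y : pt1y;
    int maxy = (pt0y < pt1y) ? pt1y : pt0y;

    // Vertical segment: same column, row within the endpoint range.
    if (pt0x == pt1x)
        return (c == pt0x) && (r >= miny) && (r <= maxy);

    // Horizontal segment: same row, column within the endpoint range.
    if (pt0y == pt1y)
        return (r == pt0y) && (c >= minx) && (c <= maxx);

    // General case: reject pixels outside the bounding box of the endpoints.
    if (c < minx || c > maxx || r < miny || r > maxy)
        return false;

    // Evaluate the line equation along the axis on which the line changes
    // more slowly, so the function value varies at the raster step rate.
    float dydx = (float)(pt1y - pt0y) / (pt1x - pt0x);
    float fx;
    if (fabsf(dydx) <= 1.0f)
        fx = dydx * (c - pt0x) + pt0y - r;           // |slope| <= 1
    else
        fx = (1.0f / dydx) * (r - pt0y) + pt0x - c;  // |slope| >  1

    // A pixel belongs to the segment if it lies within half a raster cell.
    return fabsf(fx) <= 0.5f;
}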
//#include <gunrock/util/array_utils.cuh> //#include <gunrock/oprtr/1D_oprtr/for_all.cuh> //#include <gunrock/oprtr/1D_oprtr/for_each.cuh> //#include <gunrock/oprtr/1D_oprtr/1D_scalar.cuh> //#include <gunrock/oprtr/1D_oprtr/1D_1D.cuh> //#include <gunrock/graph/csr.cuh> //#include <gunrock/graph/coo.cuh> //#include <gunrock/graph/csc.cuh> //#include <gunrock/graph/gp.cuh> #include <gunrock/graphio/graphio.cuh> //#include <gunrock/app/frontier.cuh> //#include <gunrock/partitioner/partitioner.cuh> //#include <gunrock/app/sssp/sssp_problem.cuh> //#include <gunrock/app/enactor_base.cuh> #include <gunrock/app/sssp/sssp_enactor.cuh> using namespace gunrock; using namespace gunrock::util; using namespace gunrock::oprtr; using namespace gunrock::graph; using namespace gunrock::app; typedef uint32_t VertexT; typedef unsigned long long SizeT; typedef float ValueT; template <typename _VertexT = int, typename _SizeT = _VertexT, typename _ValueT = _VertexT, GraphFlag _FLAG = GRAPH_NONE, unsigned int _cudaHostRegisterFlag = cudaHostRegisterDefault> struct TestGraph : public Csr<VertexT, SizeT, ValueT, _FLAG | HAS_CSR | HAS_COO | HAS_CSC | HAS_GP, _cudaHostRegisterFlag>, public Coo<VertexT, SizeT, ValueT, _FLAG | HAS_CSR | HAS_COO | HAS_CSC | HAS_GP, _cudaHostRegisterFlag>, public Csc<VertexT, SizeT, ValueT, _FLAG | HAS_CSR | HAS_COO | HAS_CSC | HAS_GP, _cudaHostRegisterFlag>, public Gp<VertexT, SizeT, ValueT, _FLAG | HAS_CSR | HAS_COO | HAS_CSC | HAS_GP, _cudaHostRegisterFlag> { typedef _VertexT VertexT; typedef _SizeT SizeT; typedef _ValueT ValueT; static const GraphFlag FLAG = _FLAG | HAS_CSR | HAS_COO | HAS_CSC | HAS_GP; static const unsigned int cudaHostRegisterFlag = _cudaHostRegisterFlag; typedef Csr<VertexT, SizeT, ValueT, FLAG, cudaHostRegisterFlag> CsrT; typedef Coo<VertexT, SizeT, ValueT, FLAG, cudaHostRegisterFlag> CooT; typedef Csc<VertexT, SizeT, ValueT, FLAG, cudaHostRegisterFlag> CscT; typedef Gp<VertexT, SizeT, ValueT, FLAG, cudaHostRegisterFlag> GpT; SizeT nodes, edges; template <typename CooT_in> cudaError_t FromCoo(CooT_in &coo, bool self_coo = false) { cudaError_t retval = cudaSuccess; nodes = coo.CooT::nodes; edges = coo.CooT::edges; retval = this->CsrT::FromCoo(coo); if (retval) return retval; retval = this->CscT::FromCoo(coo); if (retval) return retval; if (!self_coo) retval = this->CooT::FromCoo(coo); return retval; } template <typename CsrT_in> cudaError_t FromCsr(CsrT_in &csr, bool self_csr = false) { cudaError_t retval = cudaSuccess; nodes = csr.CsrT::nodes; edges = csr.CsrT::edges; retval = this->CooT::FromCsr(csr); if (retval) return retval; retval = this->CscT::FromCsr(csr); if (retval) return retval; if (!self_csr) retval = this->CsrT::FromCsr(csr); return retval; } cudaError_t Release(util::Location target = util::LOCATION_ALL) { cudaError_t retval = cudaSuccess; util::PrintMsg("GraphT::Realeasing on " + util::Location_to_string(target)); retval = this->CooT::Release(target); if (retval) return retval; retval = this->CsrT::Release(target); if (retval) return retval; retval = this->CscT::Release(target); if (retval) return retval; retval = this->GpT::Release(target); if (retval) return retval; return retval; } CsrT &csr() { return (static_cast<CsrT *>(this))[0]; } }; /*void Test_Array() { // test array Array1D<int, int, PINNED> test_array; test_array.SetName("test_array"); test_array.Allocate(1024, HOST | DEVICE); test_array.EnsureSize(2048); test_array.Move(HOST, DEVICE); test_array.Release(); } void Test_ForAll() { // test ForAll Array1D<int, int, PINNED> array1, array2; 
array1.SetName("array1"); array2.SetName("array2"); array1.Allocate(1024 * 1024, HOST | DEVICE); array2.Allocate(1024 * 1024, HOST | DEVICE); array1.ForAll( [] __host__ __device__ (int* elements, int pos) { elements[pos] = pos / 1024; });//, DefaultSize, HOST | DEVICE); array2.ForAll( [] __host__ __device__ (int* elements, int pos){ elements[pos] = pos % 1024; });//, DefaultSize, HOST | DEVICE); //ForAll(array1, 1024 * 1024, // [] __host__ __device__ (int* elements, int pos){ // printf("array1[%d] = %d\t", pos, elements[pos]); // }, HOST | DEVICE); int mod = 10; std::cout << "mod = ?"; std::cin >> mod; array1.ForAllCond( array2, [mod] __host__ __device__ (int* elements_in, int* elements_out, int pos) { return (elements_in[pos] == elements_out[pos] && (pos%mod) == 0); }, [mod] __host__ __device__ (int* elements_in, int* elements_out, int pos) { //if (elements_in[pos] == elements_out[pos] && (pos%mod) == 0) printf("on %s: array1[%d] = array2[%d] = %d\n", #ifdef __CUDA_ARCH__ "GPU", #else "CPU", #endif pos, pos, elements_in[pos]); });//, DefaultSize, HOST | DEVICE); cudaDeviceSynchronize(); } void Test_ForEach() { // test ForEach Array1D<SizeT, ValueT, PINNED> array3, array4; array3.SetName("array3");array4.SetName("array4"); SizeT length = 1024 * 1024; Location target = HOST | DEVICE; array3.Allocate(length, target); array4.Allocate(length, target); array4.SetIdx(); array3 = 10; array3 += array4; array3 -= 19.5; //ForEach(array3.GetPointer(DEVICE), // [] __host__ __device__ (ValueT &element){ // element = 10; // }, length, DEVICE); array4.ForEach([] __host__ __device__ (ValueT &element){ element = 20; }); } void Test_Csr() { // Test_Csr typedef int VertexT; Csr<VertexT, SizeT, ValueT, HAS_CSR> csr; csr.Allocate(10, 10); Coo<VertexT, SizeT, ValueT, HAS_COO> coo; csr.FromCoo(coo); Csr<VertexT, SizeT, ValueT, HAS_CSR | HAS_EDGE_VALUES> csr2; csr2.Allocate(10, 10); Coo<VertexT, SizeT, ValueT, HAS_COO | HAS_EDGE_VALUES> coo2; csr2.FromCoo(coo2); } cudaError_t Test_GraphIo(int argc, char* argv[]) { // Test graphio cudaError_t retval = cudaSuccess; util::Parameters parameters("test refactor"); typedef TestGraph<VertexT, SizeT, ValueT, HAS_EDGE_VALUES> GraphT; GraphT graph; retval = graphio::UseParameters(parameters); if (retval) return retval; retval = parameters.Parse_CommandLine(argc, argv); if (retval) return retval; if (parameters.Get<bool>("help")) { parameters.Print_Help(); return cudaSuccess; } retval = parameters.Check_Required(); if (retval) return retval; retval = graphio::LoadGraph(parameters, graph); if (retval) return retval; //retval = graph.CooT::Display(); if (retval) return retval; //retval = graph.CsrT::Display(); if (retval) return retval; //retval = graph.CscT::Display(); if (retval) return retval; typedef Csr<VertexT, SizeT, ValueT> CsrT; CsrT csr; retval = csr.FromCoo(graph); if (retval) return retval; PrintMsg("CSR from COO:"); csr.Display(); //graph.CooT::Display(); retval = csr.FromCsc(graph); if (retval) return retval; PrintMsg("CSR from CSC:"); csr.Display(); //graph.CooT::Display(); retval = csr.FromCsr(graph); if (retval) return retval; PrintMsg("CSR from CSR:"); csr.Display(); //graph.CooT::Display(); typedef Csr<VertexT, SizeT, ValueT, HAS_EDGE_VALUES> CsreT; CsreT csre; retval = csre.FromCoo(graph); if (retval) return retval; PrintMsg("CSRE from COO:"); csre.Display(); //graph.CooT::Display(); retval = csre.FromCsc(graph); if (retval) return retval; PrintMsg("CSRE from CSC:"); csre.Display(); //graph.CooT::Display(); retval = csre.FromCsr(graph); if (retval) return 
retval; PrintMsg("CSRE from CSR:"); csre.Display(); //graph.CooT::Display(); typedef Csc<VertexT, SizeT, ValueT> CscT; CscT csc; retval = csc.FromCoo(graph); if (retval) return retval; PrintMsg("CSC from COO:"); csc.Display(); //graph.CooT::Display(); retval = csc.FromCsc(graph); if (retval) return retval; PrintMsg("CSC from CSC:"); csc.Display(); //graph.CooT::Display(); retval = csc.FromCsr(graph); if (retval) return retval; PrintMsg("CSC from CSR:"); csc.Display(); //graph.CooT::Display(); typedef Csc<VertexT, SizeT, ValueT, HAS_EDGE_VALUES> CsceT; CsceT csce; retval = csce.FromCoo(graph); if (retval) return retval; PrintMsg("CSCE from COO:"); csce.Display(); //graph.CooT::Display(); retval = csce.FromCsc(graph); if (retval) return retval; PrintMsg("CSCE from CSC:"); csce.Display(); //graph.CooT::Display(); retval = csce.FromCsr(graph); if (retval) return retval; PrintMsg("CSCE from CSR:"); csce.Display(); //graph.CooT::Display(); typedef Coo<VertexT, SizeT, ValueT> CooT; CooT coo; retval = coo.FromCoo(graph); if (retval) return retval; PrintMsg("COO from COO:"); coo.Display(); //graph.CooT::Display(); retval = coo.FromCsc(graph); if (retval) return retval; PrintMsg("COO from CSC:"); coo.Display(); //graph.CooT::Display(); retval = coo.FromCsr(graph); if (retval) return retval; PrintMsg("COO from CSR:"); coo.Display(); //graph.CooT::Display(); typedef Coo<VertexT, SizeT, ValueT, HAS_EDGE_VALUES> CooeT; CooeT cooe; retval = cooe.FromCoo(graph); if (retval) return retval; PrintMsg("COOE from COO:"); cooe.Display(); //graph.CooT::Display(); retval = cooe.FromCsc(graph); if (retval) return retval; PrintMsg("COOE from CSC:"); cooe.Display(); //graph.CooT::Display(); retval = cooe.FromCsr(graph); if (retval) return retval; PrintMsg("COOE from CSR:"); cooe.Display(); //graph.CooT::Display(); return retval; } void Test_Frontier() { Frontier<VertexT, SizeT> frontier; frontier.SetName("test_frontier"); frontier.Init(); frontier.Release(); }*/ template <typename GraphT> cudaError_t LoadGraph(util::Parameters &parameters, GraphT &graph) { cudaError_t retval = cudaSuccess; retval = graphio::LoadGraph(parameters, graph); if (retval) return retval; // util::cpu_mt::PrintCPUArray<typename GraphT::SizeT, typename // GraphT::SizeT>( // "row_offsets", graph.GraphT::CsrT::row_offsets + 0, graph.nodes+1); // util::cpu_mt::PrintCPUArray<typename GraphT::SizeT, typename // GraphT::ValueT>( // "edge_values", graph.GraphT::CsrT::edge_values + 0, graph.edges); return retval; } /*template <typename GraphT> cudaError_t Test_Partitioner(Parameters &parameters, GraphT &graph) { cudaError_t retval = cudaSuccess; GraphT *sub_graphs = NULL; retval = partitioner::Partition( graph, sub_graphs, parameters, 4, partitioner::Keep_Node_Num, util::HOST); if (retval) return retval; PrintMsg("OrgGraph"); graph.CsrT::Display(); for (int i=0; i<4; i++) { PrintMsg("SubGraph " + std::to_string(i)); sub_graphs[i].CsrT::Display(); } return retval; }*/ /*template <typename GraphT> cudaError_t Test_ProblemBase(Parameters &parameters, GraphT &graph) { cudaError_t retval = cudaSuccess; typedef ProblemBase<GraphT> ProblemT; ProblemT problem; retval = problem.Init(parameters, graph); if (retval) return retval; return retval; }*/ template <typename GraphT> cudaError_t Test_SSSP(Parameters &parameters, GraphT &graph, util::Location target = util::HOST) { cudaError_t retval = cudaSuccess; typedef gunrock::app::sssp::Problem<GraphT, unsigned char> ProblemT; typedef gunrock::app::sssp::Enactor<ProblemT> EnactorT; ProblemT problem; EnactorT 
enactor; retval = problem.Init(parameters, graph, target); if (retval) return retval; retval = enactor.Init(parameters, &problem, target); if (retval) return retval; retval = problem.Reset(0, target); if (retval) return retval; retval = enactor.Reset(0, target); if (retval) return retval; retval = enactor.Enact(0); if (retval) return retval; retval = problem.Release(target); if (retval) return retval; retval = enactor.Release(target); if (retval) return retval; return retval; } /*template <typename GraphT> cudaError_t Test_EnactorBase(Parameters &parameters, GraphT &graph, util::Location target = util::HOST) { cudaError_t retval = cudaSuccess; typedef gunrock::app::EnactorBase<GraphT> EnactorT; EnactorT enactor("refactor"); retval = enactor.Init(parameters, 2, NULL, 1024, target); if (retval) return retval; retval = enactor.Reset(target); if (retval) return retval; retval = enactor.Release(target); if (retval) return retval; return retval; }*/ int main(int argc, char *argv[]) { // const SizeT DefaultSize = PreDefinedValues<SizeT>::InvalidValue; // Test_Array(); // Test_ForAll(); // Test_ForEach(); // Test_Csr(); // Test_GraphIo(argc, argv); // Test_Frontier(); cudaError_t retval = cudaSuccess; util::Parameters parameters("test refactor"); typedef TestGraph<VertexT, SizeT, ValueT, HAS_EDGE_VALUES> GraphT; GraphT graph; GUARD_CU(graphio::UseParameters(parameters)); // GUARD_CU(partitioner::UseParameters(parameters)); GUARD_CU(app::sssp::UseParameters(parameters)); GUARD_CU(app::sssp::UseParameters2(parameters)); GUARD_CU(parameters.Parse_CommandLine(argc, argv)); if (parameters.Get<bool>("help")) { parameters.Print_Help(); return cudaSuccess; } retval = parameters.Check_Required(); if (retval) return 5; retval = LoadGraph(parameters, graph); if (retval) return 11; // retval = Test_Partitioner(parameters, graph); // if (retval) return 12; /*retval = Test_SSSPProblem(parameters, graph, util::HOST); if (retval) return 13; util::PrintMsg("====Test on HOST finished"); retval = Test_SSSPProblem(parameters, graph, util::DEVICE); if (retval) return 14; util::PrintMsg("====Test on DEVICE finished"); retval = Test_SSSPProblem(parameters, graph, util::HOST | util::DEVICE); if (retval) return 15; util::PrintMsg("====Test on HOST | DEVICE finished");*/ /*retval = Test_EnactorBase(parameters, graph, util::HOST); if (retval) return 16; util::PrintMsg("====Test on HOST finished"); retval = Test_EnactorBase(parameters, graph, util::DEVICE); if (retval) return 17; util::PrintMsg("====Test on DEVICE finished"); retval = Test_EnactorBase(parameters, graph, util::HOST | util::DEVICE); if (retval) return 18; util::PrintMsg("====Test on HOST | DEVICE finished");*/ // retval = Test_SSSP(parameters, graph, util::HOST); // if (retval) return 16; // util::PrintMsg("====Test on HOST finished"); retval = Test_SSSP(parameters, graph, util::DEVICE); if (retval) return 17; util::PrintMsg("====Test on DEVICE finished"); // retval = Test_SSSP(parameters, graph, util::HOST | util::DEVICE); // if (retval) return 18; // util::PrintMsg("====Test on HOST | DEVICE finished"); return 0; }
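// A hedged sketch of timing the enactor with CUDA events, following the same
// ProblemT / EnactorT sequence as Test_SSSP above. The function name Time_SSSP,
// the fixed source vertex 0, and the util::DEVICE default are assumptions for
// illustration only; the Init / Reset / Enact / Release calls mirror the code
// above.
template <typename GraphT>
cudaError_t Time_SSSP(Parameters &parameters, GraphT &graph,
                      util::Location target = util::DEVICE) {
  cudaError_t retval = cudaSuccess;
  typedef gunrock::app::sssp::Problem<GraphT, unsigned char> ProblemT;
  typedef gunrock::app::sssp::Enactor<ProblemT> EnactorT;
  ProblemT problem;
  EnactorT enactor;

  retval = problem.Init(parameters, graph, target);    if (retval) return retval;
  retval = enactor.Init(parameters, &problem, target); if (retval) return retval;
  retval = problem.Reset(0, target);                   if (retval) return retval;
  retval = enactor.Reset(0, target);                   if (retval) return retval;

  // Bracket the traversal with CUDA events to measure elapsed GPU time.
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);
  retval = enactor.Enact(0);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);
  float elapsed_ms = 0.0f;
  cudaEventElapsedTime(&elapsed_ms, start, stop);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  util::PrintMsg("SSSP enact took " + std::to_string(elapsed_ms) + " ms");
  if (retval) return retval;

  retval = problem.Release(target);                    if (retval) return retval;
  retval = enactor.Release(target);                    if (retval) return retval;
  return retval;
}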
#include "octnet/gpu/pool.h" #include "octnet/gpu/gpu.h" #include <cstdio> #include <cstdlib> template <int level> __global__ void kernel_pool2x2x2_struct(octree out, int n_blocks, const octree in, ot_size_t feature_size) { CUDA_KERNEL_LOOP(grid_idx, n_blocks) { ot_tree_t* in_tree = octree_get_tree(&in, grid_idx); ot_tree_t* out_tree = octree_get_tree(&out, grid_idx); if(level == 0) { if(tree_isset_bit(in_tree, 0) && tree_cnt1(in_tree, 1, 9) == 0) { tree_unset_bit(out_tree, 0); } } if(level == 1) { if(tree_isset_bit(in_tree, 0)) { for(int bit_idx_l1 = 1; bit_idx_l1 < 9; ++bit_idx_l1) { int bit_idx_l2 = tree_child_bit_idx(bit_idx_l1); if(tree_isset_bit(in_tree, bit_idx_l1) && tree_cnt1(in_tree, bit_idx_l2, bit_idx_l2+8) == 0) { tree_unset_bit(out_tree, bit_idx_l1); } } } } if(level == 2) { if(tree_isset_bit(in_tree, 0)) { for(int bit_idx_l1 = 1; bit_idx_l1 < 9; ++bit_idx_l1) { if(tree_isset_bit(in_tree, bit_idx_l1)) { int bit_idx_l2 = tree_child_bit_idx(bit_idx_l1); for(int idx = 0; idx < 8; ++idx) { tree_unset_bit(out_tree, bit_idx_l2); bit_idx_l2++; } } } } } } } // template <int pool_fcn> // __global__ void kernel_pool2x2x2_data(octree out, int n_blocks, const octree in, ot_size_t feature_size) { // CUDA_KERNEL_LOOP(grid_idx, n_blocks) { // ot_tree_t* out_tree = octree_get_tree(&out, grid_idx); // ot_data_t* out_data = out.data_ptrs[grid_idx]; // ot_tree_t* in_tree = octree_get_tree(&in, grid_idx); // ot_data_t* in_data = in.data_ptrs[grid_idx]; // if(tree_isset_bit(in_tree, 0)) { // if(!tree_isset_bit(out_tree, 0)) { // octree_pool2x2x2<pool_fcn>(in_data, feature_size, out_data); // } // else { // for(int bit_idx_l1 = 1; bit_idx_l1 < 9; ++bit_idx_l1) { // int out_data_idx_l1 = tree_data_idx(out_tree, bit_idx_l1, feature_size); // if(tree_isset_bit(in_tree, bit_idx_l1)) { // if(!tree_isset_bit(out_tree, bit_idx_l1)) { // int in_data_idx = tree_data_idx(in_tree, tree_child_bit_idx(bit_idx_l1), feature_size); // octree_pool2x2x2<pool_fcn>(in_data + in_data_idx, feature_size, out_data + out_data_idx_l1); // } // else { // for(int idx_l2 = 0; idx_l2 < 8; ++idx_l2) { // int bit_idx_l2 = tree_child_bit_idx(bit_idx_l1) + idx_l2; // int out_data_idx_l2 = tree_data_idx(out_tree, bit_idx_l2, feature_size); // if(tree_isset_bit(in_tree, bit_idx_l2)) { // if(!tree_isset_bit(out_tree, bit_idx_l2)) { // int in_data_idx = tree_data_idx(in_tree, tree_child_bit_idx(bit_idx_l2), feature_size); // octree_pool2x2x2<pool_fcn>(in_data + in_data_idx, feature_size, out_data + out_data_idx_l2); // } // else { // int bit_idx_l3 = tree_child_bit_idx(bit_idx_l2); // int out_data_idx_l3 = tree_data_idx(out_tree, bit_idx_l3, feature_size); // int in_data_idx_l3 = tree_data_idx(in_tree, bit_idx_l3, feature_size); // octree_cpy_leaf(in_data + in_data_idx_l3, 8*feature_size, out_data + out_data_idx_l3); // } // } // else { // int in_data_idx = tree_data_idx(in_tree, bit_idx_l2, feature_size); // octree_cpy_leaf(in_data + in_data_idx, feature_size, out_data + out_data_idx_l2); // } // } // } // } // else { // int in_data_idx = tree_data_idx(in_tree, bit_idx_l1, feature_size); // octree_cpy_leaf(in_data + in_data_idx, feature_size, out_data + out_data_idx_l1); // } // } // } // } // else { // octree_cpy_leaf(in_data, feature_size, out_data); // } // }//grid_idx // } template <int pool_fcn> __global__ void kernel_pool2x2x2_data(octree out, int n_leafs, const octree in) { extern __shared__ ot_data_t out_shared[]; CUDA_KERNEL_LOOP(leaf_idx, n_leafs) { const int grid_idx = out.data[leaf_idx * out.feature_size]; // const int 
grid_idx = leaf_idx_to_grid_idx(&out, leaf_idx); const ot_tree_t* out_tree = octree_get_tree(&out, grid_idx); // const int cum_n_leafs = n_leafs_upto(&out, grid_idx); const int cum_n_leafs = out.prefix_leafs[grid_idx]; const int data_idx = leaf_idx - cum_n_leafs; const int bit_idx = data_idx_to_bit_idx(out_tree, data_idx); int out_data_idx = data_idx * out.feature_size; const ot_tree_t* in_tree = octree_get_tree(&in, grid_idx); if(bit_idx < 73 && tree_isset_bit(in_tree, bit_idx)) { int in_data_idx = tree_data_idx(in_tree, tree_child_bit_idx(bit_idx), out.feature_size); // octree_pool2x2x2<pool_fcn>(in.data_ptrs[grid_idx] + in_data_idx, out.feature_size, out.data_ptrs[grid_idx] + out_data_idx); octree_pool2x2x2<pool_fcn>(octree_get_data(&in, grid_idx) + in_data_idx, out.feature_size, octree_get_data(&out, grid_idx) + out_data_idx); } else { int in_data_idx = tree_data_idx(in_tree, bit_idx, out.feature_size); // octree_cpy_leaf(in.data_ptrs[grid_idx] + in_data_idx, out.feature_size, out.data_ptrs[grid_idx] + out_data_idx); octree_cpy_leaf(octree_get_data(&in, grid_idx) + in_data_idx, out.feature_size, octree_get_data(&out, grid_idx) + out_data_idx); } } } template <int pool_fcn> void octree_pool2x2x2_gpu(const octree* in, bool level_0, bool level_1, bool level_2, octree* out) { octree_resize_gpu(in->n, in->grid_depth, in->grid_height, in->grid_width, in->feature_size, 0, out); octree_cpy_trees_gpu_gpu(in, out); int n_blocks = octree_num_blocks(in); ot_size_t feature_size = in->feature_size; if(level_0) { kernel_pool2x2x2_struct<0><<<GET_BLOCKS(n_blocks), CUDA_NUM_THREADS>>>(*out, n_blocks, *in, feature_size); CUDA_POST_KERNEL_CHECK; } if(level_1) { kernel_pool2x2x2_struct<1><<<GET_BLOCKS(n_blocks), CUDA_NUM_THREADS>>>(*out, n_blocks, *in, feature_size); CUDA_POST_KERNEL_CHECK; } if(level_2) { kernel_pool2x2x2_struct<2><<<GET_BLOCKS(n_blocks), CUDA_NUM_THREADS>>>(*out, n_blocks, *in, feature_size); CUDA_POST_KERNEL_CHECK; } octree_upd_n_leafs_gpu(out); octree_resize_as_gpu(out, out); octree_upd_prefix_leafs_gpu(out); //Do the actual pooling octree_leaf_idx_to_grid_idx_gpu(out, out->feature_size, out->data_capacity, out->data); kernel_pool2x2x2_data<pool_fcn><<<GET_BLOCKS(out->n_leafs), CUDA_NUM_THREADS>>>( *out, out->n_leafs, *in ); CUDA_POST_KERNEL_CHECK; } template <int pool_fcn> __global__ void kernel_pool2x2x2_bwd(octree grad_in, int n_leafs, const octree in, const octree grad_out) { extern __shared__ ot_data_t out_shared[]; CUDA_KERNEL_LOOP(leaf_idx, n_leafs) { // const int grid_idx = out.data[leaf_idx * out.feature_size]; const int grid_idx = leaf_idx_to_grid_idx(&grad_out, leaf_idx); const ot_tree_t* out_tree = octree_get_tree(&grad_out, grid_idx); // const int cum_n_leafs = n_leafs_upto(&grad_out, grid_idx); const int cum_n_leafs = grad_out.prefix_leafs[grid_idx]; const int data_idx = leaf_idx - cum_n_leafs; const int bit_idx = data_idx_to_bit_idx(out_tree, data_idx); int out_data_idx = data_idx * grad_out.feature_size; const ot_tree_t* in_tree = octree_get_tree(&in, grid_idx); if(bit_idx < 73 && tree_isset_bit(in_tree, bit_idx)) { int in_data_idx = tree_data_idx(in_tree, tree_child_bit_idx(bit_idx), grad_out.feature_size); // octree_pool2x2x2_bwd<pool_fcn>(in.data_ptrs[grid_idx] + in_data_idx, grad_out.data_ptrs[grid_idx] + out_data_idx, grad_out.feature_size, grad_in.data_ptrs[grid_idx] + in_data_idx); octree_pool2x2x2_bwd<pool_fcn>(octree_get_data(&in, grid_idx) + in_data_idx, octree_get_data(&grad_out, grid_idx) + out_data_idx, grad_out.feature_size, octree_get_data(&grad_in, 
grid_idx) + in_data_idx); } else { int in_data_idx = tree_data_idx(in_tree, bit_idx, grad_out.feature_size); // octree_cpy_leaf(grad_out.data_ptrs[grid_idx] + out_data_idx, grad_out.feature_size, grad_in.data_ptrs[grid_idx] + in_data_idx); octree_cpy_leaf(octree_get_data(&grad_out, grid_idx) + out_data_idx, grad_out.feature_size, octree_get_data(&grad_in, grid_idx) + in_data_idx); } } } template <int pool_fcn> void octree_pool2x2x2_bwd_gpu(const octree* in, const octree* grad_out, octree* grad_in) { octree_resize_as_gpu(in, grad_in); octree_cpy_trees_gpu_gpu(in, grad_in); octree_cpy_prefix_leafs_gpu_gpu(in, grad_in);; //Do the actual pooling bwd kernel_pool2x2x2_bwd<pool_fcn><<<GET_BLOCKS(grad_out->n_leafs), CUDA_NUM_THREADS>>>( *grad_in, grad_out->n_leafs, *in, *grad_out ); CUDA_POST_KERNEL_CHECK; } extern "C" void octree_pool2x2x2_avg_gpu(const octree* in, bool level_0, bool level_1, bool level_2, octree* out) { if(DEBUG) { printf("[DEBUG] octree_pool2x2x2_avg_gpu\n"); } octree_pool2x2x2_gpu<REDUCE_AVG>(in, level_0, level_1, level_2, out); } extern "C" void octree_pool2x2x2_max_gpu(const octree* in, bool level_0, bool level_1, bool level_2, octree* out) { if(DEBUG) { printf("[DEBUG] octree_pool2x2x2_max_gpu\n"); } octree_pool2x2x2_gpu<REDUCE_MAX>(in, level_0, level_1, level_2, out); } extern "C" void octree_pool2x2x2_avg_bwd_gpu(const octree* in, const octree* grad_out, octree* grad_in) { if(DEBUG) { printf("[DEBUG] octree_pool2x2x2_avg_bwd_gpu\n"); } octree_pool2x2x2_bwd_gpu<REDUCE_AVG>(in, grad_out, grad_in); } extern "C" void octree_pool2x2x2_max_bwd_gpu(const octree* in, const octree* grad_out, octree* grad_in) { if(DEBUG) { printf("[DEBUG] octree_pool2x2x2_max_bwd_gpu\n"); } octree_pool2x2x2_bwd_gpu<REDUCE_MAX>(in, grad_out, grad_in); }
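// A plain CPU reference sketch of the reduction assumed to be performed by the
// octree_pool2x2x2<REDUCE_AVG|REDUCE_MAX> device helper used above: the feature
// vectors of the 8 child leafs, assumed to be stored contiguously
// (8 * feature_size values, as suggested by the octree_cpy_leaf calls with
// 8 * feature_size), are reduced into one parent feature vector. The name
// pool2x2x2_ref and the use_max flag are illustrative only.
inline void pool2x2x2_ref(const ot_data_t* child_data, int feature_size,
                          bool use_max, ot_data_t* parent_data) {
  for (int f = 0; f < feature_size; ++f) {
    ot_data_t acc = child_data[f];
    for (int child = 1; child < 8; ++child) {
      ot_data_t v = child_data[child * feature_size + f];
      if (use_max)
        acc = (v > acc) ? v : acc;   // max-pooling over the 8 children
      else
        acc += v;                    // accumulate for average-pooling
    }
    parent_data[f] = use_max ? acc : acc / 8;
  }
}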
#include "common.cuh" #include <kat/on_device/miscellany.cuh> #include <kat/utility.hpp> // #include <gsl/span> #include <limits> template <typename T> KAT_DEV bool single_swap_test(T x, T y) { T x_ { x }; T y_ { y }; kat::swap<T>(x_, y_); return (x == y_) and (y == x_); } struct copy_testcase_spec { std::size_t dst_offset; std::size_t src_offset; std::size_t num; }; START_COUNTING_LINES(num_swap_checks); namespace kernels { template <typename T> __global__ void swap_tests(bool* results) { // bool print_first_indices_for_each_function { false }; auto i = 0; // auto maybe_print = [&](const char* section_title) { // if (print_first_indices_for_each_function) { // printf("%-30s tests start at index %3d\n", section_title, i); // } // }; // // maybe_print("single_swap_test"); results[i++] = single_swap_test<T>(T{0}, T{0}); COUNT_THIS_LINE results[i++] = single_swap_test<T>(T{1}, T{1}); COUNT_THIS_LINE results[i++] = single_swap_test<T>(T{0}, T{1}); COUNT_THIS_LINE results[i++] = single_swap_test<T>(T{1}, T{2}); COUNT_THIS_LINE results[i++] = single_swap_test<T>(T{1}, T{0}); COUNT_THIS_LINE results[i++] = single_swap_test<T>(T{2}, T{1}); COUNT_THIS_LINE results[i++] = single_swap_test<T>(std::numeric_limits<T>::min(), std::numeric_limits<T>::max()); COUNT_THIS_LINE results[i++] = single_swap_test<T>(std::numeric_limits<T>::min(), T{0}); COUNT_THIS_LINE results[i++] = single_swap_test<T>(T{0}, std::numeric_limits<T>::min()); COUNT_THIS_LINE results[i++] = single_swap_test<T>(std::numeric_limits<T>::max(), T{0}); COUNT_THIS_LINE results[i++] = single_swap_test<T>(T{0}, std::numeric_limits<T>::max()); COUNT_THIS_LINE } } // namespace kernels FINISH_COUNTING_LINES(num_swap_checks); namespace kernels { template <typename T> __global__ void copy_tests( T* __restrict__ copy_destination_buffer, const T* __restrict__ test_data, const copy_testcase_spec* __restrict__ test_cases, std::size_t num_test_cases) { // Use the following two lines to limit the execution to a single // testcase and ignore all the others. They may fail, but if you want // to debug the run of a single one without interruption from // the others... this will be useful. // enum { run_all_cases = -1 }; // static const int single_testcase_to_run { 1 }; // 0-based! for(std::size_t i = 0; i < num_test_cases; i++) { // if ((single_testcase_to_run != run_all_cases) and (single_testcase_to_run != i)) { continue; } auto test_case = test_cases[i]; kat::copy( copy_destination_buffer + test_case.dst_offset, test_data + test_case.src_offset, test_case.num ); } } } // namespace kernels // Note: Use CHECK_MESSAGE! 
constexpr const std::size_t copy_alignment_quantum { 256 }; std::vector<copy_testcase_spec> make_copy_testcases() { std::vector<copy_testcase_spec> testcase_specs; copy_testcase_spec tc; auto make_offset = [&](std::size_t extra_offset) { return copy_alignment_quantum * testcase_specs.size() + extra_offset; }; tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 0; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 1; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 2; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 3; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 4; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 61; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 62; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 63; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 64; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 0; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 1; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 2; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 3; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 4; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 61; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 62; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 63; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 64; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 0; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 1; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 2; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 3; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 4; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 61; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 62; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 63; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 64; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 0; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 1; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 2; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = 
make_offset(1); tc.num = 3; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 4; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 61; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 62; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 63; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 64; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 0; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 1; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 2; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 3; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 4; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 61; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 62; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 63; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 64; testcase_specs.push_back(tc); // --- tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 0; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 1; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 2; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 3; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 4; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 61; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 62; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 63; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 64; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 0; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 1; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 2; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 3; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 4; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 61; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 62; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 63; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 64; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 0; 
testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 1; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 2; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 3; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 4; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 61; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 62; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 63; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 64; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 0; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 1; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 2; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 3; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 4; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 61; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 62; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 63; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 64; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 0; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 1; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 2; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 3; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 4; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 61; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 62; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 63; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 64; testcase_specs.push_back(tc); // --- tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 0; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 1; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 2; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 3; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 4; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 61; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 62; testcase_specs.push_back(tc); 
tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 63; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 64; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 0; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 1; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 2; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 3; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 4; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 61; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 62; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 63; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 64; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 0; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 1; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 2; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 3; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 4; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 61; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 62; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 63; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 64; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 0; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 1; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 2; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 3; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 4; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 61; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 62; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 63; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 64; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 0; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 1; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 2; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 3; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); 
tc.src_offset = make_offset(3); tc.num = 4; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 61; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 62; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 63; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 64; testcase_specs.push_back(tc); // --- tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 0; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 1; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 2; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 3; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 4; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 61; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 62; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 63; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 64; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 0; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 1; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 2; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 3; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 4; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 61; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 62; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 63; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 64; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 0; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 1; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 2; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 3; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 4; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 61; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 62; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 63; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 64; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 0; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); 
tc.num = 1; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 2; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 3; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 4; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 61; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 62; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 63; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 64; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 0; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 1; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 2; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 3; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 4; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 61; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 62; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 63; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 64; testcase_specs.push_back(tc); // --- tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 0; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 1; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 2; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 3; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 4; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 61; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 62; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 63; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(0); tc.num = 64; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 0; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 1; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 2; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 3; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 4; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 61; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 62; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 63; 
testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(4); tc.num = 64; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 0; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 1; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 2; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 3; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 4; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 61; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 62; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 63; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(2); tc.num = 64; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 0; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 1; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 2; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 3; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 4; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 61; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 62; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 63; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(1); tc.num = 64; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 0; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 1; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 2; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 3; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 4; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 61; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 62; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 63; testcase_specs.push_back(tc); tc.dst_offset = make_offset(1); tc.src_offset = make_offset(3); tc.num = 64; testcase_specs.push_back(tc); return testcase_specs; } template <typename T, typename C> std::vector<T> generate_copy_test_data(const C& testcases) { auto buffer_length_in_elements = copy_alignment_quantum * sizeof(T) * testcases.size(); std::vector<T> test_data; test_data.reserve(buffer_length_in_elements); util::random::insertion_generate_n( std::back_inserter(test_data), buffer_length_in_elements, // So, we don't need all of the data, but let's make this code simpler rather than generate less util::random::uniform_distribution<T>{} ); // Enable the following for 
debugging - it will generate the testcase index // in the places to be copied, and 0 elsewhere // std::vector<T> test_data(buffer_length_in_elements); // std::fill(test_data.begin(), test_data.end(), 0); // for(auto it = testcases.begin(); it < testcases.end(); it++) { // auto idx = std::distance(testcases.begin(), it); // auto tc = *it; // std::fill_n(test_data.data() + tc.src_offset, tc.num, idx + 1); // // Note: There may be less possible values of idx in type T than testcases to run... // } return test_data; } TEST_SUITE("miscellany") { // TODO: Consider using larger-than-64-bit types, classes, etc. TEST_CASE_TEMPLATE("test swap", T, int8_t, int16_t, int32_t, int32_t, int64_t, float, double ) { cuda::device_t device { cuda::device::current::get() }; cuda::launch_configuration_t launch_config { cuda::grid::dimensions_t::point(), cuda::grid::dimensions_t::point() }; auto device_side_results { cuda::memory::device::make_unique<bool[]>(device, num_swap_checks) }; auto host_side_results { std::unique_ptr<bool[]>(new bool[num_swap_checks]) }; cuda::launch( kernels::swap_tests<T>, launch_config, device_side_results.get()); cuda::memory::copy(host_side_results.get(), device_side_results.get(), sizeof(bool) * num_swap_checks); for(auto i = 0 ; i < num_swap_checks; i++) { if (not host_side_results.get()[i]) { MESSAGE("The " << i << xth(i) << " swap check failed."); } } } TEST_CASE_TEMPLATE("test num_warps_to_cover", I, int8_t, int16_t, int32_t, uint32_t, int64_t ) { using std::size_t; auto irrelevant = [](size_t x) { return x > std::numeric_limits<I>::max(); }; using kat::num_warp_sizes_to_cover; size_t num; num = 0; if (not irrelevant(num)) { CHECK(num_warp_sizes_to_cover<I>(num) == 0); } num = 1; if (not irrelevant(num)) { CHECK(num_warp_sizes_to_cover<I>(num) == 1); } num = 2; if (not irrelevant(num)) { CHECK(num_warp_sizes_to_cover<I>(num) == 1); } num = 3; if (not irrelevant(num)) { CHECK(num_warp_sizes_to_cover<I>(num) == 1); } num = 31; if (not irrelevant(num)) { CHECK(num_warp_sizes_to_cover<I>(num) == 1); } num = 32; if (not irrelevant(num)) { CHECK(num_warp_sizes_to_cover<I>(num) == 1); } num = 33; if (not irrelevant(num)) { CHECK(num_warp_sizes_to_cover<I>(num) == 2); } num = 1023; if (not irrelevant(num)) { CHECK(num_warp_sizes_to_cover<I>(num) == 32); } num = 1024; if (not irrelevant(num)) { CHECK(num_warp_sizes_to_cover<I>(num) == 32); } num = 1025; if (not irrelevant(num)) { CHECK(num_warp_sizes_to_cover<I>(num) == 33); } num = (size_t{1} << 31) - 1; if (not irrelevant(num)) { CHECK(num_warp_sizes_to_cover<I>(num) == size_t{1} << 26); } num = size_t{1} << 31 ; if (not irrelevant(num)) { CHECK(num_warp_sizes_to_cover<I>(num) == size_t{1} << 26); } num = (size_t{1} << 31) + 1; if (not irrelevant(num)) { CHECK(num_warp_sizes_to_cover<I>(num) == (size_t{1} << 26) + 1); } num = (size_t{1} << 32) - 1; if (not irrelevant(num)) { CHECK(num_warp_sizes_to_cover<I>(num) == size_t{1} << 27); } num = size_t{1} << 32 ; if (not irrelevant(num)) { CHECK(num_warp_sizes_to_cover<I>(num) == size_t{1} << 27); } num = (size_t{1} << 32) + 1; if (not irrelevant(num)) { CHECK(num_warp_sizes_to_cover<I>(num) == (size_t{1} << 27) + 1); } } TEST_CASE_TEMPLATE("test native-word copy", T, uint8_t, int16_t, int32_t, uint32_t, int64_t ,float, double ) { std::size_t testcase_index { 0 }; bool cuda_calls_complete { false }; // TODO: Try this with element sizes which aren't powers of 2, and kat::array types auto host_side_testcases = make_copy_testcases(); // Note: The testcases are assumed to have num + offset 
lower than the quantum auto buffer_length_in_elements = copy_alignment_quantum * sizeof(T) * host_side_testcases.size(); auto host_side_test_data = generate_copy_test_data<T>(host_side_testcases); try { cuda::device_t device { cuda::device::current::get() }; auto device_side_test_data { cuda::memory::device::make_unique<T[]>(device, buffer_length_in_elements) }; auto device_side_copy_target { cuda::memory::device::make_unique<T[]>(device, buffer_length_in_elements) }; auto host_side_copy_target { std::unique_ptr<T[]>(new T[buffer_length_in_elements]) }; auto device_side_testcases { cuda::memory::device::make_unique<copy_testcase_spec[]>(device, host_side_testcases.size()) }; auto launch_config { single_thread_launch_config() }; cuda::memory::copy(device_side_test_data.get(), host_side_test_data.data(), sizeof(T) * buffer_length_in_elements); cuda::memory::device::zero(device_side_copy_target.get(), sizeof(T) * buffer_length_in_elements); cuda::memory::copy(device_side_testcases.get(), host_side_testcases.data(), sizeof(copy_testcase_spec) * host_side_testcases.size()); cuda::launch( kernels::copy_tests<T>, launch_config, device_side_copy_target.get(), device_side_test_data.get(), device_side_testcases.get(), host_side_testcases.size() ); cuda::outstanding_error::ensure_none(); cuda::memory::copy(host_side_copy_target.get(), device_side_copy_target.get(), sizeof(T) * buffer_length_in_elements); cuda_calls_complete = true; std::stringstream ss; for(; testcase_index < host_side_testcases.size(); testcase_index++) { auto tc = host_side_testcases[testcase_index]; // gsl::span<T> test_data(host_side_test_data.data() + tc.src_offset, tc.num); // gsl::span<T> copy_destination(host_side_copy_target.get() + tc.dst_offset, tc.num); std::vector<T> test_data{host_side_test_data.data() + tc.src_offset, host_side_test_data.data() + tc.src_offset + tc.num}; std::vector<T> copy_destination{host_side_copy_target.get() + tc.dst_offset, host_side_copy_target.get() + tc.dst_offset + tc.num}; auto first_mismatch = std::mismatch(test_data.begin(), test_data.end(), copy_destination.begin() ); if (first_mismatch.first != test_data.end()) { // gsl::span<T> test_data_quantum(host_side_test_data.data() + testcase_index * copy_alignment_quantum, copy_alignment_quantum); // gsl::span<T> copy_destination_quantum(host_side_copy_target.get() + testcase_index * copy_alignment_quantum, copy_alignment_quantum); // std::cout << "Testcase " << std::setw(3) << (testcase_index+1) << " quantum within test data: "; // for(auto x : test_data_quantum) { // std::cout << promote_for_streaming(x) << ' '; // } // std::cout << '\n'; // std::cout << "Testcase " << std::setw(3) << (testcase_index+1) << " quantum within copy buffer: "; // for(auto x : copy_destination_quantum) { // std::cout << promote_for_streaming(x) << ' '; // } // std::cout << "\n\n"; ss.str(""); // clear the stream ss << "Testcase " << std::setw(3) << (testcase_index+1) << " (1-based; " << "src at +" << (tc.src_offset % copy_alignment_quantum) << ", " << "dest at +" << (tc.dst_offset % copy_alignment_quantum) << ", " << std::setw(3) << tc.num << " elements): " << "at index " << std::setw(3) << (first_mismatch.first - test_data.begin()) << ", values " << promote_for_streaming(*(first_mismatch.first)) << " != " << promote_for_streaming(*(first_mismatch.second)) ; } std::string message = ss.str(); CHECK_MESSAGE(first_mismatch.first == test_data.end(), message); // TODO: Ensure the rest of the alignment quantum in the copy destination is all 0's } } catch(std::exception& 
err) { std::stringstream message; message << "An exception occurred " << (cuda_calls_complete ? "after" : "before") << " the copy, launch and copy back calls were completed, and before testcase " << testcase_index << " was completed. The error was: " << err.what(); FAIL(message.str()); } } } // TEST_SUITE("miscellany")
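// --- Editor's illustrative sketch (not part of the original test file above) ---
// The "test num_warps_to_cover" checks all encode the same rule: the number of
// warp-sized chunks needed to cover n elements is the ceiling of n / 32. A minimal
// host-side reference under that assumption (the helper name below is hypothetical,
// not a kat API):
#include <cstddef>

constexpr std::size_t reference_num_warp_sizes_to_cover(std::size_t n)
{
    return (n + 31) / 32;   // assumes warp size 32; e.g. 33 -> 2, 1024 -> 32, 1025 -> 33
}
// kat::num_warp_sizes_to_cover<I>() is expected to agree with this reference for any
// n representable in the integer type I.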
// TEST make sure boost isn't included in nvcc code #if defined(BOOST_COMPILER) int bla[-1]; #endif namespace PyCA { __global__ void SubVol_kernel(float* d_o, const float* d_i, int osizeX, int osizeY, int osizeZ, int isizeX, int isizeY, int isizeZ, int startX, int startY, int startZ) { uint o_x = blockIdx.x * blockDim.x + threadIdx.x; uint o_y = blockIdx.y * blockDim.y + threadIdx.y; if (o_x < osizeX && o_y < osizeY){ int o_id = o_x + osizeX * o_y; int o_wh = osizeX*osizeY; int i_x = o_x+startX; int i_y = o_y+startY; if ((i_x >= 0 && i_x < isizeX) && (i_y >= 0 && i_y < isizeY)) { int i_wh = isizeX*isizeY; int i_last = i_wh*isizeZ; int i_id = i_x + isizeX * i_y + i_wh*startZ; for (int o_z=0; o_z < osizeZ; ++o_z, o_id += o_wh, i_id += i_wh) { float v = 0.f; if(i_id >= 0 && i_id < i_last) v = d_i[i_id]; d_o[o_id] = v; } }else{ for (int o_z=0; o_z < osizeZ; ++o_z, o_id += o_wh) { d_o[o_id] = 0.f; } } } } void SubVol(float* d_o,const float* d_i, const Vec3Di& oSize, const Vec3Di& iSize, const Vec3Di& start, StreamT st) { dim3 threads(16,16); dim3 grids(iDivUp(oSize.x, threads.x), iDivUp(oSize.y, threads.y)); SubVol_kernel<<<grids, threads, 0, st>>> (d_o, d_i, oSize.x, oSize.y, oSize.z, iSize.x, iSize.y, iSize.z, start.x, start.y, start.z); } __global__ void SetSubVol_I_kernel(float* d_o, const float* d_i, int osizeX, int osizeY, int osizeZ, int isizeX, int isizeY, int isizeZ, int startX, int startY, int startZ) { uint i_x = blockIdx.x * blockDim.x + threadIdx.x; uint i_y = blockIdx.y * blockDim.y + threadIdx.y; if (i_x < isizeX && i_y < isizeY){ int i_id = i_x + isizeX * i_y; int i_wh = isizeX*isizeY; int o_x = i_x+startX; int o_y = i_y+startY; if ((o_x >= 0 && o_x < osizeX) && (o_y >= 0 && o_y < osizeY)) { int o_wh = osizeX*osizeY; int istartZ = PYCAMAX(0,-startZ); int iendZ = PYCAMIN(isizeZ,osizeZ-startZ); int o_id = o_x + osizeX * o_y + o_wh*PYCAMAX(startZ,0); for (int i_z=istartZ; i_z < iendZ; ++i_z, i_id += i_wh, o_id += o_wh) { d_o[o_id] = d_i[i_id]; } } } } void SetSubVol_I(float* d_o,const float* d_i, const Vec3Di& oSize, const Vec3Di& iSize, const Vec3Di& start, StreamT st) { dim3 threads(16,16); dim3 grids(iDivUp(iSize.x, threads.x), iDivUp(iSize.y, threads.y)); SetSubVol_I_kernel<<<grids, threads, 0, st>>> (d_o, d_i, oSize.x, oSize.y, oSize.z, iSize.x, iSize.y, iSize.z, start.x, start.y, start.z); } __global__ void Shrink_kernel(float* d_o, const float* d_i, float c, int szX, int szY, int szZ) { uint x = blockIdx.x * blockDim.x + threadIdx.x; uint y = blockIdx.y * blockDim.y + threadIdx.y; int wh = szX*szY; if (x < szX && y < szY){ int id = x + y*szX; for (int z=0; z < szZ; ++z, id += wh){ float v = d_i[id]; d_o[id] = PYCAMAX(v-c,0.f)+PYCAMIN(v+c,0.f); } } } void Shrink(float *d_o, const float *d_i, const Vec3Di &sz, float c, StreamT st) { dim3 threads(16,16); dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y)); Shrink_kernel<<<grids, threads, 0, st>>> (d_o, d_i, c, sz.x, sz.y, sz.z); } __global__ void SoftAbs_kernel(float* d_o, const float* d_i, float eps, int szX, int szY, int szZ) { uint x = blockIdx.x * blockDim.x + threadIdx.x; uint y = blockIdx.y * blockDim.y + threadIdx.y; int wh = szX*szY; if (x < szX && y < szY){ int id = x + y*szX; for (int z=0; z < szZ; ++z, id += wh){ float v = d_i[id]; if(v < -eps){ d_o[id] = -v-eps/2.f; }else if(v > eps){ d_o[id] = v-eps/2.f; }else{ d_o[id] = (v*v)/(2.f*eps); } } } } void SoftAbs(float *d_o, const float *d_i, const Vec3Di &sz, float eps, StreamT st) { dim3 threads(16,16); dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, 
threads.y)); SoftAbs_kernel<<<grids, threads, 0, st>>> (d_o, d_i, eps, sz.x, sz.y, sz.z); } __global__ void SoftSgn_kernel(float* d_o, const float* d_i, float eps, int szX, int szY, int szZ) { uint x = blockIdx.x * blockDim.x + threadIdx.x; uint y = blockIdx.y * blockDim.y + threadIdx.y; int wh = szX*szY; if (x < szX && y < szY){ int id = x + y*szX; for (int z=0; z < szZ; ++z, id += wh){ float v = d_i[id]; if(v < -eps){ d_o[id] = -1.f; }else if(v > eps){ d_o[id] = 1.f; }else{ d_o[id] = v/eps; } } } } void SoftSgn(float *d_o, const float *d_i, const Vec3Di &sz, float eps, StreamT st) { dim3 threads(16,16); dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y)); SoftSgn_kernel<<<grids, threads, 0, st>>> (d_o, d_i, eps, sz.x, sz.y, sz.z); } template<BackgroundStrategy bg, InterpT interp, bool useOriginOffset> __global__ void Resampling_kernel(float* d_o, const float* d_i, int osizeX, int osizeY, int osizeZ, int isizeX, int isizeY, int isizeZ) { uint x = blockIdx.x * blockDim.x + threadIdx.x; uint y = blockIdx.y * blockDim.y + threadIdx.y; float rX = (float)isizeX / (float)osizeX; float rY = (float)isizeY / (float)osizeY; float rZ = (float)isizeZ / (float)osizeZ; float offX=0.f, offY=0.f, offZ=0.f; if(useOriginOffset){ offX = (rX-1.f)/2.f; offY = (rY-1.f)/2.f; offZ = (rZ-1.f)/2.f; } if (x < osizeX && y < osizeY){ int id = x + osizeX * y; float i_x = x * rX + offX; float i_y = y * rY + offY; for (int z=0; z < osizeZ; ++z, id += osizeX * osizeY){ float i_z = z * rZ + offZ; d_o[id] = point_interp<interp, bg> (d_i, i_x, i_y, i_z, isizeX, isizeY, isizeZ); } } } template<BackgroundStrategy bg, InterpT interp, bool useOriginOffset> void Resample(float *d_o, const Vec3Di &oSz, const float*d_i, const Vec3Di &iSz, StreamT stream) { MK_CHECK_IMAGE_BACKGROUND(bg); dim3 threads(16,16); dim3 grids(iDivUp(oSz.x, threads.x), iDivUp(oSz.y, threads.y)); Resampling_kernel<bg, interp, useOriginOffset> <<<grids, threads, 0, stream>>>(d_o, d_i, oSz.x ,oSz.y ,oSz.z, iSz.x ,iSz.y ,iSz.z); } template<BackgroundStrategy bg, InterpT interp> __global__ void ResampleWorld_kernel(float* d_o, const float* d_i, int oSzX, int oSzY, int oSzZ, float oSpX, float oSpY, float oSpZ, float oOrX, float oOrY, float oOrZ, int iSzX, int iSzY, int iSzZ, float iSpX, float iSpY, float iSpZ, float iOrX, float iOrY, float iOrZ) { uint x = blockIdx.x * blockDim.x + threadIdx.x; uint y = blockIdx.y * blockDim.y + threadIdx.y; float rX = oSpX/iSpX; float rY = oSpY/iSpY; float rZ = oSpZ/iSpZ; float oX = (oOrX-iOrX)/iSpX; float oY = (oOrY-iOrY)/iSpY; float oZ = (oOrZ-iOrZ)/iSpZ; if (x < oSzX && y < oSzY){ int id = x + oSzX * y; float i_x = x*rX + oX; float i_y = y*rY + oY; for (int z=0; z < oSzZ; ++z, id += oSzX * oSzY){ float i_z = z*rZ + oZ; d_o[id] = point_interp<interp, bg>(d_i, i_x, i_y, i_z, iSzX, iSzY, iSzZ); } } } template<BackgroundStrategy bg, InterpT interp> void ResampleWorld(float *d_o, const Vec3Di &oSz, const Vec3Df &oSp, const Vec3Df &oOr, const float *d_i, const Vec3Di &iSz, const Vec3Df &iSp, const Vec3Df &iOr, StreamT stream) { MK_CHECK_IMAGE_BACKGROUND(bg); dim3 threads(16,16); dim3 grids(iDivUp(oSz.x, threads.x), iDivUp(oSz.y, threads.y)); ResampleWorld_kernel<bg, interp><<<grids, threads, 0, stream>>> (d_o, d_i, oSz.x, oSz.y, oSz.z, oSp.x, oSp.y, oSp.z, oOr.x, oOr.y, oOr.z, iSz.x, iSz.y, iSz.z, iSp.x, iSp.y, iSp.z, iOr.x, iOr.y, iOr.z); } template<BackgroundStrategy bg> __global__ void SplatWorld_kernel(float* d_o, const float* d_i, int oSzX, int oSzY, int oSzZ, float oSpX, float oSpY, float oSpZ, float oOrX, float 
oOrY, float oOrZ, int iSzX, int iSzY, int iSzZ, float iSpX, float iSpY, float iSpZ, float iOrX, float iOrY, float iOrZ) { uint x = blockIdx.x * blockDim.x + threadIdx.x; uint y = blockIdx.y * blockDim.y + threadIdx.y; float rX = iSpX/oSpX; float rY = iSpY/oSpY; float rZ = iSpZ/oSpZ; float oX = (iOrX-oOrX)/oSpX; float oY = (iOrY-oOrY)/oSpY; float oZ = (iOrZ-oOrZ)/oSpZ; if (x < iSzX && y < iSzY){ int id = x + iSzX * y; float i_x = x*rX + oX; float i_y = y*rY + oY; for (int z=0; z < iSzZ; ++z, id += iSzX * iSzY){ float i_z = z*rZ + oZ; #if __CUDA_ARCH__ >= 200 // floating point atomics supported only by Fermi and above Splatting::atomicSplatFloat(d_o, d_i[id], i_x, i_y, i_z, oSzX, oSzY, oSzZ); #else Splatting::atomicSplat(reinterpret_cast<int*>(d_o), d_i[id], i_x, i_y, i_z, oSzX, oSzY, oSzZ); #endif } } } template<BackgroundStrategy bg> __global__ void SplatWorld_kernel(float* d_o, const float* d_i, float* d_w, int oSzX, int oSzY, int oSzZ, float oSpX, float oSpY, float oSpZ, float oOrX, float oOrY, float oOrZ, int iSzX, int iSzY, int iSzZ, float iSpX, float iSpY, float iSpZ, float iOrX, float iOrY, float iOrZ) { uint x = blockIdx.x * blockDim.x + threadIdx.x; uint y = blockIdx.y * blockDim.y + threadIdx.y; float rX = iSpX/oSpX; float rY = iSpY/oSpY; float rZ = iSpZ/oSpZ; float oX = (iOrX-oOrX)/oSpX; float oY = (iOrY-oOrY)/oSpY; float oZ = (iOrZ-oOrZ)/oSpZ; if (x < iSzX && y < iSzY){ int id = x + iSzX * y; float i_x = x*rX + oX; float i_y = y*rY + oY; for (int z=0; z < iSzZ; ++z, id += iSzX * iSzY){ float i_z = z*rZ + oZ; #if __CUDA_ARCH__ >= 200 // floating point atomics supported only by Fermi and above Splatting::atomicSplatFloat(d_o, d_w, d_i[id], i_x, i_y, i_z, oSzX, oSzY, oSzZ); #else Splatting::atomicSplat(reinterpret_cast<int*>(d_o), reinterpret_cast<int*>(d_w), d_i[id], i_x, i_y, i_z, oSzX, oSzY, oSzZ); #endif } } } template<BackgroundStrategy bg> void SplatWorld(float *d_o, const Vec3Di &oSz, const Vec3Df &oSp, const Vec3Df &oOr, const float *d_i, const Vec3Di &iSz, const Vec3Df &iSp, const Vec3Df &iOr, StreamT stream) { MK_CHECK_IMAGE_BACKGROUND(bg); dim3 threads(16,16); dim3 grids(iDivUp(oSz.x, threads.x), iDivUp(oSz.y, threads.y)); SplatWorld_kernel<bg><<<grids, threads, 0, stream>>> (d_o, d_i, oSz.x, oSz.y, oSz.z, oSp.x, oSp.y, oSp.z, oOr.x, oOr.y, oOr.z, iSz.x, iSz.y, iSz.z, iSp.x, iSp.y, iSp.z, iOr.x, iOr.y, iOr.z); } template<BackgroundStrategy bg> void SplatWorld(float *d_o, const Vec3Di &oSz, const Vec3Df &oSp, const Vec3Df &oOr, const float *d_i, const Vec3Di &iSz, const Vec3Df &iSp, const Vec3Df &iOr, float *d_w, StreamT stream) { MK_CHECK_IMAGE_BACKGROUND(bg); dim3 threads(16,16); dim3 grids(iDivUp(oSz.x, threads.x), iDivUp(oSz.y, threads.y)); SplatWorld_kernel<bg><<<grids, threads, 0, stream>>> (d_o, d_i, d_w, oSz.x, oSz.y, oSz.z, oSp.x, oSp.y, oSp.z, oOr.x, oOr.y, oOr.z, iSz.x, iSz.y, iSz.z, iSp.x, iSp.y, iSp.z, iOr.x, iOr.y, iOr.z); } /** * Note: this should be implemented with the kernel copied into shared memory * for small kernels, which will be the standard use case * jsp2012 */ __global__ void Convolve_kernel(float* d_o, const float* d_i, const float* d_kernel, int iSzX, int iSzY, int iSzZ, int kSzX, int kSzY, int kSzZ) { uint cx = blockIdx.x * blockDim.x + threadIdx.x; uint cy = blockIdx.y * blockDim.y + threadIdx.y; if (cx < iSzX && cy < iSzY){ int halfSzX = kSzX/2; int halfSzY = kSzY/2; int halfSzZ = kSzZ/2; for (int cz=0; cz<iSzZ; ++cz){ // loop over offsets in kernel float v = 0.f; for (int oz=-halfSzZ; oz <= halfSzZ; ++oz) { for (int oy=-halfSzY; oy <= 
halfSzY; ++oy) { for (int ox=-halfSzX; ox <= halfSzX; ++ox) { float kv = getVal<float> (d_kernel, kSzX,kSzY,kSzZ, ox+halfSzX,oy+halfSzY,oz+halfSzZ); float iv = getSafeVal<float,BACKGROUND_STRATEGY_CLAMP> (d_i, iSzX,iSzY,iSzZ, cx+ox,cy+oy,cz+oz); v += kv*iv; } } } getVal<float>(d_o,iSzX,iSzY,iSzZ,cx,cy,cz) = v; } } } void Convolve(float *d_o, const float *d_i, const Vec3Di &sz, const float *d_kernel, const Vec3Di &kSz, StreamT stream) { dim3 threads(16,16); dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y)); if(kSz.x%2 == 0 || kSz.y%2 == 0 || kSz.z%2 == 0){ throw PyCAException(__FILE__, __LINE__, "Only odd-sized kernels allowed in convolution"); } Convolve_kernel<<<grids, threads, 0, stream>>> (d_o, d_i, d_kernel, sz.x, sz.y, sz.z, kSz.x, kSz.y, kSz.z); } // template instantiations #include "GImageOperKernels_inst.cxx" } // end namespace PyCA
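// --- Editor's illustrative sketch (not part of the original PyCA source above) ---
// Host-side reference formulas for the element-wise kernels defined above, useful for
// checking GPU results on the CPU. The *_ref names are hypothetical helpers, not PyCA API:
#include <cmath>

static inline float shrink_ref(float v, float c)      // soft-thresholding
{ return std::fmax(v - c, 0.f) + std::fmin(v + c, 0.f); }

static inline float softabs_ref(float v, float eps)   // smoothed |v| (Huber-like)
{ return (v < -eps) ? -v - eps / 2.f
       : (v >  eps) ?  v - eps / 2.f
       :              (v * v) / (2.f * eps); }

static inline float softsgn_ref(float v, float eps)   // smoothed sign, clamped to [-1, 1]
{ return (v < -eps) ? -1.f : (v > eps) ? 1.f : v / eps; }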
#include <thrust/execution_policy.h> #include <algorithm> #include <map> #include <stack> #include "impl.cuh" namespace { using namespace manifold; __host__ __device__ void AtomicAddVec3(glm::vec3& target, const glm::vec3& add) { for (int i : {0, 1, 2}) { #ifdef __CUDA_ARCH__ atomicAdd(&target[i], add[i]); #else #pragma omp atomic target[i] += add[i]; #endif } } struct Normalize { __host__ __device__ void operator()(glm::vec3& v) { v = SafeNormalize(v); } }; struct Transform4x3 { const glm::mat4x3 transform; __host__ __device__ void operator()(glm::vec3& position) { position = transform * glm::vec4(position, 1.0f); } }; struct TransformNormals { const glm::mat3 transform; __host__ __device__ void operator()(glm::vec3& normal) { normal = glm::normalize(transform * normal); if (isnan(normal.x)) normal = glm::vec3(0.0f); } }; struct AssignNormals { glm::vec3* vertNormal; const glm::vec3* vertPos; const Halfedge* halfedges; const float precision; const bool calculateTriNormal; __host__ __device__ void operator()(thrust::tuple<glm::vec3&, int> in) { glm::vec3& triNormal = thrust::get<0>(in); const int face = thrust::get<1>(in); glm::ivec3 triVerts; for (int i : {0, 1, 2}) triVerts[i] = halfedges[3 * face + i].startVert; glm::vec3 edge[3]; for (int i : {0, 1, 2}) { const int j = (i + 1) % 3; edge[i] = glm::normalize(vertPos[triVerts[j]] - vertPos[triVerts[i]]); } if (calculateTriNormal) { triNormal = glm::normalize(glm::cross(edge[0], edge[1])); if (isnan(triNormal.x)) triNormal = glm::vec3(0, 0, 1); } // corner angles glm::vec3 phi; float dot = -glm::dot(edge[2], edge[0]); phi[0] = dot >= 1 ? 0 : (dot <= -1 ? glm::pi<float>() : glm::acos(dot)); dot = -glm::dot(edge[0], edge[1]); phi[1] = dot >= 1 ? 0 : (dot <= -1 ? glm::pi<float>() : glm::acos(dot)); phi[2] = glm::pi<float>() - phi[0] - phi[1]; // assign weighted sum for (int i : {0, 1, 2}) { AtomicAddVec3(vertNormal[triVerts[i]], phi[i] * triNormal); } } }; struct Tri2Halfedges { Halfedge* halfedges; TmpEdge* edges; __host__ __device__ void operator()( thrust::tuple<int, const glm::ivec3&> in) { const int tri = thrust::get<0>(in); const glm::ivec3& triVerts = thrust::get<1>(in); for (const int i : {0, 1, 2}) { const int j = (i + 1) % 3; const int edge = 3 * tri + i; halfedges[edge] = {triVerts[i], triVerts[j], -1, tri}; edges[edge] = TmpEdge(triVerts[i], triVerts[j], edge); } } }; struct LinkHalfedges { Halfedge* halfedges; const TmpEdge* edges; __host__ __device__ void operator()(int k) { const int i = 2 * k; const int j = i + 1; const int pair0 = edges[i].halfedgeIdx; const int pair1 = edges[j].halfedgeIdx; if (halfedges[pair0].startVert != halfedges[pair1].endVert || halfedges[pair0].endVert != halfedges[pair1].startVert || halfedges[pair0].face == halfedges[pair1].face) printf("Not manifold!\n"); halfedges[pair0].pairedHalfedge = pair1; halfedges[pair1].pairedHalfedge = pair0; } }; struct SwapHalfedges { Halfedge* halfedges; const TmpEdge* edges; __host__ void operator()(int k) { const int i = 2 * k; const int j = i - 2; const TmpEdge thisEdge = edges[i]; const TmpEdge lastEdge = edges[j]; if (thisEdge.first == lastEdge.first && thisEdge.second == lastEdge.second) { const int swap0idx = thisEdge.halfedgeIdx; Halfedge& swap0 = halfedges[swap0idx]; const int swap1idx = swap0.pairedHalfedge; Halfedge& swap1 = halfedges[swap1idx]; const int next0idx = swap0idx + ((swap0idx + 1) % 3 == 0 ? -2 : 1); const int next1idx = swap1idx + ((swap1idx + 1) % 3 == 0 ? 
-2 : 1); Halfedge& next0 = halfedges[next0idx]; Halfedge& next1 = halfedges[next1idx]; next0.startVert = swap0.endVert = next1.endVert; swap0.pairedHalfedge = next1.pairedHalfedge; halfedges[swap0.pairedHalfedge].pairedHalfedge = swap0idx; next1.startVert = swap1.endVert = next0.endVert; swap1.pairedHalfedge = next0.pairedHalfedge; halfedges[swap1.pairedHalfedge].pairedHalfedge = swap1idx; next0.pairedHalfedge = next1idx; next1.pairedHalfedge = next0idx; } } }; struct InitializeBaryRef { const int meshID; const Halfedge* halfedge; __host__ __device__ void operator()(thrust::tuple<BaryRef&, int> inOut) { BaryRef& baryRef = thrust::get<0>(inOut); int tri = thrust::get<1>(inOut); // Leave existing meshID if input is negative if (meshID >= 0) baryRef.meshID = meshID; baryRef.face = tri; glm::ivec3 triVerts(0.0f); for (int i : {0, 1, 2}) triVerts[i] = halfedge[3 * tri + i].startVert; baryRef.verts = triVerts; baryRef.vertBary = {-1, -1, -1}; } }; struct CoplanarEdge { BaryRef* triBary; const Halfedge* halfedge; const glm::vec3* vertPos; const float precision; __host__ __device__ void operator()(int edgeIdx) { const Halfedge edge = halfedge[edgeIdx]; if (!edge.IsForward()) return; const Halfedge pair = halfedge[edge.pairedHalfedge]; const glm::vec3 base = vertPos[edge.startVert]; const glm::vec3 jointVec = vertPos[edge.endVert] - base; const glm::vec3 edgeVec = vertPos[halfedge[NextHalfedge(edgeIdx)].endVert] - base; const glm::vec3 pairVec = vertPos[halfedge[NextHalfedge(edge.pairedHalfedge)].endVert] - base; const glm::vec3 cross = glm::cross(jointVec, edgeVec); const float area = glm::length(cross); const float areaPair = glm::length(glm::cross(pairVec, jointVec)); const float volume = glm::abs(glm::dot(cross, pairVec)); const float height = volume / glm::max(area, areaPair); // Only operate on coplanar triangles if (height > precision) return; const float length = glm::max(glm::length(edgeVec), glm::length(jointVec)); const float lengthPair = glm::max(glm::length(pairVec), glm::length(jointVec)); const bool edgeColinear = area < length * precision; const bool pairColinear = areaPair < lengthPair * precision; int& edgeFace = triBary[edge.face].face; int& pairFace = triBary[pair.face].face; // Point toward non-degenerate triangle if (edgeColinear && !pairColinear) edgeFace = pairFace; else if (pairColinear && !edgeColinear) pairFace = edgeFace; else { // Point toward lower index if (edgeFace < pairFace) pairFace = edgeFace; else edgeFace = pairFace; } } }; struct EdgeBox { const glm::vec3* vertPos; __host__ __device__ void operator()( thrust::tuple<Box&, const TmpEdge&> inout) { const TmpEdge& edge = thrust::get<1>(inout); thrust::get<0>(inout) = Box(vertPos[edge.first], vertPos[edge.second]); } }; } // namespace namespace manifold { std::vector<int> Manifold::Impl::meshID2Original_; /** * Create a manifold from an input triangle Mesh. Will throw if the Mesh is not * manifold. TODO: update halfedgeTangent during CollapseDegenerates. */ Manifold::Impl::Impl(const Mesh& mesh) : vertPos_(mesh.vertPos), halfedgeTangent_(mesh.halfedgeTangent) { CheckDevice(); CalculateBBox(); SetPrecision(); CreateAndFixHalfedges(mesh.triVerts); InitializeNewReference(); CalculateNormals(); CollapseDegenerates(); Finish(); } /** * Create either a unit tetrahedron, cube or octahedron. The cube is in the first * octant, while the others are symmetric about the origin.
*/ Manifold::Impl::Impl(Shape shape) { std::vector<glm::vec3> vertPos; std::vector<glm::ivec3> triVerts; switch (shape) { case Shape::TETRAHEDRON: vertPos = {{-1.0f, -1.0f, 1.0f}, {-1.0f, 1.0f, -1.0f}, {1.0f, -1.0f, -1.0f}, {1.0f, 1.0f, 1.0f}}; triVerts = {{2, 0, 1}, {0, 3, 1}, {2, 3, 0}, {3, 2, 1}}; break; case Shape::CUBE: vertPos = {{0.0f, 0.0f, 0.0f}, // {1.0f, 0.0f, 0.0f}, // {1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, 0.0f}, // {0.0f, 0.0f, 1.0f}, // {1.0f, 0.0f, 1.0f}, // {1.0f, 1.0f, 1.0f}, // {0.0f, 1.0f, 1.0f}}; triVerts = {{0, 2, 1}, {0, 3, 2}, // {4, 5, 6}, {4, 6, 7}, // {0, 1, 5}, {0, 5, 4}, // {1, 2, 6}, {1, 6, 5}, // {2, 3, 7}, {2, 7, 6}, // {3, 0, 4}, {3, 4, 7}}; break; case Shape::OCTAHEDRON: vertPos = {{1.0f, 0.0f, 0.0f}, // {-1.0f, 0.0f, 0.0f}, // {0.0f, 1.0f, 0.0f}, // {0.0f, -1.0f, 0.0f}, // {0.0f, 0.0f, 1.0f}, // {0.0f, 0.0f, -1.0f}}; triVerts = {{0, 2, 4}, {1, 5, 3}, // {2, 1, 4}, {3, 5, 0}, // {1, 3, 4}, {0, 5, 2}, // {3, 0, 4}, {2, 5, 1}}; break; default: throw userErr("Unrecognized shape!"); } vertPos_ = vertPos; CreateHalfedges(triVerts); Finish(); InitializeNewReference(); MergeCoplanarRelations(); } /** * When a manifold is copied, it is given a new unique set of mesh relation IDs, * identifying a particular instance of a copied input mesh. The original mesh * ID can be found using the meshID2Original mapping. */ void Manifold::Impl::DuplicateMeshIDs() { std::map<int, int> old2new; for (BaryRef& ref : meshRelation_.triBary) { if (old2new.find(ref.meshID) == old2new.end()) { old2new[ref.meshID] = meshID2Original_.size(); meshID2Original_.push_back(meshID2Original_[ref.meshID]); } ref.meshID = old2new[ref.meshID]; } } void Manifold::Impl::ReinitializeReference(int meshID) { thrust::for_each_n(zip(meshRelation_.triBary.beginD(), countAt(0)), NumTri(), InitializeBaryRef({meshID, halfedge_.cptrD()})); } int Manifold::Impl::InitializeNewReference() { meshRelation_.triBary.resize(NumTri()); const int nextMeshID = meshID2Original_.size(); meshID2Original_.push_back(nextMeshID); ReinitializeReference(nextMeshID); return nextMeshID; } void Manifold::Impl::MergeCoplanarRelations() { thrust::for_each_n( countAt(0), halfedge_.size(), CoplanarEdge({meshRelation_.triBary.ptrD(), halfedge_.cptrD(), vertPos_.cptrD(), precision_})); VecH<BaryRef>& triBary = meshRelation_.triBary.H(); std::stack<int> stack; for (int tri = 0; tri < NumTri(); ++tri) { int thisTri = tri; while (triBary[thisTri].face != thisTri) { stack.push(thisTri); thisTri = triBary[thisTri].face; } while (!stack.empty()) { triBary[stack.top()].face = thisTri; stack.pop(); } } } /** * Create the halfedge_ data structure from an input triVerts array like Mesh. */ void Manifold::Impl::CreateHalfedges(const VecDH<glm::ivec3>& triVerts) { const int numTri = triVerts.size(); halfedge_.resize(3 * numTri); VecDH<TmpEdge> edge(3 * numTri); thrust::for_each_n(zip(countAt(0), triVerts.beginD()), numTri, Tri2Halfedges({halfedge_.ptrD(), edge.ptrD()})); thrust::sort(edge.beginD(), edge.endD()); thrust::for_each_n(countAt(0), halfedge_.size() / 2, LinkHalfedges({halfedge_.ptrD(), edge.cptrD()})); } /** * Create the halfedge_ data structure from an input triVerts array like Mesh. * Check that the input is an even-manifold, and if it is not 2-manifold, * perform edge swaps until it is. This is a host function. 
*/ void Manifold::Impl::CreateAndFixHalfedges(const VecDH<glm::ivec3>& triVerts) { const int numTri = triVerts.size(); halfedge_.resize(3 * numTri); VecDH<TmpEdge> edge(3 * numTri); thrust::for_each_n(zip(countAt(0), triVerts.begin()), numTri, Tri2Halfedges({halfedge_.ptrH(), edge.ptrH()})); // Stable sort is required here so that halfedges from the same face are // paired together (the triangles were created in face order). In some // degenerate situations the triangulator can add the same internal edge in // two different faces, causing this edge to not be 2-manifold. We detect this // and fix it by swapping one of the identical edges, so it is important that // we have the edges paired according to their face. std::stable_sort(edge.begin(), edge.end()); thrust::for_each_n(thrust::host, countAt(0), halfedge_.size() / 2, LinkHalfedges({halfedge_.ptrH(), edge.cptrH()})); thrust::for_each(thrust::host, countAt(1), countAt(halfedge_.size() / 2), SwapHalfedges({halfedge_.ptrH(), edge.cptrH()})); } /** * Does a full recalculation of the face bounding boxes, including updating the * collider, but does not resort the faces. */ void Manifold::Impl::Update() { CalculateBBox(); VecDH<Box> faceBox; VecDH<uint32_t> faceMorton; GetFaceBoxMorton(faceBox, faceMorton); collider_.UpdateBoxes(faceBox); } void Manifold::Impl::ApplyTransform() const { // This const_cast is here because these operations cancel out, leaving the // state conceptually unchanged. This enables lazy transformation evaluation. const_cast<Impl*>(this)->ApplyTransform(); } /** * Bake the manifold's transform into its vertices. This function allows lazy * evaluation, which is important because often several transforms are applied * between operations. */ void Manifold::Impl::ApplyTransform() { if (transform_ == glm::mat4x3(1.0f)) return; thrust::for_each(vertPos_.beginD(), vertPos_.endD(), Transform4x3({transform_})); glm::mat3 normalTransform = glm::inverse(glm::transpose(glm::mat3(transform_))); thrust::for_each(faceNormal_.beginD(), faceNormal_.endD(), TransformNormals({normalTransform})); thrust::for_each(vertNormal_.beginD(), vertNormal_.endD(), TransformNormals({normalTransform})); // This optimization does a cheap collider update if the transform is // axis-aligned. if (!collider_.Transform(transform_)) Update(); const float oldScale = bBox_.Scale(); transform_ = glm::mat4x3(1.0f); CalculateBBox(); const float newScale = bBox_.Scale(); precision_ *= glm::max(1.0f, newScale / oldScale) * glm::max(glm::length(transform_[0]), glm::max(glm::length(transform_[1]), glm::length(transform_[2]))); // Maximum of inherited precision loss and translational precision loss. SetPrecision(precision_); } /** * Sets the precision based on the bounding box, and limits its minimum value by * the optional input. */ void Manifold::Impl::SetPrecision(float minPrecision) { precision_ = glm::max(minPrecision, kTolerance * bBox_.Scale()); if (!glm::isfinite(precision_)) precision_ = -1; } /** * If face normals are already present, this function uses them to compute * vertex normals (angle-weighted pseudo-normals); otherwise it also computes * the face normals. Face normals are only calculated when needed because nearly * degenerate faces will accrue rounding error, while the Boolean can retain * their original normal, which is more accurate and can help with merging * coplanar faces. * * If the face normals have been invalidated by an operation like Warp(), ensure * you do faceNormal_.resize(0) before calling this function to force * recalculation. 
*/ void Manifold::Impl::CalculateNormals() { vertNormal_.resize(NumVert()); thrust::fill(vertNormal_.beginD(), vertNormal_.endD(), glm::vec3(0)); bool calculateTriNormal = false; if (faceNormal_.size() != NumTri()) { faceNormal_.resize(NumTri()); calculateTriNormal = true; } thrust::for_each_n( zip(faceNormal_.beginD(), countAt(0)), NumTri(), AssignNormals({vertNormal_.ptrD(), vertPos_.cptrD(), halfedge_.cptrD(), precision_, calculateTriNormal})); thrust::for_each(vertNormal_.beginD(), vertNormal_.endD(), Normalize()); } /** * Returns a sparse array of the bounding box overlaps between the edges of the * input manifold, Q and the faces of this manifold. Returned indices only * point to forward halfedges. */ SparseIndices Manifold::Impl::EdgeCollisions(const Impl& Q) const { VecDH<TmpEdge> edges = CreateTmpEdges(Q.halfedge_); const int numEdge = edges.size(); VecDH<Box> QedgeBB(numEdge); thrust::for_each_n(zip(QedgeBB.beginD(), edges.cbeginD()), numEdge, EdgeBox({Q.vertPos_.cptrD()})); SparseIndices q1p2 = collider_.Collisions(QedgeBB); thrust::for_each(q1p2.beginD(0), q1p2.endD(0), ReindexEdge({edges.cptrD()})); return q1p2; } /** * Returns a sparse array of the input vertices that project inside the XY * bounding boxes of the faces of this manifold. */ SparseIndices Manifold::Impl::VertexCollisionsZ( const VecDH<glm::vec3>& vertsIn) const { return collider_.Collisions(vertsIn); } } // namespace manifold
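// --- Editor's illustrative sketch (not part of the original manifold source above) ---
// CalculateNormals() accumulates angle-weighted pseudo-normals: each triangle adds its
// face normal to each of its three vertices, weighted by the triangle's corner angle at
// that vertex, and the sums are normalized at the end. A minimal single-triangle,
// host-only sketch of that weighting (glm is assumed available; the function name is
// hypothetical):
#include <glm/glm.hpp>
#include <glm/gtc/constants.hpp>

inline void accumulate_angle_weighted_normal(const glm::vec3 tri[3], glm::vec3 vertNormal[3])
{
  glm::vec3 edge[3];
  for (int i : {0, 1, 2}) edge[i] = glm::normalize(tri[(i + 1) % 3] - tri[i]);
  const glm::vec3 triNormal = glm::normalize(glm::cross(edge[0], edge[1]));
  for (int i : {0, 1, 2}) {
    // Corner angle at vertex i: angle between the edge leaving i and the (reversed) edge
    // arriving at i, with the dot product clamped to keep acos() in its domain.
    const float d   = -glm::dot(edge[(i + 2) % 3], edge[i]);
    const float phi = d >= 1 ? 0.f : (d <= -1 ? glm::pi<float>() : glm::acos(d));
    vertNormal[i] += phi * triNormal;   // the caller normalizes each vertNormal afterwards
  }
}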
#include "atomics/missing_in_cuda.cuh" #include <kat/on_device/common.cuh> #include <kat/on_device/builtins.cuh> #include <kat/on_device/math.cuh> #include <device_atomic_functions.h> #if (CUDART_VERSION >= 8000) #include <sm_60_atomic_functions.h> #include <cuda_fp16.h> #endif #include <kat/detail/pointers.cuh> #include <functional> #include <type_traits> #include <climits> ///@cond #include <kat/detail/execution_space_specifiers.hpp> ///@endcond namespace kat { namespace atomic { namespace detail { template <unsigned NBytes> struct uint_helper; template<> struct uint_helper<1> { using type = uint8_t; }; template<> struct uint_helper<2> { using type = uint16_t; }; template<> struct uint_helper<4> { using type = uint32_t; }; template<> struct uint_helper<8> { using type = uint64_t; }; template <unsigned NBytes> using uint_t = typename uint_helper<NBytes>::type; /** * @brief extracts a smaller-size value using a contiguous sequence * of bytes representing a value of another, larger type. * * @note This may not work for any pair of types; but should work * for integral types of sizes which are a power of 2. */ template <typename Smaller, typename Larger, typename Offset> KAT_FHD Smaller extract_value(Larger larger_value ,Offset offset_into_larger_value) { static_assert(std::is_trivial<Smaller>::value, "Cannot extract non-trivially-copyable values"); static_assert(std::is_trivial<Larger>::value, "Cannot extract from non-trivially-copyable values"); unsigned char* smaller_value_ptr = reinterpret_cast<unsigned char*>(larger_value) + offset_into_larger_value; // In C++17 this could have been std::byte I suppose return reinterpret_cast<const Smaller&>(smaller_value_ptr); } template <typename T> struct tag { }; template <typename U, typename I> constexpr U KAT_FHD ones(I num_ones) { return (U{1} << num_ones) - I{1}; } template <typename Larger, typename Smaller, typename Offset> uint_t<sizeof(Larger)> KAT_FHD bitmask_for_value_at_offset(Offset offset_into_larger_value) { using larger_uint_type = uint_t<sizeof(Larger)>; constexpr const auto unshifted_ones { ones<larger_uint_type>(sizeof(Smaller) * CHAR_BIT) }; auto shift_amount = offset_into_larger_value * CHAR_BIT; return unshifted_ones << shift_amount; } template <typename Larger, typename Smaller, typename Offset> uint_t<sizeof(Larger)> KAT_FHD bitmask_for_padding_before_value_at_offset(Offset offset_into_larger_value) { return ~bitmask_for_value_at_offset(offset_into_larger_value); } template <typename Larger, typename Smaller, typename Offset> Larger KAT_FHD generate_value_at_offset(Smaller small_value, Offset offset_into_larger_value) { static_assert(sizeof(Larger) > sizeof(Smaller), "invalid sizes"); static_assert(std::is_trivial<Larger>::value, "Larger must be a trivial type"); static_assert(std::is_trivial<Smaller>::value, "Smaller must be a trivial type"); auto shift_amount = offset_into_larger_value * CHAR_BIT; using smaller_uint = uint_t<sizeof(Smaller)>; using larger_uint = uint_t<sizeof(Larger)>; auto small_value_as_uint { reinterpret<smaller_uint>(small_value) }; auto result_as_uint = larger_uint{small_value_as_uint} << shift_amount; return reinterpret<Larger>(result_as_uint); } template <typename Larger, typename Smaller, typename Offset> Larger KAT_FHD replace_bytes_at_offset(Larger larger_value, Smaller replacement, Offset offset_into_larger_value) { auto clearing_mask { ~bitmask_for_value_at_offset<Larger, Smaller>(offset_into_larger_value) }; return (larger_value & clearing_mask ) | generate_value_at_offset<Larger>(replacement, 
offset_into_larger_value); // TODO: Some Larger types may not admit bitwise-and'ing, so we'd have to reinterpret them as larger_uint_type } namespace implementation { template <typename T> KAT_FD T compare_and_swap(std::true_type, T* __restrict__ address, T compare, T val) { static_assert(sizeof(T) == sizeof(int) or sizeof(T) == sizeof(long long int), "Cannot compare_and_swap directly with the requested type"); // This switch is necessary due to atomicCAS being defined // only for a very small selection of types - int and unsigned long long switch(sizeof(T)) { case sizeof(int): { int int_result = atomicCAS( reinterpret_cast<int* >(address), reinterpret_cast<const int&>(compare), reinterpret_cast<const int&>(val) ); return reinterpret<T>(int_result); } case sizeof(long long int): { long long int llint_result = atomicCAS( reinterpret_cast<unsigned long long* >(address), reinterpret_cast<const unsigned long long&>(compare), reinterpret_cast<const unsigned long long&>(val) ); return reinterpret<T>(llint_result); } default: return T(); // should not be able to get here } } template <typename T> KAT_FD T compare_and_swap(std::false_type, T* __restrict__ address, T compare, T val) { // The idea is to apply compare-and-swap on more memory than the T type actually takes // up. However, we can't just have that larger stretch of memory start at `address`, since // NVIDIA GPUs require operands to machine instructions to be naturally-aligned (e.g. // a 4-byte-sized operand must start at an address being a multiple of 4). That means we // could theoretically have "slack" bytes we use in the compare-and-swap both before and // after the value we're actually interested in. We'll have to "contend" for atomic access // to both the proper bytes of interest and the slack bytes, together. static_assert (not std::is_const<T>::value, "Can't compare-and-swap with a const value"); static_assert (sizeof(T) <= sizeof(long long int), "Type size too large for atomic compare-and-swap"); using casable_type = typename std::conditional<sizeof(T) < sizeof(int), int, unsigned long long int>::type; casable_type* casable = kat::detail::align_down<casable_type>(address); const auto offset_into_casable_type { kat::detail::address_difference(address, casable) }; const auto mask_for_value_at_offset { bitmask_for_value_at_offset<casable_type, T>(offset_into_casable_type) }; // The on-bits are those which are _not_ used for the T-type value within the casable value at address auto value_at_offset { generate_value_at_offset<casable_type>(val, offset_into_casable_type) }; // ... with 0's in all other bits auto last_known_casable_value { *casable }; auto casable_value_to_swap_in { replace_bytes_at_offset(last_known_casable_value, val, offset_into_casable_type) }; casable_type expected_value_at_addr = last_known_casable_value; bool value_of_interest_changed; do { last_known_casable_value = atomic::compare_and_swap<casable_type>( casable, expected_value_at_addr, casable_value_to_swap_in); auto cas_did_swap = (last_known_casable_value == value_at_offset); if (cas_did_swap) { return compare; } auto value_of_interest_at_address { extract_value<T>(last_known_casable_value, offset_into_casable_type) }; if (value_of_interest_at_address != compare) { return value_of_interest_at_address; } // At this point, it must be the case that the padding bytes changed. That means // we still need to try to perform the CASing - but we need to update the padding // so that we don't switch it back. 
expected_value_at_addr |= (last_known_casable_value & ~mask_for_value_at_offset); } while (true); } } // namespace implementation } // namespace detail template <typename T> KAT_FD T compare_and_swap(T* address, T compare, T val) { static_assert(sizeof(T) <= sizeof(long long int), "nVIDIA GPUs do not support atomic operations on data larger than long long int"); // CUDA PTX (or at least, the CUDA on-device API) cannot compare-and-swap all fundamental // numeric C/C++ types; and some types it can CAS, but only if you pass them as another // type of the same size. Thus we need to choose between an implementation which // compares-and-swaps directly, and another which uses a larger value that is supported. constexpr bool can_cas_directly = (sizeof(T) == sizeof(int)) or (sizeof(T) == sizeof(unsigned long long int)); return detail::implementation::compare_and_swap<T>( kat::bool_constant<can_cas_directly>{}, address, compare, val); } namespace detail { namespace implementation { template <typename UnaryFunction, typename T> KAT_FD T apply(std::true_type, UnaryFunction f, T* __restrict__ address) { T newest_value_found_at_addr { *address }; T value_expected_at_addr; do { value_expected_at_addr = newest_value_found_at_addr; auto value_to_set = f(value_expected_at_addr); newest_value_found_at_addr = detail::implementation::compare_and_swap<T>(std::true_type{}, address, value_expected_at_addr, value_to_set); if (newest_value_found_at_addr != value_expected_at_addr) { } } while(newest_value_found_at_addr != value_expected_at_addr); return newest_value_found_at_addr; } /** * Applies a unary function, essentially-atomically, to a value in memory, returning the value * before the function was applied * * @param address the memory location of the value to apply a function to * @param f unary function to apply to the value at @p address * @return existing value before the application of @p f * * @note Since we can't directly compare-and-set the target value in memory, we'll work on * a slightly larger bit of memory including it. */ template <typename UnaryFunction, typename T> KAT_FD T apply(std::false_type, UnaryFunction f, T* __restrict__ address) { // Similar to the no-primitive-available compare_and_swap, except that we don't have a "compare" // value, i.e. we don't condition our action on a specific existing value - we'll go with whatever // is at the address static_assert (not std::is_const<T>::value, "Can't compare-and-swap with a const value"); static_assert (sizeof(T) <= sizeof(long long int), "Type size too large for atomic compare-and-swap"); using casable_type = typename std::conditional<sizeof(T) < sizeof(int), int, unsigned long long int>::type; casable_type* casable = kat::detail::align_down<casable_type>(address); const auto offset_into_casable_type { kat::detail::address_difference(address, casable) }; // const auto mask_for_value_at_offset { bitmask_for_value_at_offset<casable_type, T>(offset_into_casable_type) }; // The on-bits are those which are _not_ used for the T-type value within the casable value at address // auto value_at_offset { generate_value_at_offset<casable_type>(val, offset_into_casable_type) }; // ... 
with 0's in all other bits casable_type extracted_from_known_casable; bool cas_did_swap; auto known_value_of_casable { *casable }; while (true) { extracted_from_known_casable = extract_value<T>(known_value_of_casable, offset_into_casable_type); casable_type casable_value_to_swap_in = replace_bytes_at_offset(known_value_of_casable, f(extracted_from_known_casable), offset_into_casable_type); auto new_known_value_of_casable = atomic::compare_and_swap<casable_type>( casable, known_value_of_casable, casable_value_to_swap_in); cas_did_swap = (new_known_value_of_casable == known_value_of_casable); if (cas_did_swap) { return extracted_from_known_casable; } known_value_of_casable = new_known_value_of_casable; }; } } // namespace implementation } // namespace detail /** * Applies a unary function, atomically, to a value in memory, returning the value * before the function was applied * * @param address the memory location of the value to apply a function to * @param f unary function to apply to the value at @p address * @return existing value before the application of @p f */ template <typename UnaryFunction, typename T> KAT_FD T apply(UnaryFunction f, T* __restrict__ address) { static_assert(sizeof(T) <= sizeof(long long int), "nVIDIA GPUs do not support atomic operations on data larger than long long int"); // We can't really apply atomically; we can only apply f to a copy - which means // we can't avoid a loop. But we can still benefit from an atomic CAS if it's _directly_ // available to us. constexpr bool can_cas_directly = (sizeof(T) == sizeof(int)) or (sizeof(T) == sizeof(unsigned long long int)); return detail::implementation::apply<UnaryFunction, T>( kat::bool_constant<can_cas_directly>{}, f, address); } template <typename Function, typename T, typename... Ts> KAT_FD T apply( Function f, T* __restrict__ address, const Ts... xs) { auto uf = [&](T existing_value) -> T { return f(existing_value, xs...); }; static_assert(std::is_same<decltype(uf(*address)), T>::value, "The function to apply must return the same type it takes"); return apply(uf, address); } namespace detail { namespace implementation { template <typename T> KAT_FD T increment( std::true_type, // can use a builtin atomic increment T* address, T wraparound_value) { static_assert(sizeof(T) == sizeof(unsigned int) and std::is_integral<T>::value, "invalid type"); return (T) (atomicInc( reinterpret_cast<unsigned int* >(address), reinterpret_cast<const unsigned int&>(wraparound_value) )); } template <typename T> KAT_FD T increment( std::false_type, // can't use a builtin atomic increment T* address, T wraparound_value) { auto do_increment = [](T existing_value, T wraparound) -> T { return existing_value >= wraparound ? 
T{0} : T(existing_value+1); // Note the possibility of overflow here when wraparound_value is std::numeric_limits<T>::max() }; return atomic::apply(do_increment, address, wraparound_value); } } // namespace implementation } // namespace detail template <typename T> KAT_FD T increment( T* address, T wraparound_value) { constexpr const bool can_act_directly = (sizeof(T) == sizeof(unsigned int) and std::is_integral<T>::value); return detail::implementation::increment<T>( kat::bool_constant<can_act_directly>{}, address, wraparound_value); } namespace detail { namespace implementation { template <typename T> KAT_FD T decrement ( std::true_type, T* address, T wraparound_value) { static_assert( sizeof(T) == sizeof(unsigned int) and std::is_integral<T>::value, "invalid type"); return (T) (atomicDec( reinterpret_cast<unsigned int* >(address), wraparound_value )); } template <typename T> KAT_FD T decrement ( std::false_type, T* address, T wraparound_value) { auto do_decrement = [=](T existing_value) -> T { return ((existing_value <= 0) or (existing_value >= wraparound_value)) ? wraparound_value - 1 : existing_value - 1; }; return kat::atomic::apply(do_decrement, address); } } // namespace implementation } // namespace detail template <typename T> KAT_FD T decrement ( T* address, T wraparound_value) { constexpr bool can_act_directly = ( sizeof(T) == sizeof(unsigned int) and std::is_integral<T>::value); return detail::implementation::decrement<T>( kat::bool_constant<can_act_directly>{}, address, wraparound_value); } namespace detail { namespace implementation { template <typename T> KAT_FD T add(std::true_type, T* address, T val) { return ::atomicAdd(address, val); } template <typename T> KAT_FD T add(std::false_type, T* address, T val) { auto do_addition = [](T existing_value, T x) -> T { return static_cast<T>(existing_value + x); }; return kat::atomic::apply(do_addition, address, val); } // TODO: Double-check (no pun intended) that pre-Pascal cards, or Pascal-and-onwards // cards running CC 3.0 code, handle doubles properly - I have my doubts. template <typename T> KAT_FD T subtract(std::true_type, T* address, T val) { return ::atomicSub(address, val); } template <typename T> KAT_FD T subtract(std::false_type, T* address, T val) { auto do_subtraction = [](T existing_value, T x) -> T { return static_cast<T>(existing_value - x); }; return kat::atomic::apply(do_subtraction, address, val); } template <typename T> KAT_FD T exchange(std::true_type, T* address, T val) { // Note: We know there are implementations available for int, unsigned, unsigned long long and float; // but the only thing we should really care about here is the size. 
static_assert(sizeof(unsigned) == sizeof(float), "Unexpected fundamental type sizes"); switch(sizeof(T)) { case sizeof(unsigned): { unsigned previous_value = ::atomicExch(reinterpret_cast<unsigned*>(address), reinterpret<unsigned>(val)); return reinterpret<T>(previous_value); } case sizeof(unsigned long long): { unsigned long long previous_value = ::atomicExch(reinterpret_cast<unsigned long long*>(address), reinterpret<unsigned long long>(val)); return reinterpret<T>(previous_value); } default: // should not be able to get here return T{}; } } template <typename T> KAT_FD T exchange(std::false_type, T* address, T val) { auto do_exchange = [](T existing_value, T x) -> T { return x; }; return kat::atomic::apply(do_exchange, address, val); } template <typename T> KAT_FD T min(std::true_type, T* address, T val) { return ::atomicMin(address, val); } template <typename T> KAT_FD T min(std::false_type, T* address, T val) { auto do_min = [](T existing_value, T x) -> T { return kat::minimum(existing_value, x); }; return kat::atomic::apply(do_min, address, val); } template <typename T> KAT_FD T max(std::true_type, T* address, T val) { return ::atomicMax(address, val); } template <typename T> KAT_FD T max(std::false_type, T* address, T val) { auto do_max = [](T existing_value, T x) -> T { return kat::maximum(existing_value, x); }; return kat::atomic::apply(do_max, address, val); } template <typename T> KAT_FD T bitwise_and(std::true_type, T* address, T val) { return ::atomicAnd(address, val); } template <typename T> KAT_FD T bitwise_and(std::false_type, T* address, T val) { auto do_and = [](T existing_value, T x) -> T { return existing_value & x; }; return kat::atomic::apply(do_and, address, val); } template <typename T> KAT_FD T bitwise_or(std::true_type, T* address, T val) { return ::atomicOr(address, val); } template <typename T> KAT_FD T bitwise_or(std::false_type, T* address, T val) { auto do_or = [](T existing_value, T x) -> T { return existing_value | x; }; return kat::atomic::apply(do_or, address, val); } template <typename T> KAT_FD T bitwise_xor(std::true_type, T* address, T val) { return ::atomicXor(address, val); } template <typename T> KAT_FD T bitwise_xor(std::false_type, T* address, T val) { auto do_xor = [](T existing_value, T x) -> T { return existing_value ^ x; }; return kat::atomic::apply(do_xor, address, val); } // TODO: Double-check (no pun intended) that pre-Pascal cards, or Pascal-and-onwards // cards running CC 3.0 code, handle doubles properly - I have my doubts. 
} // namespace implementation } // namespace detail template <typename T> KAT_FD T add(T* address, T val) { constexpr bool can_act_directly = std::is_same< T,int >::value or std::is_same< T,long int >::value or std::is_same< T,long long int >::value or std::is_same< T,unsigned >::value or std::is_same< T,unsigned long >::value or std::is_same< T,unsigned long long >::value or std::is_same< T,float >::value #if __CUDA_ARCH__ >= 600 or std::is_same< T,half >::value or std::is_same< T,double >::value #endif ; return detail::implementation::add<T>( kat::bool_constant<can_act_directly>{}, //kat::bool_constant<false>{}, address, val); } template <typename T> KAT_FD T subtract (T* address, T val) { constexpr bool can_act_directly = std::is_same< T,int >::value or std::is_same< T,unsigned >::value ; return detail::implementation::subtract<T>( kat::bool_constant<can_act_directly>{}, address, val); } template <typename T> KAT_FD T exchange (T* address, T val) { constexpr bool can_act_directly = (sizeof(T) == 4) or (sizeof(T) == 8); return detail::implementation::exchange<T>( kat::bool_constant<can_act_directly>{}, address, val); } template <typename T> KAT_FD T min (T* address, T val) { constexpr bool can_act_directly = #if CUDA_ARCH >= 320 std::is_same< T,int >::value or std::is_same< T,unsigned >::value #if CUDA_ARCH >= 350 or std::is_same< T,long >::value or std::is_same< T,long long >::value or std::is_same< T,unsigned long >::value or std::is_same< T,unsigned long long >::value #endif #else false #endif ; return detail::implementation::min<T>( kat::bool_constant<can_act_directly>{}, address, val); } template <typename T> KAT_FD T max (T* address, T val) { constexpr bool can_act_directly = #if CUDA_ARCH >= 320 std::is_same< T,int >::value or std::is_same< T,unsigned >::value #if CUDA_ARCH >= 350 or std::is_same< T,long >::value or std::is_same< T,long long >::value or std::is_same< T,unsigned long >::value or std::is_same< T,unsigned long long >::value #endif #else false #endif ; return detail::implementation::max<T>( kat::bool_constant<can_act_directly>{}, address, val); } template <typename T> KAT_FD T bitwise_and (T* address, T val) { constexpr bool can_act_directly = #if CUDA_ARCH >= 320 std::is_same< T,int >::value or std::is_same< T,unsigned >::value #if CUDA_ARCH >= 350 or std::is_same< T,unsigned long >::value or std::is_same< T,unsigned long long >::value or std::is_same< T,long >::value or std::is_same< T,long long >::value #endif #else false #endif ; return detail::implementation::bitwise_and<T>( kat::bool_constant<can_act_directly>{}, address, val); } template <typename T> KAT_FD T bitwise_or (T* address, T val) { constexpr bool can_act_directly = #if CUDA_ARCH >= 320 std::is_same< T,int >::value or std::is_same< T,unsigned >::value #if CUDA_ARCH >= 350 or std::is_same< T,unsigned long >::value or std::is_same< T,unsigned long long >::value or std::is_same< T,long >::value or std::is_same< T,long long >::value #endif #else false #endif ; return detail::implementation::bitwise_or<T>( kat::bool_constant<can_act_directly>{}, address, val); } template <typename T> KAT_FD T bitwise_xor (T* address, T val) { constexpr bool can_act_directly = #if CUDA_ARCH >= 320 std::is_same< T,int >::value or std::is_same< T,unsigned >::value #if CUDA_ARCH >= 350 or std::is_same< T,unsigned long >::value or std::is_same< T,unsigned long long >::value or std::is_same< T,long >::value or std::is_same< T,long long >::value #endif #else false #endif ; return detail::implementation::bitwise_xor<T>( 
kat::bool_constant<can_act_directly>{}, address, val); } // ... and now for some ops for which there are absolutely no primitives, // so that they can only be implemented using apply() template <typename T> KAT_FD T logical_and(T* address, T val) { auto do_logical_and = [](T existing_value, T x) -> T { return existing_value and x; }; return kat::atomic::apply(do_logical_and, address, val); } template <typename T> KAT_FD T logical_or(T* address, T val) { auto do_logical_or = [](T existing_value, T x) -> T { return existing_value or x; }; return kat::atomic::apply(do_logical_or, address, val); } template <typename T> KAT_FD T logical_xor(T* address, T val) { auto do_logical_xor = [](T existing_value, T x) -> T { return (existing_value and (not x)) or ((not existing_value) and x); }; return kat::atomic::apply(do_logical_xor, address, val); } template <typename T> KAT_FD T logical_not(T* address) { auto do_logical_not = [](T existing_value) -> T { return not existing_value; }; return kat::atomic::apply(do_logical_not, address); } template <typename T> KAT_FD T bitwise_not (T* address) { constexpr const T all_ones { ~T{0} }; return bitwise_xor(address, all_ones); } template <typename T> KAT_FD T set_bit (T* address, native_word_t bit_index) { auto f = [](T existing_value, native_word_t x) -> T { return existing_value | (T(1) << x); }; return apply(f, address, bit_index); } template <typename T> KAT_FD T unset_bit (T* address, native_word_t bit_index) { auto f = [](T existing_value, native_word_t x) -> T { return existing_value & ~(T(1) << x); }; return apply(f, address, bit_index); } } // namespace atomic } // namespace kat #endif // CUDA_KAT_ON_DEVICE_ATOMICS_DETAIL_CUH_
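// --------------------------------------------------------------------------------------------
// Hedged sketch (not part of cuda-kat): all of the "std::false_type" fallback overloads above
// funnel through kat::atomic::apply(), which is only assumed here to follow the usual
// compare-and-swap retry pattern. The snippet below shows that technique in a self-contained
// form for 32-bit types; atomic_apply_sketch, FloatMin and atomic_min_float_demo are
// hypothetical names introduced for illustration only.
// --------------------------------------------------------------------------------------------
#include <cuda_runtime.h>

template <typename T, typename Op>
__device__ T atomic_apply_sketch(T* address, T val, Op op)
{
    static_assert(sizeof(T) == sizeof(unsigned), "this sketch handles 32-bit types only");
    unsigned* addr_as_u = reinterpret_cast<unsigned*>(address);
    unsigned  old       = *addr_as_u;
    unsigned  assumed;
    do {
        assumed   = old;
        T desired = op(*reinterpret_cast<const T*>(&assumed), val);
        old       = atomicCAS(addr_as_u, assumed, *reinterpret_cast<unsigned*>(&desired));
    } while (assumed != old);              // another thread wrote in between: retry
    return *reinterpret_cast<T*>(&old);    // return the previous value, like the intrinsics
}

struct FloatMin {
    __device__ float operator()(float a, float b) const { return a < b ? a : b; }
};

// float has no hardware atomicMin, so it must go through the CAS loop above.
__global__ void atomic_min_float_demo(float* target, float candidate)
{
    atomic_apply_sketch(target, candidate, FloatMin{});
}
// This mirrors the retry loop documented in the CUDA C Programming Guide for emulated
// atomics; the real apply() may differ in how it widens types or handles 64-bit words.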
#include <claraparabricks/genomeworks/utils/cudautils.hpp> //GW_CU_CHECK_ERR #include <claraparabricks/genomeworks/utils/signed_integer_utils.hpp> //get_size #include "gtest/gtest.h" namespace claraparabricks { namespace genomeworks { namespace cudapoa { class BasicGenerateConsensus { public: BasicGenerateConsensus(std::vector<uint8_t> nodes, std::vector<int16_t> sorted_graph, Int16Vec2D node_alignments, Int16Vec2D outgoing_edges, std::vector<uint16_t> node_coverage_counts, Uint16Vec2D outgoing_edge_w) : graph_(nodes, sorted_graph, node_alignments, node_coverage_counts, outgoing_edges) , outgoing_edge_w_(outgoing_edge_w) , outgoing_edges_(outgoing_edges) { } void get_graph_buffers(uint8_t* nodes, int16_t* node_count, int16_t* sorted_poa, int16_t* node_id_to_pos, int16_t* incoming_edges, uint16_t* incoming_edge_count, int16_t* outgoing_edges, uint16_t* outgoing_edge_count, uint16_t* incoming_edge_w, uint16_t* node_coverage_counts, int16_t* node_alignments, uint16_t* node_alignment_count) const { graph_.get_nodes(nodes, node_count); graph_.get_sorted_graph(sorted_poa); graph_.get_node_id_to_pos(node_id_to_pos); graph_.get_node_coverage_counts(node_coverage_counts); graph_.get_edges(incoming_edges, incoming_edge_count, outgoing_edges, outgoing_edge_count); graph_.get_node_alignments(node_alignments, node_alignment_count); get_incoming_edge_w(incoming_edge_w); } void get_incoming_edge_w(uint16_t* incoming_edge_w) const { auto outgoing_edges = graph_.get_outgoing_edges(); for (int i = 0; i < get_size(outgoing_edges); i++) { for (int j = 0; j < get_size(outgoing_edges[i]); j++) { int16_t to_node = outgoing_edges[i][j]; incoming_edge_w[to_node * CUDAPOA_MAX_NODE_EDGES + i] = outgoing_edge_w_[i][j]; } } } protected: SortedGraph graph_; Int16Vec2D outgoing_edges_; Uint16Vec2D outgoing_edge_w_; }; typedef std::pair<std::string, BasicGenerateConsensus> GenerateConsensusTestPair; // create a vector of test cases std::vector<GenerateConsensusTestPair> getGenerateConsensusTestCases() { std::vector<GenerateConsensusTestPair> test_cases; /* * T * / \ * graph A — A A * \ / * A */ std::string ans_1 = "ATAA"; BasicGenerateConsensus gc_1({'A', 'A', 'A', 'A', 'T'}, //nodes {0, 1, 2, 4, 3}, //sorted_graph {{}, {}, {4}, {}, {2}}, //node_alignments {{1}, {2, 4}, {3}, {}, {3}}, //outgoing_edges {2, 2, 1, 2, 1}, //node_coverage_counts {{5}, {4, 3}, {2}, {}, {1}}); //outgoing_edge_w test_cases.emplace_back(std::move(ans_1), std::move(gc_1)); /* * graph A — T — C — G — A */ std::string ans_2 = "AGCTA"; BasicGenerateConsensus gc_2({'A', 'T', 'C', 'G', 'A'}, //nodes {0, 1, 2, 3, 4}, //sorted_graph {{}, {}, {}, {}, {}}, //node_alignments {{1}, {2}, {3}, {4}, {}}, //outgoing_edges {1, 1, 1, 1, 1}, //node_coverage_counts {{4}, {3}, {2}, {1}, {}}); test_cases.emplace_back(std::move(ans_2), std::move(gc_2)); /* * T * / \ * graph A — C — C — G * \ / * A */ std::string ans_3 = "GCCA"; BasicGenerateConsensus gc_3({'A', 'A', 'C', 'G', 'C', 'T'}, //nodes {0, 1, 4, 5, 2, 3}, //sorted_graph {{}, {4, 5}, {}, {}, {1, 5}, {1, 4}}, //node_alignments {{1, 4, 5}, {2}, {3}, {}, {2}, {2}}, //outgoing_edges {3, 1, 3, 3, 1, 1}, //node_coverage_counts {{7, 6, 5}, {4}, {3}, {}, {2}, {1}}); test_cases.emplace_back(std::move(ans_3), std::move(gc_3)); /* * graph A — T — T — G — A * \_____________/ */ std::string ans_4 = "AGTTA"; BasicGenerateConsensus gc_4({'A', 'T', 'T', 'G', 'A'}, //nodes {0, 1, 2, 3, 4}, //sorted_graph {{}, {}, {}, {}, {}}, //node_alignments {{1, 4}, {2}, {3}, {4}, {}}, //outgoing_edges {2, 1, 1, 1, 2}, //node_coverage_counts 
{{5, 4}, {3}, {2}, {1}, {}}); test_cases.emplace_back(std::move(ans_4), std::move(gc_4)); /* * T — G * / \ * graph A — C — A — T — A * \ / * T */ std::string ans_5 = "ATTCA"; BasicGenerateConsensus gc_5({'A', 'T', 'G', 'T', 'A', 'C', 'A', 'T'}, //nodes {0, 1, 5, 2, 6, 7, 3, 4}, //sorted_graph {{}, {5}, {6, 7}, {}, {}, {1}, {2, 7}, {2, 6}}, //node_alignments {{1, 5}, {2}, {3}, {4}, {}, {6, 7}, {3}, {3}}, //outgoing_edges {3, 1, 1, 3, 3, 2, 1, 1}, //node_coverage_counts {{9, 8}, {7}, {6}, {5}, {}, {4, 3}, {2}, {1}}); test_cases.emplace_back(std::move(ans_5), std::move(gc_5)); //add more test cases below return test_cases; } // host function for calling the kernel to test topsort device function. std::string testGenerateConsensus(const BasicGenerateConsensus& obj) { //declare device buffer uint8_t* nodes = nullptr; int16_t* node_count = nullptr; int16_t* graph = nullptr; int16_t* node_id_to_pos = nullptr; int16_t* incoming_edges = nullptr; uint16_t* incoming_edge_count = nullptr; int16_t* outgoing_edges = nullptr; uint16_t* outgoing_edge_count = nullptr; uint16_t* incoming_edge_w = nullptr; uint16_t* node_coverage_counts = nullptr; int16_t* node_alignments = nullptr; uint16_t* node_alignment_count = nullptr; //buffers that don't need initialization int16_t* predecessors = nullptr; int32_t* scores = nullptr; uint8_t* consensus = nullptr; uint16_t* coverage = nullptr; //default data size limits BatchConfig batch_size; //allocate unified memory so they can be accessed by both host and device. GW_CU_CHECK_ERR(cudaMallocManaged(&nodes, batch_size.max_nodes_per_graph * sizeof(uint8_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&node_count, sizeof(int16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&graph, batch_size.max_nodes_per_graph * sizeof(int16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&node_id_to_pos, batch_size.max_nodes_per_graph * sizeof(int16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&incoming_edges, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(int16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&incoming_edge_count, batch_size.max_nodes_per_graph * sizeof(uint16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&outgoing_edges, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(int16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&outgoing_edge_count, batch_size.max_nodes_per_graph * sizeof(uint16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&incoming_edge_w, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_EDGES * sizeof(uint16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&node_coverage_counts, batch_size.max_nodes_per_graph * sizeof(uint16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&node_alignments, batch_size.max_nodes_per_graph * CUDAPOA_MAX_NODE_ALIGNMENTS * sizeof(int16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&node_alignment_count, batch_size.max_nodes_per_graph * sizeof(uint16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&predecessors, batch_size.max_nodes_per_graph * sizeof(int16_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&scores, batch_size.max_nodes_per_graph * sizeof(int32_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&consensus, batch_size.max_consensus_size * sizeof(uint8_t))); GW_CU_CHECK_ERR(cudaMallocManaged(&coverage, batch_size.max_consensus_size * sizeof(uint16_t))); //initialize all 'count' buffers memset(incoming_edge_count, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t)); memset(outgoing_edge_count, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t)); memset(node_coverage_counts, 0, batch_size.max_nodes_per_graph * sizeof(uint16_t)); memset(node_alignment_count, 0, 
batch_size.max_nodes_per_graph * sizeof(uint16_t)); //calculate edge counts on host obj.get_graph_buffers(nodes, node_count, graph, node_id_to_pos, incoming_edges, incoming_edge_count, outgoing_edges, outgoing_edge_count, incoming_edge_w, node_coverage_counts, node_alignments, node_alignment_count); // call the host wrapper of topsort kernel generateConsensusTestHost(nodes, *node_count, graph, node_id_to_pos, incoming_edges, incoming_edge_count, outgoing_edges, outgoing_edge_count, incoming_edge_w, predecessors, scores, consensus, coverage, node_coverage_counts, node_alignments, node_alignment_count, batch_size.max_consensus_size); GW_CU_CHECK_ERR(cudaDeviceSynchronize()); //input and output buffers are the same ones in unified memory, so the results are updated in place //create and return a new BasicGraph object that encodes the resulting graph structure after adding the alignment std::string res((char*)consensus); GW_CU_CHECK_ERR(cudaFree(nodes)); GW_CU_CHECK_ERR(cudaFree(node_count)); GW_CU_CHECK_ERR(cudaFree(graph)); GW_CU_CHECK_ERR(cudaFree(node_id_to_pos)); GW_CU_CHECK_ERR(cudaFree(incoming_edges)); GW_CU_CHECK_ERR(cudaFree(incoming_edge_count)); GW_CU_CHECK_ERR(cudaFree(outgoing_edges)); GW_CU_CHECK_ERR(cudaFree(outgoing_edge_count)); GW_CU_CHECK_ERR(cudaFree(incoming_edge_w)); GW_CU_CHECK_ERR(cudaFree(node_coverage_counts)); GW_CU_CHECK_ERR(cudaFree(node_alignments)); GW_CU_CHECK_ERR(cudaFree(node_alignment_count)); GW_CU_CHECK_ERR(cudaFree(predecessors)); GW_CU_CHECK_ERR(cudaFree(scores)); GW_CU_CHECK_ERR(cudaFree(consensus)); GW_CU_CHECK_ERR(cudaFree(coverage)); return res; } using ::testing::TestWithParam; using ::testing::ValuesIn; class GenerateConsensusTest : public TestWithParam<GenerateConsensusTestPair> { public: void SetUp() {} std::string runGenerateConsensus(const BasicGenerateConsensus& obj) { return testGenerateConsensus(obj); } }; TEST_P(GenerateConsensusTest, TestGenerateConsensuesCorrectness) { const auto test_case = GetParam(); EXPECT_EQ(test_case.first, runGenerateConsensus(test_case.second)); } INSTANTIATE_TEST_SUITE_P(TestGenerateConsensus, GenerateConsensusTest, ValuesIn(getGenerateConsensusTestCases())); } // namespace cudapoa } // namespace genomeworks } // namespace claraparabricks
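// --------------------------------------------------------------------------------------------
// Hedged sketch (not part of GenomeWorks): the test above pairs every cudaMallocManaged()
// with a matching cudaFree() at the end of the function. A small RAII wrapper like the one
// below keeps those buffers exception-safe and removes the long free list; "ManagedBuffer"
// and the usage shown in the trailing comment are illustrative assumptions only.
// --------------------------------------------------------------------------------------------
#include <cstddef>
#include <stdexcept>
#include <cuda_runtime.h>

template <typename T>
class ManagedBuffer
{
public:
    explicit ManagedBuffer(std::size_t count)
    {
        if (cudaMallocManaged(&ptr_, count * sizeof(T)) != cudaSuccess)
            throw std::runtime_error("cudaMallocManaged failed");
    }
    ~ManagedBuffer() { cudaFree(ptr_); }              // released even on early return
    ManagedBuffer(const ManagedBuffer&)            = delete;  // unique ownership
    ManagedBuffer& operator=(const ManagedBuffer&) = delete;
    T* data() { return ptr_; }

private:
    T* ptr_ = nullptr;
};

// Possible usage inside a test body (illustrative):
//   ManagedBuffer<uint8_t> nodes(batch_size.max_nodes_per_graph);
//   ManagedBuffer<int16_t> node_count(1);
//   ... launch kernels with nodes.data(), node_count.data() ...
// Every buffer is freed automatically when the objects go out of scope.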
* Note: * PME_Common. This is an experimental interface that is subject to change and/or deletion. */ #ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_PME_COMMON_H_ #define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_PME_PME_COMMON_H_ #include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh" __constant__ float PME_Ma[4] = {1.0 / 6.0, -0.5, 0.5, -1.0 / 6.0}; __constant__ float PME_Mb[4] = {0, 0.5, -1, 0.5}; __constant__ float PME_Mc[4] = {0, 0.5, 0, -0.5}; __constant__ float PME_Md[4] = {0, 1.0 / 6.0, 4.0 / 6.0, 1.0 / 6.0}; __constant__ float PME_dMa[4] = {0.5, -1.5, 1.5, -0.5}; __constant__ float PME_dMb[4] = {0, 1, -2, 1}; __constant__ float PME_dMc[4] = {0, 0.5, 0, -0.5}; #define PI 3.1415926 const float periodic_factor_inverse = 2.3283064365387e-10; static dim3 thread_PME; const float cutoff = 10.0; const float tolerance = 0.00001; static float M_(float u, int n) { if (n == 2) { if (u > 2 || u < 0) return 0; return 1 - abs(u - 1); } else { return u / (n - 1) * M_(u, n - 1) + (n - u) / (n - 1) * M_(u - 1, n - 1); } } static float Get_Beta(float cutoff, float tolerance) { float beta, low, high, tempf; int ilow, ihigh; high = 1.0; ihigh = 1; while (1) { tempf = erfc(high * cutoff) / cutoff; if (tempf <= tolerance) break; high *= 2; ihigh++; } ihigh += 50; low = 0.0; for (ilow = 1; ilow < ihigh; ilow++) { beta = (low + high) / 2; tempf = erfc(beta * cutoff) / cutoff; if (tempf >= tolerance) low = beta; else high = beta; } return beta; } static cufftComplex expc(cufftComplex z) { cufftComplex res; float t = expf(z.x); sincosf(z.y, &res.y, &res.x); res.x *= t; res.y *= t; return res; } static float getb(int k, int NFFT, int B_order) { cufftComplex tempc, tempc2, res; float tempf; tempc2.x = 0; tempc2.y = 0; tempc.x = 0; tempc.y = 2 * (B_order - 1) * PI * k / NFFT; res = expc(tempc); for (int kk = 0; kk < (B_order - 1); kk++) { tempc.x = 0; tempc.y = 2 * PI * k / NFFT * kk; tempc = expc(tempc); tempf = M_(kk + 1, B_order); tempc2.x += tempf * tempc.x; tempc2.y += tempf * tempc.y; } res = cuCdivf(res, tempc2); return res.x * res.x + res.y * res.y; } __global__ static void device_add(float *ene, float *factor, float *charge_sum) { ene[0] += factor[0] * charge_sum[0] * charge_sum[0]; } __global__ static void PME_Atom_Near(const UNSIGNED_INT_VECTOR *uint_crd, int *PME_atom_near, const int PME_Nin, const float periodic_factor_inverse_x, const float periodic_factor_inverse_y, const float periodic_factor_inverse_z, const int atom_numbers, const int fftx, const int ffty, const int fftz, const UNSIGNED_INT_VECTOR *PME_kxyz, UNSIGNED_INT_VECTOR *PME_uxyz, VECTOR *PME_frxyz) { int atom = blockDim.x * blockIdx.x + threadIdx.x; if (atom < atom_numbers) { UNSIGNED_INT_VECTOR *temp_uxyz = &PME_uxyz[atom]; int k, tempux, tempuy, tempuz; float tempf; tempf = static_cast<float>(uint_crd[atom].uint_x) * periodic_factor_inverse_x; tempux = static_cast<int>(tempf); PME_frxyz[atom].x = tempf - tempux; tempf = static_cast<float>(uint_crd[atom].uint_y) * periodic_factor_inverse_y; tempuy = static_cast<int>(tempf); PME_frxyz[atom].y = tempf - tempuy; tempf = static_cast<float>(uint_crd[atom].uint_z) * periodic_factor_inverse_z; tempuz = static_cast<int>(tempf); PME_frxyz[atom].z = tempf - tempuz; if (tempux != (*temp_uxyz).uint_x || tempuy != (*temp_uxyz).uint_y || tempuz != (*temp_uxyz).uint_z) { (*temp_uxyz).uint_x = tempux; (*temp_uxyz).uint_y = tempuy; (*temp_uxyz).uint_z = tempuz; int *temp_near = PME_atom_near + atom * 64; int kx, ky, kz; for (k = 0; k < 64; k++) { UNSIGNED_INT_VECTOR 
temp_kxyz = PME_kxyz[k]; kx = tempux - temp_kxyz.uint_x; if (kx < 0) kx += fftx; if (kx > fftx) kx -= fftx; ky = tempuy - temp_kxyz.uint_y; if (ky < 0) ky += ffty; if (ky > ffty) ky -= ffty; kz = tempuz - temp_kxyz.uint_z; if (kz < 0) kz += fftz; if (kz > fftz) kz -= fftz; temp_near[k] = kx * PME_Nin + ky * fftz + kz; } } } } __global__ static void PME_Q_Spread(int *PME_atom_near, const float *charge, const VECTOR *PME_frxyz, float *PME_Q, const UNSIGNED_INT_VECTOR *PME_kxyz, const int atom_numbers) { int atom = blockDim.x * blockIdx.x + threadIdx.x; if (atom < atom_numbers) { int k; float tempf, tempQ, tempf2; int *temp_near = PME_atom_near + atom * 64; VECTOR temp_frxyz = PME_frxyz[atom]; float tempcharge = charge[atom]; UNSIGNED_INT_VECTOR temp_kxyz; unsigned int kx; for (k = threadIdx.y; k < 64; k = k + blockDim.y) { temp_kxyz = PME_kxyz[k]; kx = temp_kxyz.uint_x; tempf = (temp_frxyz.x); tempf2 = tempf * tempf; tempf = PME_Ma[kx] * tempf * tempf2 + PME_Mb[kx] * tempf2 + PME_Mc[kx] * tempf + PME_Md[kx]; tempQ = tempcharge * tempf; kx = temp_kxyz.uint_y; tempf = (temp_frxyz.y); tempf2 = tempf * tempf; tempf = PME_Ma[kx] * tempf * tempf2 + PME_Mb[kx] * tempf2 + PME_Mc[kx] * tempf + PME_Md[kx]; tempQ = tempQ * tempf; kx = temp_kxyz.uint_z; tempf = (temp_frxyz.z); tempf2 = tempf * tempf; tempf = PME_Ma[kx] * tempf * tempf2 + PME_Mb[kx] * tempf2 + PME_Mc[kx] * tempf + PME_Md[kx]; tempQ = tempQ * tempf; atomicAdd(&PME_Q[temp_near[k]], tempQ); } } } __global__ static void PME_Direct_Energy(const int atom_numbers, const NEIGHBOR_LIST *nl, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *boxlength, const float *charge, const float beta, const float cutoff_square, float *direct_ene) { int atom_i = blockDim.x * blockIdx.x + threadIdx.x; if (atom_i < atom_numbers) { NEIGHBOR_LIST nl_i = nl[atom_i]; int N = nl_i.atom_numbers; int atom_j; int int_x; int int_y; int int_z; UNSIGNED_INT_VECTOR r1 = uint_crd[atom_i], r2; VECTOR dr; float dr2; float dr_abs; // float dr_inverse; float ene_temp; float charge_i = charge[atom_i]; float ene_lin = 0.; // int x, y; // int atom_pair_LJ_type; for (int j = threadIdx.y; j < N; j = j + blockDim.y) { atom_j = nl_i.atom_serial[j]; r2 = uint_crd[atom_j]; int_x = r2.uint_x - r1.uint_x; int_y = r2.uint_y - r1.uint_y; int_z = r2.uint_z - r1.uint_z; dr.x = boxlength[0].x * int_x; dr.y = boxlength[0].y * int_y; dr.z = boxlength[0].z * int_z; dr2 = dr.x * dr.x + dr.y * dr.y + dr.z * dr.z; if (dr2 < cutoff_square) { dr_abs = norm3df(dr.x, dr.y, dr.z); ene_temp = charge_i * charge[atom_j] * erfcf(beta * dr_abs) / dr_abs; ene_lin = ene_lin + ene_temp; } } atomicAdd(direct_ene, ene_lin); } } __global__ static void PME_Direct_Atom_Energy(const int atom_numbers, const NEIGHBOR_LIST *nl, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *boxlength, const float *charge, const float beta, const float cutoff_square, float *direct_ene) { int atom_i = blockDim.x * blockIdx.x + threadIdx.x; if (atom_i < atom_numbers) { NEIGHBOR_LIST nl_i = nl[atom_i]; int N = nl_i.atom_numbers; int atom_j; int int_x; int int_y; int int_z; UNSIGNED_INT_VECTOR r1 = uint_crd[atom_i], r2; VECTOR dr; float dr2; float dr_abs; // float dr_inverse; float ene_temp; float charge_i = charge[atom_i]; float ene_lin = 0.; for (int j = threadIdx.y; j < N; j = j + blockDim.y) { atom_j = nl_i.atom_serial[j]; r2 = uint_crd[atom_j]; int_x = r2.uint_x - r1.uint_x; int_y = r2.uint_y - r1.uint_y; int_z = r2.uint_z - r1.uint_z; dr.x = boxlength[0].x * int_x; dr.y = boxlength[0].y * int_y; dr.z = boxlength[0].z * int_z; dr2 = 
dr.x * dr.x + dr.y * dr.y + dr.z * dr.z; if (dr2 < cutoff_square) { dr_abs = norm3df(dr.x, dr.y, dr.z); ene_temp = charge_i * charge[atom_j] * erfcf(beta * dr_abs) / dr_abs; ene_lin = ene_lin + ene_temp; } } atomicAdd(&direct_ene[atom_i], ene_lin); } } __global__ static void PME_Energy_Product(const int element_number, const float *list1, const float *list2, float *sum) { if (threadIdx.x == 0) { sum[0] = 0.; } __syncthreads(); float lin = 0.0; for (int i = threadIdx.x; i < element_number; i = i + blockDim.x) { lin = lin + list1[i] * list2[i]; } atomicAdd(sum, lin); } __global__ static void PME_BCFQ(cufftComplex *PME_FQ, float *PME_BC, int PME_Nfft) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index < PME_Nfft) { float tempf = PME_BC[index]; cufftComplex tempc = PME_FQ[index]; PME_FQ[index].x = tempc.x * tempf; PME_FQ[index].y = tempc.y * tempf; } } __global__ static void PME_Excluded_Energy_Correction(const int atom_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *sacler, const float *charge, const float pme_beta, const float sqrt_pi, const int *excluded_list_start, const int *excluded_list, const int *excluded_atom_numbers, float *ene) { int atom_i = blockDim.x * blockIdx.x + threadIdx.x; if (atom_i < atom_numbers) { int excluded_number = excluded_atom_numbers[atom_i]; if (excluded_number > 0) { int list_start = excluded_list_start[atom_i]; // int atom_min = excluded_list[list_start]; int list_end = list_start + excluded_number; int atom_j; int int_x; int int_y; int int_z; float charge_i = charge[atom_i]; float charge_j; float dr_abs; float beta_dr; UNSIGNED_INT_VECTOR r1 = uint_crd[atom_i], r2; VECTOR dr; float dr2; float ene_lin = 0.; for (int i = list_start; i < list_end; i = i + 1) { atom_j = excluded_list[i]; r2 = uint_crd[atom_j]; charge_j = charge[atom_j]; int_x = r2.uint_x - r1.uint_x; int_y = r2.uint_y - r1.uint_y; int_z = r2.uint_z - r1.uint_z; dr.x = sacler[0].x * int_x; dr.y = sacler[0].y * int_y; dr.z = sacler[0].z * int_z; dr2 = dr.x * dr.x + dr.y * dr.y + dr.z * dr.z; dr_abs = sqrtf(dr2); beta_dr = pme_beta * dr_abs; ene_lin -= charge_i * charge_j * erff(beta_dr) / dr_abs; } atomicAdd(ene, ene_lin); } } } #endif
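// --------------------------------------------------------------------------------------------
// Hedged sketch: a host-only, standalone version of the bracketing-and-bisection scheme used
// by Get_Beta() above to choose the Ewald splitting parameter so that erfc(beta*cutoff)/cutoff
// falls below the requested tolerance. It relies on <cmath> only; find_ewald_beta is a
// hypothetical name, and the defaults (cutoff = 10, tolerance = 1e-5) simply mirror the
// constants declared in this header.
// --------------------------------------------------------------------------------------------
#include <cmath>
#include <cstdio>

static float find_ewald_beta(float cutoff, float tolerance)
{
    // 1) grow an upper bracket until the real-space error term is small enough
    float high  = 1.0f;
    int   steps = 1;
    while (std::erfc(high * cutoff) / cutoff > tolerance) {
        high *= 2.0f;
        ++steps;
    }
    // 2) bisect between 0 and the bracket; the extra 50 iterations only sharpen beta
    float low = 0.0f, beta = high;
    for (int i = 1; i < steps + 50; ++i) {
        beta = 0.5f * (low + high);
        if (std::erfc(beta * cutoff) / cutoff >= tolerance)
            low = beta;
        else
            high = beta;
    }
    return beta;
}

int main()
{
    std::printf("beta(cutoff=10, tol=1e-5) = %f\n", find_ewald_beta(10.0f, 1e-5f));
    return 0;
}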
#include "boolean3.cuh" #include "impl.cuh" namespace { using namespace manifold; using namespace thrust::placeholders; struct MakeTri { const Halfedge* halfedges; __host__ __device__ void operator()(thrust::tuple<glm::ivec3&, int> inOut) { glm::ivec3& tri = thrust::get<0>(inOut); const int face = 3 * thrust::get<1>(inOut); for (int i : {0, 1, 2}) { tri[i] = halfedges[face + i].startVert; } } }; struct GetMeshID { __host__ __device__ void operator()(thrust::tuple<int&, BaryRef> inOut) { thrust::get<0>(inOut) = thrust::get<1>(inOut).meshID; } }; Manifold Halfspace(Box bBox, glm::vec3 normal, float originOffset) { normal = glm::normalize(normal); Manifold cutter = Manifold::Cube(glm::vec3(2.0f), true).Translate({1.0f, 0.0f, 0.0f}); float size = glm::length(bBox.Center() - normal * originOffset) + 0.5f * glm::length(bBox.Size()); cutter.Scale(glm::vec3(size)).Translate({originOffset, 0.0f, 0.0f}); float yDeg = glm::degrees(-glm::asin(normal.z)); float zDeg = glm::degrees(glm::atan(normal.y, normal.x)); return cutter.Rotate(0.0f, yDeg, zDeg); } } // namespace namespace manifold { Manifold::Manifold() : pImpl_{std::make_unique<Impl>()} {} Manifold::Manifold(const Mesh& mesh) : pImpl_{std::make_unique<Impl>(mesh)} {} Manifold::~Manifold() = default; Manifold::Manifold(Manifold&&) noexcept = default; Manifold& Manifold::operator=(Manifold&&) noexcept = default; Manifold::Manifold(const Manifold& other) : pImpl_(new Impl(*other.pImpl_)) { pImpl_->DuplicateMeshIDs(); } Manifold& Manifold::operator=(const Manifold& other) { if (this != &other) { pImpl_.reset(new Impl(*other.pImpl_)); pImpl_->DuplicateMeshIDs(); } return *this; } /** * This returns a Mesh of simple vectors of vertices and triangles suitable for * saving or other operations outside of the context of this library. */ Mesh Manifold::GetMesh() const { pImpl_->ApplyTransform(); Mesh result; result.vertPos.insert(result.vertPos.end(), pImpl_->vertPos_.begin(), pImpl_->vertPos_.end()); result.vertNormal.insert(result.vertNormal.end(), pImpl_->vertNormal_.begin(), pImpl_->vertNormal_.end()); result.halfedgeTangent.insert(result.halfedgeTangent.end(), pImpl_->halfedgeTangent_.begin(), pImpl_->halfedgeTangent_.end()); result.triVerts.resize(NumTri()); thrust::for_each_n(zip(result.triVerts.begin(), countAt(0)), NumTri(), MakeTri({pImpl_->halfedge_.cptrH()})); return result; } /** * These static properties control how circular shapes are quantized by default * on construction. If circularSegments is specified, it takes precedence. If it * is zero, then instead the minimum is used of the segments calculated based on * edge length and angle, rounded up to the nearest multiple of four. To get * numbers not divisible by four, circularSegements must be specified. 
*/ int Manifold::circularSegments_ = 0; float Manifold::circularAngle_ = 10.0f; float Manifold::circularEdgeLength_ = 1.0f; void Manifold::SetMinCircularAngle(float angle) { ALWAYS_ASSERT(angle > 0.0f, userErr, "angle must be positive!"); Manifold::circularAngle_ = angle; } void Manifold::SetMinCircularEdgeLength(float length) { ALWAYS_ASSERT(length > 0.0f, userErr, "length must be positive!"); Manifold::circularEdgeLength_ = length; } void Manifold::SetCircularSegments(int number) { ALWAYS_ASSERT(number > 2 || number == 0, userErr, "must have at least three segments in circle!"); Manifold::circularSegments_ = number; } int Manifold::GetCircularSegments(float radius) { if (Manifold::circularSegments_ > 0) return Manifold::circularSegments_; int nSegA = 360.0f / Manifold::circularAngle_; int nSegL = 2.0f * radius * glm::pi<float>() / Manifold::circularEdgeLength_; int nSeg = min(nSegA, nSegL) + 3; nSeg -= nSeg % 4; return nSeg; } bool Manifold::IsEmpty() const { return pImpl_->IsEmpty(); } int Manifold::NumVert() const { return pImpl_->NumVert(); } int Manifold::NumEdge() const { return pImpl_->NumEdge(); } int Manifold::NumTri() const { return pImpl_->NumTri(); } Box Manifold::BoundingBox() const { return pImpl_->bBox_.Transform(pImpl_->transform_); } float Manifold::Precision() const { pImpl_->ApplyTransform(); return pImpl_->precision_; } /** * The genus is a topological property of the manifold, representing the number * of "handles". A sphere is 0, torus 1, etc. It is only meaningful for a single * mesh, so it is best to call Decompose() first. */ int Manifold::Genus() const { int chi = NumVert() - NumEdge() + NumTri(); return 1 - chi / 2; } /** * Returns the surface area and volume of the manifold in a Properties * structure. These properties are clamped to zero for a given face if they are * within rounding tolerance. This means degenerate manifolds can by identified * by testing these properties as == 0. */ Properties Manifold::GetProperties() const { return pImpl_->GetProperties(); } /** * Curvature is the inverse of the radius of curvature, and signed such that * positive is convex and negative is concave. There are two orthogonal * principal curvatures at any point on a manifold, with one maximum and the * other minimum. Gaussian curvature is their product, while mean * curvature is their sum. This approximates them for every vertex (returned as * vectors in the structure) and also returns their minimum and maximum values. */ Curvature Manifold::GetCurvature() const { return pImpl_->GetCurvature(); } /** * Gets the relationship to the previous mesh, for the purpose of assinging * properties like texture coordinates. The triBary vector is the same length as * Mesh.triVerts and BaryRef.face gives a unique identifier of the original mesh * face to which this triangle belongs. BaryRef.verts gives the three original * mesh vertex indices to which its barycentric coordinates refer. * BaryRef.vertBary gives an index for each vertex into the barycentric vector * if that vertex is >= 0, indicating it is a new vertex. If the index is < 0, * this indicates it is an original vertex of the triangle, found as the * corresponding element of BaryRef.verts. 
*/ MeshRelation Manifold::GetMeshRelation() const { MeshRelation out; const auto& relation = pImpl_->meshRelation_; out.triBary.insert(out.triBary.end(), relation.triBary.begin(), relation.triBary.end()); out.barycentric.insert(out.barycentric.end(), relation.barycentric.begin(), relation.barycentric.end()); return out; } /** * Returns a vector of unique meshIDs that are referenced by this manifold's * meshRelation. If this manifold has been newly constructed then there will * only be a single meshID, which can be associated with the input mesh for * future reference. */ std::vector<int> Manifold::GetMeshIDs() const { VecDH<int> meshIDs(NumTri()); thrust::for_each_n( zip(meshIDs.beginD(), pImpl_->meshRelation_.triBary.beginD()), NumTri(), GetMeshID()); thrust::sort(meshIDs.beginD(), meshIDs.endD()); int n = thrust::unique(meshIDs.beginD(), meshIDs.endD()) - meshIDs.beginD(); meshIDs.resize(n); std::vector<int> out; out.insert(out.end(), meshIDs.begin(), meshIDs.end()); return out; } /** * If you copy a manifold, but you want this new copy to have new properties * (e.g. a different UV mapping), you can reset its meshID as an original, * meaning it will now be referenced by its descendents instead of the mesh it * was copied from, allowing you to differentiate the copies when applying your * properties to the final result. Its new meshID is returned. */ int Manifold::SetAsOriginal(bool mergeCoplanarRelations) { int meshID = pImpl_->InitializeNewReference(); if (mergeCoplanarRelations) pImpl_->MergeCoplanarRelations(); return meshID; } std::vector<int> Manifold::MeshID2Original() { return Manifold::Impl::meshID2Original_; } bool Manifold::IsManifold() const { return pImpl_->IsManifold(); } bool Manifold::MatchesTriNormals() const { return pImpl_->MatchesTriNormals(); } int Manifold::NumDegenerateTris() const { return pImpl_->NumDegenerateTris(); } Manifold& Manifold::Translate(glm::vec3 v) { pImpl_->transform_[3] += v; return *this; } Manifold& Manifold::Scale(glm::vec3 v) { glm::mat3 s(1.0f); for (int i : {0, 1, 2}) s[i] *= v; pImpl_->transform_ = s * pImpl_->transform_; return *this; } /** * Applys an Euler angle rotation to the manifold, first about the X axis, then * Y, then Z, in degrees. We use degrees so that we can minimize rounding error, * and elimiate it completely for any multiples of 90 degrees. Addtionally, more * efficient code paths are used to update the manifold when the transforms only * rotate by multiples of 90 degrees. */ Manifold& Manifold::Rotate(float xDegrees, float yDegrees, float zDegrees) { glm::mat3 rX(1.0f, 0.0f, 0.0f, // 0.0f, cosd(xDegrees), sind(xDegrees), // 0.0f, -sind(xDegrees), cosd(xDegrees)); glm::mat3 rY(cosd(yDegrees), 0.0f, -sind(yDegrees), // 0.0f, 1.0f, 0.0f, // sind(yDegrees), 0.0f, cosd(yDegrees)); glm::mat3 rZ(cosd(zDegrees), sind(zDegrees), 0.0f, // -sind(zDegrees), cosd(zDegrees), 0.0f, // 0.0f, 0.0f, 1.0f); pImpl_->transform_ = rZ * rY * rX * pImpl_->transform_; return *this; } Manifold& Manifold::Transform(const glm::mat4x3& m) { glm::mat4 old(pImpl_->transform_); pImpl_->transform_ = m * old; return *this; } /** * This function does not change the topology, but allows the vertices to be * moved according to any arbitrary input function. It is easy to create a * function that warps a geometrically valid object into one with is not, but * that is not checked here, so it is up to the user to choose their function * with discretion. 
*/ Manifold& Manifold::Warp(std::function<void(glm::vec3&)> warpFunc) { pImpl_->ApplyTransform(); thrust::for_each_n(pImpl_->vertPos_.begin(), NumVert(), warpFunc); pImpl_->Update(); pImpl_->faceNormal_.resize(0); // force recalculation of triNormal pImpl_->CalculateNormals(); pImpl_->SetPrecision(); return *this; } Manifold& Manifold::Refine(int n) { pImpl_->Refine(n); return *this; } /** * This is a checksum-style verification of the collider, simply returning the * total number of edge-face bounding box overlaps between this and other. */ int Manifold::NumOverlaps(const Manifold& other) const { pImpl_->ApplyTransform(); other.pImpl_->ApplyTransform(); SparseIndices overlaps = pImpl_->EdgeCollisions(*other.pImpl_); int num_overlaps = overlaps.size(); overlaps = other.pImpl_->EdgeCollisions(*pImpl_); return num_overlaps += overlaps.size(); } Manifold Manifold::Boolean(const Manifold& second, OpType op) const { pImpl_->ApplyTransform(); second.pImpl_->ApplyTransform(); Boolean3 boolean(*pImpl_, *second.pImpl_, op); Manifold result; result.pImpl_ = std::make_unique<Impl>(boolean.Result(op)); return result; } Manifold Manifold::operator+(const Manifold& Q) const { return Boolean(Q, OpType::ADD); } Manifold& Manifold::operator+=(const Manifold& Q) { *this = *this + Q; return *this; } Manifold Manifold::operator-(const Manifold& Q) const { return Boolean(Q, OpType::SUBTRACT); } Manifold& Manifold::operator-=(const Manifold& Q) { *this = *this - Q; return *this; } Manifold Manifold::operator^(const Manifold& Q) const { return Boolean(Q, OpType::INTERSECT); } Manifold& Manifold::operator^=(const Manifold& Q) { *this = *this ^ Q; return *this; } /** * Split cuts this manifold in two using the input manifold. The first result is * the intersection, second is the difference. This is more efficient than doing * them separately. */ std::pair<Manifold, Manifold> Manifold::Split(const Manifold& cutter) const { pImpl_->ApplyTransform(); cutter.pImpl_->ApplyTransform(); Boolean3 boolean(*pImpl_, *cutter.pImpl_, OpType::SUBTRACT); std::pair<Manifold, Manifold> result; result.first.pImpl_ = std::make_unique<Impl>(boolean.Result(OpType::INTERSECT)); result.second.pImpl_ = std::make_unique<Impl>(boolean.Result(OpType::SUBTRACT)); return result; } /** * Convient version of Split for a half-space. The first result is in the * direction of the normal, second is opposite. Origin offset is the distance of * the plane from the origin in the direction of the normal vector. The length * of the normal is not important, as it is normalized internally. */ std::pair<Manifold, Manifold> Manifold::SplitByPlane(glm::vec3 normal, float originOffset) const { return Split(Halfspace(BoundingBox(), normal, originOffset)); } /** * Identical to SplitbyPlane, but calculating and returning only the first * result. */ Manifold Manifold::TrimByPlane(glm::vec3 normal, float originOffset) const { pImpl_->ApplyTransform(); return *this ^ Halfspace(BoundingBox(), normal, originOffset); } } // namespace manifold
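// --------------------------------------------------------------------------------------------
// Hedged sketch: Manifold::Genus() above relies on the Euler characteristic of a closed
// triangle mesh, chi = V - E + F, with genus = 1 - chi/2. The standalone check below uses
// textbook vertex/edge/triangle counts, not values taken from the library;
// genus_from_counts is a hypothetical helper name.
// --------------------------------------------------------------------------------------------
#include <cassert>
#include <cstdio>

static int genus_from_counts(int num_vert, int num_edge, int num_tri)
{
    const int chi = num_vert - num_edge + num_tri;
    return 1 - chi / 2;   // valid for a single closed, connected, manifold mesh
}

int main()
{
    // Triangulated cube: 8 vertices, 18 edges, 12 triangles -> chi = 2, genus 0.
    assert(genus_from_counts(8, 18, 12) == 0);
    // A 3x3 grid torus triangulation: 9 vertices, 27 edges, 18 triangles -> chi = 0, genus 1.
    assert(genus_from_counts(9, 27, 18) == 1);
    std::printf("Euler-characteristic checks passed\n");
    return 0;
}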
// ----------------------------------------------------------------------------------------- // NVEnc by rigaya // ----------------------------------------------------------------------------------------- // // The MIT License // // Copyright (c) 2014-2016 rigaya // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. // // ------------------------------------------------------------------------------------------ #include <map> #include <array> #include <iostream> #include <fstream> #include <algorithm> #include <numeric> #define _USE_MATH_DEFINES #include <cmath> #include "convert_csp.h" #include "NVEncFilterTransform.h" #include "NVEncParam.h" #pragma warning (push) #pragma warning (disable: 4819) #include "cuda_runtime.h" #include "device_launch_parameters.h" #pragma warning (pop) #include "rgy_cuda_util_kernel.h" static const int TRASNPOSE_BLOCK_DIM = 16; static const int TRASNPOSE_TILE_DIM = 64; static const int FLIP_BLOCK_DIM = 16; template<typename TypePixel4, bool flipX, bool flipY> __global__ void kernel_transpose_plane( uint8_t *__restrict__ pDst, const int dstPitch, const int dstWidth, // = srcHeight const int dstHeight, // = srcWidth const uint8_t *__restrict__ pSrc, const int srcPitch ) { __shared__ decltype(TypePixel4::x) stemp[TRASNPOSE_TILE_DIM][TRASNPOSE_TILE_DIM + 4]; const int srcHeight = dstWidth; const int srcWidth = dstHeight; const int dstBlockX = blockIdx.x; const int dstBlockY = blockIdx.y; const int srcBlockX = (flipX) ? gridDim.y - 1 - blockIdx.y : blockIdx.y; const int srcBlockY = (flipY) ? gridDim.x - 1 - blockIdx.x : blockIdx.x; const int offsetX = (flipX) ? srcWidth - ALIGN(srcWidth, TRASNPOSE_TILE_DIM) : 0; const int offsetY = (flipY) ? 
srcHeight - ALIGN(srcHeight, TRASNPOSE_TILE_DIM) : 0; { for (int j = threadIdx.y; j < TRASNPOSE_TILE_DIM; j += TRASNPOSE_BLOCK_DIM) { const int srcX = srcBlockX * TRASNPOSE_TILE_DIM + threadIdx.x * 4 + offsetX; const int srcY = srcBlockY * TRASNPOSE_TILE_DIM + j + offsetY; TypePixel4 val = { 128, 128, 128, 128 }; if (srcX < srcWidth && srcY < srcHeight) { TypePixel4 *ptr_src = (TypePixel4 *)(pSrc + srcY * srcPitch + srcX * sizeof(TypePixel4::x)); if ((offsetX & 3) == 0) { val = ptr_src[0]; } else { decltype(TypePixel4::x) *ptr_src_elem = (decltype(TypePixel4::x) *)ptr_src; val.x = ptr_src_elem[0]; val.y = ptr_src_elem[1]; val.z = ptr_src_elem[2]; val.w = ptr_src_elem[3]; } } *(TypePixel4 *)&stemp[j][threadIdx.x * 4] = val; } } __syncthreads(); { for (int j = threadIdx.y; j < TRASNPOSE_TILE_DIM; j += TRASNPOSE_BLOCK_DIM) { const int dstX = dstBlockX * TRASNPOSE_TILE_DIM + threadIdx.x * 4; const int dstY = dstBlockY * TRASNPOSE_TILE_DIM + j; const int tmpY = (flipX) ? TRASNPOSE_TILE_DIM - 1 - j : j; if (dstX < dstWidth && dstY < dstHeight) { TypePixel4 val = { 0, 0, 0, 0 }; if (flipY) { val.x = stemp[TRASNPOSE_TILE_DIM - (threadIdx.x+1) * 4 + 3][tmpY]; val.y = stemp[TRASNPOSE_TILE_DIM - (threadIdx.x+1) * 4 + 2][tmpY]; val.z = stemp[TRASNPOSE_TILE_DIM - (threadIdx.x+1) * 4 + 1][tmpY]; val.w = stemp[TRASNPOSE_TILE_DIM - (threadIdx.x+1) * 4 + 0][tmpY]; } else { val.x = stemp[threadIdx.x * 4 + 0][tmpY]; val.y = stemp[threadIdx.x * 4 + 1][tmpY]; val.z = stemp[threadIdx.x * 4 + 2][tmpY]; val.w = stemp[threadIdx.x * 4 + 3][tmpY]; } TypePixel4 *ptr_dst = (TypePixel4 *)(pDst + dstY * dstPitch + dstX * sizeof(TypePixel4::x)); *ptr_dst = val; } } } }; template<typename TypePixel4, bool flipX, bool flipY> cudaError_t transpose_plane( RGYFrameInfo *pOutputFrame, const RGYFrameInfo *pInputFrame, cudaStream_t stream ) { dim3 blockSize(TRASNPOSE_BLOCK_DIM, TRASNPOSE_BLOCK_DIM); dim3 gridSize( divCeil(pOutputFrame->width, TRASNPOSE_TILE_DIM), divCeil(pOutputFrame->height, TRASNPOSE_TILE_DIM)); kernel_transpose_plane<TypePixel4, flipX, flipY><<<gridSize, blockSize, 0, stream>>>( (uint8_t *)pOutputFrame->ptr, pOutputFrame->pitch, pOutputFrame->width, // = srcHeight pOutputFrame->height, // = srcWidth (const uint8_t *)pInputFrame->ptr, pInputFrame->pitch); auto cudaerr = cudaGetLastError(); if (cudaerr != cudaSuccess) { return cudaerr; } return cudaerr; } template<typename TypePixel4, bool flipX, bool flipY> __global__ void kernel_flip_plane( uint8_t *__restrict__ pDst, const int dstPitch, const int dstWidth, const int dstHeight, const uint8_t *__restrict__ pSrc, const int srcPitch ) { __shared__ decltype(TypePixel4::x) stemp[FLIP_BLOCK_DIM][FLIP_BLOCK_DIM*4]; const int dstBlockX = blockIdx.x; const int dstBlockY = blockIdx.y; const int srcBlockX = (flipX) ? gridDim.x - 1 - blockIdx.x : blockIdx.x; const int srcBlockY = (flipY) ? gridDim.y - 1 - blockIdx.y : blockIdx.y; const int offsetX = (flipX) ? dstWidth - ALIGN(dstWidth, FLIP_BLOCK_DIM*4) : 0; const int offsetY = (flipY) ? 
dstHeight - ALIGN(dstHeight, FLIP_BLOCK_DIM) : 0; const int srcX = (srcBlockX * FLIP_BLOCK_DIM + threadIdx.x) * 4 + offsetX; const int srcY = srcBlockY * FLIP_BLOCK_DIM + threadIdx.y + offsetY; TypePixel4 val = { 128, 128, 128, 128 }; if (srcX < dstWidth && srcY < dstHeight) { TypePixel4 *ptr_src = (TypePixel4 *)(pSrc + srcY * srcPitch + srcX * sizeof(TypePixel4::x)); if ((offsetX & 3) == 0) { val = ptr_src[0]; } else { decltype(TypePixel4::x) *ptr_src_elem = (decltype(TypePixel4::x) *)ptr_src; val.x = ptr_src_elem[0]; val.y = ptr_src_elem[1]; val.z = ptr_src_elem[2]; val.w = ptr_src_elem[3]; } } *(TypePixel4 *)&stemp[threadIdx.y][threadIdx.x * 4] = val; __syncthreads(); const int dstX = (dstBlockX * FLIP_BLOCK_DIM + threadIdx.x) * 4; const int dstY = dstBlockY * FLIP_BLOCK_DIM + threadIdx.y; const int tmpY = (flipY) ? FLIP_BLOCK_DIM - 1 - threadIdx.y : threadIdx.y; val = *(TypePixel4 *)&stemp[tmpY][threadIdx.x * 4]; if (flipX) { TypePixel4 val2 = *(TypePixel4 *)&stemp[tmpY][FLIP_BLOCK_DIM * 4 - (threadIdx.x + 1) * 4]; val.x = val2.w; val.y = val2.z; val.z = val2.y; val.w = val2.x; } else { val = *(TypePixel4 *)&stemp[tmpY][threadIdx.x * 4]; } if (dstX < dstWidth && dstY < dstHeight) { TypePixel4 *ptr_dst = (TypePixel4 *)(pDst + dstY * dstPitch + dstX * sizeof(TypePixel4::x)); *ptr_dst = val; } }; template<typename TypePixel4, bool flipX, bool flipY> cudaError_t flip_plane( RGYFrameInfo *pOutputFrame, const RGYFrameInfo *pInputFrame, cudaStream_t stream ) { dim3 blockSize(FLIP_BLOCK_DIM, FLIP_BLOCK_DIM); dim3 gridSize( divCeil(pOutputFrame->width, FLIP_BLOCK_DIM*4), divCeil(pOutputFrame->height, FLIP_BLOCK_DIM)); kernel_flip_plane<TypePixel4, flipX, flipY> << <gridSize, blockSize, 0, stream >> > ( (uint8_t *)pOutputFrame->ptr, pOutputFrame->pitch, pOutputFrame->width, pOutputFrame->height, (const uint8_t *)pInputFrame->ptr, pInputFrame->pitch); auto cudaerr = cudaGetLastError(); if (cudaerr != cudaSuccess) { return cudaerr; } return cudaerr; } template<typename TypePixel4> cudaError_t transform_plane( RGYFrameInfo *pOutputPlane, const RGYFrameInfo *pInputPlane, const std::shared_ptr<NVEncFilterParamTransform> pParam, cudaStream_t stream ) { if (pParam->trans.transpose) { if (pParam->trans.flipX && pParam->trans.flipY) { return transpose_plane<TypePixel4, true, true>(pOutputPlane, pInputPlane, stream); } else if (pParam->trans.flipX) { return transpose_plane<TypePixel4, true, false>(pOutputPlane, pInputPlane, stream); } else if (pParam->trans.flipY) { return transpose_plane<TypePixel4, false, true>(pOutputPlane, pInputPlane, stream); } else { return transpose_plane<TypePixel4, false, false>(pOutputPlane, pInputPlane, stream); } } else { if (pParam->trans.flipX && pParam->trans.flipY) { return flip_plane<TypePixel4, true, true>(pOutputPlane, pInputPlane, stream); } else if (pParam->trans.flipX) { return flip_plane<TypePixel4, true, false>(pOutputPlane, pInputPlane, stream); } else if (pParam->trans.flipY) { return flip_plane<TypePixel4, false, true>(pOutputPlane, pInputPlane, stream); } else { return flip_plane<TypePixel4, false, false>(pOutputPlane, pInputPlane, stream); } } } template<typename TypePixel4> cudaError_t transform_frame(RGYFrameInfo *pOutputFrame, const RGYFrameInfo *pInputFrame, const std::shared_ptr<NVEncFilterParamTransform> pParam, cudaStream_t stream ) { cudaError_t cudaerr = cudaSuccess; const auto planeInputY = getPlane(pInputFrame, RGY_PLANE_Y); const auto planeInputU = getPlane(pInputFrame, RGY_PLANE_U); const auto planeInputV = getPlane(pInputFrame, RGY_PLANE_V); 
auto planeOutputY = getPlane(pOutputFrame, RGY_PLANE_Y); auto planeOutputU = getPlane(pOutputFrame, RGY_PLANE_U); auto planeOutputV = getPlane(pOutputFrame, RGY_PLANE_V); cudaerr = transform_plane<TypePixel4>(&planeOutputY, &planeInputY, pParam, stream); if (cudaerr != cudaSuccess) { return cudaerr; } cudaerr = transform_plane<TypePixel4>(&planeOutputU, &planeInputU, pParam, stream); if (cudaerr != cudaSuccess) { return cudaerr; } cudaerr = transform_plane<TypePixel4>(&planeOutputV, &planeInputV, pParam, stream); if (cudaerr != cudaSuccess) { return cudaerr; } return cudaerr; } NVEncFilterTransform::NVEncFilterTransform() : m_weight0(), m_weight1() { m_sFilterName = _T("transform"); } NVEncFilterTransform::~NVEncFilterTransform() { close(); } RGY_ERR NVEncFilterTransform::checkParam(const std::shared_ptr<NVEncFilterParamTransform> pNnediParam) { if (pNnediParam->frameOut.height <= 0 || pNnediParam->frameOut.width <= 0) { AddMessage(RGY_LOG_ERROR, _T("Invalid frame size.\n")); return RGY_ERR_INVALID_PARAM; } return RGY_ERR_NONE; } RGY_ERR NVEncFilterTransform::init(shared_ptr<NVEncFilterParam> pParam, shared_ptr<RGYLog> pPrintMes) { RGY_ERR sts = RGY_ERR_NONE; m_pPrintMes = pPrintMes; auto prm = std::dynamic_pointer_cast<NVEncFilterParamTransform>(pParam); if (!prm) { AddMessage(RGY_LOG_ERROR, _T("Invalid parameter type.\n")); return RGY_ERR_INVALID_PARAM; } //パラメータチェック if ((sts = checkParam(prm)) != RGY_ERR_NONE) { return sts; } if (prm->trans.transpose) { prm->frameOut.width = prm->frameIn.height; prm->frameOut.height = prm->frameIn.width; } auto cudaerr = AllocFrameBuf(prm->frameOut, 1); if (cudaerr != cudaSuccess) { AddMessage(RGY_LOG_ERROR, _T("failed to allocate memory: %s.\n"), char_to_tstring(cudaGetErrorName(cudaerr)).c_str()); return RGY_ERR_MEMORY_ALLOC; } prm->frameOut.pitch = m_pFrameBuf[0]->frame.pitch; setFilterInfo(pParam->print()); m_pParam = pParam; return sts; } tstring NVEncFilterParamTransform::print() const { return trans.print(); } RGY_ERR NVEncFilterTransform::run_filter(const RGYFrameInfo *pInputFrame, RGYFrameInfo **ppOutputFrames, int *pOutputFrameNum, cudaStream_t stream) { RGY_ERR sts = RGY_ERR_NONE; if (pInputFrame->ptr == nullptr) { return sts; } auto prm = std::dynamic_pointer_cast<NVEncFilterParamTransform>(m_pParam); if (!prm) { AddMessage(RGY_LOG_ERROR, _T("Invalid parameter type.\n")); return RGY_ERR_INVALID_PARAM; } *pOutputFrameNum = 1; if (ppOutputFrames[0] == nullptr) { auto pOutFrame = m_pFrameBuf[m_nFrameIdx].get(); ppOutputFrames[0] = &pOutFrame->frame; ppOutputFrames[0]->picstruct = pInputFrame->picstruct; m_nFrameIdx = (m_nFrameIdx + 1) % m_pFrameBuf.size(); } const auto memcpyKind = getCudaMemcpyKind(pInputFrame->deivce_mem, ppOutputFrames[0]->deivce_mem); if (memcpyKind != cudaMemcpyDeviceToDevice) { AddMessage(RGY_LOG_ERROR, _T("only supported on device memory.\n")); return RGY_ERR_UNSUPPORTED; } if (m_pParam->frameOut.csp != m_pParam->frameIn.csp) { AddMessage(RGY_LOG_ERROR, _T("csp does not match.\n")); return RGY_ERR_UNSUPPORTED; } static const std::map<RGY_CSP, decltype(transform_frame<uchar4>)*> func_list = { { RGY_CSP_YV12, transform_frame<uchar4> }, { RGY_CSP_YV12_16, transform_frame<ushort4> }, { RGY_CSP_YUV444, transform_frame<uchar4> }, { RGY_CSP_YUV444_16, transform_frame<ushort4> } }; if (func_list.count(pInputFrame->csp) == 0) { AddMessage(RGY_LOG_ERROR, _T("unsupported csp %s.\n"), RGY_CSP_NAMES[pInputFrame->csp]); return RGY_ERR_UNSUPPORTED; } func_list.at(pInputFrame->csp)(ppOutputFrames[0], pInputFrame, prm, stream ); auto 
cudaerr = cudaGetLastError(); if (cudaerr != cudaSuccess) { AddMessage(RGY_LOG_ERROR, _T("error at transform(%s): %s.\n"), RGY_CSP_NAMES[pInputFrame->csp], char_to_tstring(cudaGetErrorString(cudaerr)).c_str()); return RGY_ERR_CUDA; } return sts; } void NVEncFilterTransform::close() { m_pFrameBuf.clear(); }
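// --------------------------------------------------------------------------------------------
// Hedged sketch: the launch geometry behind transpose_plane() above. A 16x16 thread block
// (TRASNPOSE_BLOCK_DIM) services a 64x64 pixel tile (TRASNPOSE_TILE_DIM) because each thread
// moves a 4-element vector and each block row loops over the tile rows in steps of 16. The
// helper below only recomputes the grid size on the host; transpose_geometry and div_ceil are
// hypothetical stand-ins for the project's divCeil(), which is assumed to round up.
// --------------------------------------------------------------------------------------------
#include <cstdio>

struct LaunchGeometry { unsigned grid_x, grid_y, block_x, block_y; };

static unsigned div_ceil(unsigned a, unsigned b) { return (a + b - 1) / b; }

static LaunchGeometry transpose_geometry(unsigned dst_width, unsigned dst_height)
{
    constexpr unsigned kBlockDim = 16;  // TRASNPOSE_BLOCK_DIM
    constexpr unsigned kTileDim  = 64;  // TRASNPOSE_TILE_DIM
    return { div_ceil(dst_width,  kTileDim),
             div_ceil(dst_height, kTileDim),
             kBlockDim, kBlockDim };
}

int main()
{
    // 1080x1920 output (a transposed 1920x1080 frame) -> 17 x 30 blocks of 16x16 threads.
    const LaunchGeometry g = transpose_geometry(1080, 1920);
    std::printf("grid %ux%u, block %ux%u\n", g.grid_x, g.grid_y, g.block_x, g.block_y);
    return 0;
}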
#define XTALK_CONST_FRAC 0.25f #define XTALK_CONST_FRAC_REF 0.86f #define XTALK_CONST_FRAC_REF_COMP 0.14f #define XTALK_MAGIC_FLOW 32.0f #define XTALK_MAGIC_LAMBDA 1.425f #define XTALK_MAGIC_DISCOUNT 0.33f #define POLYCLONAL_PPF_CUTOFF 0.84f #define POLYCLONAL_POS_THRESHOLD 0.25f #define POLYCLONAL_BAD_READ_THRESHOLD 100.0f ////////////////////////////////////////////// //XTALK MAGIC AND DEVICE FUNCTIONS class MagicXTalkForFLow{ float cscale; float cscale_ref; float magic_hplus_ref; protected: __device__ inline float modulateEffectByFlow(const float start_frac, const float flow_num, const float offset) const { float approach_one_rate = flow_num/(flow_num+offset); return ( (1.0f-start_frac) * approach_one_rate + start_frac); } __device__ inline void setCscaleRefForFlow(const int flow) { cscale_ref = modulateEffectByFlow( XTALK_CONST_FRAC_REF, flow, XTALK_MAGIC_FLOW); } __device__ inline void setCscaleForFlow(const int flow ) { this->cscale = modulateEffectByFlow( XTALK_CONST_FRAC, flow, XTALK_MAGIC_FLOW); } __device__ inline void setMagicHplusRef( const float regionMeanSignal){ this->magic_hplus_ref = getMagicRefConstant() * regionMeanSignal; } public: __device__ MagicXTalkForFLow(const int flow, const float regionMeanSignal){ setCscaleForFlow(flow); setCscaleRefForFlow(flow); setMagicHplusRef(regionMeanSignal); } __device__ inline float getCscale() const { return cscale; } __device__ inline float getCscaleRef() const { return cscale_ref; } __device__ inline float getMagicRefConstant() const { return (XTALK_CONST_FRAC_REF_COMP/XTALK_CONST_FRAC_REF) * getCscaleRef(); } __device__ inline float getMagicCscaleConstant() const{ return XTALK_MAGIC_DISCOUNT * getCscale(); } __device__ inline float getMagicHplusRef() const { return this->magic_hplus_ref; } __device__ inline float getMagicHPlusCorrector(const float etbR, const float bead_corrector) const { float magic_bead_corrector = (getMagicCscaleConstant()*etbR*bead_corrector); float magic_hplus_corrector = XTALK_MAGIC_LAMBDA* ( magic_bead_corrector - getMagicHplusRef()); return magic_hplus_corrector; } }; //Ampl pointsd to Amplitude of current bead //rx ry are coordinates of current bead in region //regId id of current region __device__ inline float UnweaveMap( const float * AmplCpy, const float default_signal, const int rx, const int ry, const size_t regId ) { float sum = 0.0f; int phase = ConstXTalkP.getPhase(rx); int ly = 0; for (int r=(-ConstXTalkP.getSpanY()); r<=(ConstXTalkP.getSpanY()); r++, ly++) { int lx=0; for (int c=(-ConstXTalkP.getSpanX()); c<=(ConstXTalkP.getSpanX()); c++, lx++) { int tx = c + rx; int ty = r + ry; if ((tx < 0) || tx>= (ImgRegP.getRegW(regId)) || (ty < 0) || ty>=(ImgRegP.getRegH(regId))) { // if we are on the edge of the region...as a stand-in for actual data use the region average signal //ToDo: instead of region average we can use actual values from neighboring regions if available sum += default_signal * (ConstXTalkP.coeff(lx,ly,phase)); //if(rx == 4 && ry == 0 ) //printf("GPU %d %d, %f ,%d %d, %d %d, %f, %d %d, %d %f %f\n", rx,ry, *AmplCpy, c, r, tx, ty, default_signal, lx, ly, phase, ConstXTalkP.coeff(lx,ly,phase), sum); }else{ float amplCopy = LDG_ACCESS(AmplCpy,r*ImgRegP.getImgW()+c); sum += amplCopy * (ConstXTalkP.coeff(lx,ly,phase)); //if(rx == 4 && ry == 0 ) //printf("GPU %d %d, %f ,%d %d, %d %d, %f, %d %d, %d %f %f\n",rx,ry, *AmplCpy, c, r, tx,ty , amplCopy, lx, ly, phase, ConstXTalkP.coeff(lx,ly,phase), sum); } } } return sum; } //__device__ inline //float EmptyCorrector(float default_signal) //{ // 
float sum = 0.0f; // for (int mx=0; mx< XTALK_MAP; mx++){ // sum += default_signal*ConstXTalkP.odd(mx); // } // return sum; //} __device__ inline void DoXTalk( const PerFlowParamsRegion * perFlowRegP, const PerNucParamsRegion * perNucRegP, const MagicXTalkForFLow & MagicStuff, const float* AmplIn, const float * AmpCpy, float * AmplOut, const float regAvgSig, const float valCopies, const float valR, const size_t regId, const size_t rx, const size_t ry ) { float Ampl = *AmplIn; float etbR = ComputeETBR(perNucRegP,perFlowRegP->getRatioDrift(),valR, valCopies); float bead_corrector = UnweaveMap(AmpCpy,regAvgSig, rx,ry,regId); float hplus_corrector = MagicStuff.getMagicHPlusCorrector(etbR,bead_corrector); //bead_corrector - empty_corrector; Ampl -= hplus_corrector/(valCopies); if(Ampl != Ampl) Ampl = 0.0f; // NaN check *AmplOut = Ampl; } // END XTALK DEVICE FUNCTIONS ////////////////////////////////////////////// ////////////////////////////////////////////// // POLYCLONAL DEVICE FUNCTIONS __device__ inline void ClonalFilterUpdate( float * PolyClonalCube, unsigned short * BeadStateMask, const float * Ampl, const size_t planeStride, float const CopyDrift, const float copies ) { float keynorm = *(PolyClonalCube + PolyKeyNorm * planeStride); float tmpAmpl = ((*Ampl) * copies)/keynorm; float * pPPF = PolyClonalCube + PolyPpf * planeStride; float * pSSQ = PolyClonalCube + PolySsq * planeStride; if((tmpAmpl > POLYCLONAL_POS_THRESHOLD) || (ConstGlobalP.isLastClonalUpdateFlow(ConstFlowP.getRealFnum()))) { float ppf; ppf = (tmpAmpl > POLYCLONAL_POS_THRESHOLD)?(*pPPF + 1):(*pPPF); if(ConstGlobalP.isLastClonalUpdateFlow(ConstFlowP.getRealFnum())) ppf /= ConstGlobalP.getClonalFilterNumFlows(); //average across training flows after last training flow collected *pPPF = ppf; } float x = tmpAmpl - ::round(tmpAmpl); *pSSQ = *pSSQ + x * x; if(tmpAmpl >POLYCLONAL_BAD_READ_THRESHOLD) *BeadStateMask = ((*BeadStateMask) | BkgMaskBadRead); } // END POLYCLONAL DEVICE FUNCTIONS ////////////////////////////////////////////// __device__ inline void EffectiveAmplitudeForRawWells( const float *AmplIn, float *AmpOut, float copies, float copyDrift) { float copyMultiplier = ::pow(copyDrift, (float)ConstFlowP.getRealFnum()); //*AmpOut = (*AmplIn) * copies * copyMultiplier; // Copies are written separately in the new format *AmpOut = (*AmplIn) * copyMultiplier; } ////////////////////////////////////////////// //KERNELS //one threadblock per region //calculates region mean signal, //updates signal for non live beads //sm layout: numwarps * uncompressed frames + numwaprs integers to sum up count //reduce empty average __global__ __launch_bounds__(128, 16) void UpdateSignalMap_k( const unsigned short * RegionMask, const unsigned short * bfMask, const float* BeadParamCube, float * ResultCube, float * regionAvg // has to be inited to 0.0f for all regions // float * beadAvg // has to be inited to 0.0f for all regions ) { extern __shared__ float smemSum[]; // uncompressed frames per warp float * sm_base = smemSum; float * sm_warp_base = sm_base + threadIdx.y*blockDim.x; float * sm = sm_warp_base + threadIdx.x; //same for all warps within block const size_t regId = blockIdx.x + blockIdx.y * gridDim.x; const size_t windowSize = blockDim.x; //window size to slide accross row const size_t nextWorkRowStride = ImgRegP.getImgW() * blockDim.y; //stride to get to next row to work on const size_t regWidth = ImgRegP.getRegW(regId); const size_t regHeight = ImgRegP.getRegH(regId); //already filters out regions with 0 live beads. 
so no more checks are needed if( LDG_ACCESS(RegionMask,regId) != RegionMaskLive) return; //starting coordinates for each thread within region size_t rx = threadIdx.x; //region x to work on size_t ry = threadIdx.y; //starting offset for each thread within image size_t idx = ImgRegP.getWellIdx(regId,rx,ry); bfMask += idx; float * AmpCopy = ResultCube + ImgRegP.getImgSize() * ResultAmplCopyMapXTalk + idx; const float * Ampl = ResultCube + ImgRegP.getImgSize() * ResultAmpl + idx; const float * copies = BeadParamCube + ImgRegP.getImgSize() * BpCopies + idx; regionAvg += regId; //beadAvg += regId; *sm = 0; //float Cnt = 0; while(ry < regHeight){ size_t windowStart = 0; const unsigned short* bfmaskRow = bfMask; const float* AmplRow = Ampl; const float* copiesRow = copies; float* AmpCopyRow = AmpCopy; //slide warp/window across row and create sum for of num live beads for each warp while(windowStart < regWidth){ if(rx < regWidth){ //if bead still in reagion set sm according to mask if(Match(bfmaskRow,(MaskType)MaskLive)){ float ampcpy = (*AmplRow) * (*copiesRow); *AmpCopyRow = ampcpy; *sm += ampcpy; //Cnt += 1.0f; //increase t0 count to calculate average. } } //slide window rx += windowSize; windowStart += windowSize; bfmaskRow += windowSize; AmplRow += windowSize; AmpCopyRow += windowSize; copiesRow += windowSize; } //row done rx = threadIdx.x; ry += blockDim.y; bfMask += nextWorkRowStride; Ampl += nextWorkRowStride; AmpCopy += nextWorkRowStride; copies += nextWorkRowStride; } float sigSum = ReduceSharedMemory(sm_base,sm); __syncthreads(); //*sm = Cnt; //Cnt = ReduceSharedMemory(sm_base,sm); //__syncthreads(); if(threadIdx.x == 0 && threadIdx.y == 0){ // printf("regionSum GPU: %f / %d\n", sigSum, ImgRegP.getRegSize(regId) ); *regionAvg = sigSum/ImgRegP.getRegSize(regId); } //*beadAvg = sigSum/Cnt; // Cnt always > 0 otherwise region will not even be handles by the kernel } // execute with one warp per row and 2D thread blocks of width warp length // each warp will slide across one row of the region // kernel parameters: // thread block dimensions (WARPSIZE,n,1) //n = number of warps per block) // grid dimension ( numRegions.x, (imgH + n-1)/n, 1) // one block per region in x direction and one per n img rows in y direction __global__ __launch_bounds__(128, 16) void PostProcessingCorrections_k( const unsigned short * RegionMask, const PerFlowParamsRegion * perFlowRegP, const PerNucParamsRegion * perNucRegP, const unsigned short * bfMask, const float* BeadParamCube, unsigned short * BeadStateMask, float* PolyClonalCube, float* ResultCube, float * regionAvgSignal // has to be inited to 0.0f for all regions ) { const size_t regionCol = blockIdx.x; const size_t regionRow = (blockIdx.y*blockDim.y)/ImgRegP.getRegH(); const size_t regId = ImgRegP.getRegIdFromGrid(regionCol,regionRow); if( LDG_ACCESS(RegionMask,regId) != RegionMaskLive) return; const size_t regionWidth = ImgRegP.getRegW(regId); const size_t windowWidth = blockDim.x; const size_t ix = regionCol * ImgRegP.getRegW()+ threadIdx.x; const size_t iy = (blockIdx.y*blockDim.y) + threadIdx.y; const size_t idx = ImgRegP.getWellIdx(ix,iy); const size_t ry = iy%ImgRegP.getRegH(); size_t rx = threadIdx.x; const size_t planeStride = ImgRegP.getImgSize(); //per bead const float * copies = BeadParamCube + BpCopies*planeStride + idx; const float * R = BeadParamCube + BpR*planeStride + idx; const float * AmpCpy = ResultCube + planeStride * ResultAmplCopyMapXTalk + idx; //when no longer using Xtlak on host one of these buffers can be removed const float * 
AmplIn = ResultCube + planeStride * ResultAmpl + idx; float * AmplOut = ResultCube + planeStride * ResultAmpl + idx; BeadStateMask += idx; PolyClonalCube += idx; bfMask += idx; //per region perFlowRegP += regId; perNucRegP += ImgRegP.getNumRegions() * ConstFlowP.getNucId() + regId; const float regAvgSig = LDG_LOAD(regionAvgSignal+regId); MagicXTalkForFLow MagicStuff(ConstFlowP.getRealFnum(), regAvgSig); while(rx < regionWidth ){ //while thread inside region if(Match(bfMask,MaskLive)){ if(ConfigP.PerformWellsLevelXTalk()) DoXTalk(perFlowRegP,perNucRegP,MagicStuff,AmplIn,AmpCpy,AmplOut,regAvgSig,*copies,*R,regId,rx,ry); //use Xtalk corrected amplitude // Prepare wells amplitude for raw wells writing by multiplicative scaling with copy multiplier EffectiveAmplitudeForRawWells(AmplOut, AmplOut, *copies, perFlowRegP->getCopyDrift()); if( ConfigP.PerformPolyClonalFilter() && ConstGlobalP.isClonalUpdateFlow(ConstFlowP.getRealFnum())) ClonalFilterUpdate(PolyClonalCube,BeadStateMask,AmplOut,planeStride,perFlowRegP->getCopyDrift(),*copies); // ClonalFilterUpdate(PolyClonalCube,BeadStateMask,AmplOut,planeStride,perFlowRegP->getCopyDrift(),*copies,ry); } rx += windowWidth; copies += windowWidth; R += windowWidth; bfMask += windowWidth; AmplIn += windowWidth; AmpCpy += windowWidth; AmplOut += windowWidth; BeadStateMask += windowWidth; PolyClonalCube += windowWidth; } } /* __device__ inline void loadSharedMemoryAll( float * AmplBase, size_t regId, size_t startRy, float * sm ) { size_t regBaseIdx = ImgRegP.getRegBaseIdx(regId); size_t globalx = ImgRegP.getXFromIdx(regBaseIdx); szie_t globaly = ImgRegP.getYFromIdx(regBaseIdx); size_t tIdx = threadIdx.y * blockDim.x + threadIdx.x; int rx = tIdx - ConstXTalkP.getSpanX(); int ry = startRy - ConstXTalkP.getSpanY(); int windowDimX = blokcDim.x + 2 * ConstXTalkP.getSpanX(); int windowDimY = blokcDim.y + 2 * ConstXTalkP.getSpanY(); for (int r= ry; r<= ry+windowSimY; r++) { for (int c= rx; c<=(rx+windowDimX); c++) { if ((r < 0) || (r>= ImgRegP.getRegH(regId)) || (c < 0) || (c>=ImgRegP.getRegH(regId))) { sum += default_signal*ConstXTalkP.coeff(lx,ly,phase); }else{ if(!useSharedMemory) sum += LDG_ACCESS(Ampl,r*AmplWidth+c) * ConstXTalkP.coeff(lx,ly,phase); else sum += Ampl[r*AmplWidth+c] * ConstXTalkP.coeff(lx,ly,phase); } } } } //__device__ inline // execute with one warp per row and 2D thread blocks of width warp length // each warp will slide across one row of the region // kernel parameters: // thread block dimensions (8,8) // grid dimension ( numRegions.x, (imgH + n-1)/n, 1) // one block per region in x direction and one per n img rows in y direction __global__ __launch_bounds__(128, 32) void ProtonXTalkShared_k( const unsigned short * RegionMask, const PerFlowParamsRegion * perFlowRegP, const PerNucParamsRegion * perNucRegP, const unsigned short * bfMask, const float* BeadParamCube, unsigned short * BeadStateMask, float* PolyClonalCube, float* ResultCube, float * regionAvgSignal // has to be inited to 0.0f for all regions ) { extern __shared__ float smem[]; const size_t regionCol = blockIdx.x; const size_t regionRow = (blockIdx.y*blockDim.y)/ImgRegP.getRegH(); const size_t regId = ImgRegP.getRegIdFromGrid(regionCol,regionRow); if( LDG_ACCESS(RegionMask,regId) != RegionMaskLive) return; const size_t regionWidth = ImgRegP.getRegW(regId); const size_t windowWidth = blockDim.x; const size_t ix = regionCol * ImgRegP.getRegW()+ threadIdx.x; const size_t iy = (blockIdx.y*blockDim.y) + threadIdx.y; const size_t idx = ImgRegP.getWellIdx(ix,iy); const size_t ry = 
iy%ImgRegP.getRegH(); size_t rx = threadIdx.x; const size_t planeStride = ImgRegP.getImgSize(); //per bead const float * copies = BeadParamCube + BpCopies*planeStride + idx; const float * R = BeadParamCube + BpR*planeStride + idx; const float * AmplIn = ResultCube + planeStride * ResultAmpl + idx; const size_t AmplInWidth = ImgRegP.getImgW(); float * AmplOut = ResultCube + planeStride * ResultAmplXTalk + idx; BeadStateMask += idx; PolyClonalCube += idx; bfMask += idx; //per region perFlowRegP += regId; perNucRegP += ImgRegP.getNumRegions() * ConstFlowP.getNucId() + regId; const float regAvgSig = LDG_LOAD(regionAvgSignal+regId); MagicXTalkForFLow MagicStuff(ConstFlowP.getRealFnum(), regAvgSig); while(rx < regionWidth ){ //while thread inside region if(Match(bfMask,MaskLive)){ DoXTalk(perFlowRegP,perNucRegP,MagicStuff,AmplIn,AmplInWidth,AmplOut,regAvgSig,*copies,*R,regId,rx,ry); ClonalFilterUpdate(PolyClonalCube,BeadStateMask,AmplOut,planeStride,perFlowRegP->getCopyDrift(),*copies); } rx += windowWidth; copies += windowWidth; R += windowWidth; bfMask += windowWidth; AmplIn += windowWidth; AmplOut += windowWidth; BeadStateMask += windowWidth; PolyClonalCube += windowWidth; } } */
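A minimal, self-contained sketch of the reduction pattern used by the signal-averaging kernel above: every thread strides over the wells of one region, accumulates the signal of live wells into shared memory, and thread 0 writes the region average. The region-geometry helpers (ImgRegP, the strided window walk, the amplitude-times-copies product) are deliberately replaced by plain width/height parameters and a single signal array, so all names and the launch configuration here are illustrative assumptions rather than the production interface.

// Assumed launch: one block per region, dynamic shared memory of
// blockDim.x * blockDim.y * sizeof(float), and a power-of-two block size.
__global__ void RegionAvgSketch_k(const unsigned short *mask,   // nonzero = live well
                                  const float *signal,          // per-well signal (e.g. Ampl * copies)
                                  float *regionAvg,             // one output value per region/block
                                  int regW, int regH)
{
    extern __shared__ float sm[];
    const int tid = threadIdx.y * blockDim.x + threadIdx.x;
    float acc = 0.0f;

    // strided walk over the region: rows by threadIdx.y, columns by threadIdx.x
    for (int ry = threadIdx.y; ry < regH; ry += blockDim.y)
        for (int rx = threadIdx.x; rx < regW; rx += blockDim.x)
            if (mask[ry * regW + rx])
                acc += signal[ry * regW + rx];

    sm[tid] = acc;
    __syncthreads();

    // block-wide tree reduction (stands in for ReduceSharedMemory)
    for (int s = (blockDim.x * blockDim.y) >> 1; s > 0; s >>= 1) {
        if (tid < s) sm[tid] += sm[tid + s];
        __syncthreads();
    }

    if (tid == 0)
        regionAvg[blockIdx.x] = sm[0] / (float)(regW * regH);
}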
#include <nvbio/basic/types.h> #include <nvbio/basic/exceptions.h> #include "bam_io.h" using namespace nvbio; /** ---- Read Functionality ---- **/ BAMReader::BAMReader(const char *fname) : eof(false) { fp = gzopen(fname, "rb"); if (fp == NULL) { throw nvbio::runtime_error("Could not open %s", fname); } if (!read_hdr()) { throw nvbio::runtime_error("Error parsing BAM file header"); } } BAMReader::~BAMReader() { gzclose(fp); } #define GZREAD(field) \ if (read_data(&(field), sizeof(field), __LINE__) == false) { \ eof = true; \ return false; \ } // parse the BAM header bool BAMReader::read_hdr(void) { GZREAD(header.magic); if (header.magic[0] != 'B' || header.magic[1] != 'A' || header.magic[2] != 'M' || header.magic[3] != '\1') { throw nvbio::runtime_error("error parsing BAM file (invalid magic)"); } GZREAD(header.l_text); header.text.resize(header.l_text); read_data(&header.text[0], header.l_text, __LINE__); // read reference sequence data GZREAD(header.n_ref); for(int c = 0; c < header.n_ref; c++) { std::string name; int32 l_name, l_ref; GZREAD(l_name); name.resize(l_name); read_data(&name[0], l_name, __LINE__); GZREAD(l_ref); header.sq_names.push_back(name); header.sq_lengths.push_back(l_ref); } data_start = gztell(fp); return true; } // load a batch of alignment records (AoS) bool BAMReader::read_aln_batch(std::vector<BAM_alignment_record>& batch, const uint64 batch_size) { uint64 aln_id; for(aln_id = 0; aln_id < batch_size; aln_id++) { if (eof) { break;} BAM_alignment_record record; GZREAD(record.header.block_size); z_off_t read_block_start = gztell(fp); GZREAD(record.header.refID); GZREAD(record.header.pos); GZREAD(record.header.bin_mq_nl); GZREAD(record.header.flag_nc); GZREAD(record.header.l_seq); GZREAD(record.header.next_refID); GZREAD(record.header.next_pos); GZREAD(record.header.tlen); // read the data section const uint32 data_len = record.header.block_size - (gztell(fp) - read_block_start); record.data.resize(data_len); read_data(&record.data[0], data_len, __LINE__); // store the record batch.push_back(record); } if (aln_id == 0) { return false; } return true; } // load a contiguous batch of raw byte alignment records (AoS) bool BAMReader::read_aln_batch_raw(BAM_alignment_batch_raw& raw_batch, const uint64 batch_size) { uint64 byte_offset = 0; uint64 aln_id; raw_batch.recs.resize(H_BATCH_SIZE_ALLOC*sizeof(BAM_alignment_record)); // size doesn't include the record data portion for(aln_id = 0; aln_id < batch_size; aln_id++) { if (eof) { break;} // read the size of the record uint32 record_size; if(!read_data(&record_size, sizeof(uint32), __LINE__)) { eof = true; break; } // allocate space raw_batch.recs.resize(byte_offset + record_size + sizeof(uint32)); raw_batch.offsets.push_back(byte_offset); // store the size memcpy(&raw_batch.recs[byte_offset], &record_size, sizeof(uint32)); byte_offset += sizeof(uint32); // read the data read_data(&raw_batch.recs[byte_offset], record_size, __LINE__); byte_offset += record_size; } if (byte_offset == 0) { return false; } raw_batch.offsets.push_back(byte_offset); return true; } // load a batch of alignment records (SoA) // load only the fields specified by the batch field_mask bool BAMReader::read_aln_batch(BAM_alignment_batch_SoA& batch, const uint64 batch_size) { batch.reset(); H_VectorU32 cigar_temp(64); BAM_alignment_header align; for(uint64 aln_id = 0; aln_id < batch_size; aln_id++) { if (eof) { break; } GZREAD(align.block_size); z_off_t read_block_start = gztell(fp); GZREAD(align.refID); GZREAD(align.pos); GZREAD(align.bin_mq_nl); 
GZREAD(align.flag_nc); GZREAD(align.l_seq); GZREAD(align.next_refID); GZREAD(align.next_pos); GZREAD(align.tlen); if(batch.field_mask & BAM_POSITIONS) { batch.positions.push_back(align.pos); } if(batch.field_mask & BAM_REFIDS) { if (align.refID < 0 || align.refID >= header.n_ref) { batch.refIDs.push_back(uint32(-1)); } else { batch.refIDs.push_back(align.refID); } } if(batch.field_mask & BAM_MAPQ) { batch.mapq.push_back(align.mapq()); } if(batch.field_mask & BAM_FLAGS) { batch.flags.push_back(align.flags()); } const uint64 read_name_off = batch.names.size(); const uint32 read_name_len = align.l_read_name(); if(batch.field_mask & BAM_NAMES) { batch.names.resize(read_name_off + read_name_len + 1); read_data(&batch.names[read_name_off], read_name_len, __LINE__); batch.names[read_name_off + read_name_len] = '\0'; } else { gzseek(fp, read_name_len, SEEK_CUR); } BAM_CRQ_index crq_index(batch.cigars.size(), align.num_cigar_ops(), batch.reads.size(), align.l_seq, batch.qualities.size(), align.l_seq); if(batch.field_mask & (BAM_CIGARS | BAM_QUALITIES | BAM_READS)) { batch.crq_index.push_back(crq_index); } const uint32 cigar_len = (align.flag_nc & 0xffff); if(batch.field_mask & BAM_CIGARS) { cigar_temp.resize(cigar_len); if (cigar_len) { read_data(&cigar_temp[0], sizeof(uint32) * cigar_len, __LINE__); for(uint32 c = 0; c < cigar_len; c++) { cigar_op op; op.op = cigar_temp[c] & 0xf; op.len = cigar_temp[c] >> 4; batch.cigars.push_back(op); } } } else { if (cigar_len) { gzseek(fp, sizeof(uint32) * cigar_len, SEEK_CUR); } } if(batch.field_mask & BAM_READS) { // figure out the length of the sequence data, rounded up to reach a dword boundary const uint32 padded_read_len_bp = ((align.l_seq + 7) / 8) * 8; // make sure we have enough memory, then read in the sequence batch.reads.resize(crq_index.read_start + padded_read_len_bp); uint64 *storage = (uint64 *)batch.reads.addrof(crq_index.read_start); read_data(storage, align.l_seq / 2, __LINE__); } else { gzseek(fp, align.l_seq / 2, SEEK_CUR); } if(batch.field_mask & BAM_QUALITIES) { batch.qualities.resize(crq_index.read_start + align.l_seq); read_data(&batch.qualities[crq_index.read_start], align.l_seq, __LINE__); } else { gzseek(fp, align.l_seq, SEEK_CUR); } const uint32 aux_len = align.block_size - (gztell(fp) - read_block_start); // align index if(batch.field_mask & (BAM_AUX | BAM_NAMES)) { BAM_NAUX_index idx(batch.aux_data.size(), aux_len, read_name_off); batch.aln_index.push_back(idx); } if(batch.field_mask & BAM_AUX) { const uint64 aux_start = batch.aux_data.size(); batch.aux_data.resize(aux_start + aux_len); read_data(&batch.aux_data[aux_start], aux_len, __LINE__); } else { gzseek(fp, aux_len, SEEK_CUR); } batch.num_alns++; } if (batch.num_alns == 0) { return false; } return true; } bool BAMReader::toSoA(const BAM_alignment_batch_raw& batch, BAM_alignment_batch_SoA& soa_batch) { soa_batch.reset(); BAM_alignment_header align; H_VectorU32 cigar_temp(64); int offset; for(uint64 idx = 0; idx < batch.offsets.size()-1; idx++) { uint32 len = batch.offsets[idx + 1] - batch.offsets[idx]; const uint8* data = &batch.recs.data()[batch.offsets[idx]]; offset = sizeof(uint32); memcpy(&align.refID, data + offset, sizeof(uint32)); offset += sizeof(uint32); memcpy(&align.pos, data + offset, sizeof(uint32)); offset += sizeof(uint32); memcpy(&align.bin_mq_nl, data + offset, sizeof(uint32)); offset += sizeof(uint32); memcpy(&align.flag_nc, data + offset, sizeof(uint32)); offset += sizeof(uint32); memcpy(&align.l_seq, data + offset, sizeof(uint32)); offset += 
sizeof(uint32); // skip remainder offset += 3*sizeof(int32); if(soa_batch.field_mask & BAM_REFIDS) { if (align.refID < 0 || align.refID >= header.n_ref) { soa_batch.refIDs.push_back(uint32(-1)); } else { soa_batch.refIDs.push_back(align.refID); } } if(soa_batch.field_mask & BAM_POSITIONS) { soa_batch.positions.push_back(align.pos); } if(soa_batch.field_mask & BAM_MAPQ) { soa_batch.mapq.push_back(align.mapq()); } if(soa_batch.field_mask & BAM_FLAGS) { soa_batch.flags.push_back(align.flags()); } const uint32 read_name_off = soa_batch.names.size(); const uint32 read_name_len = align.l_read_name(); if(soa_batch.field_mask & BAM_NAMES) { soa_batch.names.resize(read_name_off + read_name_len + 1); memcpy(&soa_batch.names[read_name_off], data + offset, read_name_len); soa_batch.names[read_name_off + read_name_len] = '\0'; } offset += read_name_len; BAM_CRQ_index crq_index(soa_batch.cigars.size(), align.num_cigar_ops(), soa_batch.reads.size(), align.l_seq, soa_batch.qualities.size(), align.l_seq); if(soa_batch.field_mask & (BAM_CIGARS | BAM_QUALITIES | BAM_READS)) { soa_batch.crq_index.push_back(crq_index); } const uint32 cigar_len = (align.flag_nc & 0xffff); if(soa_batch.field_mask & BAM_CIGARS) { cigar_temp.resize(cigar_len); if (cigar_len) { memcpy(&cigar_temp[0], data + offset, sizeof(uint32) * cigar_len); for(uint32 c = 0; c < cigar_len; c++) { cigar_op op; op.op = cigar_temp[c] & 0xf; op.len = cigar_temp[c] >> 4; soa_batch.cigars.push_back(op); } } } if (cigar_len) { offset += sizeof(uint32)*cigar_len; } if(soa_batch.field_mask & BAM_READS) { // figure out the length of the sequence data, rounded up to reach a dword boundary const uint32 padded_read_len_bp = ((align.l_seq + 7) / 8) * 8; // make sure we have enough memory, then read in the sequence soa_batch.reads.resize(crq_index.read_start + padded_read_len_bp); uint64 *storage = (uint64 *)soa_batch.reads.addrof(crq_index.read_start); memcpy(storage, data + offset, align.l_seq / 2); } offset += align.l_seq / 2; if(soa_batch.field_mask & BAM_QUALITIES) { soa_batch.qualities.resize(crq_index.read_start + align.l_seq); memcpy(&soa_batch.qualities[crq_index.read_start], data + offset, align.l_seq); } offset += align.l_seq; const uint32 aux_len = len - offset; // align index if(soa_batch.field_mask & (BAM_AUX | BAM_NAMES)) { BAM_NAUX_index idx(soa_batch.aux_data.size(), aux_len, read_name_off); soa_batch.aln_index.push_back(idx); } if(soa_batch.field_mask & BAM_AUX) { const uint64 aux_start = soa_batch.aux_data.size(); soa_batch.aux_data.resize(aux_start + aux_len); memcpy(&soa_batch.aux_data[aux_start], data + offset, aux_len); } offset += aux_len; soa_batch.num_alns++; } if (soa_batch.num_alns == 0) { return false; } return true; } bool BAMReader::read_data(void *output, unsigned int len, int line) { if(len == 0) { return true; } if (eof) { return false; } int ret = gzread(fp, output, len); if (ret > 0) { return true; } else { // check for EOF separately; zlib will not always return Z_STREAM_END at EOF below if (gzeof(fp)) { eof = true; } else { // ask zlib what happened and inform the user int err; const char *msg = gzerror(fp, &err); assert(err != Z_STREAM_END); // we're making the assumption that we never see Z_STREAM_END here if (err == 0) { ret = gzread(fp, output, len); if (ret > 0) { return true; } else { throw nvbio::runtime_error("Error processing BAM file (line %d): zlib error %d (%s) ret = %d", line, err, msg, ret); } } } return false; } } HTSBAMReader::HTSBAMReader(const char *fname) { fp = sam_open(fname, "r"); if (fp == NULL) { 
throw nvbio::runtime_error("Could not open %s", fname); } header = sam_hdr_read(fp); if (header == NULL) { throw nvbio::runtime_error("Error parsing BAM file header"); } } HTSBAMReader::~HTSBAMReader() { bam_hdr_destroy(header); sam_close(fp); } // returns false if no records were read bool HTSBAMReader::read_aln_batch(BAM_alignment_batch_SoA& batch, const uint64 batch_size) { batch.reset(); H_VectorU32 cigar_temp(64); bam1_t *b = (bam1_t*)calloc(1, sizeof(bam1_t)); for(uint64 aln_id = 0; aln_id < batch_size; aln_id++) { if (sam_read1(fp, header, b) < 0) break; if(batch.field_mask & BAM_POSITIONS) { batch.positions.push_back(b->core.pos); } if(batch.field_mask & BAM_REFIDS) { batch.refIDs.push_back(b->core.tid); } if(batch.field_mask & BAM_MAPQ) { batch.bin.push_back(b->core.bin); } if(batch.field_mask & BAM_MAPQ) { batch.mapq.push_back(b->core.qual); } if(batch.field_mask & BAM_FLAGS) { batch.flags.push_back(b->core.flag); } if(batch.field_mask == BAM_ALL) { batch.next_refIDs.push_back(b->core.mtid); batch.next_positions.push_back(b->core.mpos); } // data portion uint32 offset = 0; const uint64 read_name_off = batch.names.size(); const uint32 read_name_len = b->core.l_qname; if(batch.field_mask & BAM_NAMES) { batch.names.resize(read_name_off + read_name_len + 1); memcpy(&batch.names[read_name_off], b->data, read_name_len); batch.names[read_name_off + read_name_len] = '\0'; } offset += read_name_len; BAM_CRQ_index crq_index(batch.cigars.size(), b->core.n_cigar, batch.reads.size(), b->core.l_qseq, batch.qualities.size(), b->core.l_qseq); if(batch.field_mask & (BAM_CIGARS | BAM_QUALITIES | BAM_READS)) { batch.crq_index.push_back(crq_index); } const uint32 cigar_len = b->core.n_cigar; if(batch.field_mask & BAM_CIGARS) { cigar_temp.resize(cigar_len); if (cigar_len) { memcpy(&cigar_temp[0], b->data + offset, sizeof(uint32) * cigar_len); for(uint32 c = 0; c < cigar_len; c++) { cigar_op op; op.op = cigar_temp[c] & 0xf; op.len = cigar_temp[c] >> 4; batch.cigars.push_back(op); } } } if (cigar_len) { offset += sizeof(uint32)*cigar_len; } if(batch.field_mask & BAM_READS) { // figure out the length of the sequence data, rounded up to reach a dword boundary const uint32 padded_read_len_bp = ((b->core.l_qseq + 7) / 8) * 8; // make sure we have enough memory, then read in the sequence batch.reads.resize(crq_index.read_start + padded_read_len_bp); uint64 *storage = (uint64 *)batch.reads.addrof(crq_index.read_start); memcpy(storage, b->data + offset, b->core.l_qseq / 2); } offset += b->core.l_qseq / 2; if(batch.field_mask & BAM_QUALITIES) { batch.qualities.resize(crq_index.qual_start + b->core.l_qseq); memcpy(&batch.qualities[crq_index.qual_start], b->data + offset, b->core.l_qseq); } offset += b->core.l_qseq; const uint32 aux_len = b->l_data - offset; BAM_NAUX_index idx(batch.aux_data.size(), aux_len, read_name_off); if(batch.field_mask & (BAM_AUX | BAM_NAMES)) { batch.aln_index.push_back(idx); } if(batch.field_mask & BAM_AUX) { const uint64 aux_start = batch.aux_data.size(); batch.aux_data.resize(aux_start + aux_len); memcpy(&batch.aux_data[aux_start], b->data + offset, aux_len); } offset += aux_len; batch.num_alns++; if(batch.num_alns % 10000000 == 0) { printf("Loaded %llu records. 
\n", batch.num_alns); } } if (batch.num_alns == 0) { return false; } return true; } bool HTSBAMReader::read_aln_batch(std::vector<bam1_t*>& batch, const uint64 batch_size) { bam1_t *b = (bam1_t*)calloc(1, sizeof(bam1_t)); uint64 aln_id = 0; for(aln_id = 0; aln_id < batch_size; aln_id++) { if (sam_read1(fp, header, b) < 0) break; batch.push_back(b); } if (aln_id == 0) { return false; } return true; } /** ---- Write Functionality ---- **/ BAMWriter::BAMWriter(const char *fname) { fp = fopen(fname, "wt"); if (fp == NULL) { throw nvbio::runtime_error("Could not open %s for writing", fname); } // set a 256kb output buffer on fp and make sure it's not line buffered // this makes sure small fwrites do not land on disk straight away // (256kb was chosen based on the default stripe size for Linux mdraid RAID-5 volumes) setvbuf(fp, NULL, _IOFBF, 256 * 1024); } BAMWriter::~BAMWriter() { if (fp) { fclose(fp); fp = NULL; } } void BAMWriter::write_header(BAM_header& header) { data_buffer.append_data(&header.magic[0], 4); data_buffer.append_int32(header.l_text); // header text might be larger than the buffer size // cannot append the entire string if(header.l_text <= data_buffer.get_remaining_size()) { data_buffer.append_string(header.text.c_str()); } else { const char* header_text = header.text.c_str(); for(int i = 0; i < header.l_text; i++) { if (data_buffer.is_full()) { write_block(data_buffer); } data_buffer.append_int8(header_text[i]); } } if (data_buffer.is_full()) { write_block(data_buffer); } data_buffer.append_int32(header.n_ref); for(int i = 0; i < header.n_ref; i++) { data_buffer.append_int32((int32) header.sq_names[i].length()); if(header.sq_names[i].length() <= data_buffer.get_remaining_size()) { data_buffer.append_string(header.sq_names[i].c_str()); } else { const char* seq_text = header.sq_names[i].c_str(); for(uint32 j = 0; j < header.sq_names[i].length(); j++) { if (data_buffer.is_full()) { write_block(data_buffer); } data_buffer.append_int8(seq_text[j]); } } if (data_buffer.is_full()) { write_block(data_buffer); } data_buffer.append_int8(0); data_buffer.append_int32(header.sq_lengths[i]); } if (data_buffer.get_pos()) { write_block(data_buffer); } } // write out raw byte record data in the order specified by shuffle_idx void BAMWriter::write_aln_batch_raw(BAM_alignment_batch_raw& batch, H_VectorU64 shuffle_idx) { for(uint64 i = 0; i < shuffle_idx.size(); i++) { uint64 idx = shuffle_idx[i]; uint32 len = batch.offsets[idx + 1] - batch.offsets[idx]; // records can be of varying size, must ensure sufficient buffer space if(len <= data_buffer.get_remaining_size()) { data_buffer.append_data(&batch.recs[batch.offsets[idx]], len); } else { for(uint32 j = 0; j < len; j++) { if (data_buffer.is_full()) { write_block(data_buffer); } data_buffer.append_int8(batch.recs[batch.offsets[idx + j]]); } } if (data_buffer.is_full()) { write_block(data_buffer); } } if (data_buffer.get_pos()) { write_block(data_buffer); } } void BAMWriter::write_block(io::DataBuffer& block) { io::DataBuffer compressed; bgzf.start_block(compressed); bgzf.compress(compressed, block); bgzf.end_block(compressed); fwrite(compressed.get_base_ptr(), compressed.pos, 1, fp); block.rewind(); } HTSBAMWriter::HTSBAMWriter(const char *fname) { fp = sam_open(fname, "wb"); if (fp == NULL) { throw nvbio::runtime_error("Could not open %s for writing", fname); } } HTSBAMWriter::~HTSBAMWriter() { if (fp) { sam_close(fp); fp = NULL; } } void HTSBAMWriter::write_hdr(bam_hdr_t* header) { sam_hdr_write(fp, header); } // assumes the batch contains all 
the fields void HTSBAMWriter::write_aln_batch(BAM_alignment_batch_SoA& batch, H_VectorU64 shuffle_idx, bam_hdr_t* header) { bam1_t *b = (bam1_t*)calloc(1, sizeof(bam1_t)); b->data = (uint8_t*)malloc(ALNREC_SIZE_ALLOC); b->m_data = ALNREC_SIZE_ALLOC; for(uint64 idx = 0; idx < batch.num_alns; idx++) { uint64 i = shuffle_idx[idx]; BAM_CRQ_index crq = batch.crq_index[i]; BAM_NAUX_index naux = batch.aln_index[i]; bam1_core_t *c = &b->core; c->tid = batch.refIDs[i]; c->pos = batch.positions[i]; c->bin = batch.bin[i]; c->qual = batch.mapq[i]; if(i < batch.num_alns-1) { c->l_qname = batch.aln_index[i+1].name_start-naux.name_start-1; // remove the trailing \0 } else { c->l_qname = batch.names.size()-naux.name_start-1; } c->flag = batch.flags[i]; c->n_cigar = crq.cigar_len; c->l_qseq = crq.read_len; c->mtid = batch.next_refIDs[i]; c->mpos = batch.next_positions[i]; c->isize = 0; //TODO b->l_data = c->l_qname*sizeof(uint8) + crq.cigar_len*sizeof(cigar_op) + crq.qual_len*sizeof(uint8) + (crq.read_len/2)*sizeof(uint8) + naux.aux_data_len*sizeof(uint8); if(b->l_data > b->m_data) { b->data = (uint8_t*)realloc(b->data, b->l_data); b->m_data = b->l_data; } //qname-cigar-seq-qual-aux memcpy(b->data, &batch.names[naux.name_start], c->l_qname); memcpy(b->data, &batch.cigars[crq.cigar_start], crq.cigar_len); memcpy(b->data, (uint64 *)batch.reads.addrof(crq.read_start), crq.read_len/2); memcpy(b->data, &batch.qualities[crq.qual_start], crq.qual_len); memcpy(b->data, &batch.aux_data[naux.aux_data_start], naux.aux_data_len); sam_write1(fp, header, b); } free(b->data); free(b); } void HTSBAMWriter::write_aln_batch(std::vector<bam1_t*>& batch, H_VectorU64 shuffle_idx, bam_hdr_t* header) { for(uint64 i = 0; i < batch.size(); i++) { sam_write1(fp, header, batch[i]); } }
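Two of the bit-level conventions the readers above rely on are easy to miss in the flattened parsing code: BAM packs each CIGAR operation into a 32-bit word (length in the high 28 bits, op code in the low 4 bits, exactly as the `op.op = cigar_temp[c] & 0xf; op.len = cigar_temp[c] >> 4` lines assume), and it packs two 4-bit base codes per byte in the sequence field (hence the `l_seq / 2` reads and seeks). The standalone helpers below decode both; they are illustrative only and are not part of the BAMReader/HTSBAMReader interface.

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

static const char CIGAR_CODES[] = "MIDNSHP=X";        // op codes 0..8
static const char SEQ_CODES[]   = "=ACMGRSVTWYHKDBN"; // 4-bit base codes

// Expand packed 32-bit CIGAR words into the usual text form, e.g. "76M".
std::string decode_cigar(const std::vector<uint32_t>& words)
{
    std::string out;
    for (uint32_t w : words) {
        out += std::to_string(w >> 4);   // length: high 28 bits
        out += CIGAR_CODES[w & 0xf];     // op:     low 4 bits
    }
    return out;
}

// Expand the 4-bit packed sequence field; the first base sits in the high nibble.
std::string decode_seq(const std::vector<uint8_t>& packed, int l_seq)
{
    std::string out;
    for (int i = 0; i < l_seq; ++i) {
        const uint8_t byte = packed[i / 2];
        const uint8_t code = (i & 1) ? (byte & 0xf) : (byte >> 4);
        out += SEQ_CODES[code];
    }
    return out;
}

int main()
{
    const std::vector<uint32_t> cigar  = { (76u << 4) | 0u };  // 76M
    const std::vector<uint8_t>  packed = { 0x12, 0x40 };       // A C G
    std::printf("%s %s\n", decode_cigar(cigar).c_str(),
                           decode_seq(packed, 3).c_str());
    return 0;
}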
#include <feature-transform.h> #include <cnn-utility.h> #include <cuda_profiler_api.h> using namespace std; #define DEBUG_STR(x) ("\33[33m"#x"\33[0m = " + to_string(x) + "\t") #define VECTOR std::vector #define WHERE std #include <operators.inl> #undef VECTOR #undef WHERE #define __CUDA_CONSTANTS__ \ const int nThreads = blockDim.x * blockDim.y;\ const int tx = threadIdx.x;\ const int ty = threadIdx.y;\ int tid = tx + blockDim.x * ty;\ int x0 = blockIdx.x * blockDim.x;\ int y0 = blockIdx.y * blockDim.y;\ const int x = x0 + tx;\ const int y = y0 + ty; /*! Convert each row to a 2D image * \param data Each col in data is a feature vector. # of cols = # of data # of rows = image size * \param s Size of the image. # of rows in data = s.height x s.width */ vector<mat> reshapeVectors2Images(const mat& data, const SIZE s) { size_t nData = data.getCols(); vector<mat> images(nData); for (size_t i=0; i<nData; ++i) { images[i].resize(s.height, s.width); CCE(cudaMemcpy(images[i].getData(), data.getData() + i * data.getRows(), sizeof(float) * s.height * s.width, cudaMemcpyDeviceToDevice)); } CCE(cudaDeviceSynchronize()); return images; } template <bool rot180kernel> __device__ void load_kernel_into_shm(float* const K, const float* const kernel, int kH, int kW, int tid, int nThreads) { // Copy kernel in global memory to shared memory for (; tid<kW * kH; tid += nThreads) { int xx = tid % kH; int yy = tid / kH; if ( xx >= kH || yy >= kW ) continue; if (rot180kernel) K[yy * kH + xx] = kernel[yy * kH + xx]; else K[(kW - 1 - yy) * kH + (kH - 1 - xx)] = kernel[yy * kH + xx]; } } template <bool rot180data, bool rot180kernel> __global__ void convn_valid_kernel_with_shm( float *output, const int vH, const int vW, const float *data, const int H, const int W, const float *kernel, const int kH, const int kW, const int output_step, const int data_step, const int kernel_step) { __CUDA_CONSTANTS__; output += blockIdx.z * output_step; data += blockIdx.z * data_step; kernel += blockIdx.z * kernel_step; extern __shared__ float K[]; // Copy kernel in global memory to shared memory load_kernel_into_shm<rot180kernel>(K, kernel, kH, kW, tid, nThreads); // Copy data in global memory to shared memory float* D = K + kW * kH; int HEIGHT_STEP = blockDim.x + kH - 1; int WIDTH_STEP = blockDim.y + kW - 1; int nTotal = WIDTH_STEP * HEIGHT_STEP; for (; tid<nTotal; tid += nThreads) { int xx = tid % HEIGHT_STEP + x0; int yy = tid / HEIGHT_STEP + y0; if (xx >= H || yy >= W) continue; // rotate data 180 degree if (rot180data) D[tid] = data[ (W - 1 - yy) * H + (H - 1 - xx) ]; else D[tid] = data[ yy * H + xx ]; } __syncthreads(); if (x >= vH || y >= vW) return; float sum = 0; D += ty * HEIGHT_STEP + tx; for (int i = 0; i < kW; ++i) for(int j = 0; j < kH; ++j) sum += K[ i * kH + j ] * D[ i * HEIGHT_STEP + j ]; output[ y * vH + x ] += sum; } __global__ void convn_valid_kernel(float *output, float *data, float *kernel, const int H, const int W, const int kH, const int kW) { // Matrix index int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; // vH, vW stands for valid H and valid W const int vH = H - kH + 1; const int vW = W - kW + 1; if (x >= vH || y >= vW) return; x += kH - 1; y += kW - 1; float sum = 0; for (int i = 0; i < kW; ++i) for(int j = 0; j < kH; ++j) sum += kernel[ i * kH + j ] * data[ (y - i) * H + (x - j) ]; x -= kH - 1; y -= kW - 1; output[ y * vH + x ] = sum; } __global__ void convn_same_kernel(float *output, float *data, float *kernel, const int H, const int W, const int kH, const int kW) { 
// Matrix index const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= H || y >= W) return; const int i0 = kW / 2, j0 = kH / 2; float sum = 0; for (int i = 0; i < kW; ++i) { for(int j = 0; j < kH; ++j) { int ii = y - i + i0; int jj = x - j + j0; if ( ii < 0 || ii >= W || jj < 0 || jj >= H ) continue; sum += kernel[ i * kH + j ] * data[ ii * H + jj ]; } } output[y * H + x] = sum; } template <bool rot180data, bool rot180kernel> __global__ void convn_full_kernel_with_shm( float *output, const int fH, const int fW, float *data, const int H, const int W, float *kernel, const int kH, const int kW, const int output_step, const int data_step, const int kernel_step) { __CUDA_CONSTANTS__; output += blockIdx.z * output_step; data += blockIdx.z * data_step; kernel += blockIdx.z * kernel_step; extern __shared__ float K[]; // Copy kernel in global memory to shared memory load_kernel_into_shm<rot180kernel>(K, kernel, kH, kW, tid, nThreads); // Copy data in global memory to shared memory float* D = K + kW * kH; const int HEIGHT_STEP = blockDim.x + kH - 1; const int WIDTH_STEP = blockDim.y + kW - 1; const int nTotal = WIDTH_STEP * HEIGHT_STEP; x0 -= (kH - 1); y0 -= (kW - 1); for (; tid<nTotal; tid += nThreads) { int xx = x0 + tid % HEIGHT_STEP; int yy = y0 + tid / HEIGHT_STEP; if (yy < 0 || yy >= W || xx < 0 || xx >= H) D[tid] = 0; else { if (rot180data) D[tid] = data[ (W - 1 - yy) * H + (H - 1 - xx) ]; else D[tid] = data[ yy * H + xx ]; } } __syncthreads(); if (x >= fH || y >= fW) return; float sum = 0; D += ty * HEIGHT_STEP + tx; for (int i = 0; i < kW; ++i) for(int j = 0; j < kH; ++j) sum += K[ i * kH + j ] * D[ i * HEIGHT_STEP + j]; output[ y * fH + x ] += sum; } __global__ void convn_full_kernel(float *output, float *data, float *kernel, int H, int W, int kH, int kW) { // Matrix index int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; // fH, fW stands for full H and full W const int fH = H + kH - 1; const int fW = W + kW - 1; if (x >= fH || y >= fW) return; float sum = 0; for (int i = 0; i < kW; ++i) { for(int j = 0; j < kH; ++j) { int ii = y - i; int jj = x - j; if ( ii < 0 || ii >= W || jj < 0 || jj >= H ) continue; sum += kernel[ i * kH + j ] * data[ ii * H + jj ]; } } output[ y * fH + x ] = sum; } SIZE get_convn_size(SIZE data, SIZE kernel, ConvType type) { switch (type) { case SAME: case SAME_SHM: return data; case VALID: case VALID_SHM: return max(data - kernel + 1, SIZE(0, 0)); case FULL: case FULL_SHM: return data + kernel - 1; default: throw runtime_error(RED_ERROR + "Unknown type of convolution."); }; } SIZE get_convn_size(const mat& data, const mat& kernel, ConvType type) { SIZE dSize(data.getRows(), data.getCols()); SIZE kSize(kernel.getRows(), kernel.getCols()); return get_convn_size(dSize, kSize, type); } template <typename T> size_t getShmSizeNeeded(const dim3 &threads, SIZE kernel) { return ( kernel.area() + (threads.x + kernel.height - 1) * (threads.y + kernel.width - 1) ) * sizeof(T); } size_t getSuitableShmSize(dim3 &grids, dim3 &threads, SIZE kernel) { size_t shm_size = getShmSizeNeeded<float>(threads, kernel); const size_t kMaxSharedMemorySize = 48 * 1024; // 48 KB const size_t kMinSharedMemorySize = 16 * 1024; // 16 KB while ( shm_size > kMaxSharedMemorySize && threads.x * threads.y >= 32 ) { if ( threads.x >= threads.y ) { threads.x /= 2; grids.x *= 2; } else { threads.y /= 2; grids.y *= 2; } shm_size = getShmSizeNeeded<float>(threads, kernel); } if (shm_size > kMaxSharedMemorySize ) { 
char buf[512]; sprintf(buf, "Exceeds maximum shared memory available. (%d bytes)\n" "kernel = (%d, %d), grids = (%u, %u, %u), threads = (%u, %u, %u) " " => %lu bytes of shared memory needed.", kMaxSharedMemorySize, kernel.height, kernel.width, grids.x, grids.y, grids.z, threads.x, threads.y, threads.z, shm_size); throw runtime_error(RED_ERROR + to_string(buf)); } // Choose one of the configurations: // (1) 48 KB register (L1) + 16 KB Shared Memory // (2) 16 KB register (L1) + 48 KB Shared Memory if (shm_size < kMinSharedMemorySize) cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); else cudaDeviceSetCacheConfig(cudaFuncCachePreferShared); return shm_size; } mat convn(const mat& data, const mat& kernel, ConvType type) { int H = data.getRows(), W = data.getCols(), kH = kernel.getRows(), kW = kernel.getCols(); SIZE s = get_convn_size(data, kernel, type); mat output(s.height, s.width); ALLOCATE_GRIDS_AND_THREADS(s.height, s.width); switch (type) { case SAME: convn_same_kernel<<< grids, threads >>>( output.getData(), data.getData(), kernel.getData(), H, W, kH, kW); break; case SAME_SHM: // TODO break; case VALID: convn_valid_kernel<<< grids, threads >>>( output.getData(), data.getData(), kernel.getData(), H, W, kH, kW); break; case VALID_SHM: // TODO break; case FULL: convn_full_kernel<<< grids, threads >>>( output.getData(), data.getData(), kernel.getData(), H, W, kH, kW); break; case FULL_SHM: // TODO break; default: throw runtime_error(RED_ERROR + "Unknown convolution type"); } CCE(cudaPeekAtLastError()); CCE(cudaDeviceSynchronize()); return output; } __global__ void downsample_kernel(float *dst, float *src, const uint8_t scale, const uint8_t H, const uint8_t W) { // Matrix index uint16_t x = blockIdx.x*blockDim.x + threadIdx.x; uint16_t y = blockIdx.y*blockDim.y + threadIdx.y; const uint8_t h = H / scale; const uint8_t w = W / scale; if ( x >= h || y >= w ) return; dst += blockIdx.z * h * w + (y * h + x); x *= scale; y *= scale; float sum = 0; src += blockIdx.z * H * W + (y * H + x); for (uint8_t i=0; i<scale; ++i) { for (uint8_t j=0; j<scale; ++j) { if ( y + i < W && x + j < H ) sum += src[i * H + j]; } } *dst = sum / (scale * scale); } __global__ void upsample_kernel(float *dst, float *src, const uint8_t h, const uint8_t w, const uint8_t H, const uint8_t W) { // Matrix index const uint16_t x = blockIdx.x*blockDim.x + threadIdx.x; const uint16_t y = blockIdx.y*blockDim.y + threadIdx.y; src += blockIdx.z * h * w; dst += blockIdx.z * H * W; uint8_t scale = H / h; if (x >= H || y >= W ) return; uint8_t sx = x / scale; uint8_t sy = y / scale; if (sx == h) --sx; if (sy == w) --sy; dst[y * H + x] = src[sy * h + sx]; } mat downsample(const mat& x, size_t scale, SIZE size) { int batch_size = x.getCols(); SIZE output_size = size / scale; if ( x.getRows() != size.height * size.width ) throw runtime_error(RED_ERROR + DEBUG_STR(x.getRows()) + DEBUG_STR(size.height) + DEBUG_STR(size.width)); mat output(output_size.area(), batch_size); ALLOCATE_GRIDS_AND_THREADS(output_size.height, output_size.width); grids.z = batch_size; downsample_kernel<<<grids, threads>>>( output.getData(), x.getData(), scale, output_size.height, output_size.width); CCE(cudaDeviceSynchronize()); return output; } mat upsample(const mat& x, SIZE s, SIZE img) { int batch_size = x.getCols(); int H = s.height, W = s.width, h = img.height, w = img.width; if ( x.getRows() != img.height * img.width ) throw runtime_error(RED_ERROR + DEBUG_STR(x.getRows()) + DEBUG_STR(img.height) + DEBUG_STR(img.width)); mat output(H * W, batch_size); 
ALLOCATE_GRIDS_AND_THREADS(H, W); grids.z = batch_size; upsample_kernel<<<grids, threads>>>( output.getData(), x.getData(), h, w, H, W); CCE(cudaDeviceSynchronize()); return output; } /*! * Implementation of ConvolutionalLayer goes here. (GPU part only) * * */ void ConvolutionalLayer::update_bias(const mat& delta) { vector<mat> deltas = versplit(delta, getNumOutputMaps(), get_output_img_size().area()); for (size_t j=0; j<getNumOutputMaps(); ++j) _bias[j] -= sum_all(deltas[j]); } void ConvolutionalLayer::update_kernel(const mat& fin, const mat& delta) { size_t batch_size = fin.getCols(); size_t nInputs = getNumInputMaps(); size_t nOutputs = getNumOutputMaps(); SIZE kernel = this->get_kernel_size(); SIZE img_in = this->get_input_img_size(); SIZE img_out = this->get_output_img_size(); // Update kernels with learning rate vector<mat> Z(nInputs, mat(kernel.area(), nOutputs, 0)); ALLOCATE_GRIDS_AND_THREADS(kernel.height, kernel.width); grids.z = nOutputs; size_t shm_size = getSuitableShmSize(grids, threads, img_out); // printf("grids = (%lu, %lu, %lu), threads = (%lu, %lu, %lu) \n", grids.x, grids.y, grids.z, threads.x, threads.y, threads.z); for (size_t i=0; i<nInputs; ++i) { for (size_t b=0; b<batch_size; ++b) { convn_valid_kernel_with_shm<true, false><<< grids, threads, shm_size, 0 >>>( Z[i].getData(), kernel.height, kernel.width, fin.getData() + i * img_in.area() + b * fin.getRows(), img_in.height, img_in.width, delta.getData() + b * delta.getRows(), img_out.height, img_out.width, kernel.area(), 0, img_out.area()); } } CCE(cudaPeekAtLastError()); for (size_t i=0; i<nInputs; ++i) _kernels[i] -= reshapeVectors2Images(Z[i], this->get_kernel_size()); } /*! * Implementation of ConvolutionalLayer goes here. (GPU part only) * * */ void ConvolutionalLayer::feedForward(mat& fout, const mat& fin) { size_t nInputs = getNumInputMaps(), nOutputs = getNumOutputMaps(); size_t batch_size = fin.getCols(); SIZE kernel = this->get_kernel_size(); SIZE img_in = this->get_input_img_size(); SIZE img_out = this->get_output_img_size(); // Map _bias[i] to bias, and then to fout // ______________________ // / % %% ... % // / % %% ... % // / % %% ... % 1st feature map // / % %% ... % // / % %% ... % // / ______________________ // / # ## ... # // % # ## ... # // _bias = # ----- # => fout = ## ... # 2nd feature map // @ # ## ... # // \ # ## ... # // \ ______________________ // \ @ @@ ... @ // \ @ @@ ... @ // \ @ @@ ... @ 3rd feature map // \ @ @@ ... @ // \ @ @@ ... 
@ // ______________________ // hmat bias(img_out.area() * nOutputs + 1, 1); for (size_t j=0; j<nOutputs; ++j) { for (size_t a=0; a<img_out.area(); ++a) bias[j*img_out.area() + a] = _bias[j]; } fout = mat(bias) * mat(1, batch_size, 1.0f); ALLOCATE_GRIDS_AND_THREADS(img_out.height, img_out.width); grids.z = batch_size; size_t shm_size = getSuitableShmSize(grids, threads, kernel); for (size_t j=0; j<nOutputs; ++j) { for (size_t i=0; i<nInputs; ++i) { convn_valid_kernel_with_shm<false, false><<< grids, threads, shm_size, 0 >>>( fout.getData() + j * img_out.area(), img_out.height, img_out.width, fin.getData() + i * img_in.area(), img_in.height, img_in.width, _kernels[i][j].getData(), kernel.height, kernel.width, fout.getRows(), fin.getRows(), 0); } } CCE(cudaPeekAtLastError()); } void ConvolutionalLayer::feedBackward(mat& error, const mat& delta) { size_t nInputs = getNumInputMaps(), nOutputs = getNumOutputMaps(); size_t batch_size = delta.getCols(); SIZE kernel = this->get_kernel_size(); SIZE img_in = this->get_input_img_size(); SIZE img_out = this->get_output_img_size(); error.resize(img_in.area() * nInputs + 1, batch_size, 0.); ALLOCATE_GRIDS_AND_THREADS(img_in.height, img_in.width); grids.z = batch_size; size_t shm_size = getSuitableShmSize(grids, threads, kernel); for (size_t i=0; i<nInputs; ++i) { for (size_t j=0; j<nOutputs; ++j) { convn_full_kernel_with_shm<false, true><<< grids, threads, shm_size, 0 >>>( error.getData() + i * img_in.area(), img_in.height, img_in.width, delta.getData() + j * img_out.area(), img_out.height, img_out.width, _kernels[i][j].getData(), kernel.height, kernel.width, error.getRows(), delta.getRows(), 0); } } } /*! * Implementation of SubSamplingLayer goes here. (GPU part only) * * */ void SubSamplingLayer::feedForward(mat& fout, const mat& fin) { SIZE img_in = this->get_input_img_size(); SIZE img_out = this->get_output_img_size(); ALLOCATE_GRIDS_AND_THREADS(img_out.height, img_out.width); grids.z = getNumOutputMaps(); fout.resize(img_out.area() * getNumOutputMaps() + 1, fin.getCols()); for (size_t i=0; i<fin.getCols(); ++i) { downsample_kernel<<<grids, threads>>>( fout.getData() + i * fout.getRows(), fin.getData() + i * fin.getRows(), _scale, img_in.height, img_in.width); } CCE(cudaDeviceSynchronize()); } void SubSamplingLayer::feedBackward(mat& error, const mat& delta) { assert(&delta != &error); SIZE img_in = this->get_input_img_size(); SIZE img_out = this->get_output_img_size(); ALLOCATE_GRIDS_AND_THREADS(img_in.height, img_in.width); grids.z = getNumInputMaps(); error.resize(img_in.area() * getNumInputMaps() + 1, delta.getCols()); for (size_t i=0; i<delta.getCols(); ++i) { upsample_kernel<<<grids, threads>>>( error.getData() + i * error.getRows(), delta.getData() + i * delta.getRows(), img_out.height, img_out.width, img_in.height, img_in.width); } error *= 1.0f / (_scale * _scale); CCE(cudaDeviceSynchronize()); }
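The host-side sketch below pulls out the two size computations that drive the kernels in this file: the VALID/SAME/FULL output-size rule implemented by get_convn_size, and the dynamic shared-memory footprint used by the *_with_shm kernels (kernel tile plus a data tile of blockDim padded by a kH-1 by kW-1 halo, as in getShmSizeNeeded). Plain structs and ints stand in for the SIZE and dim3 types of the original code, so the names here are illustrative only.

#include <algorithm>
#include <cstdio>
#include <cstring>

struct Size2D { int h, w; };

// Output size of a 2-D convolution for the three supported modes.
Size2D conv_output_size(Size2D data, Size2D kernel, const char* mode)
{
    if (std::strcmp(mode, "SAME")  == 0) return data;
    if (std::strcmp(mode, "VALID") == 0) return { std::max(data.h - kernel.h + 1, 0),
                                                  std::max(data.w - kernel.w + 1, 0) };
    return { data.h + kernel.h - 1, data.w + kernel.w - 1 };   // FULL
}

// Dynamic shared memory needed per block: kernel tile + (blockDim + halo) data tile.
size_t shm_bytes(unsigned tx, unsigned ty, Size2D kernel)
{
    return ( (size_t)kernel.h * kernel.w
           + (size_t)(tx + kernel.h - 1) * (ty + kernel.w - 1) ) * sizeof(float);
}

int main()
{
    Size2D out = conv_output_size({28, 28}, {5, 5}, "VALID");   // 24 x 24
    std::printf("valid: %dx%d, shm for a 16x16 block: %zu bytes\n",
                out.h, out.w, shm_bytes(16, 16, {5, 5}));
    return 0;
}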
* \file * cub::AgentSegmentFixup implements a stateful abstraction of CUDA thread blocks for participating in device-wide reduce-value-by-key. */ #pragma once #include <iterator> #include "single_pass_scan_operators.cuh" #include "../block/block_load.cuh" #include "../block/block_store.cuh" #include "../block/block_scan.cuh" #include "../block/block_discontinuity.cuh" #include "../iterator/cache_modified_input_iterator.cuh" #include "../iterator/constant_input_iterator.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /****************************************************************************** * Tuning policy types ******************************************************************************/ /** * Parameterizable tuning policy type for AgentSegmentFixup */ template < int _BLOCK_THREADS, ///< Threads per thread block int _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) BlockLoadAlgorithm _LOAD_ALGORITHM, ///< The BlockLoad algorithm to use CacheLoadModifier _LOAD_MODIFIER, ///< Cache load modifier for reading input elements BlockScanAlgorithm _SCAN_ALGORITHM> ///< The BlockScan algorithm to use struct AgentSegmentFixupPolicy { enum { BLOCK_THREADS = _BLOCK_THREADS, ///< Threads per thread block ITEMS_PER_THREAD = _ITEMS_PER_THREAD, ///< Items per thread (per tile of input) }; static const BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; ///< The BlockLoad algorithm to use static const CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; ///< Cache load modifier for reading input elements static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; ///< The BlockScan algorithm to use }; /****************************************************************************** * Thread block abstractions ******************************************************************************/ /** * \brief AgentSegmentFixup implements a stateful abstraction of CUDA thread blocks for participating in device-wide reduce-value-by-key */ template < typename AgentSegmentFixupPolicyT, ///< Parameterized AgentSegmentFixupPolicy tuning policy type typename PairsInputIteratorT, ///< Random-access input iterator type for keys typename AggregatesOutputIteratorT, ///< Random-access output iterator type for values typename EqualityOpT, ///< KeyT equality operator type typename ReductionOpT, ///< ValueT reduction operator type typename OffsetT> ///< Signed integer type for global offsets struct AgentSegmentFixup { //--------------------------------------------------------------------- // Types and constants //--------------------------------------------------------------------- // Data type of key-value input iterator typedef typename std::iterator_traits<PairsInputIteratorT>::value_type KeyValuePairT; // Value type typedef typename KeyValuePairT::Value ValueT; // Tile status descriptor interface type typedef ReduceByKeyScanTileState<ValueT, OffsetT> ScanTileStateT; // Constants enum { BLOCK_THREADS = AgentSegmentFixupPolicyT::BLOCK_THREADS, ITEMS_PER_THREAD = AgentSegmentFixupPolicyT::ITEMS_PER_THREAD, TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, // Whether or not do fixup using RLE + global atomics USE_ATOMIC_FIXUP = (CUB_PTX_ARCH >= 350) && (Equals<ValueT, float>::VALUE || Equals<ValueT, int>::VALUE || Equals<ValueT, unsigned int>::VALUE || Equals<ValueT, unsigned long long>::VALUE), // Whether or not the scan operation has a zero-valued identity value (true if we're performing addition on a primitive type) HAS_IDENTITY_ZERO = 
(Equals<ReductionOpT, cub::Sum>::VALUE) && (Traits<ValueT>::PRIMITIVE), }; // Cache-modified Input iterator wrapper type (for applying cache modifier) for keys typedef typename If<IsPointer<PairsInputIteratorT>::VALUE, CacheModifiedInputIterator<AgentSegmentFixupPolicyT::LOAD_MODIFIER, KeyValuePairT, OffsetT>, // Wrap the native input pointer with CacheModifiedValuesInputIterator PairsInputIteratorT>::Type // Directly use the supplied input iterator type WrappedPairsInputIteratorT; // Cache-modified Input iterator wrapper type (for applying cache modifier) for fixup values typedef typename If<IsPointer<AggregatesOutputIteratorT>::VALUE, CacheModifiedInputIterator<AgentSegmentFixupPolicyT::LOAD_MODIFIER, ValueT, OffsetT>, // Wrap the native input pointer with CacheModifiedValuesInputIterator AggregatesOutputIteratorT>::Type // Directly use the supplied input iterator type WrappedFixupInputIteratorT; // Reduce-value-by-segment scan operator typedef ReduceByKeyOp<cub::Sum> ReduceBySegmentOpT; // Parameterized BlockLoad type for pairs typedef BlockLoad< KeyValuePairT, BLOCK_THREADS, ITEMS_PER_THREAD, AgentSegmentFixupPolicyT::LOAD_ALGORITHM> BlockLoadPairs; // Parameterized BlockScan type typedef BlockScan< KeyValuePairT, BLOCK_THREADS, AgentSegmentFixupPolicyT::SCAN_ALGORITHM> BlockScanT; // Callback type for obtaining tile prefix during block scan typedef TilePrefixCallbackOp< KeyValuePairT, ReduceBySegmentOpT, ScanTileStateT> TilePrefixCallbackOpT; // Shared memory type for this threadblock union _TempStorage { struct { typename BlockScanT::TempStorage scan; // Smem needed for tile scanning typename TilePrefixCallbackOpT::TempStorage prefix; // Smem needed for cooperative prefix callback }; // Smem needed for loading keys typename BlockLoadPairs::TempStorage load_pairs; }; // Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; //--------------------------------------------------------------------- // Per-thread fields //--------------------------------------------------------------------- _TempStorage& temp_storage; ///< Reference to temp_storage WrappedPairsInputIteratorT d_pairs_in; ///< Input keys AggregatesOutputIteratorT d_aggregates_out; ///< Output value aggregates WrappedFixupInputIteratorT d_fixup_in; ///< Fixup input values InequalityWrapper<EqualityOpT> inequality_op; ///< KeyT inequality operator ReductionOpT reduction_op; ///< Reduction operator ReduceBySegmentOpT scan_op; ///< Reduce-by-segment scan operator //--------------------------------------------------------------------- // Constructor //--------------------------------------------------------------------- // Constructor __device__ __forceinline__ AgentSegmentFixup( TempStorage& temp_storage, ///< Reference to temp_storage PairsInputIteratorT d_pairs_in, ///< Input keys AggregatesOutputIteratorT d_aggregates_out, ///< Output value aggregates EqualityOpT equality_op, ///< KeyT equality operator ReductionOpT reduction_op) ///< ValueT reduction operator : temp_storage(temp_storage.Alias()), d_pairs_in(d_pairs_in), d_aggregates_out(d_aggregates_out), d_fixup_in(d_aggregates_out), inequality_op(equality_op), reduction_op(reduction_op), scan_op(reduction_op) {} //--------------------------------------------------------------------- // Cooperatively scan a device-wide sequence of tiles with other CTAs //--------------------------------------------------------------------- /** * Process input tile. 
Specialized for atomic-fixup */ template <bool IS_LAST_TILE> __device__ __forceinline__ void ConsumeTile( OffsetT num_remaining, ///< Number of global input items remaining (including this tile) int tile_idx, ///< Tile index OffsetT tile_offset, ///< Tile offset ScanTileStateT& tile_state, ///< Global tile state descriptor Int2Type<true> use_atomic_fixup) ///< Marker whether to use atomicAdd (instead of reduce-by-key) { KeyValuePairT pairs[ITEMS_PER_THREAD]; // Load pairs KeyValuePairT oob_pair; oob_pair.key = -1; if (IS_LAST_TILE) BlockLoadPairs(temp_storage.load_pairs).Load(d_pairs_in + tile_offset, pairs, num_remaining, oob_pair); else BlockLoadPairs(temp_storage.load_pairs).Load(d_pairs_in + tile_offset, pairs); // RLE #pragma unroll for (int ITEM = 1; ITEM < ITEMS_PER_THREAD; ++ITEM) { ValueT* d_scatter = d_aggregates_out + pairs[ITEM - 1].key; if (pairs[ITEM].key != pairs[ITEM - 1].key) atomicAdd(d_scatter, pairs[ITEM - 1].value); else pairs[ITEM].value = reduction_op(pairs[ITEM - 1].value, pairs[ITEM].value); } // Flush last item if valid ValueT* d_scatter = d_aggregates_out + pairs[ITEMS_PER_THREAD - 1].key; if ((!IS_LAST_TILE) || (pairs[ITEMS_PER_THREAD - 1].key >= 0)) atomicAdd(d_scatter, pairs[ITEMS_PER_THREAD - 1].value); } /** * Process input tile. Specialized for reduce-by-key fixup */ template <bool IS_LAST_TILE> __device__ __forceinline__ void ConsumeTile( OffsetT num_remaining, ///< Number of global input items remaining (including this tile) int tile_idx, ///< Tile index OffsetT tile_offset, ///< Tile offset ScanTileStateT& tile_state, ///< Global tile state descriptor Int2Type<false> use_atomic_fixup) ///< Marker whether to use atomicAdd (instead of reduce-by-key) { KeyValuePairT pairs[ITEMS_PER_THREAD]; KeyValuePairT scatter_pairs[ITEMS_PER_THREAD]; // Load pairs KeyValuePairT oob_pair; oob_pair.key = -1; if (IS_LAST_TILE) BlockLoadPairs(temp_storage.load_pairs).Load(d_pairs_in + tile_offset, pairs, num_remaining, oob_pair); else BlockLoadPairs(temp_storage.load_pairs).Load(d_pairs_in + tile_offset, pairs); CTA_SYNC(); KeyValuePairT tile_aggregate; if (tile_idx == 0) { // Exclusive scan of values and segment_flags BlockScanT(temp_storage.scan).ExclusiveScan(pairs, scatter_pairs, scan_op, tile_aggregate); // Update tile status if this is not the last tile if (threadIdx.x == 0) { // Set first segment id to not trigger a flush (invalid from exclusive scan) scatter_pairs[0].key = pairs[0].key; if (!IS_LAST_TILE) tile_state.SetInclusive(0, tile_aggregate); } } else { // Exclusive scan of values and segment_flags TilePrefixCallbackOpT prefix_op(tile_state, temp_storage.prefix, scan_op, tile_idx); BlockScanT(temp_storage.scan).ExclusiveScan(pairs, scatter_pairs, scan_op, prefix_op); tile_aggregate = prefix_op.GetBlockAggregate(); } // Scatter updated values #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { if (scatter_pairs[ITEM].key != pairs[ITEM].key) { // Update the value at the key location ValueT value = d_fixup_in[scatter_pairs[ITEM].key]; value = reduction_op(value, scatter_pairs[ITEM].value); d_aggregates_out[scatter_pairs[ITEM].key] = value; } } // Finalize the last item if (IS_LAST_TILE) { // Last thread will output final count and last item, if necessary if (threadIdx.x == BLOCK_THREADS - 1) { // If the last tile is a whole tile, the inclusive prefix contains accumulated value reduction for the last segment if (num_remaining == TILE_ITEMS) { // Update the value at the key location OffsetT last_key = pairs[ITEMS_PER_THREAD - 1].key; 
d_aggregates_out[last_key] = reduction_op(tile_aggregate.value, d_fixup_in[last_key]); } } } } /** * Scan tiles of items as part of a dynamic chained scan */ __device__ __forceinline__ void ConsumeRange( int num_items, ///< Total number of input items int num_tiles, ///< Total number of input tiles ScanTileStateT& tile_state) ///< Global tile state descriptor { // Blocks are launched in increasing order, so just assign one tile per block int tile_idx = (blockIdx.x * gridDim.y) + blockIdx.y; // Current tile index OffsetT tile_offset = tile_idx * TILE_ITEMS; // Global offset for the current tile OffsetT num_remaining = num_items - tile_offset; // Remaining items (including this tile) if (num_remaining > TILE_ITEMS) { // Not the last tile (full) ConsumeTile<false>(num_remaining, tile_idx, tile_offset, tile_state, Int2Type<USE_ATOMIC_FIXUP>()); } else if (num_remaining > 0) { // The last tile (possibly partially-full) ConsumeTile<true>(num_remaining, tile_idx, tile_offset, tile_state, Int2Type<USE_ATOMIC_FIXUP>()); } } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
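The atomic-fixup specialization of ConsumeTile above amounts to a run-length pass: consecutive pairs with the same key are reduced in registers, and only a key change (or the final item) touches global memory with atomicAdd, which also makes the boundaries between threads safe. The kernel below is a flattened sketch of that idea without the BlockLoad/Int2Type machinery; the layout (each thread owning a contiguous run of items) and the assumption that out[] already holds the partial aggregates being fixed up are simplifications, not CUB's actual interface.

// Sketch only: keys are assumed to be valid indices into out[], and out[] is
// assumed to be pre-initialized (the atomics accumulate into it).
__global__ void atomic_fixup_sketch(const int* keys, const float* vals,
                                    float* out, int n, int items_per_thread)
{
    const int start = (blockIdx.x * blockDim.x + threadIdx.x) * items_per_thread;
    if (start >= n) return;
    const int end = min(start + items_per_thread, n);

    int   run_key = keys[start];
    float run_sum = vals[start];
    for (int i = start + 1; i < end; ++i) {
        if (keys[i] == run_key) {
            run_sum += vals[i];                 // same segment: reduce in registers
        } else {
            atomicAdd(&out[run_key], run_sum);  // segment boundary: flush the run
            run_key = keys[i];
            run_sum = vals[i];
        }
    }
    atomicAdd(&out[run_key], run_sum);          // flush the last (possibly cross-thread) run
}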
#pragma once #include <gunrock/util/io/modified_store.cuh> #include <gunrock/util/operators.cuh> namespace gunrock { namespace util { namespace io { /** * Scatter a tile of data items using the corresponding tile of scatter_offsets */ template <int LOG_LOADS_PER_TILE, // Number of vector loads (log) int LOG_LOAD_VEC_SIZE, // Number of items per vector load (log) int ACTIVE_THREADS, // Active threads that will be loading st::CacheModifier CACHE_MODIFIER> // Cache modifier (e.g., // CA/CG/CS/NONE/etc.) struct ScatterTile { enum { LOADS_PER_TILE = 1 << LOG_LOADS_PER_TILE, LOAD_VEC_SIZE = 1 << LOG_LOAD_VEC_SIZE, LOG_ELEMENTS_PER_THREAD = LOG_LOADS_PER_TILE + LOG_LOAD_VEC_SIZE, ELEMENTS_PER_THREAD = 1 << LOG_ELEMENTS_PER_THREAD, TILE_SIZE = ACTIVE_THREADS * ELEMENTS_PER_THREAD, }; //--------------------------------------------------------------------- // Helper Structures //--------------------------------------------------------------------- // Iterate next vector item template <int LOAD, int VEC, int dummy = 0> struct Iterate { // Unguarded template <typename T, void Transform(T &), typename SizeT> static __device__ __forceinline__ void Invoke( T *dest, T data[][LOAD_VEC_SIZE], SizeT scatter_offsets[][LOAD_VEC_SIZE]) { Transform(data[LOAD][VEC]); ModifiedStore<CACHE_MODIFIER>::St(data[LOAD][VEC], dest + scatter_offsets[LOAD][VEC]); Iterate<LOAD, VEC + 1>::template Invoke<T, Transform>(dest, data, scatter_offsets); } // Guarded by flags template <typename T, void Transform(T &), typename Flag, typename SizeT> static __device__ __forceinline__ void Invoke( T *dest, T data[][LOAD_VEC_SIZE], SizeT scatter_offsets[][LOAD_VEC_SIZE], Flag valid_flags[][LOAD_VEC_SIZE]) { if (valid_flags[LOAD][VEC]) { Transform(data[LOAD][VEC]); ModifiedStore<CACHE_MODIFIER>::St(data[LOAD][VEC], dest + scatter_offsets[LOAD][VEC]); } Iterate<LOAD, VEC + 1>::template Invoke<T, Transform>( dest, data, scatter_offsets, valid_flags); } // Guarded by partial tile size template <typename T, void Transform(T &), typename SizeT> static __device__ __forceinline__ void Invoke( T *dest, T data[][LOAD_VEC_SIZE], SizeT scatter_offsets[][LOAD_VEC_SIZE], const SizeT &partial_tile_size) { int tile_rank = (threadIdx.x << LOG_LOAD_VEC_SIZE) + (LOAD * ACTIVE_THREADS * LOAD_VEC_SIZE) + VEC; if (tile_rank < partial_tile_size) { Transform(data[LOAD][VEC]); ModifiedStore<CACHE_MODIFIER>::St(data[LOAD][VEC], dest + scatter_offsets[LOAD][VEC]); } Iterate<LOAD, VEC + 1>::template Invoke<T, Transform>( dest, data, scatter_offsets, partial_tile_size); } // Guarded by flags and partial tile size template <typename T, void Transform(T &), typename Flag, typename SizeT> static __device__ __forceinline__ void Invoke( T *dest, T data[][LOAD_VEC_SIZE], SizeT scatter_offsets[][LOAD_VEC_SIZE], Flag valid_flags[][LOAD_VEC_SIZE], const SizeT &partial_tile_size) { int tile_rank = (threadIdx.x << LOG_LOAD_VEC_SIZE) + (LOAD * ACTIVE_THREADS * LOAD_VEC_SIZE) + VEC; if (valid_flags[LOAD][VEC] && (tile_rank < partial_tile_size)) { Transform(data[LOAD][VEC]); ModifiedStore<CACHE_MODIFIER>::St(data[LOAD][VEC], dest + scatter_offsets[LOAD][VEC]); } Iterate<LOAD, VEC + 1>::template Invoke<T, Transform>( dest, data, scatter_offsets, valid_flags, partial_tile_size); } }; // Next Load template <int LOAD, int dummy> struct Iterate<LOAD, LOAD_VEC_SIZE, dummy> { // Unguarded template <typename T, void Transform(T &), typename SizeT> static __device__ __forceinline__ void Invoke( T *dest, T data[][LOAD_VEC_SIZE], SizeT scatter_offsets[][LOAD_VEC_SIZE]) { Iterate<LOAD + 1, 
0>::template Invoke<T, Transform>(dest, data, scatter_offsets); } // Guarded by flags template <typename T, void Transform(T &), typename Flag, typename SizeT> static __device__ __forceinline__ void Invoke( T *dest, T data[][LOAD_VEC_SIZE], SizeT scatter_offsets[][LOAD_VEC_SIZE], Flag valid_flags[][LOAD_VEC_SIZE]) { Iterate<LOAD + 1, 0>::template Invoke<T, Transform>( dest, data, scatter_offsets, valid_flags); } // Guarded by partial tile size template <typename T, void Transform(T &), typename SizeT> static __device__ __forceinline__ void Invoke( T *dest, T data[][LOAD_VEC_SIZE], SizeT scatter_offsets[][LOAD_VEC_SIZE], const SizeT &partial_tile_size) { Iterate<LOAD + 1, 0>::template Invoke<T, Transform>( dest, data, scatter_offsets, partial_tile_size); } // Guarded by flags and partial tile size template <typename T, void Transform(T &), typename Flag, typename SizeT> static __device__ __forceinline__ void Invoke( T *dest, T data[][LOAD_VEC_SIZE], SizeT scatter_offsets[][LOAD_VEC_SIZE], Flag valid_flags[][LOAD_VEC_SIZE], const SizeT &partial_tile_size) { Iterate<LOAD + 1, 0>::template Invoke<T, Transform>( dest, data, scatter_offsets, valid_flags, partial_tile_size); } }; // Terminate template <int dummy> struct Iterate<LOADS_PER_TILE, 0, dummy> { // Unguarded template <typename T, void Transform(T &), typename SizeT> static __device__ __forceinline__ void Invoke( T *dest, T data[][LOAD_VEC_SIZE], SizeT scatter_offsets[][LOAD_VEC_SIZE]) {} // Guarded by flags template <typename T, void Transform(T &), typename Flag, typename SizeT> static __device__ __forceinline__ void Invoke( T *dest, T data[][LOAD_VEC_SIZE], SizeT scatter_offsets[][LOAD_VEC_SIZE], Flag valid_flags[][LOAD_VEC_SIZE]) {} // Guarded by partial tile size template <typename T, void Transform(T &), typename SizeT> static __device__ __forceinline__ void Invoke( T *dest, T data[][LOAD_VEC_SIZE], SizeT scatter_offsets[][LOAD_VEC_SIZE], const SizeT &partial_tile_size) {} // Guarded by flags and partial tile size template <typename T, void Transform(T &), typename Flag, typename SizeT> static __device__ __forceinline__ void Invoke( T *dest, T data[][LOAD_VEC_SIZE], SizeT scatter_offsets[][LOAD_VEC_SIZE], Flag valid_flags[][LOAD_VEC_SIZE], const SizeT &partial_tile_size) {} }; //--------------------------------------------------------------------- // Interface //--------------------------------------------------------------------- /* * Scatter to destination with transform. The write is * predicated on the element's index in * the tile is not exceeding the partial tile size */ template <typename T, void Transform(T &), // Assignment function to transform the stored // value typename SizeT> static __device__ __forceinline__ void Scatter( T *dest, T data[][LOAD_VEC_SIZE], SizeT scatter_offsets[][LOAD_VEC_SIZE], const SizeT &partial_tile_size = TILE_SIZE) { if (partial_tile_size < TILE_SIZE) { // guarded IO Iterate<0, 0>::template Invoke<T, Transform>(dest, data, scatter_offsets, partial_tile_size); } else { // unguarded IO Iterate<0, 0>::template Invoke<T, Transform>(dest, data, scatter_offsets); } } /* * Scatter to destination. 
The write is predicated on the element's index in * the tile is not exceeding the partial tile size */ template <typename T, typename SizeT> static __device__ __forceinline__ void Scatter( T *dest, T data[][LOAD_VEC_SIZE], SizeT scatter_offsets[][LOAD_VEC_SIZE], const SizeT &partial_tile_size = TILE_SIZE) { Scatter<T, Operators<T>::NopTransform>(dest, data, scatter_offsets, partial_tile_size); } /* * Scatter to destination with transform. The write is * predicated on valid flags and that the element's index in * the tile is not exceeding the partial tile size */ template <typename T, void Transform(T &), // Assignment function to transform the stored // value typename Flag, typename SizeT> static __device__ __forceinline__ void Scatter( T *dest, T data[][LOAD_VEC_SIZE], Flag flags[][LOAD_VEC_SIZE], SizeT scatter_offsets[][LOAD_VEC_SIZE], const SizeT &partial_tile_size = TILE_SIZE) { if (partial_tile_size < TILE_SIZE) { // guarded by flags and partial tile size Iterate<0, 0>::template Invoke<T, Transform>(dest, data, scatter_offsets, flags, partial_tile_size); } else { // guarded by flags Iterate<0, 0>::template Invoke<T, Transform>(dest, data, scatter_offsets, flags); } } /* * Scatter to destination. The write is * predicated on valid flags and that the element's index in * the tile is not exceeding the partial tile size */ template <typename T, typename Flag, typename SizeT> static __device__ __forceinline__ void Scatter( T *dest, T data[][LOAD_VEC_SIZE], Flag flags[][LOAD_VEC_SIZE], SizeT scatter_offsets[][LOAD_VEC_SIZE], const SizeT &partial_tile_size = TILE_SIZE) { Scatter<T, Operators<T>::NopTransform>(dest, data, flags, scatter_offsets, partial_tile_size); } }; } // namespace io } // namespace util } // namespace gunrock
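Stripped of the compile-time LOADS_PER_TILE x LOAD_VEC_SIZE unrolling, the predication that ScatterTile applies is simply: write data[i] to dest[scatter_offsets[i]] only when the element's validity flag is set and its rank within the tile is below the partial tile size. The one-element-per-thread kernel below shows that guard in isolation; the names and the flat layout are illustrative, not the gunrock interface.

__global__ void scatter_guarded_sketch(float* dest,
                                       const float* data,
                                       const int* scatter_offsets,
                                       const unsigned char* valid_flags,
                                       int partial_tile_size)
{
    const int rank = blockIdx.x * blockDim.x + threadIdx.x;

    // Guarded store: both the validity flag and the partial-tile bound must pass.
    if (rank < partial_tile_size && valid_flags[rank])
        dest[scatter_offsets[rank]] = data[rank];
}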
#include "error_correct.h" #include "utils.h" #include <nvbio/basic/pipeline_context.h> #include <nvbio/basic/numbers.h> #include <nvbio/basic/atomics.h> #include <nvbio/basic/bloom_filter.h> #include <nvbio/basic/primitives.h> #include <nvbio/basic/console.h> #include <nvbio/basic/timer.h> #include <nvbio/basic/threads.h> #include <nvbio/basic/system.h> #include <nvbio/basic/exceptions.h> #include <nvbio/basic/cuda/arch.h> #include <nvbio/basic/cuda/ldg.h> #include <nvbio/io/sequence/sequence.h> #include <stdio.h> #include <stdlib.h> using namespace nvbio; /// /// A functor to sample kmers and insert them in a Bloom filter /// template <typename string_set_type, typename qual_set_type, typename trusted_filter_type> struct ErrorCorrectFunctor { /// constructor /// ///\param _k kmer length ///\param _alpha the sampling frequency ///\param _string_set the input string set to sample ///\param _filter the kmer Bloom filter /// NVBIO_HOST_DEVICE ErrorCorrectFunctor( const uint32 _k, string_set_type _string_set, qual_set_type _qual_set, const trusted_filter_type _trusted_filter, uint64* _stats, const float _max_correction, const uint8 _bad_quality, const uint8 _new_quality) : K(int(_k)), kmask( (uint64(1u) << (K*2))-1u ), string_set( _string_set ), qual_set( _qual_set ), trusted_kmers(_trusted_filter), stats(_stats), max_correction(_max_correction), bad_quality(_bad_quality), new_quality(_new_quality) {} /// is trusted /// NVBIO_FORCEINLINE NVBIO_HOST_DEVICE bool is_trusted(const KmerCode& kmer) const { return kmer.is_valid() && trusted_kmers[ kmer.code ]; } /// build the stored kmers list /// template <typename string_type> NVBIO_HOST_DEVICE void mark_solid_kmers(const int read_len, const string_type& read, bool* solid) const { KmerCode kmer( K ); for (int i = 0; i < K-1; ++i) kmer.push_back( read[i] ); for (int i = K-1; i < read_len; ++i) { kmer.push_back( read[i] ); solid[ i - K + 1 ] = is_trusted( kmer ); } } /// find the longest stored kmer /// NVBIO_HOST_DEVICE int2 find_longest_solid_kmer(const int kmer_count, const bool* solid) const { int longest_count = 0, stored_count = 0; int begin = -1; for (int i = 0; i < kmer_count; ++i) { if (solid[i]) ++stored_count; else { if (longest_count < stored_count) { longest_count = stored_count; begin = i - stored_count; } stored_count = 0; } } if (longest_count < stored_count) { longest_count = stored_count; begin = kmer_count - stored_count; } if (longest_count == 0) return make_int2( 0, 0 ); // unreliable read! if (longest_count >= kmer_count) return make_int2( begin, kmer_count ); return make_int2( begin, begin + longest_count ); } /// find the next non-solid position, left to right (i.e. in [from,to), from <= to) /// NVBIO_FORCEINLINE NVBIO_HOST_DEVICE int find_right_nonsolid(const int from, const int to, const bool* solid) const { for (int k = from; k < to; ++k) if (!solid[k]) return k; return to; } /// find the next non-solid position, right to left (i.e. 
in [to,from], from >= to) /// NVBIO_FORCEINLINE NVBIO_HOST_DEVICE int find_left_nonsolid(const int from, const int to, const bool* solid) const { for (int k = from; k >= to; --k) if (!solid[k]) return k; return to - 1; } /// find the best right change /// template <typename string_type> NVBIO_HOST_DEVICE KmerCode best_right_change(const int read_len, const string_type& read, const int from, const int to, const KmerCode& fixed_kmer, int& best_to, int& best_change, int& best_count) const { KmerCode best_kmer; best_to = -1; best_change = -1; best_count = 0; for (int j = 0; j < 4; ++j) { KmerCode kmer = fixed_kmer; kmer.push_back( j ); if (!is_trusted( kmer )) continue; if (best_to == -1) best_to = from-1; // check how many kmers this change can fix int k; for (k = from; k <= to; ++k) { kmer.push_back( read[k] ); if (!is_trusted( kmer )) break; } // Try to extend 1 position if (k > to && to == read_len - 1) { for (int m = 0; m < K - 1 - (to - from + 1); ++m) { for (int l = 0; l < 4; ++l) { KmerCode tmp_kmer( kmer ); tmp_kmer.push_back( l ); if (is_trusted( tmp_kmer )) { kmer.push_back( l ); ++k; break; } } } } if (k > best_to) { best_count = 1; best_to = k; best_change = j; best_kmer = kmer; } else if (k == best_to) { ++best_count; if (k == from && j == 0) { best_count = 1; best_change = j; best_kmer = kmer; } else if (k == from && best_change == 0) // [jp]: not sure this is correct - it seems it's setting best_count to 1 best_count = 1; // whenever the previous change was an 'A', but not if it was any other letter } } return best_kmer; } template <typename string_type> NVBIO_HOST_DEVICE bool check_right(KmerCode tmp_kmer, const string_type& read, const int pos, const int len) const { for (int t = 0; t < len; ++t) { tmp_kmer.push_back( read[pos + t] ); if (!is_trusted( tmp_kmer )) return false; } return true; } template <typename string_type> NVBIO_HOST_DEVICE bool adjust_right(const int read_len, const string_type& read, KmerCode kmer, const int pos) const { // check whether it is possible to extend by K/2 + 1 bases if (pos + K/2 + 1 >= read_len) return false; for (int c = 0; c < 4; ++c) { if (c == read[pos - 1]) continue; KmerCode tmp_kmer = kmer; tmp_kmer.shift_right( 1 ); tmp_kmer.push_back( c ); if (is_trusted( tmp_kmer )) { // test whether this branch makes sense if (check_right( tmp_kmer, read, pos, K/2 + 1 )) return true; } } return false; } template <typename string_type> NVBIO_HOST_DEVICE void fix_right( const int read_len, const string_type& read, const int2 longest_range, int* fix, const bool* solid, int& trimStart, int& badSuffix, bool& ambiguous) const { const int longest_count = longest_range.y - longest_range.x; const int kmer_count = read_len - K + 1; // from now on, i is the "current" index in the read we are fixing int i = longest_range.y; // scan right KmerCode kmer( K ); if (longest_range.y >= kmer_count) { // the kmers are all correct, force skip the correction. i = read_len + 1; } else { // build the first kmer to fix if (longest_count < K) { for (i = longest_range.y; i < longest_range.y - 1 + K; ++i) kmer.push_back( read[i] ); } else { // adjust the anchor if necessary for (int j = K / 2 - 1; j >= 0; --j) { for (i = longest_range.y - j - 1; i < longest_range.y - j + K - 1; ++i) kmer.push_back( read[i] ); if (adjust_right( read_len, read, kmer, i )) { // adjust the anchor --i; kmer.shift_right( 1 ); break; } } } } for (; i < read_len;) { const int from = i + 1; const int to = (i + K - 1 < read_len) ? 
i + K - 1 : read_len - 1; int best_to; int best_change; int best_count; // find the best right change const KmerCode tmp_kmer = best_right_change( read_len, read, from, to, kmer, best_to, best_change, best_count ); if (best_to == -1 || (best_count > 1 && (best_to <= to || to - i + 1 < K))) { trimStart = i; break; } if (best_count <= 1) { // unambiguous fix fix[i] = best_change; } else { // ambiguous fix fix[i] = -2; ambiguous = true; } if (best_to >= read_len) break; if (best_to <= to) { // there are multiple errors in the region kmer = tmp_kmer; kmer.shift_right( 1 ); i = best_to; } else { // search for next error. const int k = find_right_nonsolid( to - K + 2, kmer_count, solid ); // [jp] shouldn't it be k = from? [to - K + 2 = (i + K - 1) - K + 2 = from + K - 2 - K + 2 = from] if (k >= kmer_count) break; kmer.restart(); for (i = k; i < k + K - 1; ++i) { if (fix[i] < 0) kmer.push_back( read[i] ); else kmer.push_back( fix[i] ); } } } } /// find the best left change /// template <typename string_type> NVBIO_HOST_DEVICE KmerCode best_left_change(const int read_len, const string_type& read, const int from, const int to, const KmerCode& fixed_kmer, int& best_to, int& best_change, int& best_count) const { KmerCode best_kmer; best_to = read_len + 1; best_change = -1; best_count = 0; for (int j = 0; j < 4; ++j) { KmerCode kmer = fixed_kmer; kmer.push_front( j ); if (!is_trusted( kmer )) continue; if (best_to == read_len + 1) best_to = from + 1; int k; // check how many kmers this change can fix for (k = from; k >= to; --k) { kmer.push_front( read[k] ); if (!is_trusted( kmer )) break; } // try extension if (k < to && to == 0) { for (int m = 0; m < K - 1 - (from - to + 1); ++m) { for (int l = 0; l < 4; ++l) { KmerCode tmp_kmer( kmer ); tmp_kmer.push_front( l ); if (is_trusted( tmp_kmer )) { kmer.push_front( l ); --k; break; } } } } if (k < best_to) { best_count = 1; best_to = k; best_change = j; best_kmer = kmer; } else if (k == best_to) { ++best_count; if (k == from && j == read[from+1]) { best_count = 1; best_change = j; best_kmer = kmer; } else if (k == from && best_change == read[from+1]) best_count = 1; } } return best_kmer; } template <typename string_type> NVBIO_HOST_DEVICE bool check_left(KmerCode tmp_kmer, const string_type& read, const int pos, const int len) const { for (int t = 0; t < len; ++t) { tmp_kmer.push_front( read[pos - t] ); if (!is_trusted( tmp_kmer )) return false; } return true; } template <typename string_type> NVBIO_HOST_DEVICE bool adjust_left(const int read_len, const string_type& read, KmerCode kmer, const int pos) const { // check whether it is possible to extend by K/2 + 1 bases if (pos - 1 - K/2 < 0) return false; for (int c = 0; c < 4; ++c) { if (c == read[pos]) continue; KmerCode tmp_kmer = kmer; tmp_kmer.push_back( 0 ); // append an 'A' tmp_kmer.push_front( c ); if (is_trusted( tmp_kmer )) { // test whether this branch makes sense if (check_left( tmp_kmer, read, pos - 1, K/2 + 1 )) return true; } } return false; } template <typename string_type> NVBIO_HOST_DEVICE void fix_left( const int read_len, const string_type& read, const int2 longest_range, int* fix, const bool* solid, int& trimStart, int& badPrefix, bool& ambiguous) const { const int longest_count = longest_range.y - longest_range.x; KmerCode kmer( K ); // from now on, i is the "current" index in the read we are fixing int i; if (longest_range.x) { // force skip i = -1; } else { // set the starting point i = longest_range.x - 1; if (longest_count < K) { kmer.restart(); for (i = longest_range.x; i < 
longest_range.x + K - 1; ++i) kmer.push_back( read[i] ); kmer.push_back( 0 ); } else { // adjust the left side of the anchor for (int j = K / 2 - 1; j >= 0; --j) { const int pos = longest_range.x + j; kmer.restart(); for (i = pos; i < pos + K; ++i) kmer.push_back( read[i] ); if (adjust_left( read_len, read, kmer, pos )) { // adjust the anchor i = pos; kmer.push_back( 0 ); // append an 'A' break; } } } } for (; i >= 0;) { KmerCode fixed_kmer( kmer ); const int from = i - 1; const int to = nvbio::max( i - K + 1, 0 ); int best_to; int best_change; int best_count; // find the best left change const KmerCode tmp_kmer = best_left_change( read_len, read, from, to, kmer, best_to, best_change, best_count ); if (best_to == read_len + 1 || (best_count > 1 && (best_to >= to || i - to + 1 < K))) { badPrefix = i + 1; break; } if (best_count <= 1) fix[i] = best_change; else { fix[i] = -2; ambiguous = true; } if (best_to < 0) break; if (best_to >= to) { kmer = tmp_kmer; kmer.push_front( 0 ); // prepend an 'A' i = best_to; } else { // search for next error const int k = find_left_nonsolid( to - 1, 0, solid ); if (k < 0) break; kmer.restart(); for (i = k + 1; i < k + K; ++i) { if (fix[i] < 0) kmer.push_back( read[i] ); else kmer.push_back( fix[i] ); } i = k; kmer.push_back( 0 ); // append an 'A' } } } template <typename read_type, typename qual_type> NVBIO_HOST_DEVICE int correct(read_type read, qual_type qual, int& badPrefix, int& badSuffix) const { badPrefix = 0; badSuffix = 0; const int read_len = (int)length( read ); if (read_len < K) return 0; int fix[MAX_READ_LENGTH]; bool solid[MAX_READ_LENGTH]; int trimStart = -1; bool ambiguous = false; // // build the solid array // mark_solid_kmers( read_len, read, solid ); const int kmer_count = read_len - K + 1; // // mark trusted kmers // trimStart = read_len; for (int i = 0; i < read_len; ++i) fix[i] = -1; // find the longest trusted kmer const int2 longest_range = find_longest_solid_kmer( kmer_count, solid ); const int longest_count = longest_range.y - longest_range.x; if (longest_count == 0) return -1; // unreliable read! // check whether all kmers are reliable if (longest_count >= kmer_count) return 0; // fix the right end of the read fix_right( read_len, read, longest_range, fix, solid, trimStart, badSuffix, ambiguous ); // fix the left end of the read fix_left( read_len, read, longest_range, fix, solid, trimStart, badPrefix, ambiguous ); float correct_count = 0; const uint8 N = 4; for (int i = 0; i < read_len; ++i) { if (i >= K && (fix[i - K] >= 0 && read[i - K] < N)) { correct_count -= (qual[i - K] <= bad_quality) ? 0.5f : 1.0f; } if (fix[i] >= 0 && read[i] < N) { correct_count += (qual[i] <= bad_quality) ? 
0.5f : 1.0f; } if (correct_count > max_correction) return -1; // unreliable correction } int corrections = 0; for (int i = badPrefix; i < trimStart; ++i) { if (fix[i] < 0) continue; if (read[i] != fix[i]) { // fix the base read[i] = fix[i]; // fix the quality score if (new_quality != uint8('\0')) qual[i] = new_quality; ++corrections; } } badSuffix = read_len - trimStart; if (corrections == 0 && badPrefix == 0 && badSuffix == 0 && ambiguous) return -1; return corrections; } /// functor operator /// ///\param s input string index /// NVBIO_HOST_DEVICE void operator() (const uint32 s) const { typedef typename string_set_type::string_type read_type; typedef typename qual_set_type::string_type qual_type; // fetch the i-th string read_type read = string_set[s]; qual_type qual = qual_set[s]; int badPrefix, badSuffix; int corrections = correct( read, qual, badPrefix, badSuffix ); if (corrections == 0) atomic_add( stats + ERROR_FREE, 1u ); else if (corrections > 0) atomic_add( stats + CORRECTIONS, corrections ); else atomic_add( stats + UNFIXABLE, 1u ); if (badSuffix > 0) { atomic_add( stats + TRIMMED_READS, 1u ); atomic_add( stats + TRIMMED_BASES, badSuffix ); } } const int K; const uint64 kmask; mutable string_set_type string_set; mutable qual_set_type qual_set; const trusted_filter_type trusted_kmers; uint64* stats; const float max_correction; const uint8 bad_quality; const uint8 new_quality; }; // process the next batch // bool ErrorCorrectStage::process(PipelineContext& context) { typedef nvbio::io::SequenceDataEdit<DNA_N,io::SequenceDataView>::sequence_string_set_type string_set_type; typedef nvbio::io::SequenceDataEdit<DNA_N,io::SequenceDataView>::qual_string_set_type qual_set_type; // declare the Bloom filter types typedef nvbio::blocked_bloom_filter<hash_functor1, hash_functor2, nvbio::cuda::ldg_pointer<uint4> > trusted_filter_type; // declare the error corrector functor typedef ErrorCorrectFunctor<string_set_type,qual_set_type,trusted_filter_type> functor_type; log_debug(stderr, " error correction... 
started\n" ); // fetch the input io::SequenceDataHost* h_read_data = context.input<io::SequenceDataHost>( 0 ); float time = 0.0f; // introduce a timing scope try { const nvbio::ScopedTimer<float> timer( &time ); if (device >= 0) { // set the device cudaSetDevice( device ); // copy it to the device nvbio::io::SequenceDataDevice d_read_data( *h_read_data ); nvbio::io::SequenceDataView d_read_view( d_read_data ); // build an editable view nvbio::io::SequenceDataEdit<DNA_N,nvbio::io::SequenceDataView> d_read_edit( d_read_view ); // build the Bloom filter trusted_filter_type trusted_filter( TRUSTED_KMERS_FILTER_K, trusted_filter_size, (const uint4*)trusted_filter_storage ); // build the kmer sampling functor const functor_type error_corrector( k, d_read_edit.sequence_string_set(), d_read_edit.qual_string_set(), trusted_filter, stats_vec, max_correction, uint8(bad_quality), uint8(new_quality) ); // and apply the functor to all reads in the batch device_for_each( d_read_view.size(), error_corrector ); cudaDeviceSynchronize(); nvbio::cuda::check_error("error-correct"); // fetch the output nvbio::io::SequenceDataHost* output = context.output<nvbio::io::SequenceDataHost>(); // copy the modified device data to the output *output = d_read_data; } else { omp_set_num_threads( -device ); // fetch the output nvbio::io::SequenceDataHost* output = context.output<nvbio::io::SequenceDataHost>(); // copy from the input *output = *h_read_data; nvbio::io::SequenceDataView h_read_view( *output ); // build an editable view nvbio::io::SequenceDataEdit<DNA_N,nvbio::io::SequenceDataView> h_read_edit( h_read_view ); // build the Bloom filter trusted_filter_type trusted_filter( TRUSTED_KMERS_FILTER_K, trusted_filter_size, (const uint4*)trusted_filter_storage ); // build the kmer sampling functor const functor_type error_corrector( k, h_read_edit.sequence_string_set(), h_read_edit.qual_string_set(), trusted_filter, stats_vec, max_correction, uint8(bad_quality), uint8(new_quality) ); // and apply the functor to all reads in the batch host_for_each( h_read_view.size(), error_corrector ); } } catch (nvbio::cuda_error e) { log_error(stderr, "[ErrorCorrectStage] caught a nvbio::cuda_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (nvbio::bad_alloc e) { log_error(stderr, "[ErrorCorrectStage] caught a nvbio::bad_alloc exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (nvbio::logic_error e) { log_error(stderr, "[ErrorCorrectStage] caught a nvbio::logic_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (nvbio::runtime_error e) { log_error(stderr, "[ErrorCorrectStage] caught a nvbio::runtime_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (thrust::system::system_error e) { log_error(stderr, "[ErrorCorrectStage] caught a thrust::system_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (std::bad_alloc e) { log_error(stderr, "[ErrorCorrectStage] caught a std::bad_alloc exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (std::logic_error e) { log_error(stderr, "[ErrorCorrectStage] caught a std::logic_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (std::runtime_error e) { log_error(stderr, "[ErrorCorrectStage] caught a std::runtime_error exception:\n"); log_error(stderr, " %s\n", e.what()); exit(1); } catch (...) 
{ log_error(stderr, "[ErrorCorrectStage] caught an unknown exception!\n"); exit(1); } // update the time stats stats->m_mutex.lock(); stats->m_time += time; log_info(stderr, "\r processed reads [%llu, %llu] (%.1fM / %.2fG bps, %.1fK reads/s, %.1fM bps/s - %s<%d>) ", stats->m_reads, stats->m_reads + h_read_data->size(), 1.0e-6f * (h_read_data->bps()), 1.0e-9f * (stats->m_bps + h_read_data->bps()), stats->m_time ? (1.0e-3f * (stats->m_reads + h_read_data->size())) / stats->m_time : 0.0f, stats->m_time ? (1.0e-6f * (stats->m_bps + h_read_data->bps() )) / stats->m_time : 0.0f, device >= 0 ? "gpu" : "cpu", device >= 0 ? device : -device ); log_debug_cont(stderr, "\n"); log_debug(stderr, " error correction... done\n" ); log_debug(stderr, " peak memory : %.1f GB\n", float( peak_resident_memory() ) / float(1024*1024*1024)); stats->m_reads += h_read_data->size(); stats->m_bps += h_read_data->bps(); stats->m_mutex.unlock(); return true; }
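/*
 * The corrector above hinges on a rolling 2-bit kmer code: each base is
 * shifted into the low bits and masked with kmask = (1 << (2*K)) - 1, so the
 * trusted-kmer Bloom filter can be queried in O(1) per read position.  Below
 * is a minimal host-side sketch of that encoding and of the solid-kmer
 * marking step; KmerCode2Bit and the is_trusted predicate are illustrative
 * stand-ins, not nvbio types, and 'N' handling is omitted for brevity.
 */
#include <cstdint>
#include <functional>
#include <vector>

struct KmerCode2Bit
{
    int      K;
    uint64_t kmask;
    uint64_t code   = 0;
    int      filled = 0;    // how many bases are currently encoded

    explicit KmerCode2Bit(int k) : K(k), kmask((uint64_t(1) << (2 * k)) - 1) {}

    void push_back(uint8_t base)    // base in [0,3] for A,C,G,T
    {
        code   = ((code << 2) | (base & 3u)) & kmask;
        filled = (filled < K) ? filled + 1 : K;
    }
    bool is_valid() const { return filled == K; }
};

// Mark solid[i] = true iff the kmer starting at read position i is trusted.
inline void mark_solid_kmers_sketch(const std::vector<uint8_t>& read, int K,
                                    const std::function<bool(uint64_t)>& is_trusted,
                                    std::vector<bool>& solid)
{
    const int n = int(read.size());
    solid.assign(n >= K ? n - K + 1 : 0, false);

    KmerCode2Bit kmer(K);
    for (int i = 0; i < n; ++i)
    {
        kmer.push_back(read[i]);
        if (i >= K - 1)
            solid[i - K + 1] = kmer.is_valid() && is_trusted(kmer.code);
    }
}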
// HistogramSpec.cu // 实现直方图规定化算法 #include "HistogramSpec.h" #include "Histogram.h" #include <iostream> using namespace std; #include "ErrorCode.h" // 宏:HISTOGRAM_LEVEL // 输入图像直方图的灰度级,默认为 256。 #ifndef HISTOGRAM_LEVEL #define HISTOGRAM_LEVEL 256 #endif // 宏:HISTSPEC_LEVEL // 规定化后输出图像的灰度级,默认为 256。 #ifndef HISTSPEC_LEVEL #define HISTSPEC_LEVEL 256 #endif // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块的尺寸。 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // Kernel 函数: _calDiffMatrixKer(根据原始和目标累积直方图建立差值矩阵) // 根据输入图像的原始累积直方图,以及目标累积直方图计算差值矩阵 // diffmatrix,并且初始化映射矩阵 maptable,将其各元素其初始值置为 // HISTSPEC_LEVEL. static __global__ void // Kernel 函数无返回值。 _calDiffMatrixKer( float *diffmatrix, // 差值矩阵。 float *cumhist, // 原始累积直方图。 float *cumhistequ, // 目标累积直方图。 unsigned int *maptable // 映射矩阵。 ); // Kernel 函数: _findColumnMinKer(查找列最小值) // 根据差值矩阵 diffmatrix,查找每一列的最小值,并将每一列出现最小 // 值的行号保存在数组 colmin 中。 static __global__ void // Kernel 函数无返回值。 _findColumnMinKer( float *diffmatrix, // 差值矩阵。 unsigned int *colmin // 列最小值矩阵。 ); // Kernel 函数: _findRowMinKer(查找行最小值) // 根据差值矩阵 diffmatrix,查找每一行的最小值,并将每一行出现最小 // 值的行列号保存在数组 rowmin 中。 static __global__ void // Kernel 函数无返回值。 _findRowMinKer( float *diffmatrix, // 差值矩阵。 unsigned int *rowmin // 行最小值矩阵。 ); // Kernel 函数: _groupMappingLawKer(计算灰度级之间的映射矩阵) // 根据组映射规则,通过行、列最小值矩阵,计算原始图像和目标图像灰度级 // 之间的映射关系。 static __global__ void // Kernel 函数无返回值。 _groupMappingLawKer( unsigned int *rowmin, // 行最小值矩阵。 unsigned int *colmin, // 列最小值矩阵。 unsigned int *maptable // 灰度级之间的映射矩阵。 ); // Kernel 函数: _maptableJudgeKer(整理灰度级之间的映射矩阵) // 根据原始累积直方图 devcumhist,以及向后匹配原则,整理灰度级之间的 // 映射矩阵。 static __global__ void // Kernel 函数无返回值。 _maptableJudgeKer( unsigned int *maptable, // 灰度级之间的映射矩阵。 float *devcumhist // 原始累积直方图。 ); // Kernel 函数: _mapToOutimgKer(计算输出图像) // 对于每个像素点,查找原始灰度级,并根据灰度级映射矩阵 maptable,得 // 到变化后灰度级,从而极端得到输出图像。 static __global__ void // Kernel 函数无返回值。 _mapToOutimgKer( ImageCuda inimg, // 输入图像。 ImageCuda outimg, // 输出图像。 unsigned int *maptable // 灰度级之间的映射矩阵。 ); // Kernel 函数: _calDiffMatrixKer(根据原始和目标累积直方图建立差值矩阵) static __global__ void _calDiffMatrixKer( float *diffmatrix, float *cumhist, float *cumhistequ, unsigned int *maptable) { // 申请大小为目标直方图灰度级 HISTSPEC_LEVEL 的共享内存。 __shared__ float shared_cumhistequ[HISTSPEC_LEVEL]; // 获取当前线程的块号。 int blocktid = blockIdx.x; // 获取当前线程的线程号。 int threadtid = threadIdx.x; // 计算差值矩阵中对应的输出点的位置。 int index = blockIdx.x * blockDim.x + threadIdx.x; // 申请局部变量,用于临时存储当前线程计算得到的差值。 float temp = 0.0; // 初始化灰度级匹配矩阵。 maptable[blocktid] = HISTSPEC_LEVEL; // 将目标累积直方图对应的存储在共享内存中,方便同一块内的线程共享,从而 // 提高读取速度。 shared_cumhistequ[threadtid] = cumhistequ[threadtid]; // 计算差值。 temp = shared_cumhistequ[threadtid] - cumhist[blocktid]; // 进行块内同步。 __syncthreads(); // 将计算所得的差值写入差值矩阵相应位置上,若临时变量 temp 大于或者等 // 于 0,直接赋值;若 temp 小于 0,则取其相反数之后再赋值。 *(diffmatrix + index) = (temp >= 0.0 ? 
temp : -temp); } // Kernel 函数: _findColumnMinKer(查找列最小值) static __global__ void _findColumnMinKer( float *diffmatrix, unsigned int *colmin) { // 获取当前线程的块号,即对应差值矩阵中当前的列号。 int blocktid = blockIdx.x; // 获取当前线程的线程号,即对应差值矩阵中当前的行号。 int threadtid = threadIdx.x; // 计算当前线程在差值矩阵中的偏移。 int tid = threadIdx.x * gridDim.x + blockIdx.x; int k; float tempfloat; unsigned int tempunint; // 申请一个大小等于原始直方图灰度级的 float 型共享内存,用于存储每 // 一列中待比较的差值。 __shared__ float shared[HISTOGRAM_LEVEL]; // 申请一个大小等于原始直方图灰度级的 unsigned int 型共享内存,用于 // 存储待比较差值对应的行号。 __shared__ unsigned int index[HISTOGRAM_LEVEL]; // 将当前线程对应的差值矩阵中的差值以及其对应的索引(即行号)保存 // 在该块的共享内存中。 shared[threadtid] = *(diffmatrix + tid); index[threadtid] = threadtid; // 块内同步,为了保证一个块内的所有线程都已经完成了上述操作,即存 // 储该列的差值以及索引到共享内存中。 __syncthreads(); // 使用双调排序的思想,找到该列的最小值。 for (k = 1; k < HISTOGRAM_LEVEL; k = k << 1) { // 对待排序的元素进行分组,每次都将差值较小的元素交换到数组中 // 较前的位置,然后改变分组大小,进而在比较上一次得到的较小值 // 并做相应的交换,以此类推,最终数组中第 0 号元素存放的是该列 // 的最小值。 if (((threadtid % (k << 1)) == 0) && shared[threadtid] > shared[threadtid + k] ) { // 两个差值进行交换。 tempfloat = shared[threadtid]; shared[threadtid] = shared[threadtid + k]; shared[threadtid + k] = tempfloat; // 交换相对应的索引 index 值。 tempunint = index[threadtid]; index[threadtid] = index[threadtid + k]; index[threadtid + k] = tempunint; } // 块内同步 __syncthreads(); } // 将当前列最小值出现的行号保存在数组 colmin 中。 colmin[blocktid] = index[0]; } // Kernel 函数: _findRowMinKer(查找行最小值) static __global__ void _findRowMinKer( float *diffmatrix, unsigned int *rowmin) { // 获取当前线程的块号。 int blocktid = blockIdx.x; // 获取当前线程的线程号。 int threadtid = threadIdx.x; // 计算当前线程在差值矩阵中的偏移。 int tid = blockIdx.x * blockDim.x + threadIdx.x; int k; float tempfloat; unsigned int tempunint; // 申请一个大小等于原始直方图灰度级的 float 型共享内存,用于存储每 // 一行中待比较的差值。 __shared__ float shared[HISTSPEC_LEVEL]; // 申请一个大小等于原始直方图灰度级的 unsigned int 型共享内存,用于 // 存储待比较差值对应的列号。 __shared__ unsigned int index[HISTSPEC_LEVEL]; // 将当前线程对应的差值矩阵中的差值以及其对应的索引(即列号)保存 // 在该块的共享内存中。 shared[threadtid] = *(diffmatrix + tid); index[threadtid] = threadtid; // 块内同步,为了保证一个块内的所有线程都已经完成了上述操作,即存 // 储该行的差值以及索引到共享内存中。 __syncthreads(); // 使用双调排序的思想,找到该行的最小值。 for (k = 1; k < HISTSPEC_LEVEL; k = k << 1) { // 对待排序的元素进行分组,每次都将差值较小的元素交换到数组中 // 较前的位置,然后改变分组大小,进而在比较上一次得到的较小值 // 并做相应的交换,以此类推,最终数组中第 0 号元素存放的是该行 // 的最小值。 if (((threadtid % (k << 1)) == 0) && shared[threadtid] > shared[threadtid + k] ) { // 两个差值进行交换。 tempfloat = shared[threadtid]; shared[threadtid] = shared[threadtid + k]; shared[threadtid + k] = tempfloat; // 交换相对应的索引index值。 tempunint = index[threadtid]; index[threadtid] = index[threadtid + k]; index[threadtid + k] = tempunint; } // 块内同步。 __syncthreads(); } // 将当前行最小值出现的列号保存在数组 rowmin 中。 rowmin[blocktid] = index[0]; } // Kernel 函数: _groupMappingLawKer(计算灰度级之间的映射矩阵) static __global__ void _groupMappingLawKer( unsigned int *rowmin, unsigned int *colmin, unsigned int *maptable) { // 获取当前的线程号。 int tid = threadIdx.x; // 通过行列最小值的关系,计算 group mapping law(GML)映射关系。 // 可得到初始的不完整的映射表。 maptable[colmin[tid]] = rowmin[colmin[tid]]; } // Kernel 函数: _maptableJudgeKer(整理灰度级之间的映射矩阵) static __global__ void _maptableJudgeKer( unsigned int *maptable, float *devcumhist) { // 获取当前的线程号。 int tid = threadIdx.x; int temp, i = tid; // 通过向高灰度匹配的原则,整理灰度级映射关系表。 while (devcumhist[tid] >= 0) { // 暂存映射表中的值。 temp = maptable[i]; // 判断如果当前映射表中的值是无效值,则向后进行匹配,直到 // 符合灰度级要求。 if (temp == HISTSPEC_LEVEL) { i++; } else { // 更新灰度级映射表中映射关系。 maptable[tid] = temp; break; } } } // Kernel 函数: _mapToOutimgKer(计算输出图像) static __global__ void _mapToOutimgKer( ImageCuda inimg, ImageCuda outimg, unsigned int *maptable) { // 
计算想成对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的 // 坐标的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并 // 行度缩减的策略,令一个线程处理 4 个输出像素,这四个像素位于统一列的相邻 // 4 行上,因此,对于 r 需要进行乘 4 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; // 计算第一个输入坐标点对应的图像数据数组下标。 int inidx = r * inimg.pitchBytes + c; // 计算第一个输出坐标点对应的图像数据数组下标。 int outidx = r * outimg.pitchBytes + c; // 读取第一个输入坐标点对应的像素值。 unsigned char intemp; intemp = inimg.imgMeta.imgData[inidx]; // 一个线程处理四个像素点. // 通过灰度级匹配矩阵,得到输入图像当前点所对应的变换后的灰度值,并赋值 // 给输出图像的对应位置。 // 线程中处理的第一个点。 outimg.imgMeta.imgData[outidx] = maptable[intemp]; // 处理剩下的三个像素点。 for (int i =0; i < 3; i++) { // 这三个像素点,每个像素点都在前一个的下一行,而 x 分量保持不变。因 // 此,需要检查这个像素点是否越界。检查只针对 y 分量即可,x 分量在各点 // 之间没有变化,故不用检查。 if (++r >= outimg.imgMeta.height) return; // 根据上一个像素点,计算当前像素点的对应的输出图像的下标。由于只有 y // 分量增加 1,所以下标只需要加上一个 pitch 即可,不需要在进行乘法计 // 算。 inidx += inimg.pitchBytes; outidx += outimg.pitchBytes; intemp = inimg.imgMeta.imgData[inidx]; // 通过灰度级匹配矩阵,得到输入图像当前点所对应的变换后的灰度值,并赋值 // 给输出图像的对应位置。 outimg.imgMeta.imgData[outidx] = maptable[intemp]; } } // Host 成员方法:HistogramEquilibrium(直方图均衡化) __host__ int HistogramSpec::HistogramEquilibrium(Image *inimg, Image *outimg) { // 检查输入和输出图像是否为 NULL,如果为 NULL 直接报错返回。 if (inimg == NULL || outimg == NULL) return NULL_POINTER; // 这一段代码进行图像的预处理工作。图像的预处理主要完成在 Device 内存上为输 // 入和输出图像准备内存空间,以便盛放数据。 int errcode; // 局部变量,错误码 // 将输入图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // 将输出图像拷贝入 Device 内存。 errcode = ImageBasicOp::copyToCurrentDevice(outimg); if (errcode != NO_ERROR) { // 如果输出图像无数据(故上面的拷贝函数会失败),则会创建一个和输入图 // 像的 ROI 子图像尺寸相同的图像。 errcode = ImageBasicOp::makeAtCurrentDevice( outimg, inimg->roiX2 - inimg->roiX1, inimg->roiY2 - inimg->roiY1); // 如果创建图像也操作失败,则说明操作彻底失败,报错退出。 if (errcode != NO_ERROR) return errcode; } // 提取输入图像的 ROI 子图像。 ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) return errcode; // 提取输出图像的 ROI 子图像。 ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) return errcode; // 根据子图像的大小对长,宽进行调整,选择长度小的长,宽进行子图像的统一 if (insubimgCud.imgMeta.width > outsubimgCud.imgMeta.width) insubimgCud.imgMeta.width = outsubimgCud.imgMeta.width; else outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width; if (insubimgCud.imgMeta.height > outsubimgCud.imgMeta.height) insubimgCud.imgMeta.height = outsubimgCud.imgMeta.height; else outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height; // 调用类 Histogram 中的方法,计算输入图像的直方图。 Histogram hist; unsigned int histogram[HISTOGRAM_LEVEL] = {0}; hist.histogram(inimg, histogram, true); // 计算原始直方图的累积直方图。 float cumsum = 0; float cumhist[HISTOGRAM_LEVEL] = {0}; for (int i = 0; i < HISTOGRAM_LEVEL; i++) { cumsum += histogram[i]; cumhist[i] = (float)cumsum / (inimg->height * inimg->width); } // 计算均衡化后的累积直方图,均衡化之后每个灰度值的概率相等。 float cumhistequ[HISTSPEC_LEVEL]={0}; for (int j = 0; j < HISTSPEC_LEVEL; j++) { cumhistequ[j] = (float)(j+1) / HISTSPEC_LEVEL; } // 在 Device 上分配临时空间。一次申请所有空间,然后通过偏移索引各个数组。 // 包括原始累积直方图 devcumhist,均衡化后累积直方图 devcumhistequ,行最 // 小值矩阵 devrowmin,列最小值矩阵 colmin,映射矩阵 devmaptable,差值矩 // 阵 devdiffmatrix。 cudaError_t cudaerrcode; float *alldevicepointer; float *devcumhist, *devcumhistequ, *devdiffmatrix; unsigned int *devcolmin, *devrowmin, *devmaptable; cudaerrcode = cudaMalloc( (void **)&alldevicepointer, (3 * 
HISTOGRAM_LEVEL + 2 * HISTOGRAM_LEVEL + HISTOGRAM_LEVEL * HISTOGRAM_LEVEL) * sizeof (float)); if (cudaerrcode != cudaSuccess) { return cudaerrcode; } // 初始化所有 Device 上的内存空间。 cudaerrcode = cudaMemset( alldevicepointer, 0, (3 * HISTOGRAM_LEVEL + 2 * HISTOGRAM_LEVEL + HISTOGRAM_LEVEL * HISTOGRAM_LEVEL) * sizeof (float)); if (cudaerrcode != cudaSuccess) { cudaFree(alldevicepointer); return cudaerrcode; } // 通过偏移读取 devcumhist 内存空间。 devcumhist = alldevicepointer; // 将 Host 端计算的累积直方图 cumhist 拷贝到 Device 端。 cudaerrcode = cudaMemcpy(devcumhist, cumhist, HISTOGRAM_LEVEL * sizeof (float), cudaMemcpyHostToDevice); if (cudaerrcode != cudaSuccess) { cudaFree(alldevicepointer); return cudaerrcode; } // 通过偏移读取 devcumhistequ 内存空间。 devcumhistequ = alldevicepointer + HISTOGRAM_LEVEL; // 将 Host 端计算的累积直方图 cumhistequ 拷贝到 Device 端。 cudaerrcode = cudaMemcpy(devcumhistequ, cumhistequ, HISTSPEC_LEVEL * sizeof (float), cudaMemcpyHostToDevice); if (cudaerrcode != cudaSuccess) { cudaFree(alldevicepointer); return cudaerrcode; } // 通过偏移读取差值矩阵 devdiffmatrix 内存空间。 devdiffmatrix = alldevicepointer + 3 * HISTOGRAM_LEVEL + 2 * HISTOGRAM_LEVEL; // 通过偏移读取映射矩阵 devmaptable 内存空间,并将转换指针类型。 devmaptable = (unsigned int *)(alldevicepointer + 2 * (HISTOGRAM_LEVEL + HISTSPEC_LEVEL)); // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blocksize, gridsize; gridsize.x = HISTOGRAM_LEVEL; gridsize.y = 1; blocksize.x = HISTSPEC_LEVEL; blocksize.y = 1; // 调用核函数,计算差值矩阵。 _calDiffMatrixKer<<<gridsize, blocksize>>>( devdiffmatrix, devcumhist, devcumhistequ, devmaptable); if (cudaGetLastError() != cudaSuccess) { cudaFree(alldevicepointer); return CUDA_ERROR; } // 通过偏移读取映射矩阵 devrowmin 内存空间,并将转换指针类型。 devrowmin = (unsigned int *)(alldevicepointer + HISTOGRAM_LEVEL + HISTSPEC_LEVEL); // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 gridsize.x = HISTOGRAM_LEVEL; gridsize.y = 1; blocksize.x = HISTSPEC_LEVEL; blocksize.y = 1; // 调用核函数,计算行最小值。 _findRowMinKer<<<gridsize, blocksize>>>( devdiffmatrix, (unsigned int *)devrowmin); if (cudaGetLastError() != cudaSuccess) { cudaFree(alldevicepointer); return CUDA_ERROR; } // 通过偏移读取映射矩阵 devcolmin 内存空间,并将转换指针类型。 devcolmin = (unsigned int *)(alldevicepointer + 2 * HISTOGRAM_LEVEL + HISTSPEC_LEVEL); // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 gridsize.x = HISTSPEC_LEVEL; gridsize.y = 1; blocksize.x = HISTOGRAM_LEVEL; blocksize.y = 1; // 调用核函数,计算列最小值。 _findColumnMinKer<<<gridsize, blocksize>>>( devdiffmatrix, (unsigned int *)devcolmin); if (cudaGetLastError() != cudaSuccess) { cudaFree(alldevicepointer); return CUDA_ERROR; } // 计算灰度级之间映射关系。 _groupMappingLawKer<<<1, HISTSPEC_LEVEL>>>( (unsigned int *)devrowmin, (unsigned int *)devcolmin, devmaptable); if (cudaGetLastError() != cudaSuccess) { cudaFree(alldevicepointer); return CUDA_ERROR; } // 整理映射关系 _maptableJudgeKer<<<1, HISTOGRAM_LEVEL>>>( (unsigned int *)devmaptable, devcumhist); if (cudaGetLastError() != cudaSuccess) { cudaFree(alldevicepointer); return CUDA_ERROR; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) / (blocksize.y * 4); // 通过映射矩阵,得到输出图像。 _mapToOutimgKer<<<gridsize, blocksize>>>( insubimgCud, outsubimgCud,(unsigned int *)devmaptable); if (cudaGetLastError() != cudaSuccess) { cudaFree(alldevicepointer); return CUDA_ERROR; } // 释放 Device 上的临时空间 alldevicedata。 cudaFree(alldevicepointer); return NO_ERROR; } // Host 成员方法:HistogramSpecByImage(根据参考图像进行规定化) __host__ int HistogramSpec::HistogramSpecByImage(Image *inimg, 
Image *outimg) { // 检查输入图像,参考图像和输出图像是否为 NULL,如果为 NULL 直接报错返回。 if (inimg == NULL || outimg == NULL || this->refimg == NULL) return NULL_POINTER; // 这一段代码进行图像的预处理工作。图像的预处理主要完成在 Device 内存上为输 // 入和输出图像准备内存空间,以便盛放数据。 int errcode; // 局部变量,错误码 // 将输入图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // 将输出图像拷贝入 Device 内存。 errcode = ImageBasicOp::copyToCurrentDevice(outimg); if (errcode != NO_ERROR) { // 如果输出图像无数据(故上面的拷贝函数会失败),则会创建一个和输入图 // 像的 ROI 子图像尺寸相同的图像。 errcode = ImageBasicOp::makeAtCurrentDevice( outimg, inimg->roiX2 - inimg->roiX1, inimg->roiY2 - inimg->roiY1); // 如果创建图像也操作失败,则说明操作彻底失败,报错退出。 if (errcode != NO_ERROR) return errcode; } // 提取输入图像的 ROI 子图像。 ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) return errcode; // 提取输出图像的 ROI 子图像。 ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) return errcode; // 根据子图像的大小对长,宽进行调整,选择长度小的长,宽进行子图像的统一 if (insubimgCud.imgMeta.width > outsubimgCud.imgMeta.width) insubimgCud.imgMeta.width = outsubimgCud.imgMeta.width; else outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width; if (insubimgCud.imgMeta.height > outsubimgCud.imgMeta.height) insubimgCud.imgMeta.height = outsubimgCud.imgMeta.height; else outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height; // 调用类 Histogram 中的方法,计算输入图像的直方图。 Histogram hist; unsigned int histogram[HISTOGRAM_LEVEL] = {0}; hist.histogram(inimg, histogram, true); // 调用类 Histogram 中的方法,计算参考图像的直方图。 unsigned int histspec[HISTSPEC_LEVEL] = {0}; hist.histogram(this->refimg, histspec, true); // 计算原始直方图的累积直方图。 unsigned int cumsum = 0; float cumhist[HISTOGRAM_LEVEL] = {0.0}; for (int i = 0; i < HISTOGRAM_LEVEL; i++) { cumsum += histogram[i]; cumhist[i] = (float)cumsum / (inimg->height * inimg->width); } // 计算参考图像的累积直方图,均衡化之后每个灰度值的概率相等。 float cumhistspec[HISTSPEC_LEVEL]={0.0}; cumsum = 0; for (int i = 0; i < HISTSPEC_LEVEL; i++) { cumsum += histspec[i]; cumhistspec[i] = (float)cumsum / (this->refimg->width * this->refimg->height); } // 在 Device 上分配临时空间。一次申请所有空间,然后通过偏移索引各个数组。 // 包括原始累积直方图 devcumhist,均衡化后累积直方图 devcumhistequ,行最 // 小值矩阵 devrowmin,列最小值矩阵 colmin,映射矩阵 devmaptable,差值矩 // 阵 devdiffmatrix。 cudaError_t cudaerrcode; float *alldevicepointer; float *devcumhist, *devcumhistspec, *devdiffmatrix; unsigned int *devcolmin, *devrowmin, *devmaptable; cudaerrcode = cudaMalloc( (void **)&alldevicepointer, (3 * HISTOGRAM_LEVEL + 2 * HISTOGRAM_LEVEL + HISTOGRAM_LEVEL * HISTOGRAM_LEVEL) * sizeof (float)); if (cudaerrcode != cudaSuccess) { cudaFree(alldevicepointer); return cudaerrcode; } // 初始化所有 Device 上的内存空间。 cudaerrcode = cudaMemset( alldevicepointer, 0, (3 * HISTOGRAM_LEVEL + 2 * HISTOGRAM_LEVEL + HISTOGRAM_LEVEL * HISTOGRAM_LEVEL) * sizeof (float)); if (cudaerrcode != cudaSuccess) { cudaFree(alldevicepointer); return cudaerrcode; } // 通过偏移读取 devcumhist 内存空间。 devcumhist = alldevicepointer; // 将 Host 端计算的累积直方图 cumhist 拷贝到 Device 端。 cudaerrcode = cudaMemcpy(devcumhist, cumhist, HISTOGRAM_LEVEL * sizeof (float), cudaMemcpyHostToDevice); if (cudaerrcode != cudaSuccess) { cudaFree(alldevicepointer); return cudaerrcode; } // 通过偏移读取 devcumhistequ 内存空间。 devcumhistspec = alldevicepointer + HISTOGRAM_LEVEL; // 将 Host 端计算的累积直方图 cumhistspec 拷贝到 Device 端。 cudaerrcode = cudaMemcpy(devcumhistspec, cumhistspec, HISTSPEC_LEVEL * sizeof (float), cudaMemcpyHostToDevice); if (cudaerrcode != cudaSuccess) { cudaFree(alldevicepointer); return cudaerrcode; } // 通过偏移读取差值矩阵 devdiffmatrix 内存空间。 
devdiffmatrix = alldevicepointer + 3 * HISTOGRAM_LEVEL + 2 * HISTOGRAM_LEVEL; // 通过偏移读取映射矩阵 devmaptable 内存空间,并将转换指针类型。 devmaptable = (unsigned int *)(alldevicepointer + 2 * (HISTOGRAM_LEVEL + HISTSPEC_LEVEL)); // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blocksize, gridsize; gridsize.x = HISTOGRAM_LEVEL; gridsize.y = 1; blocksize.x = HISTSPEC_LEVEL; blocksize.y = 1; // 调用核函数,计算差值矩阵。 _calDiffMatrixKer<<<gridsize, blocksize>>>( devdiffmatrix, devcumhist, devcumhistspec, devmaptable); if (cudaGetLastError() != cudaSuccess) { cudaFree(alldevicepointer); return CUDA_ERROR; } // 通过偏移读取映射矩阵 devrowmin 内存空间,并将转换指针类型。 devrowmin = (unsigned int *)(alldevicepointer + HISTOGRAM_LEVEL + HISTSPEC_LEVEL); // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 gridsize.x = HISTOGRAM_LEVEL; gridsize.y = 1; blocksize.x = HISTSPEC_LEVEL; blocksize.y = 1; // 调用核函数,计算行最小值。 _findRowMinKer<<<gridsize, blocksize>>>( devdiffmatrix, (unsigned int *)devrowmin); if (cudaGetLastError() != cudaSuccess) { cudaFree(alldevicepointer); return CUDA_ERROR; } // 通过偏移读取映射矩阵 devcolmin 内存空间,并将转换指针类型。 devcolmin = (unsigned int *)(alldevicepointer + 2 * HISTOGRAM_LEVEL + HISTSPEC_LEVEL); // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 gridsize.x = HISTSPEC_LEVEL; gridsize.y = 1; blocksize.x = HISTOGRAM_LEVEL; blocksize.y = 1; // 调用核函数,计算列最小值。 _findColumnMinKer<<<gridsize, blocksize>>>( devdiffmatrix, (unsigned int *)devcolmin); if (cudaGetLastError() != cudaSuccess) { cudaFree(alldevicepointer); return CUDA_ERROR; } // 计算灰度级之间映射关系。 _groupMappingLawKer<<<1, HISTSPEC_LEVEL>>>( (unsigned int *)devrowmin, (unsigned int *)devcolmin, devmaptable); if (cudaGetLastError() != cudaSuccess) { cudaFree(alldevicepointer); return CUDA_ERROR; } // 整理映射关系 _maptableJudgeKer<<<1, HISTOGRAM_LEVEL>>>( (unsigned int *)devmaptable, devcumhist); if (cudaGetLastError() != cudaSuccess) { cudaFree(alldevicepointer); return CUDA_ERROR; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) / (blocksize.y * 4); // 通过映射矩阵,得到输出图像。 _mapToOutimgKer<<<gridsize, blocksize>>>( insubimgCud, outsubimgCud,(unsigned int *)devmaptable); if (cudaGetLastError() != cudaSuccess) { cudaFree(alldevicepointer); return CUDA_ERROR; } // 释放 Device 上的临时空间 alldevicedata。 cudaFree(alldevicepointer); return NO_ERROR; } // Host 成员方法:HistogramSpecByHisto(根据参考直方图进行规定化) __host__ int HistogramSpec::HistogramSpecByHisto(Image *inimg, Image *outimg) { // 检查输入和输出图像是否为 NULL,如果为 NULL 直接报错返回。 if (inimg == NULL || outimg == NULL || refHisto == NULL) return NULL_POINTER; // 这一段代码进行图像的预处理工作。图像的预处理主要完成在 Device 内存上为输 // 入和输出图像准备内存空间,以便盛放数据。 int errcode; // 局部变量,错误码 // 将输入图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // 将输出图像拷贝入 Device 内存。 errcode = ImageBasicOp::copyToCurrentDevice(outimg); if (errcode != NO_ERROR) { // 如果输出图像无数据(故上面的拷贝函数会失败),则会创建一个和输入图 // 像的 ROI 子图像尺寸相同的图像。 errcode = ImageBasicOp::makeAtCurrentDevice( outimg, inimg->roiX2 - inimg->roiX1, inimg->roiY2 - inimg->roiY1); // 如果创建图像也操作失败,则说明操作彻底失败,报错退出。 if (errcode != NO_ERROR) return errcode; } // 提取输入图像的 ROI 子图像。 ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) return errcode; // 提取输出图像的 ROI 子图像。 ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) return errcode; // 根据子图像的大小对长,宽进行调整,选择长度小的长,宽进行子图像的统一 if (insubimgCud.imgMeta.width > 
outsubimgCud.imgMeta.width) insubimgCud.imgMeta.width = outsubimgCud.imgMeta.width; else outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width; if (insubimgCud.imgMeta.height > outsubimgCud.imgMeta.height) insubimgCud.imgMeta.height = outsubimgCud.imgMeta.height; else outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height; // 调用类 Histogram 中的方法,计算输入图像的直方图。 Histogram hist; unsigned int histogram[HISTOGRAM_LEVEL] = {0}; hist.histogram(inimg, histogram, true); // 计算原始直方图的累积直方图。 unsigned int cumsum = 0; float cumhist[HISTOGRAM_LEVEL] = {0}; for (int i = 0; i < HISTOGRAM_LEVEL; i++) { cumsum += histogram[i]; cumhist[i] = (float)cumsum / (inimg->height * inimg->width); } // 计算参考直方图的累积直方图。 float cumhistspec[HISTSPEC_LEVEL] = {0}; cumhistspec[0] = this->refHisto[0]; for (int i = 1; i < HISTSPEC_LEVEL; i++) { cumhistspec[i] = cumhistspec[i - 1] + this->refHisto[i]; } // 在 Device 上分配临时空间。一次申请所有空间,然后通过偏移索引各个数组。 // 包括原始累积直方图 devcumhist,均衡化后累积直方图 devcumhistequ,行最 // 小值矩阵 devrowmin,列最小值矩阵 colmin,映射矩阵 devmaptable,差值矩 // 阵 devdiffmatrix。 cudaError_t cudaerrcode; float *alldevicepointer; float *devcumhist, *devcumhistspec, *devdiffmatrix; unsigned int *devcolmin, *devrowmin, *devmaptable; cudaerrcode = cudaMalloc( (void **)&alldevicepointer, (3 * HISTOGRAM_LEVEL + 2 * HISTOGRAM_LEVEL + HISTOGRAM_LEVEL * HISTOGRAM_LEVEL) * sizeof (float)); if (cudaerrcode != cudaSuccess) { cudaFree(alldevicepointer); return cudaerrcode; } // 初始化所有 Device 上的内存空间。 cudaerrcode = cudaMemset( alldevicepointer, 0, (3 * HISTOGRAM_LEVEL + 2 * HISTOGRAM_LEVEL + HISTOGRAM_LEVEL * HISTOGRAM_LEVEL) * sizeof (float)); if (cudaerrcode != cudaSuccess) { cudaFree(alldevicepointer); return cudaerrcode; } // 通过偏移读取 devcumhist 内存空间。 devcumhist = alldevicepointer; // 将 Host 端计算的累积直方图 cumhist 拷贝到 Device 端。 cudaerrcode = cudaMemcpy(devcumhist, cumhist, HISTOGRAM_LEVEL * sizeof (float), cudaMemcpyHostToDevice); if (cudaerrcode != cudaSuccess) { cudaFree(alldevicepointer); return cudaerrcode; } // 通过偏移读取 devcumhistequ 内存空间。 devcumhistspec = alldevicepointer + HISTOGRAM_LEVEL; // 将 Host 端计算的参考直方图拷贝到 Device 端。 cudaerrcode = cudaMemcpy(devcumhistspec, cumhistspec, HISTSPEC_LEVEL * sizeof (float), cudaMemcpyHostToDevice); if (cudaerrcode != cudaSuccess) { cudaFree(alldevicepointer); return cudaerrcode; } // 通过偏移读取差值矩阵 devdiffmatrix 内存空间。 devdiffmatrix = alldevicepointer + 3 * HISTOGRAM_LEVEL + 2 * HISTOGRAM_LEVEL; // 通过偏移读取映射矩阵 devmaptable 内存空间,并将转换指针类型。 devmaptable = (unsigned int *)(alldevicepointer + 2 * (HISTOGRAM_LEVEL + HISTSPEC_LEVEL)); // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blocksize, gridsize; gridsize.x = HISTOGRAM_LEVEL; gridsize.y = 1; blocksize.x = HISTSPEC_LEVEL; blocksize.y = 1; // 调用核函数,计算差值矩阵。 _calDiffMatrixKer<<<gridsize, blocksize>>>( devdiffmatrix, devcumhist, devcumhistspec, devmaptable); if (cudaGetLastError() != cudaSuccess) { cudaFree(alldevicepointer); return CUDA_ERROR; } // 通过偏移读取映射矩阵 devrowmin 内存空间,并将转换指针类型。 devrowmin = (unsigned int *)(alldevicepointer + HISTOGRAM_LEVEL + HISTSPEC_LEVEL); // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 gridsize.x = HISTOGRAM_LEVEL; gridsize.y = 1; blocksize.x = HISTSPEC_LEVEL; blocksize.y = 1; // 调用核函数,计算行最小值。 _findRowMinKer<<<gridsize, blocksize>>>( devdiffmatrix, (unsigned int *)devrowmin); if (cudaGetLastError() != cudaSuccess) { cudaFree(alldevicepointer); return CUDA_ERROR; } // 通过偏移读取映射矩阵 devcolmin 内存空间,并将转换指针类型。 devcolmin = (unsigned int *)(alldevicepointer + 2 * HISTOGRAM_LEVEL + HISTSPEC_LEVEL); // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 gridsize.x = HISTSPEC_LEVEL; gridsize.y = 1; blocksize.x = 
HISTOGRAM_LEVEL; blocksize.y = 1; // 调用核函数,计算列最小值。 _findColumnMinKer<<<gridsize, blocksize>>>( devdiffmatrix, (unsigned int *)devcolmin); if (cudaGetLastError() != cudaSuccess) { cudaFree(alldevicepointer); return CUDA_ERROR; } // 计算灰度级之间映射关系。 _groupMappingLawKer<<<1, HISTSPEC_LEVEL>>>( (unsigned int *)devrowmin, (unsigned int *)devcolmin, devmaptable); if (cudaGetLastError() != cudaSuccess) { cudaFree(alldevicepointer); return CUDA_ERROR; } // 整理映射关系 _maptableJudgeKer<<<1, HISTOGRAM_LEVEL>>>( (unsigned int *)devmaptable, devcumhist); if (cudaGetLastError() != cudaSuccess) { cudaFree(alldevicepointer); return CUDA_ERROR; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) / (blocksize.y * 4); // 通过映射矩阵,得到输出图像。 _mapToOutimgKer<<<gridsize, blocksize>>>( insubimgCud, outsubimgCud,(unsigned int *)devmaptable); if (cudaGetLastError() != cudaSuccess) { cudaFree(alldevicepointer); return CUDA_ERROR; } // 释放 Device 上的临时空间 alldevicedata。 cudaFree(alldevicepointer); return NO_ERROR; }
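/*
 * The kernels above implement histogram specification with a group mapping
 * law (GML): a |cumhist_src[i] - cumhist_ref[j]| difference matrix is built,
 * its per-row and per-column minima are located with a bitonic-style
 * reduction, and the resulting gray-level map is applied to the image.  For
 * reference, here is a host-side sketch of the simpler single-mapping-law
 * criterion (each source level maps to the reference level with the closest
 * cumulative value).  It illustrates the matching rule only and is not a
 * drop-in replacement for the GML kernels.
 */
#include <cmath>
#include <vector>

inline std::vector<unsigned int> build_spec_maptable_sketch(
    const std::vector<float>& cumhist_src,    // cumulative histogram of the input image
    const std::vector<float>& cumhist_ref)    // cumulative histogram of the reference
{
    std::vector<unsigned int> maptable(cumhist_src.size(), 0u);
    if (cumhist_ref.empty())
        return maptable;

    for (size_t i = 0; i < cumhist_src.size(); ++i)
    {
        float        best_diff = std::fabs(cumhist_src[i] - cumhist_ref[0]);
        unsigned int best_j    = 0u;

        for (size_t j = 1; j < cumhist_ref.size(); ++j)
        {
            const float diff = std::fabs(cumhist_src[i] - cumhist_ref[j]);
            if (diff < best_diff) { best_diff = diff; best_j = (unsigned int)j; }
        }
        maptable[i] = best_j;    // output gray level for input level i
    }
    return maptable;
}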
#define MOD_NAMESPACE #define MOD_NAMESPACE_BEGIN namespace bowtie2 { namespace driver { #define MOD_NAMESPACE_END }} #define MOD_NAMESPACE_NAME bowtie2::driver #include <nvBowtie/bowtie2/cuda/compute_thread.h> #include <nvBowtie/bowtie2/cuda/defs.h> #include <nvBowtie/bowtie2/cuda/fmindex_def.h> #include <nvBowtie/bowtie2/cuda/params.h> #include <nvBowtie/bowtie2/cuda/stats.h> #include <nvBowtie/bowtie2/cuda/persist.h> #include <nvBowtie/bowtie2/cuda/scoring.h> #include <nvBowtie/bowtie2/cuda/mapq.h> #include <nvBowtie/bowtie2/cuda/aligner.h> #include <nvBowtie/bowtie2/cuda/aligner_inst.h> #include <nvBowtie/bowtie2/cuda/input_thread.h> #include <nvbio/basic/cuda/arch.h> #include <nvbio/basic/timer.h> #include <nvbio/basic/console.h> #include <nvbio/basic/options.h> #include <nvbio/basic/threads.h> #include <nvbio/basic/atomics.h> #include <nvbio/basic/html.h> #include <nvbio/basic/version.h> #include <nvbio/fmindex/bwt.h> #include <nvbio/fmindex/ssa.h> #include <nvbio/fmindex/fmindex.h> #include <nvbio/fmindex/fmindex_device.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/scan.h> #include <thrust/sort.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <vector> #include <algorithm> #include <numeric> #include <functional> namespace nvbio { namespace bowtie2 { namespace cuda { ComputeThreadSE::ComputeThreadSE( const uint32 _thread_id, const uint32 _device_id, const io::SequenceData& _reference_data, const io::FMIndexData& _driver_data, const std::map<std::string,std::string>& _options, const Params& _params, Stats& _stats) : thread_id( _thread_id ), device_id( _device_id ), reference_data_host( _reference_data ), driver_data_host( _driver_data ), options( _options ), input_thread( NULL ), output_file( NULL ), params( _params ), stats( _stats ) { log_visible(stderr, "[%u] nvBowtie cuda driver created on device %u\n", thread_id, device_id); // initialize the selected device cudaSetDevice( device_id ); cudaSetDeviceFlags( cudaDeviceMapHost | cudaDeviceLmemResizeToMax ); aligner = SharedPointer<Aligner>( new Aligner() ); Timer timer; timer.start(); const bool need_reverse = (params.allow_sub == 0 && USE_REVERSE_INDEX) || (params.allow_sub == 1 && params.subseed_len == 0 && params.mode == BestMappingApprox); reference_data_device.reset( new io::SequenceDataDevice( reference_data_host ) ); driver_data_device.reset( new io::FMIndexDataDevice( driver_data_host, io::FMIndexDataDevice::FORWARD | (need_reverse ? io::FMIndexDataDevice::REVERSE : 0u) | io::FMIndexDataDevice::SA ) ); timer.stop(); log_stats(stderr, "[%u] allocated device driver data (%.2f GB - %.1fs)\n", thread_id, float(driver_data_device->allocated()) / 1.0e9f, timer.seconds() ); } // gauge the favourite batch size // uint32 ComputeThreadSE::gauge_batch_size() { // switch to the selected device cudaSetDevice( device_id ); uint32 BATCH_SIZE; for (BATCH_SIZE = params.max_batch_size*1024; BATCH_SIZE >= 16*1024; BATCH_SIZE /= 2) { std::pair<uint64,uint64> mem_stats; // gauge how much memory we'd need if (aligner->init_alloc( BATCH_SIZE, params, kSingleEnd, false, &mem_stats ) == true) { log_stats(stderr, "[%u] estimated allocation sizes: HOST %lu MB, DEVICE %lu MB)\n", thread_id, mem_stats.first / (1024*1024), mem_stats.second / (1024*1024) ); break; } } return BATCH_SIZE; } void ComputeThreadSE::do_run() { log_visible(stderr, "[%u] nvBowtie cuda driver... 
started\n", thread_id); // switch to the selected device cudaSetDevice( device_id ); // build an empty report FILE* html_output = (params.report != std::string("")) ? fopen( params.report.c_str(), "w" ) : NULL; if (html_output) { // encapsulate the document { html::html_object html( html_output ); { const char* meta_list = "<meta http-equiv=\"refresh\" content=\"1\" />"; { html::header_object hd( html_output, "Bowtie2 Report", html::style(), meta_list ); } { html::body_object body( html_output ); } } } fclose( html_output ); } Timer timer; io::SequenceDataDevice& reference_data = *reference_data_device.get(); io::FMIndexDataDevice& driver_data = *driver_data_device.get(); log_stats(stderr, "[%u] allocated device driver data (%.2f GB - %.1fs)\n", thread_id, float(driver_data.allocated()) / 1.0e9f, timer.seconds() ); typedef FMIndexDef::type fm_index_type; fm_index_type fmi = driver_data.index(); fm_index_type rfmi = driver_data.rindex(); size_t free, total; cudaMemGetInfo(&free, &total); log_stats(stderr, "[%u] device has %ld of %ld MB free\n", thread_id, free/1024/1024, total/1024/1024); const uint32 BATCH_SIZE = input_thread->batch_size(); log_stats(stderr, "[%u] processing reads in batches of %uK\n", thread_id, BATCH_SIZE/1024); // setup the output file aligner->output_file = output_file; // initialize the aligner if (aligner->init( thread_id, BATCH_SIZE, params, kSingleEnd ) == false) return; nvbio::cuda::check_error("cuda initializations"); cudaMemGetInfo(&free, &total); log_stats(stderr, "[%u] ready to start processing: device has %ld MB free\n", thread_id, free/1024/1024); Timer global_timer; global_timer.start(); UberScoringScheme& scoring_scheme = params.scoring_scheme; uint32 n_reads = 0; io::SequenceDataHost local_read_data_host; io::HostOutputBatchSE local_output_batch_host; // loop through the batches of reads while (1) { uint32 read_begin; Timer io_timer; io_timer.start(); io::SequenceDataHost* read_data_host = input_thread->next( &read_begin ); io_timer.stop(); stats.read_io.add( read_data_host ? 
read_data_host->size() : 0u, io_timer.seconds() ); if (read_data_host == NULL) { log_verbose(stderr, "[%u] end of input reached\n", thread_id); break; } if (read_data_host->max_sequence_len() > Aligner::MAX_READ_LEN) { log_error(stderr, "[%u] unsupported read length %u (maximum is %u)\n", thread_id, read_data_host->max_sequence_len(), Aligner::MAX_READ_LEN ); break; } // make a local copy of the host batch local_read_data_host = *read_data_host; // mark this set as ready to be reused input_thread->release( read_data_host ); Timer timer; timer.start(); //aligner.output_file->start_batch( &local_read_data_host ); local_output_batch_host.read_data = &local_read_data_host; io::SequenceDataDevice read_data( local_read_data_host ); cudaThreadSynchronize(); timer.stop(); stats.read_HtoD.add( read_data.size(), timer.seconds() ); const uint32 count = read_data.size(); log_info(stderr, "[%u] aligning reads [%u, %u]\n", thread_id, read_begin, read_begin + count - 1u); log_verbose(stderr, "[%u] %u reads\n", thread_id, count); log_verbose(stderr, "[%u] %.3f M bps (%.1f MB)\n", thread_id, float(read_data.bps())/1.0e6f, float(read_data.words()*sizeof(uint32)+read_data.bps()*sizeof(char))/float(1024*1024)); log_verbose(stderr, "[%u] %.1f bps/read (min: %u, max: %u)\n", thread_id, float(read_data.bps())/float(read_data.size()), read_data.min_sequence_len(), read_data.max_sequence_len()); if (params.mode == AllMapping) { if (params.scoring_mode == EditDistanceMode) { all_ed( *aligner, params, fmi, rfmi, scoring_scheme, reference_data, driver_data, read_data, local_output_batch_host, stats ); } else { all_sw( *aligner, params, fmi, rfmi, scoring_scheme, reference_data, driver_data, read_data, local_output_batch_host, stats ); } } else { if (params.scoring_mode == EditDistanceMode) { best_approx_ed( *aligner, params, fmi, rfmi, scoring_scheme, reference_data, driver_data, read_data, local_output_batch_host, stats ); } else { best_approx_sw( *aligner, params, fmi, rfmi, scoring_scheme, reference_data, driver_data, read_data, local_output_batch_host, stats ); } } global_timer.stop(); stats.global_time += global_timer.seconds(); global_timer.start(); //aligner->output_file->end_batch(); // increase the total reads counter n_reads += count; log_verbose(stderr, "[%u] %.1f K reads/s\n", thread_id, 1.0e-3f * float(n_reads) / stats.global_time); } global_timer.stop(); stats.global_time += global_timer.seconds(); stats.n_reads = n_reads; if (params.report.length()) nvbio::bowtie2::cuda::generate_device_report( thread_id, stats, stats.mate1, params.report.c_str() ); log_visible(stderr, "[%u] nvBowtie cuda driver... 
done\n", thread_id); log_stats(stderr, "[%u] total : %.2f sec (avg: %.1fK reads/s).\n", thread_id, stats.global_time, 1.0e-3f * float(n_reads)/stats.global_time); log_stats(stderr, "[%u] mapping : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.map.time, 1.0e-6f * stats.map.avg_speed(), 1.0e-6f * stats.map.max_speed, stats.map.device_time); log_stats(stderr, "[%u] selecting : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.select.time, 1.0e-6f * stats.select.avg_speed(), 1.0e-6f * stats.select.max_speed, stats.select.device_time); log_stats(stderr, "[%u] sorting : %.2f sec (avg: %.3fM seeds/s, max: %.3fM seeds/s, %.2f device sec).\n", thread_id, stats.sort.time, 1.0e-6f * stats.sort.avg_speed(), 1.0e-6f * stats.sort.max_speed, stats.sort.device_time); log_stats(stderr, "[%u] scoring : %.2f sec (avg: %.3fM seeds/s, max: %.3fM seeds/s, %.2f device sec).\n", thread_id, stats.score.time, 1.0e-6f * stats.score.avg_speed(), 1.0e-6f * stats.score.max_speed, stats.score.device_time); log_stats(stderr, "[%u] locating : %.2f sec (avg: %.3fM seeds/s, max: %.3fM seeds/s, %.2f device sec).\n", thread_id, stats.locate.time, 1.0e-6f * stats.locate.avg_speed(), 1.0e-6f * stats.locate.max_speed, stats.locate.device_time); log_stats(stderr, "[%u] backtracking : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.backtrack.time, 1.0e-6f * stats.backtrack.avg_speed(), 1.0e-6f * stats.backtrack.max_speed, stats.backtrack.device_time); log_stats(stderr, "[%u] finalizing : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.finalize.time, 1.0e-6f * stats.finalize.avg_speed(), 1.0e-6f * stats.finalize.max_speed, stats.finalize.device_time); log_stats(stderr, "[%u] results DtoH : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s).\n", thread_id, stats.alignments_DtoH.time, 1.0e-6f * stats.alignments_DtoH.avg_speed(), 1.0e-6f * stats.alignments_DtoH.max_speed); log_stats(stderr, "[%u] results I/O : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s).\n", thread_id, stats.io.time, 1.0e-6f * stats.io.avg_speed(), 1.0e-6f * stats.io.max_speed); log_stats(stderr, "[%u] reads HtoD : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s).\n", thread_id, stats.read_HtoD.time, 1.0e-6f * stats.read_HtoD.avg_speed(), 1.0e-6f * stats.read_HtoD.max_speed); log_stats(stderr, "[%u] reads I/O : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s).\n", thread_id, stats.read_io.time, 1.0e-6f * stats.read_io.avg_speed(), 1.0e-6f * stats.read_io.max_speed); } void ComputeThreadSE::run() { try { do_run(); } catch (nvbio::cuda_error &e) { log_error(stderr, "caught a nvbio::cuda_error exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (nvbio::bad_alloc &e) { log_error(stderr, "caught a nvbio::bad_alloc exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (nvbio::logic_error &e) { log_error(stderr, "caught a nvbio::logic_error exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (nvbio::runtime_error &e) { log_error(stderr, "caught a nvbio::runtime_error exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (std::bad_alloc &e) { log_error(stderr, "caught a std::bad_alloc exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (std::logic_error &e) { log_error(stderr, "caught a std::logic_error exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (std::runtime_error &e) { log_error(stderr, "caught a std::runtime_error exception:\n"); 
log_error(stderr, " %s\n", e.what()); } catch (...) { log_error(stderr, "caught an unknown exception!\n"); } } ComputeThreadPE::ComputeThreadPE( const uint32 _thread_id, const uint32 _device_id, const io::SequenceData& _reference_data, const io::FMIndexData& _driver_data, const std::map<std::string,std::string>& _options, const Params& _params, Stats& _stats) : thread_id( _thread_id ), device_id( _device_id ), reference_data_host( _reference_data ), driver_data_host( _driver_data ), options( _options ), input_thread( NULL ), output_file( NULL ), params( _params ), stats( _stats ) { log_visible(stderr, "[%u] nvBowtie cuda driver created on device %u\n", thread_id, device_id); // initialize the selected device cudaSetDevice( device_id ); cudaSetDeviceFlags( cudaDeviceMapHost | cudaDeviceLmemResizeToMax ); aligner = SharedPointer<Aligner>( new Aligner() ); Timer timer; timer.start(); const bool need_reverse = (params.allow_sub == 0 && USE_REVERSE_INDEX) || (params.allow_sub == 1 && params.subseed_len == 0 && params.mode == BestMappingApprox); reference_data_device.reset( new io::SequenceDataDevice( reference_data_host ) ); driver_data_device.reset( new io::FMIndexDataDevice( driver_data_host, io::FMIndexDataDevice::FORWARD | (need_reverse ? io::FMIndexDataDevice::REVERSE : 0u) | io::FMIndexDataDevice::SA ) ); timer.stop(); log_stats(stderr, "[%u] allocated device driver data (%.2f GB - %.1fs)\n", thread_id, float(driver_data_device->allocated()) / 1.0e9f, timer.seconds() ); } // gauge the favourite batch size // uint32 ComputeThreadPE::gauge_batch_size() { // switch to the selected device cudaSetDevice( device_id ); uint32 BATCH_SIZE; for (BATCH_SIZE = params.max_batch_size*1024; BATCH_SIZE >= 16*1024; BATCH_SIZE /= 2) { std::pair<uint64,uint64> mem_stats; // gauge how much memory we'd need if (aligner->init_alloc( BATCH_SIZE, params, kPairedEnds, false, &mem_stats ) == true) { log_stats(stderr, "[%u] estimated allocation sizes: HOST %lu MB, DEVICE %lu MB)\n", thread_id, mem_stats.first / (1024*1024), mem_stats.second / (1024*1024) ); break; } } return BATCH_SIZE; } void ComputeThreadPE::do_run() { log_visible(stderr, "[%u] nvBowtie cuda driver... started\n", thread_id); // switch to the selected device cudaSetDevice( device_id ); // build an empty report FILE* html_output = (params.report != std::string("")) ? 
fopen( params.report.c_str(), "w" ) : NULL; if (html_output) { // encapsulate the document { html::html_object html( html_output ); { const char* meta_list = "<meta http-equiv=\"refresh\" content=\"1\" />"; { html::header_object hd( html_output, "Bowtie2 Report", html::style(), meta_list ); } { html::body_object body( html_output ); } } } fclose( html_output ); } Timer timer; io::SequenceDataDevice& reference_data = *reference_data_device.get(); io::FMIndexDataDevice& driver_data = *driver_data_device.get(); typedef FMIndexDef::type fm_index_type; fm_index_type fmi = driver_data.index(); fm_index_type rfmi = driver_data.rindex(); size_t free, total; cudaMemGetInfo(&free, &total); log_stats(stderr, "[%u] device has %ld of %ld MB free\n", thread_id, free/1024/1024, total/1024/1024); const uint32 BATCH_SIZE = input_thread->batch_size(); log_stats(stderr, "[%u] processing reads in batches of %uK\n", thread_id, BATCH_SIZE/1024); // setup the output file aligner->output_file = output_file; // initialize the aligner if (aligner->init( thread_id, BATCH_SIZE, params, kPairedEnds ) == false) return; nvbio::cuda::check_error("cuda initializations"); cudaMemGetInfo(&free, &total); log_stats(stderr, "[%u] ready to start processing: device has %ld MB free\n", thread_id, free/1024/1024); size_t stack_size_limit; cudaDeviceGetLimit( &stack_size_limit, cudaLimitStackSize ); log_debug(stderr, "[%u] max cuda stack size: %u\n", thread_id, stack_size_limit); Timer global_timer; global_timer.start(); UberScoringScheme& scoring_scheme = params.scoring_scheme; uint32 n_reads = 0; io::SequenceDataHost local_read_data_host1; io::SequenceDataHost local_read_data_host2; io::HostOutputBatchPE local_output_batch_host; // loop through the batches of reads while (1) { uint32 read_begin; Timer io_timer; io_timer.start(); std::pair<io::SequenceDataHost*,io::SequenceDataHost*> read_data_host_pair = input_thread->next( &read_begin ); io::SequenceDataHost* read_data_host1 = read_data_host_pair.first; io::SequenceDataHost* read_data_host2 = read_data_host_pair.second; io_timer.stop(); stats.read_io.add( read_data_host1 ? 
read_data_host1->size() : 0u, io_timer.seconds() ); if (read_data_host1 == NULL || read_data_host2 == NULL) { log_verbose(stderr, "[%u] end of input reached\n", thread_id); break; } if ((read_data_host1->max_sequence_len() > Aligner::MAX_READ_LEN) || (read_data_host2->max_sequence_len() > Aligner::MAX_READ_LEN)) { log_error(stderr, "[%u] unsupported read length %u (maximum is %u)\n", thread_id, nvbio::max(read_data_host1->max_sequence_len(), read_data_host2->max_sequence_len()), Aligner::MAX_READ_LEN ); break; } // make a local copy of the host batch local_read_data_host1 = *read_data_host1; local_read_data_host2 = *read_data_host2; // mark this set as ready to be reused input_thread->release( read_data_host_pair ); Timer timer; timer.start(); //aligner.output_file->start_batch( &local_read_data_host1, &local_read_data_host2 ); local_output_batch_host.read_data[0] = &local_read_data_host1; local_output_batch_host.read_data[1] = &local_read_data_host2; io::SequenceDataDevice read_data1( local_read_data_host1/*, io::ReadDataDevice::READS | io::ReadDataDevice::QUALS*/ ); io::SequenceDataDevice read_data2( local_read_data_host2/*, io::ReadDataDevice::READS | io::ReadDataDevice::QUALS*/ ); timer.stop(); stats.read_HtoD.add( read_data1.size(), timer.seconds() ); const uint32 count = read_data1.size(); log_info(stderr, "[%u] aligning reads [%u, %u]\n", thread_id, read_begin, read_begin + count - 1u); log_verbose(stderr, "[%u] %u reads\n", thread_id, count); log_verbose(stderr, "[%u] %.3f M bps (%.1f MB)\n", thread_id, float(read_data1.bps() + read_data2.bps())/1.0e6f, float(read_data1.words()*sizeof(uint32)+read_data1.bps()*sizeof(char))/float(1024*1024)+ float(read_data2.words()*sizeof(uint32)+read_data2.bps()*sizeof(char))/float(1024*1024)); log_verbose(stderr, "[%u] %.1f bps/read (min: %u, max: %u)\n", thread_id, float(read_data1.bps()+read_data2.bps())/float(read_data1.size()+read_data2.size()), nvbio::min( read_data1.min_sequence_len(), read_data2.min_sequence_len() ), nvbio::max( read_data1.max_sequence_len(), read_data2.max_sequence_len() )); if (params.mode == AllMapping) { log_error(stderr, "[%u] paired-end all-mapping is not yet supported!\n", thread_id); exit(1); } else { if (params.scoring_mode == EditDistanceMode) { best_approx_ed( *aligner, params, fmi, rfmi, scoring_scheme, reference_data, driver_data, read_data1, read_data2, local_output_batch_host, stats ); } else { best_approx_sw( *aligner, params, fmi, rfmi, scoring_scheme, reference_data, driver_data, read_data1, read_data2, local_output_batch_host, stats ); } } global_timer.stop(); stats.global_time += global_timer.seconds(); global_timer.start(); //aligner.output_file->end_batch(); // increase the total reads counter n_reads += count; log_verbose(stderr, "[%u] %.1f K reads/s\n", thread_id, 1.0e-3f * float(n_reads) / stats.global_time); } global_timer.stop(); stats.global_time += global_timer.seconds(); if (params.report.length()) nvbio::bowtie2::cuda::generate_device_report( thread_id, stats, stats.concordant, params.report.c_str() ); log_visible(stderr, "[%u] nvBowtie cuda driver... 
done\n", thread_id); log_stats(stderr, "[%u] total : %.2f sec (avg: %.1fK reads/s).\n", thread_id, stats.global_time, 1.0e-3f * float(n_reads)/stats.global_time); log_stats(stderr, "[%u] mapping : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.map.time, 1.0e-6f * stats.map.avg_speed(), 1.0e-6f * stats.map.max_speed, stats.map.device_time); log_stats(stderr, "[%u] scoring : %.2f sec (avg: %.1fM reads/s, max: %.3fM reads/s, %.2f device sec).).\n", thread_id, stats.scoring_pipe.time, 1.0e-6f * stats.scoring_pipe.avg_speed(), 1.0e-6f * stats.scoring_pipe.max_speed, stats.scoring_pipe.device_time); log_stats(stderr, "[%u] selecting : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.select.time, 1.0e-6f * stats.select.avg_speed(), 1.0e-6f * stats.select.max_speed, stats.select.device_time); log_stats(stderr, "[%u] sorting : %.2f sec (avg: %.3fM seeds/s, max: %.3fM seeds/s, %.2f device sec).\n", thread_id, stats.sort.time, 1.0e-6f * stats.sort.avg_speed(), 1.0e-6f * stats.sort.max_speed, stats.sort.device_time); log_stats(stderr, "[%u] scoring(a) : %.2f sec (avg: %.3fM seeds/s, max: %.3fM seeds/s, %.2f device sec).\n", thread_id, stats.score.time, 1.0e-6f * stats.score.avg_speed(), 1.0e-6f * stats.score.max_speed, stats.score.device_time); log_stats(stderr, "[%u] scoring(o) : %.2f sec (avg: %.3fM seeds/s, max: %.3fM seeds/s, %.2f device sec).\n", thread_id, stats.opposite_score.time, 1.0e-6f * stats.opposite_score.avg_speed(), 1.0e-6f * stats.opposite_score.max_speed, stats.opposite_score.device_time); log_stats(stderr, "[%u] locating : %.2f sec (avg: %.3fM seeds/s, max: %.3fM seeds/s, %.2f device sec).\n", thread_id, stats.locate.time, 1.0e-6f * stats.locate.avg_speed(), 1.0e-6f * stats.locate.max_speed, stats.locate.device_time); log_stats(stderr, "[%u] backtracing(a) : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.backtrack.time, 1.0e-6f * stats.backtrack.avg_speed(), 1.0e-6f * stats.backtrack.max_speed, stats.backtrack.device_time); log_stats(stderr, "[%u] backtracing(o) : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.backtrack_opposite.time, 1.0e-6f * stats.backtrack_opposite.avg_speed(), 1.0e-6f * stats.backtrack_opposite.max_speed, stats.backtrack_opposite.device_time); log_stats(stderr, "[%u] finalizing : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s, %.2f device sec).\n", thread_id, stats.finalize.time, 1.0e-6f * stats.finalize.avg_speed(), 1.0e-6f * stats.finalize.max_speed, stats.finalize.device_time); log_stats(stderr, "[%u] results DtoH : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s).\n", thread_id, stats.alignments_DtoH.time, 1.0e-6f * stats.alignments_DtoH.avg_speed(), 1.0e-6f * stats.alignments_DtoH.max_speed); log_stats(stderr, "[%u] results I/O : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s).\n", thread_id, stats.io.time, 1.0e-6f * stats.io.avg_speed(), 1.0e-6f * stats.io.max_speed); log_stats(stderr, "[%u] reads HtoD : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s).\n", thread_id, stats.read_HtoD.time, 1.0e-6f * stats.read_HtoD.avg_speed(), 1.0e-6f * stats.read_HtoD.max_speed); log_stats(stderr, "[%u] reads I/O : %.2f sec (avg: %.3fM reads/s, max: %.3fM reads/s).\n", thread_id, stats.read_io.time, 1.0e-6f * stats.read_io.avg_speed(), 1.0e-6f * stats.read_io.max_speed); } void ComputeThreadPE::run() { try { do_run(); } catch (nvbio::cuda_error &e) { log_error(stderr, "caught a nvbio::cuda_error 
exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (nvbio::bad_alloc &e) { log_error(stderr, "caught a nvbio::bad_alloc exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (nvbio::logic_error &e) { log_error(stderr, "caught a nvbio::logic_error exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (nvbio::runtime_error &e) { log_error(stderr, "caught a nvbio::runtime_error exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (std::bad_alloc &e) { log_error(stderr, "caught a std::bad_alloc exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (std::logic_error &e) { log_error(stderr, "caught a std::logic_error exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (std::runtime_error &e) { log_error(stderr, "caught a std::runtime_error exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (...) { log_error(stderr, "caught an unknown exception!\n"); } } } // namespace cuda } // namespace bowtie2 } // namespace nvbio
extern "C" { #include "ccv.h" } #include <ctype.h> #define CASE_TESTS // so that we don't include public available methods #include "../lib/cuda/cwc_convnet.cu" #include "../lib/ccv_convnet.c" extern "C" void cwc_bench_runtime(ccv_convnet_t* convnet, ccv_array_t* categorizeds, ccv_convnet_train_param_t params) { int batch = params.mini_batch; int i; const int device_id = 0; _cwc_convnet_alloc_reserved_both(convnet, batch, 0, params.layer_params); cwc_convnet_context_t* context = GPU(convnet)->contexts; for (i = 0; i < convnet->rows * convnet->cols * convnet->channels; i++) convnet->mean_activity->data.f32[i] = 128; cwc_convnet_batch_formation(0, categorizeds, convnet->mean_activity, 0, 0, 0, 0, 0, ccv_size(225, 225), 225, 225, convnet->rows, convnet->cols, convnet->channels, 1000, 0, batch, 0, batch, context->host[device_id].input, context->host[device_id].c); cudaMemcpy(context->device[device_id].input, context->host[device_id].input, sizeof(float) * convnet->rows * convnet->cols * convnet->channels * batch, cudaMemcpyHostToDevice); cudaEvent_t overallStart; cudaEvent_t overallStop; cudaEventCreate(&overallStart); cudaEventCreate(&overallStop); cudaEvent_t start; cudaEvent_t stop; cudaEventCreate(&start); cudaEventCreate(&stop); float elapsed_time; EXTRA(GPU(convnet)->device[device_id].layers + 0)->vary.convolutional.forward.x = 4; EXTRA(GPU(convnet)->device[device_id].layers + 0)->vary.convolutional.forward.y = 8; EXTRA(GPU(convnet)->device[device_id].layers + 0)->vary.convolutional.forward.z = 32; EXTRA(GPU(convnet)->device[device_id].layers + 3)->vary.convolutional.forward.x = 4; EXTRA(GPU(convnet)->device[device_id].layers + 3)->vary.convolutional.forward.y = 8; EXTRA(GPU(convnet)->device[device_id].layers + 3)->vary.convolutional.forward.z = 32; EXTRA(GPU(convnet)->device[device_id].layers + 6)->vary.convolutional.forward.x = 4; EXTRA(GPU(convnet)->device[device_id].layers + 6)->vary.convolutional.forward.y = 8; EXTRA(GPU(convnet)->device[device_id].layers + 6)->vary.convolutional.forward.z = 32; EXTRA(GPU(convnet)->device[device_id].layers + 7)->vary.convolutional.forward.x = 4; EXTRA(GPU(convnet)->device[device_id].layers + 7)->vary.convolutional.forward.y = 8; EXTRA(GPU(convnet)->device[device_id].layers + 7)->vary.convolutional.forward.z = 32; EXTRA(GPU(convnet)->device[device_id].layers + 8)->vary.convolutional.forward.x = 4; EXTRA(GPU(convnet)->device[device_id].layers + 8)->vary.convolutional.forward.y = 8; EXTRA(GPU(convnet)->device[device_id].layers + 8)->vary.convolutional.forward.z = 32; cudaEventRecord(overallStart, context->device[device_id].data_stream); for (i = 0; i < convnet->count; i++) { ccv_convnet_layer_t* layer = GPU(convnet)->device[device_id].layers + i; cudaEventRecord(start, context->device[device_id].data_stream); _cwc_convnet_layer_forward_propagate(layer, device_id, i, layer->input.matrix.rows, layer->input.matrix.cols, batch, 0, i == 0 ? 
context->device[device_id].input : GPU(convnet)->device[device_id].forwards[i - 1], GPU(convnet)->device[device_id].forwards[i], GPU(convnet)->device[device_id].denoms[i], GPU(convnet)->device[device_id].unit, context); cudaEventRecord(stop, context->device[device_id].data_stream); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time, start, stop); if (layer->type == CCV_CONVNET_CONVOLUTIONAL) printf("%d %d %d, elapsed time for layer %d fprop: %f milliseconds\n", EXTRA(layer)->vary.convolutional.forward.x, EXTRA(layer)->vary.convolutional.forward.y, EXTRA(layer)->vary.convolutional.forward.z, i + 1, elapsed_time); else printf("elapsed time for layer %d fprop: %f milliseconds\n", i + 1, elapsed_time); } cudaEventRecord(overallStop, context->device[device_id].data_stream); cudaEventSynchronize(overallStop); cudaEventElapsedTime(&elapsed_time, overallStart, overallStop); printf("forward pass %f milliseconds\n", elapsed_time); EXTRA(GPU(convnet)->device[device_id].layers + 0)->vary.convolutional.backward.coefficient.x = 1; EXTRA(GPU(convnet)->device[device_id].layers + 0)->vary.convolutional.backward.coefficient.y = 3; EXTRA(GPU(convnet)->device[device_id].layers + 0)->vary.convolutional.backward.coefficient.z = 1; EXTRA(GPU(convnet)->device[device_id].layers + 3)->vary.convolutional.backward.coefficient.x = 4; EXTRA(GPU(convnet)->device[device_id].layers + 3)->vary.convolutional.backward.coefficient.y = 4; EXTRA(GPU(convnet)->device[device_id].layers + 3)->vary.convolutional.backward.coefficient.z = 16; EXTRA(GPU(convnet)->device[device_id].layers + 3)->vary.convolutional.backward.gradient.x = 4; EXTRA(GPU(convnet)->device[device_id].layers + 3)->vary.convolutional.backward.gradient.y = 6; EXTRA(GPU(convnet)->device[device_id].layers + 3)->vary.convolutional.backward.gradient.z = 24; EXTRA(GPU(convnet)->device[device_id].layers + 6)->vary.convolutional.backward.coefficient.x = 8; EXTRA(GPU(convnet)->device[device_id].layers + 6)->vary.convolutional.backward.coefficient.y = 3; EXTRA(GPU(convnet)->device[device_id].layers + 6)->vary.convolutional.backward.coefficient.z = 32; EXTRA(GPU(convnet)->device[device_id].layers + 6)->vary.convolutional.backward.gradient.x = 4; EXTRA(GPU(convnet)->device[device_id].layers + 6)->vary.convolutional.backward.gradient.y = 8; EXTRA(GPU(convnet)->device[device_id].layers + 6)->vary.convolutional.backward.gradient.z = 32; EXTRA(GPU(convnet)->device[device_id].layers + 7)->vary.convolutional.backward.coefficient.x = 8; EXTRA(GPU(convnet)->device[device_id].layers + 7)->vary.convolutional.backward.coefficient.y = 3; EXTRA(GPU(convnet)->device[device_id].layers + 7)->vary.convolutional.backward.coefficient.z = 32; EXTRA(GPU(convnet)->device[device_id].layers + 7)->vary.convolutional.backward.gradient.x = 4; EXTRA(GPU(convnet)->device[device_id].layers + 7)->vary.convolutional.backward.gradient.y = 8; EXTRA(GPU(convnet)->device[device_id].layers + 7)->vary.convolutional.backward.gradient.z = 32; EXTRA(GPU(convnet)->device[device_id].layers + 8)->vary.convolutional.backward.coefficient.x = 8; EXTRA(GPU(convnet)->device[device_id].layers + 8)->vary.convolutional.backward.coefficient.y = 4; EXTRA(GPU(convnet)->device[device_id].layers + 8)->vary.convolutional.backward.coefficient.z = 32; EXTRA(GPU(convnet)->device[device_id].layers + 8)->vary.convolutional.backward.gradient.x = 4; EXTRA(GPU(convnet)->device[device_id].layers + 8)->vary.convolutional.backward.gradient.y = 8; EXTRA(GPU(convnet)->device[device_id].layers + 
8)->vary.convolutional.backward.gradient.z = 32; float* a = 0; cudaMalloc(&a, sizeof(float) * 1000 * batch); cudaMemcpy(a, GPU(convnet)->device[device_id].forwards[convnet->count - 1], sizeof(float) * 1000 * batch, cudaMemcpyDeviceToDevice); cudaEventRecord(overallStart, context->device[device_id].data_stream); for (i = convnet->count - 1; i >= 0; i--) { ccv_convnet_layer_t* layer = GPU(convnet)->device[device_id].layers + i; ccv_convnet_layer_t* configuration = GPU(convnet)->device[device_id].configurations + i; cudaEventRecord(start, context->device[device_id].data_stream); switch (layer->type) { case CCV_CONVNET_CONVOLUTIONAL: if (context->device[device_id].dor[i]) { int out_rows, out_cols, out_partition; ccv_convnet_make_output(layer, layer->input.matrix.rows, layer->input.matrix.cols, &out_rows, &out_cols, &out_partition); _cwc_kern_mute_neuron <<<out_rows * out_cols * layer->net.convolutional.count, batch, 0, context->device[device_id].data_stream>>> (i == convnet->count - 1 ? a : GPU(convnet)->device[device_id].backwards[i + 1], context->device[device_id].dor[i]); } cwc_convnet_convolutional_backward_propagate(layer, batch, i == convnet->count - 1 ? a : GPU(convnet)->device[device_id].backwards[i + 1], GPU(convnet)->device[device_id].forwards[i], i > 0 ? GPU(convnet)->device[device_id].forwards[i - 1] : context->device[device_id].input, GPU(convnet)->device[device_id].backwards[i], configuration, GPU(convnet)->device[device_id].scratch, GPU(convnet)->device[device_id].unit, context->device[device_id].data_stream, context->device[device_id].data_cublas); assert(cudaGetLastError() == cudaSuccess); break; case CCV_CONVNET_FULL_CONNECT: if (context->device[device_id].dor[i]) _cwc_kern_mute_neuron <<<layer->net.full_connect.count, batch, 0, context->device[device_id].data_stream>>> (i == convnet->count - 1 ? a : GPU(convnet)->device[device_id].backwards[i + 1], context->device[device_id].dor[i]); cwc_convnet_full_connect_backward_propagate(layer, batch, i == convnet->count - 1 ? a : GPU(convnet)->device[device_id].backwards[i + 1], GPU(convnet)->device[device_id].forwards[i], i > 0 ? GPU(convnet)->device[device_id].forwards[i - 1] : context->device[device_id].input, GPU(convnet)->device[device_id].backwards[i], GPU(convnet)->device[device_id].unit, configuration->w, configuration->bias, context->device[device_id].data_stream, context->device[device_id].data_cublas); assert(cudaGetLastError() == cudaSuccess); break; case CCV_CONVNET_LOCAL_RESPONSE_NORM: cwc_convnet_rnorm_backward_propagate(layer, batch, i == convnet->count - 1 ? a : GPU(convnet)->device[device_id].backwards[i + 1], GPU(convnet)->device[device_id].forwards[i], i > 0 ? GPU(convnet)->device[device_id].forwards[i - 1] : context->device[device_id].input, GPU(convnet)->device[device_id].denoms[i], GPU(convnet)->device[device_id].backwards[i], context->device[device_id].data_stream); assert(cudaGetLastError() == cudaSuccess); break; case CCV_CONVNET_MAX_POOL: cwc_convnet_max_pool_backward_propagate(layer, batch, i == convnet->count - 1 ? a : GPU(convnet)->device[device_id].backwards[i + 1], GPU(convnet)->device[device_id].forwards[i], i > 0 ? GPU(convnet)->device[device_id].forwards[i - 1] : context->device[device_id].input, GPU(convnet)->device[device_id].backwards[i], context->device[device_id].data_stream); assert(cudaGetLastError() == cudaSuccess); break; case CCV_CONVNET_AVERAGE_POOL: cwc_convnet_average_pool_backward_propagate(layer, batch, i == convnet->count - 1 ? 
a : GPU(convnet)->device[device_id].backwards[i + 1], GPU(convnet)->device[device_id].backwards[i], context->device[device_id].data_stream); assert(cudaGetLastError() == cudaSuccess); break; } cudaEventRecord(stop, context->device[device_id].data_stream); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time, start, stop); if (layer->type == CCV_CONVNET_CONVOLUTIONAL) printf("%d %d %d, %d %d %d, elapsed time for layer %d bprop: %f milliseconds\n", EXTRA(layer)->vary.convolutional.backward.coefficient.x, EXTRA(layer)->vary.convolutional.backward.coefficient.y, EXTRA(layer)->vary.convolutional.backward.coefficient.z, EXTRA(layer)->vary.convolutional.backward.gradient.x, EXTRA(layer)->vary.convolutional.backward.gradient.y, EXTRA(layer)->vary.convolutional.backward.gradient.z, i + 1, elapsed_time); else printf("elapsed time for layer %d bprop: %f milliseconds\n", i + 1, elapsed_time); } cudaEventRecord(overallStop, context->device[device_id].data_stream); cudaEventSynchronize(overallStop); cudaEventElapsedTime(&elapsed_time, overallStart, overallStop); printf("backward pass %f milliseconds\n", elapsed_time); cudaEventDestroy(start); cudaEventDestroy(stop); cudaEventDestroy(overallStart); cudaEventDestroy(overallStop); cudaFree(a); }
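cwc_bench_runtime() above wraps every forward and backward layer call in a cudaEventRecord(start)/cudaEventRecord(stop) pair on the data stream and reads the gap with cudaEventElapsedTime() after synchronizing on the stop event. The sketch below isolates that timing pattern with a throwaway dummy_layer kernel; the kernel, sizes, and stream setup are placeholders and not part of the ccv benchmark.

// Sketch only: per-kernel timing with CUDA events on a user stream.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_layer(float* data, int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        data[i] = data[i] * 0.5f + 1.0f;
}

int main()
{
    const int n = 1 << 20;
    float* d_data = 0;
    cudaMalloc(&d_data, n * sizeof(float));

    cudaStream_t stream;
    cudaStreamCreate(&stream);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // time one "layer" on the stream
    cudaEventRecord(start, stream);
    dummy_layer<<<(n + 255) / 256, 256, 0, stream>>>(d_data, n);
    cudaEventRecord(stop, stream);
    cudaEventSynchronize(stop);          // wait until the stop event has completed

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    std::printf("elapsed time for dummy layer: %f milliseconds\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaStreamDestroy(stream);
    cudaFree(d_data);
    return 0;
}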
#include <array/NDArrayFactory.h> #include <exceptions/cuda_exception.h> #include <helpers/ConstantTadHelper.h> #include <helpers/PointersManager.h> #include <helpers/ShapeUtils.h> #include <helpers/TAD.h> #include <ops/declarable/helpers/segment.h> #include <ops/declarable/helpers/segment_common.h> namespace sd { namespace ops { namespace helpers { // -------------------------------------------------------------------------------------------------------------- // // Segment Prod ops linear kernels // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static SD_KERNEL void segmentProdLinearKernel(void* input, sd::LongType const* inputShape, int* starts, int* lengths, sd::LongType numOfClasses, void* output, sd::LongType const* outputShape) { __shared__ sd::LongType xLen, zLen; __shared__ T* x; __shared__ T* z; if (threadIdx.x == 0) { x = reinterpret_cast<T*>(input); z = reinterpret_cast<T*>(output); xLen = shape::length(inputShape); zLen = shape::length(outputShape); } __syncthreads(); for (auto segment = blockIdx.x; segment < numOfClasses; segment += gridDim.x) { auto zIndex = shape::getIndexOffset(segment, outputShape); auto start = starts[segment]; auto finish = start + lengths[segment]; if (lengths[segment] == 0) { continue; } for (auto e = start + threadIdx.x; e < finish; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputShape); sd::math::atomics::sd_atomicMul(&z[segment], x[xIndex]); } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static SD_KERNEL void unsortedSegmentProdLinearKernel(T* input, sd::LongType const* inputShape, I* indices, sd::LongType const* indicesShape, int* starts, int* lengths, sd::LongType numOfClasses, T* output, sd::LongType const* outputShape) { __shared__ sd::LongType xLen, zLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); zLen = shape::length(outputShape); } __syncthreads(); auto start = threadIdx.x + blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for (auto idx = start; idx < xLen; idx += step) { auto xIndex = shape::getIndexOffset(idx, inputShape); auto yIndex = shape::getIndexOffset(idx, indicesShape); auto segment = indices[yIndex]; auto zIndex = shape::getIndexOffset(segment, outputShape); if (lengths[segment] == 0) { continue; } sd::math::atomics::sd_atomicMul(&output[zIndex], input[xIndex]); } } // -------------------------------------------------------------------------------------------------------------- // // SegmentProd kernel template <typename T, typename I> static SD_KERNEL void segmentProdTadKernel(void* inputBuf, sd::LongType const* inputShape, sd::LongType const* inputTads, sd::LongType const* inputTadOffsets, I* indices, int* starts, int* lengths, sd::LongType numOfClasses, void* outputBuf, sd::LongType const* outputShape, sd::LongType const* outputTads, sd::LongType const* outputTadOffsets) { __shared__ sd::LongType len, total; if (threadIdx.x == 0) { total = shape::sizeAt(inputShape, 0); len = shape::length(inputTads); } __syncthreads(); for (auto idx = blockIdx.x; idx < total; idx += gridDim.x) { auto x = reinterpret_cast<T*>(inputBuf) + inputTadOffsets[idx]; auto segment = indices[idx]; // / threadsPerSegment; auto z = reinterpret_cast<T*>(outputBuf) + outputTadOffsets[segment]; auto start = starts[segment]; auto finish = start + lengths[segment]; if (lengths[segment] == 0) continue; for (auto e 
= threadIdx.x; e < len; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputTads); auto zIndex = shape::getIndexOffset(e, outputTads); sd::math::atomics::sd_atomicMul(&z[zIndex], x[xIndex]); } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static void segmentProdFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) { auto stream = context->getCudaStream(); sd::LongType numClasses = indices->e<sd::LongType>(indices->lengthOf() - 1) + 1; NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numClasses}, context); NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses}, context); output->assign(1); classesRangesBegs.assign(indices->lengthOf()); classesRangesLens.assign(0); dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32); fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens); int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer()); int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer()); if (input->isVector()) { segmentProdLinearKernel<T, I><<<128, 256, 128, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), begins, lengths, numClasses, output->specialBuffer(), output->specialShapeInfo()); } else { std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions); auto inputTads = packX.specialShapeInfo(); auto inputTadOffsets = packX.specialOffsets(); auto outputTads = packZ.specialShapeInfo(); auto outputTadOffsets = packZ.specialOffsets(); segmentProdTadKernel<T, I><<<128, 512, 2048, *stream>>>( input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets); } } // -------------------------------------------------------------------------------------------------------------- // void segmentProdFunctor(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices}); BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), segmentProdFunctor_, (context, input, indices, output), SD_NUMERIC_TYPES, SD_INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices}); } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static void unsortedSegmentProdFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices, sd::LongType numOfClasses, NDArray* output) { auto stream = context->getCudaStream(); // NDArray classes = NDArrayFactory::create<int>('c', {numOfClasses, 2}); NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}, context); NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}, context); // NDArray row = NDArrayFactory::create<int>('c', {1, 2}, {(int)indices->lengthOf(), (int)0}); // classes.applyTrueBroadcast(sd::BroadcastOpsTuple::Assign(), &row, &classes); classesRangesBegs.assign(indices->lengthOf()); classesRangesLens.assign(0); dim3 dims(numOfClasses, indices->lengthOf(), numOfClasses * 32 + 32); // int* 
classesBuf = reinterpret_cast<int*>(classes.specialBuffer()); fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens); int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer()); int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer()); output->assign(1); if (input->isVector()) { unsortedSegmentProdLinearKernel<T, I><<<128, 256, 256, *stream>>>( input->dataBuffer()->specialAsT<T>(), input->specialShapeInfo(), indices->dataBuffer()->specialAsT<I>(), indices->specialShapeInfo(), begins, lengths, numOfClasses, output->dataBuffer()->specialAsT<T>(), output->specialShapeInfo()); } else { std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions); auto inputTads = packX.specialShapeInfo(); auto inputTadOffsets = packX.specialOffsets(); auto outputTads = packZ.specialShapeInfo(); auto outputTadOffsets = packZ.specialOffsets(); dims.x = input->sizeAt(0); segmentProdTadKernel<T, I> <<<128, 256, 256, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets); } } // -------------------------------------------------------------------------------------------------------------- // void unsortedSegmentProdFunctor(sd::LaunchContext* context, NDArray* input, NDArray* indices, sd::LongType numOfClasses, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices}); BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentProdFunctor_, (context, input, indices, numOfClasses, output), SD_NUMERIC_TYPES, SD_INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices}); } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static SD_KERNEL void segmentProdBPLinearKernel(void* inputBuf, sd::LongType const* inputShape, void* forwardOutput, sd::LongType const* forwardShape, void* eps, sd::LongType const* epsShape, void* indicesBuf, sd::LongType const* indicesShape, void* outputBuf, sd::LongType const* outputShape) { __shared__ T* x; __shared__ T* gradIn; __shared__ T* gradOut; __shared__ I* y; __shared__ T* z; __shared__ sd::LongType xLen, gradLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); x = reinterpret_cast<T*>(inputBuf); y = reinterpret_cast<I*>(indicesBuf); z = reinterpret_cast<T*>(outputBuf); gradIn = reinterpret_cast<T*>(forwardOutput); gradOut = reinterpret_cast<T*>(eps); gradLen = shape::length(epsShape); } __syncthreads(); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = gridDim.x * blockDim.x; for (auto e = start; e < xLen; e += step) { auto zOffset = shape::getIndexOffset(e, outputShape); auto xOffset = shape::getIndexOffset(e, inputShape); auto yOffset = shape::getIndexOffset(e, indicesShape); auto classIndex = y[yOffset]; auto gradOffsetI = shape::getIndexOffset(classIndex, forwardShape); auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape); z[zOffset] = gradOut[gradOffsetO] * gradIn[gradOffsetI] / x[xOffset]; } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> 
static SD_KERNEL void segmentProdBPTadKernel(void* inputBuf, sd::LongType const* inputShape, void* forwardOutput, sd::LongType const* forwardShape, void* eps, sd::LongType const* epsShape, void* indicesBuf, sd::LongType const* indicesShape, void* outputBuf, sd::LongType const* outputShape, sd::LongType const* inputTad, sd::LongType const* inputOffsets, sd::LongType const* gradInTad, sd::LongType const* gradInOffsets, sd::LongType const* gradOutTad, sd::LongType const* gradOutOffsets, sd::LongType const* outTad, sd::LongType const* outOffsets) { __shared__ T* x; __shared__ T* gradIn; __shared__ T* gradOut; __shared__ I* y; __shared__ T* z; __shared__ sd::LongType xLen, yLen, gradLen, currentLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); x = reinterpret_cast<T*>(inputBuf); y = reinterpret_cast<I*>(indicesBuf); z = reinterpret_cast<T*>(outputBuf); yLen = shape::length(indicesShape); gradOut = reinterpret_cast<T*>(eps); gradIn = reinterpret_cast<T*>(forwardOutput); gradLen = shape::length(epsShape); currentLen = shape::length(outTad); } __syncthreads(); for (auto i = blockIdx.x; i < yLen; i += gridDim.x) { auto yIndex = shape::getIndexOffset(i, indicesShape); auto segment = y[yIndex]; T* current = x + inputOffsets[i]; T* currentOut = z + outOffsets[i]; T* in = gradIn + gradInOffsets[segment]; T* outGrad = gradOut + gradOutOffsets[segment]; for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) { currentOut[e] = outGrad[e] * in[e] / current[e]; } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> sd::Status segmentProdFunctorBP_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) { auto stream = context->getCudaStream(); NDArray tempRes(gradOut->ordering(), gradOut->getShapeAsVector(), DataTypeUtils::fromT<T>(), context); //->shapeInfo(), context); segmentProdFunctor_<T, I>(context, input, indices, &tempRes); NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); if (input->isVector()) { sd::LongType loopSize = input->lengthOf(); auto numOfClasses = gradOut->lengthOf(); // indices->e<sd::LongType>(loop_size - 1); segmentProdBPLinearKernel<T, I><<<gradOut->lengthOf(), loopSize, 256, *stream>>>( input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo()); } else { std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions); auto packGradIn = sd::ConstantTadHelper::getInstance().tadForDimensions(tempRes.shapeInfo(), dimensions); auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions); auto inputTads = packX.specialShapeInfo(); auto inputTadOffsets = packX.specialOffsets(); auto outputTads = packZ.specialShapeInfo(); auto outputTadOffsets = packZ.specialOffsets(); auto gradInTads = packGradIn.specialShapeInfo(); auto gradInTadOffsets = packGradIn.specialOffsets(); auto gradOutTads = packGradOut.specialShapeInfo(); auto gradOutTadOffsets = packGradOut.specialOffsets(); segmentProdBPTadKernel<T, I><<<gradOut->lengthOf(), input->lengthOf(), 256, 
*stream>>>( input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradInTads, gradInTadOffsets, gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets); } NDArray::registerSpecialUse({output}, {input, indices, gradOut}); return sd::Status::OK; } // -------------------------------------------------------------------------------------------------------------- // sd::Status segmentProdFunctorBP(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return segmentProdFunctorBP_, (context, input, indices, gradOut, output), SD_FLOAT_TYPES, SD_INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices, gradOut}); } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static sd::Status unsortedSegmentProdFunctorBP_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, sd::LongType numOfClasses, NDArray* output) { auto stream = context->getCudaStream(); NDArray tempRes(gradOut->ordering(), gradOut->getShapeAsVector(), DataTypeUtils::fromT<T>(), context); //->shapeInfo(), context); unsortedSegmentProdFunctor_<T, I>(context, input, indices, numOfClasses, &tempRes); NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); if (input->isVector()) { sd::LongType loopSize = input->lengthOf(); auto numOfClasses = gradOut->lengthOf(); // indices->e<sd::LongType>(loop_size - 1); segmentProdBPLinearKernel<T, I><<<gradOut->lengthOf(), loopSize, 256, *stream>>>( input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo()); } else { std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions); auto packGradIn = sd::ConstantTadHelper::getInstance().tadForDimensions(tempRes.shapeInfo(), dimensions); auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions); auto inputTads = packX.specialShapeInfo(); auto inputTadOffsets = packX.specialOffsets(); auto outputTads = packZ.specialShapeInfo(); auto outputTadOffsets = packZ.specialOffsets(); auto gradInTads = packGradIn.specialShapeInfo(); auto gradInTadOffsets = packGradIn.specialOffsets(); auto gradOutTads = packGradOut.specialShapeInfo(); auto gradOutTadOffsets = packGradOut.specialOffsets(); segmentProdBPTadKernel<T, I><<<indices->lengthOf(), input->lengthOf(), 256, *stream>>>( input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradInTads, gradInTadOffsets, gradOutTads, gradOutTadOffsets, outputTads, 
outputTadOffsets); } NDArray::registerSpecialUse({output}, {input, indices, gradOut}); return sd::Status::OK; } // -------------------------------------------------------------------------------------------------------------- // sd::Status unsortedSegmentProdFunctorBP(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, sd::LongType numOfClasses, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentProdFunctorBP_, (context, input, indices, gradOut, numOfClasses, output), SD_FLOAT_TYPES, SD_INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices, gradOut}); } // -------------------------------------------------------------------------------------------------------------- // } // namespace helpers } // namespace ops } // namespace sd
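segmentProdLinearKernel() and segmentProdTadKernel() above both rely on the same idea: the output is pre-assigned to 1 and every input element is atomically multiplied into its segment's slot, since CUDA has no native floating-point atomic multiply (hence sd::math::atomics::sd_atomicMul). The sketch below shows that idea for plain contiguous vectors with a CAS-based atomicMulFloat(); it ignores shape info, TADs, and the backward pass, and the helper names are made up for illustration.

// Sketch only: naive segment product via a compare-and-swap atomic multiply.
#include <cstdio>
#include <cuda_runtime.h>

__device__ float atomicMulFloat(float* address, float val)
{
    unsigned int* addr_as_uint = reinterpret_cast<unsigned int*>(address);
    unsigned int old = *addr_as_uint, assumed;
    do {
        assumed = old;
        old = atomicCAS(addr_as_uint, assumed,
                        __float_as_uint(val * __uint_as_float(assumed)));
    } while (assumed != old);
    return __uint_as_float(old);
}

__global__ void segmentProdNaive(const float* x, const int* segmentIds,
                                 int n, float* out /* pre-filled with 1.0f */)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        atomicMulFloat(&out[segmentIds[i]], x[i]);
}

int main()
{
    const int n = 6, numSegments = 2;
    const float h_x[n]   = { 2.f, 3.f, 4.f, 1.f, 5.f, 2.f };
    const int   h_ids[n] = { 0,   0,   0,   1,   1,   1   };
    float h_out[numSegments] = { 1.f, 1.f };

    float *d_x, *d_out; int* d_ids;
    cudaMalloc(&d_x, n * sizeof(float));
    cudaMalloc(&d_ids, n * sizeof(int));
    cudaMalloc(&d_out, numSegments * sizeof(float));
    cudaMemcpy(d_x, h_x, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_ids, h_ids, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_out, h_out, numSegments * sizeof(float), cudaMemcpyHostToDevice);

    segmentProdNaive<<<1, 64>>>(d_x, d_ids, n, d_out);
    cudaMemcpy(h_out, d_out, numSegments * sizeof(float), cudaMemcpyDeviceToHost);
    std::printf("segment products: %f %f\n", h_out[0], h_out[1]);   // expect 24 and 10

    cudaFree(d_x); cudaFree(d_ids); cudaFree(d_out);
    return 0;
}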
// 1. Removed compute3.0 and lower compute capability // 2. Replaced 64bit shifts with byte permutations // 3. Removed pointer fetching from constant memory // 4. Better loop unrolling factors for cp5.0/5.2 // 6. More precomputations // 7. Increased default intensity (?) // 8. Restored Second nonce buffer // 9. Use video SIMD instruction for cumulative sum of bufidx // Provos Alexis - 2016 #include "miner.h" #include "cuda_helper.h" #include "cuda_vectors.h" #if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) #define __LDG_PTR "l" #else #define __LDG_PTR "r" #endif #ifdef _MSC_VER #define THREAD __declspec(thread) #else #define THREAD __thread #endif static cudaStream_t stream[MAX_GPUS][2]; __constant__ uint32_t key_init[16]; __constant__ uint32_t _ALIGN(16) c_data[64]; __constant__ uint32_t _ALIGN(8) buf_shifts[16]; /// constants /// static const __constant__ uint8 BLAKE2S_IV_Vec = { 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19 }; static const uint8 BLAKE2S_IV_Vechost = { 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19 }; static const uint32_t BLAKE2S_SIGMA_host[10][16] = { {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, {14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3}, {11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4}, {7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8}, {9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13}, {2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9}, {12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11}, {13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10}, {6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5}, {10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0}, }; #define SALSA(a,b,c,d){ \ b^=ROTL32(a+d, 7);\ c^=ROTL32(a+b, 9);\ d^=ROTL32(b+c, 13);\ a^=ROTL32(c+d, 18);\ } #define SALSA_CORE(state) { \ SALSA(state.s0,state.s4,state.s8,state.sc);\ SALSA(state.s5,state.s9,state.sd,state.s1);\ SALSA(state.sa,state.se,state.s2,state.s6);\ SALSA(state.sf,state.s3,state.s7,state.sb);\ SALSA(state.s0,state.s1,state.s2,state.s3);\ SALSA(state.s5,state.s6,state.s7,state.s4);\ SALSA(state.sa,state.sb,state.s8,state.s9);\ SALSA(state.sf,state.sc,state.sd,state.se);\ } #define CHACHA_STEP(a,b,c,d) { \ a += b; d = __byte_perm(d^a,0,0x1032); \ c += d; b = ROTL32(b^c, 12); \ a += b; d = __byte_perm(d^a,0,0x2103); \ c += d; b = ROTL32(b^c, 7); \ } #define CHACHA_CORE_PARALLEL(state){\ CHACHA_STEP(state.lo.s0, state.lo.s4, state.hi.s0, state.hi.s4); \ CHACHA_STEP(state.lo.s1, state.lo.s5, state.hi.s1, state.hi.s5); \ CHACHA_STEP(state.lo.s2, state.lo.s6, state.hi.s2, state.hi.s6); \ CHACHA_STEP(state.lo.s3, state.lo.s7, state.hi.s3, state.hi.s7); \ CHACHA_STEP(state.lo.s0, state.lo.s5, state.hi.s2, state.hi.s7); \ CHACHA_STEP(state.lo.s1, state.lo.s6, state.hi.s3, state.hi.s4); \ CHACHA_STEP(state.lo.s2, state.lo.s7, state.hi.s0, state.hi.s5); \ CHACHA_STEP(state.lo.s3, state.lo.s4, state.hi.s1, state.hi.s6); \ } #define BLAKE_G(idx0, idx1, a, b, c, d, key) { \ idx = BLAKE2S_SIGMA[idx0][idx1]; a += key[idx]; \ a += b; d = __byte_perm(d^a,0, 0x1032); \ c += d; b = ROTR32(b^c, 12); \ idx = BLAKE2S_SIGMA[idx0][idx1+1]; a += key[idx]; \ a += b; d = __byte_perm(d^a,0, 0x0321); \ c += d; b = ROTR32(b^c, 7); \ } #define BLAKE(a, b, c, d, key1,key2) { \ a += b + key1; \ d = __byte_perm(d^a, 0, 0x1032); \ c += d; b = ROTR32(b^c, 12); \ a += b + key2; \ d = __byte_perm(d^a, 0, 0x0321); \ c += d; b = ROTR32(b^c, 7); \ } #define BLAKE_G_PRE(idx0, 
idx1, a, b, c, d, key) { \ a += b + key[idx0]; \ d = __byte_perm(d^a, 0, 0x1032); \ c += d; b = ROTR32(b^c, 12); \ a += b + key[idx1]; \ d = __byte_perm(d^a, 0, 0x0321); \ c += d; b = ROTR32(b^c, 7); \ } #define BLAKE_G_PRE0(idx0, idx1, a, b, c, d, key) { \ a += b; d = __byte_perm(d^a, 0, 0x1032); \ c += d; b = ROTR32(b^c, 12); \ a += b; d = __byte_perm(d^a, 0, 0x0321); \ c += d; b = ROTR32(b^c, 7); \ } #define BLAKE_G_PRE1(idx0, idx1, a, b, c, d, key) { \ a += b + key[idx0]; \ d = __byte_perm(d^a, 0, 0x1032); \ c += d; b = ROTR32(b^c, 12); \ a += b; d = __byte_perm(d^a, 0, 0x0321); \ c += d; b = ROTR32(b^c, 7); \ } #define BLAKE_G_PRE2(idx0, idx1, a, b, c, d, key) { \ a += b; d = __byte_perm(d^a,0, 0x1032); \ c += d; b = ROTR32(b^c, 12); \ a += b + key[idx1]; \ d = __byte_perm(d^a,0, 0x0321); \ c += d; b = ROTR32(b^c, 7); \ } #define BLAKE_Ghost(idx0, idx1, a, b, c, d, key) { \ idx = BLAKE2S_SIGMA_host[idx0][idx1]; \ a += b + key[idx]; \ d = ROTR32(d^a, 16); \ c += d; b = ROTR32(b^c, 12); \ idx = BLAKE2S_SIGMA_host[idx0][idx1+1]; \ a += b + key[idx]; \ d = ROTR32(d^a, 8); \ c += d; b = ROTR32(b^c, 7); \ } __device__ __forceinline__ static void Blake2S_v2(uint32_t *out, const uint32_t* __restrict__ inout, const uint32_t * __restrict__ TheKey){ uint16 V; uint8 tmpblock; V.hi = BLAKE2S_IV_Vec; V.lo = BLAKE2S_IV_Vec; V.lo.s0 ^= 0x01012020; // Copy input block for later tmpblock = V.lo; V.hi.s4 ^= 64; // { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, BLAKE_G_PRE(0, 1, V.lo.s0, V.lo.s4, V.hi.s0, V.hi.s4, TheKey); BLAKE_G_PRE(2, 3, V.lo.s1, V.lo.s5, V.hi.s1, V.hi.s5, TheKey); BLAKE_G_PRE(4, 5, V.lo.s2, V.lo.s6, V.hi.s2, V.hi.s6, TheKey); BLAKE_G_PRE(6, 7, V.lo.s3, V.lo.s7, V.hi.s3, V.hi.s7, TheKey); BLAKE_G_PRE0(8, 9, V.lo.s0, V.lo.s5, V.hi.s2, V.hi.s7, TheKey); BLAKE_G_PRE0(10, 11, V.lo.s1, V.lo.s6, V.hi.s3, V.hi.s4, TheKey); BLAKE_G_PRE0(12, 13, V.lo.s2, V.lo.s7, V.hi.s0, V.hi.s5, TheKey); BLAKE_G_PRE0(14, 15, V.lo.s3, V.lo.s4, V.hi.s1, V.hi.s6, TheKey); // { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }, BLAKE_G_PRE0(14, 10, V.lo.s0, V.lo.s4, V.hi.s0, V.hi.s4, TheKey); BLAKE_G_PRE1(4, 8, V.lo.s1, V.lo.s5, V.hi.s1, V.hi.s5, TheKey); BLAKE_G_PRE0(9, 15, V.lo.s2, V.lo.s6, V.hi.s2, V.hi.s6, TheKey); BLAKE_G_PRE2(13, 6, V.lo.s3, V.lo.s7, V.hi.s3, V.hi.s7, TheKey); BLAKE_G_PRE1(1, 12, V.lo.s0, V.lo.s5, V.hi.s2, V.hi.s7, TheKey); BLAKE_G_PRE(0, 2, V.lo.s1, V.lo.s6, V.hi.s3, V.hi.s4, TheKey); BLAKE_G_PRE2(11, 7, V.lo.s2, V.lo.s7, V.hi.s0, V.hi.s5, TheKey); BLAKE_G_PRE(5, 3, V.lo.s3, V.lo.s4, V.hi.s1, V.hi.s6, TheKey); // { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 }, BLAKE_G_PRE0(11, 8, V.lo.s0, V.lo.s4, V.hi.s0, V.hi.s4, TheKey); BLAKE_G_PRE2(12, 0, V.lo.s1, V.lo.s5, V.hi.s1, V.hi.s5, TheKey); BLAKE_G_PRE(5, 2, V.lo.s2, V.lo.s6, V.hi.s2, V.hi.s6, TheKey); BLAKE_G_PRE0(15, 13, V.lo.s3, V.lo.s7, V.hi.s3, V.hi.s7, TheKey); BLAKE_G_PRE0(10, 14, V.lo.s0, V.lo.s5, V.hi.s2, V.hi.s7, TheKey); BLAKE_G_PRE(3, 6, V.lo.s1, V.lo.s6, V.hi.s3, V.hi.s4, TheKey); BLAKE_G_PRE(7, 1, V.lo.s2, V.lo.s7, V.hi.s0, V.hi.s5, TheKey); BLAKE_G_PRE2(9, 4, V.lo.s3, V.lo.s4, V.hi.s1, V.hi.s6, TheKey); // { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 }, BLAKE_G_PRE1(7, 9, V.lo.s0, V.lo.s4, V.hi.s0, V.hi.s4, TheKey); BLAKE_G_PRE(3, 1, V.lo.s1, V.lo.s5, V.hi.s1, V.hi.s5, TheKey); BLAKE_G_PRE0(13, 12, V.lo.s2, V.lo.s6, V.hi.s2, V.hi.s6, TheKey); BLAKE_G_PRE0(11, 14, V.lo.s3, V.lo.s7, V.hi.s3, V.hi.s7, TheKey); BLAKE_G_PRE(2, 6, V.lo.s0, V.lo.s5, V.hi.s2, V.hi.s7, TheKey); BLAKE_G_PRE1(5, 10, 
V.lo.s1, V.lo.s6, V.hi.s3, V.hi.s4, TheKey); BLAKE_G_PRE(4, 0, V.lo.s2, V.lo.s7, V.hi.s0, V.hi.s5, TheKey); BLAKE_G_PRE0(15, 8, V.lo.s3, V.lo.s4, V.hi.s1, V.hi.s6, TheKey); // { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 }, BLAKE_G_PRE2(9, 0, V.lo.s0, V.lo.s4, V.hi.s0, V.hi.s4, TheKey); BLAKE_G_PRE(5, 7, V.lo.s1, V.lo.s5, V.hi.s1, V.hi.s5, TheKey); BLAKE_G_PRE(2, 4, V.lo.s2, V.lo.s6, V.hi.s2, V.hi.s6, TheKey); BLAKE_G_PRE0(10, 15, V.lo.s3, V.lo.s7, V.hi.s3, V.hi.s7, TheKey); BLAKE_G_PRE2(14, 1, V.lo.s0, V.lo.s5, V.hi.s2, V.hi.s7, TheKey); BLAKE_G_PRE0(11, 12, V.lo.s1, V.lo.s6, V.hi.s3, V.hi.s4, TheKey); BLAKE_G_PRE1(6, 8, V.lo.s2, V.lo.s7, V.hi.s0, V.hi.s5, TheKey); BLAKE_G_PRE1(3, 13, V.lo.s3, V.lo.s4, V.hi.s1, V.hi.s6, TheKey); // { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 }, BLAKE_G_PRE1(2, 12, V.lo.s0, V.lo.s4, V.hi.s0, V.hi.s4, TheKey); BLAKE_G_PRE1(6, 10, V.lo.s1, V.lo.s5, V.hi.s1, V.hi.s5, TheKey); BLAKE_G_PRE1(0, 11, V.lo.s2, V.lo.s6, V.hi.s2, V.hi.s6, TheKey); BLAKE_G_PRE2(8, 3, V.lo.s3, V.lo.s7, V.hi.s3, V.hi.s7, TheKey); BLAKE_G_PRE1(4, 13, V.lo.s0, V.lo.s5, V.hi.s2, V.hi.s7, TheKey); BLAKE_G_PRE(7, 5, V.lo.s1, V.lo.s6, V.hi.s3, V.hi.s4, TheKey); BLAKE_G_PRE0(15, 14, V.lo.s2, V.lo.s7, V.hi.s0, V.hi.s5, TheKey); BLAKE_G_PRE1(1, 9, V.lo.s3, V.lo.s4, V.hi.s1, V.hi.s6, TheKey); // { 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 }, BLAKE_G_PRE2(12, 5, V.lo.s0, V.lo.s4, V.hi.s0, V.hi.s4, TheKey); BLAKE_G_PRE1(1, 15, V.lo.s1, V.lo.s5, V.hi.s1, V.hi.s5, TheKey); BLAKE_G_PRE0(14, 13, V.lo.s2, V.lo.s6, V.hi.s2, V.hi.s6, TheKey); BLAKE_G_PRE1(4, 10, V.lo.s3, V.lo.s7, V.hi.s3, V.hi.s7, TheKey); BLAKE_G_PRE(0, 7, V.lo.s0, V.lo.s5, V.hi.s2, V.hi.s7, TheKey); BLAKE_G_PRE(6, 3, V.lo.s1, V.lo.s6, V.hi.s3, V.hi.s4, TheKey); BLAKE_G_PRE2(9, 2, V.lo.s2, V.lo.s7, V.hi.s0, V.hi.s5, TheKey); BLAKE_G_PRE0(8, 11, V.lo.s3, V.lo.s4, V.hi.s1, V.hi.s6, TheKey); // { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 }, BLAKE_G_PRE0(13, 11, V.lo.s0, V.lo.s4, V.hi.s0, V.hi.s4, TheKey); BLAKE_G_PRE1(7, 14, V.lo.s1, V.lo.s5, V.hi.s1, V.hi.s5, TheKey); BLAKE_G_PRE2(12, 1, V.lo.s2, V.lo.s6, V.hi.s2, V.hi.s6, TheKey); BLAKE_G_PRE1(3, 9, V.lo.s3, V.lo.s7, V.hi.s3, V.hi.s7, TheKey); BLAKE_G_PRE(5, 0, V.lo.s0, V.lo.s5, V.hi.s2, V.hi.s7, TheKey); BLAKE_G_PRE2(15, 4, V.lo.s1, V.lo.s6, V.hi.s3, V.hi.s4, TheKey); BLAKE_G_PRE2(8, 6, V.lo.s2, V.lo.s7, V.hi.s0, V.hi.s5, TheKey); BLAKE_G_PRE(2, 10, V.lo.s3, V.lo.s4, V.hi.s1, V.hi.s6, TheKey); // { 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 }, BLAKE_G_PRE1(6, 15, V.lo.s0, V.lo.s4, V.hi.s0, V.hi.s4, TheKey); BLAKE_G_PRE0(14, 9, V.lo.s1, V.lo.s5, V.hi.s1, V.hi.s5, TheKey); BLAKE_G_PRE2(11, 3, V.lo.s2, V.lo.s6, V.hi.s2, V.hi.s6, TheKey); BLAKE_G_PRE1(0, 8, V.lo.s3, V.lo.s7, V.hi.s3, V.hi.s7, TheKey); BLAKE_G_PRE2(12, 2, V.lo.s0, V.lo.s5, V.hi.s2, V.hi.s7, TheKey); BLAKE_G_PRE2(13, 7, V.lo.s1, V.lo.s6, V.hi.s3, V.hi.s4, TheKey); BLAKE_G_PRE(1, 4, V.lo.s2, V.lo.s7, V.hi.s0, V.hi.s5, TheKey); BLAKE_G_PRE2(10, 5, V.lo.s3, V.lo.s4, V.hi.s1, V.hi.s6, TheKey); // { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 }, BLAKE_G_PRE2(10, 2, V.lo.s0, V.lo.s4, V.hi.s0, V.hi.s4, TheKey); BLAKE_G_PRE2(8, 4, V.lo.s1, V.lo.s5, V.hi.s1, V.hi.s5, TheKey); BLAKE_G_PRE(7, 6, V.lo.s2, V.lo.s6, V.hi.s2, V.hi.s6, TheKey); BLAKE_G_PRE(1, 5, V.lo.s3, V.lo.s7, V.hi.s3, V.hi.s7, TheKey); BLAKE_G_PRE0(15, 11, V.lo.s0, V.lo.s5, V.hi.s2, V.hi.s7, TheKey); BLAKE_G_PRE0(9, 14, V.lo.s1, V.lo.s6, V.hi.s3, V.hi.s4, TheKey); BLAKE_G_PRE1(3, 12, V.lo.s2, V.lo.s7, V.hi.s0, 
V.hi.s5, TheKey); BLAKE_G_PRE2(13, 0, V.lo.s3, V.lo.s4, V.hi.s1, V.hi.s6, TheKey); V.lo = V.lo ^ tmpblock ^ V.hi; V.hi = BLAKE2S_IV_Vec; tmpblock = V.lo; V.hi.s4 ^= 128; V.hi.s6 = ~V.hi.s6; // { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, BLAKE_G_PRE(0, 1, V.lo.s0, V.lo.s4, V.hi.s0, V.hi.s4, inout); BLAKE_G_PRE(2, 3, V.lo.s1, V.lo.s5, V.hi.s1, V.hi.s5, inout); BLAKE_G_PRE(4, 5, V.lo.s2, V.lo.s6, V.hi.s2, V.hi.s6, inout); BLAKE_G_PRE(6, 7, V.lo.s3, V.lo.s7, V.hi.s3, V.hi.s7, inout); BLAKE_G_PRE(8, 9, V.lo.s0, V.lo.s5, V.hi.s2, V.hi.s7, inout); BLAKE_G_PRE(10, 11, V.lo.s1, V.lo.s6, V.hi.s3, V.hi.s4, inout); BLAKE_G_PRE(12, 13, V.lo.s2, V.lo.s7, V.hi.s0, V.hi.s5, inout); BLAKE_G_PRE(14, 15, V.lo.s3, V.lo.s4, V.hi.s1, V.hi.s6, inout); // { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }, BLAKE_G_PRE(14, 10, V.lo.s0, V.lo.s4, V.hi.s0, V.hi.s4, inout); BLAKE_G_PRE(4, 8, V.lo.s1, V.lo.s5, V.hi.s1, V.hi.s5, inout); BLAKE_G_PRE(9, 15, V.lo.s2, V.lo.s6, V.hi.s2, V.hi.s6, inout); BLAKE_G_PRE(13, 6, V.lo.s3, V.lo.s7, V.hi.s3, V.hi.s7, inout); BLAKE_G_PRE(1, 12, V.lo.s0, V.lo.s5, V.hi.s2, V.hi.s7, inout); BLAKE_G_PRE(0, 2, V.lo.s1, V.lo.s6, V.hi.s3, V.hi.s4, inout); BLAKE_G_PRE(11, 7, V.lo.s2, V.lo.s7, V.hi.s0, V.hi.s5, inout); BLAKE_G_PRE(5, 3, V.lo.s3, V.lo.s4, V.hi.s1, V.hi.s6, inout); // { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 }, BLAKE_G_PRE(11, 8, V.lo.s0, V.lo.s4, V.hi.s0, V.hi.s4, inout); BLAKE_G_PRE(12, 0, V.lo.s1, V.lo.s5, V.hi.s1, V.hi.s5, inout); BLAKE_G_PRE(5, 2, V.lo.s2, V.lo.s6, V.hi.s2, V.hi.s6, inout); BLAKE_G_PRE(15, 13, V.lo.s3, V.lo.s7, V.hi.s3, V.hi.s7, inout); BLAKE_G_PRE(10, 14, V.lo.s0, V.lo.s5, V.hi.s2, V.hi.s7, inout); BLAKE_G_PRE(3, 6, V.lo.s1, V.lo.s6, V.hi.s3, V.hi.s4, inout); BLAKE_G_PRE(7, 1, V.lo.s2, V.lo.s7, V.hi.s0, V.hi.s5, inout); BLAKE_G_PRE(9, 4, V.lo.s3, V.lo.s4, V.hi.s1, V.hi.s6, inout); // { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 }, BLAKE_G_PRE(7, 9, V.lo.s0, V.lo.s4, V.hi.s0, V.hi.s4, inout); BLAKE_G_PRE(3, 1, V.lo.s1, V.lo.s5, V.hi.s1, V.hi.s5, inout); BLAKE_G_PRE(13, 12, V.lo.s2, V.lo.s6, V.hi.s2, V.hi.s6, inout); BLAKE_G_PRE(11, 14, V.lo.s3, V.lo.s7, V.hi.s3, V.hi.s7, inout); BLAKE_G_PRE(2, 6, V.lo.s0, V.lo.s5, V.hi.s2, V.hi.s7, inout); BLAKE_G_PRE(5, 10, V.lo.s1, V.lo.s6, V.hi.s3, V.hi.s4, inout); BLAKE_G_PRE(4, 0, V.lo.s2, V.lo.s7, V.hi.s0, V.hi.s5, inout); BLAKE_G_PRE(15, 8, V.lo.s3, V.lo.s4, V.hi.s1, V.hi.s6, inout); //#pragma unroll // 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10, // 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5, // 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0, BLAKE(V.lo.s0, V.lo.s4, V.hi.s0, V.hi.s4, inout[9], inout[0]); BLAKE(V.lo.s1, V.lo.s5, V.hi.s1, V.hi.s5, inout[5], inout[7]); BLAKE(V.lo.s2, V.lo.s6, V.hi.s2, V.hi.s6, inout[2], inout[4]); BLAKE(V.lo.s3, V.lo.s7, V.hi.s3, V.hi.s7, inout[10], inout[15]); BLAKE(V.lo.s0, V.lo.s5, V.hi.s2, V.hi.s7, inout[14], inout[1]); BLAKE(V.lo.s1, V.lo.s6, V.hi.s3, V.hi.s4, inout[11], inout[12]); BLAKE(V.lo.s2, V.lo.s7, V.hi.s0, V.hi.s5, inout[6], inout[8]); BLAKE(V.lo.s3, V.lo.s4, V.hi.s1, V.hi.s6, inout[3], inout[13]); BLAKE(V.lo.s0, V.lo.s4, V.hi.s0, V.hi.s4, inout[2], inout[12]); BLAKE(V.lo.s1, V.lo.s5, V.hi.s1, V.hi.s5, inout[6], inout[10]); BLAKE(V.lo.s2, V.lo.s6, V.hi.s2, V.hi.s6, inout[0], inout[11]); BLAKE(V.lo.s3, V.lo.s7, V.hi.s3, V.hi.s7, inout[8], inout[3]); BLAKE(V.lo.s0, V.lo.s5, V.hi.s2, V.hi.s7, inout[4], inout[13]); BLAKE(V.lo.s1, V.lo.s6, V.hi.s3, V.hi.s4, inout[7], inout[5]); BLAKE(V.lo.s2, V.lo.s7, V.hi.s0, 
V.hi.s5, inout[15], inout[14]); BLAKE(V.lo.s3, V.lo.s4, V.hi.s1, V.hi.s6, inout[1], inout[9]); BLAKE(V.lo.s0, V.lo.s4, V.hi.s0, V.hi.s4, inout[12], inout[5]); BLAKE(V.lo.s1, V.lo.s5, V.hi.s1, V.hi.s5, inout[1], inout[15]); BLAKE(V.lo.s2, V.lo.s6, V.hi.s2, V.hi.s6, inout[14], inout[13]); BLAKE(V.lo.s3, V.lo.s7, V.hi.s3, V.hi.s7, inout[4], inout[10]); BLAKE(V.lo.s0, V.lo.s5, V.hi.s2, V.hi.s7, inout[0], inout[7]); BLAKE(V.lo.s1, V.lo.s6, V.hi.s3, V.hi.s4, inout[6], inout[3]); BLAKE(V.lo.s2, V.lo.s7, V.hi.s0, V.hi.s5, inout[9], inout[2]); BLAKE(V.lo.s3, V.lo.s4, V.hi.s1, V.hi.s6, inout[8], inout[11]); // 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10, // 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5, BLAKE(V.lo.s0, V.lo.s4, V.hi.s0, V.hi.s4, inout[13], inout[11]); BLAKE(V.lo.s1, V.lo.s5, V.hi.s1, V.hi.s5, inout[7], inout[14]); BLAKE(V.lo.s2, V.lo.s6, V.hi.s2, V.hi.s6, inout[12], inout[1]); BLAKE(V.lo.s3, V.lo.s7, V.hi.s3, V.hi.s7, inout[3], inout[9]); BLAKE(V.lo.s0, V.lo.s5, V.hi.s2, V.hi.s7, inout[5], inout[0]); BLAKE(V.lo.s1, V.lo.s6, V.hi.s3, V.hi.s4, inout[15], inout[4]); BLAKE(V.lo.s2, V.lo.s7, V.hi.s0, V.hi.s5, inout[8], inout[6]); BLAKE(V.lo.s3, V.lo.s4, V.hi.s1, V.hi.s6, inout[2], inout[10]); BLAKE(V.lo.s0, V.lo.s4, V.hi.s0, V.hi.s4, inout[6], inout[15]); BLAKE(V.lo.s1, V.lo.s5, V.hi.s1, V.hi.s5, inout[14], inout[9]); BLAKE(V.lo.s2, V.lo.s6, V.hi.s2, V.hi.s6, inout[11], inout[3]); BLAKE(V.lo.s3, V.lo.s7, V.hi.s3, V.hi.s7, inout[0], inout[8]); BLAKE(V.lo.s0, V.lo.s5, V.hi.s2, V.hi.s7, inout[12], inout[2]); BLAKE(V.lo.s1, V.lo.s6, V.hi.s3, V.hi.s4, inout[13], inout[7]); BLAKE(V.lo.s2, V.lo.s7, V.hi.s0, V.hi.s5, inout[1], inout[4]); BLAKE(V.lo.s3, V.lo.s4, V.hi.s1, V.hi.s6, inout[10], inout[5]); // 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0, BLAKE(V.lo.s0, V.lo.s4, V.hi.s0, V.hi.s4, inout[10], inout[2]); BLAKE(V.lo.s1, V.lo.s5, V.hi.s1, V.hi.s5, inout[8], inout[4]); BLAKE(V.lo.s2, V.lo.s6, V.hi.s2, V.hi.s6, inout[7], inout[6]); BLAKE(V.lo.s3, V.lo.s7, V.hi.s3, V.hi.s7, inout[1], inout[5]); BLAKE(V.lo.s0, V.lo.s5, V.hi.s2, V.hi.s7, inout[15], inout[11]); BLAKE(V.lo.s1, V.lo.s6, V.hi.s3, V.hi.s4, inout[9], inout[14]); BLAKE(V.lo.s2, V.lo.s7, V.hi.s0, V.hi.s5, inout[3], inout[12]); BLAKE(V.lo.s3, V.lo.s4, V.hi.s1, V.hi.s6, inout[13], inout[0]); ((uint8*)out)[0] = V.lo ^ tmpblock ^ V.hi; } __device__ __forceinline__ static uint16 salsa_small_scalar_rnd(const uint16 &X) { uint16 state = X; for (uint32_t i = 0; i < 10; i++) { SALSA_CORE(state); } return(X + state); } __device__ __forceinline__ static uint16 chacha_small_parallel_rnd(const uint16 &X){ uint16 state = X; for (uint32_t i = 0; i < 10; i++) { CHACHA_CORE_PARALLEL(state); } return (X + state); } static void Blake2Shost(uint32_t * inout, const uint32_t * inkey){ uint16 V; uint32_t idx; uint8 tmpblock; V.hi = BLAKE2S_IV_Vechost; V.lo = BLAKE2S_IV_Vechost; V.lo.s0 ^= 0x01012020; // Copy input block for later tmpblock = V.lo; V.hi.s4 ^= 64; for(int x = 0; x < 10; ++x) { BLAKE_Ghost(x, 0x00, V.lo.s0, V.lo.s4, V.hi.s0, V.hi.s4, inkey); BLAKE_Ghost(x, 0x02, V.lo.s1, V.lo.s5, V.hi.s1, V.hi.s5, inkey); BLAKE_Ghost(x, 0x04, V.lo.s2, V.lo.s6, V.hi.s2, V.hi.s6, inkey); BLAKE_Ghost(x, 0x06, V.lo.s3, V.lo.s7, V.hi.s3, V.hi.s7, inkey); BLAKE_Ghost(x, 0x08, V.lo.s0, V.lo.s5, V.hi.s2, V.hi.s7, inkey); BLAKE_Ghost(x, 0x0A, V.lo.s1, V.lo.s6, V.hi.s3, V.hi.s4, inkey); BLAKE_Ghost(x, 0x0C, V.lo.s2, V.lo.s7, V.hi.s0, V.hi.s5, inkey); BLAKE_Ghost(x, 0x0E, V.lo.s3, V.lo.s4, V.hi.s1, V.hi.s6, inkey); } V.lo ^= V.hi; V.lo ^= tmpblock; 
V.hi = BLAKE2S_IV_Vechost; tmpblock = V.lo; V.hi.s4 ^= 128; V.hi.s6 = ~V.hi.s6; for(int x = 0; x < 10; ++x) { BLAKE_Ghost(x, 0x00, V.lo.s0, V.lo.s4, V.hi.s0, V.hi.s4, inout); BLAKE_Ghost(x, 0x02, V.lo.s1, V.lo.s5, V.hi.s1, V.hi.s5, inout); BLAKE_Ghost(x, 0x04, V.lo.s2, V.lo.s6, V.hi.s2, V.hi.s6, inout); BLAKE_Ghost(x, 0x06, V.lo.s3, V.lo.s7, V.hi.s3, V.hi.s7, inout); BLAKE_Ghost(x, 0x08, V.lo.s0, V.lo.s5, V.hi.s2, V.hi.s7, inout); BLAKE_Ghost(x, 0x0A, V.lo.s1, V.lo.s6, V.hi.s3, V.hi.s4, inout); BLAKE_Ghost(x, 0x0C, V.lo.s2, V.lo.s7, V.hi.s0, V.hi.s5, inout); BLAKE_Ghost(x, 0x0E, V.lo.s3, V.lo.s4, V.hi.s1, V.hi.s6, inout); } V.lo ^= V.hi ^ tmpblock; ((uint8*)inout)[0] = V.lo; } #define TPB 128 #define TPBchacha152 64 #define TPBchacha150 128 #define TPBchacha252 512 #define TPBchacha250 128 #define TPBsalsa152 512 #define TPBsalsa150 512 #define TPBsalsa252 512 #define TPBsalsa250 128 __global__ __launch_bounds__(TPB, 2) void neoscrypt_gpu_hash_start(int stratum, uint32_t threads, uint32_t startNonce,uint2x4* Input){ __shared__ uint32_t s_data[64 * TPB]; const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); if(thread<threads){ const uint32_t Nonce = startNonce + thread; const uint32_t nonce = (stratum) ? cuda_swab32(Nonce) : Nonce; //freaking morons !!! uint32_t data18 = c_data[18]; uint32_t data20 = c_data[0]; uint32_t input[16]; uint32_t key[16] = { 0 }; uint32_t qbuf, rbuf, bitbuf; uint32_t* B = (uint32_t*)&s_data[threadIdx.x<<6]; #pragma unroll 8 for(uint32_t i = 0; i<64 ;i+=8){ *(uint2x4*)&B[i] = *(uint2x4*)&c_data[i]; } B[19] = nonce; B[39] = nonce; B[59] = nonce; //uint32_t values[11];//qbuf,bitbuf,shiftedvalues qbuf = buf_shifts[ 0]; bitbuf = buf_shifts[ 1]; uint32_t temp[9]; temp[0] = B[(0 + qbuf) & 0x3f] ^ buf_shifts[ 2]; temp[1] = B[(1 + qbuf) & 0x3f] ^ buf_shifts[ 3]; temp[2] = B[(2 + qbuf) & 0x3f] ^ buf_shifts[ 4]; temp[3] = B[(3 + qbuf) & 0x3f] ^ buf_shifts[ 5]; temp[4] = B[(4 + qbuf) & 0x3f] ^ buf_shifts[ 6]; temp[5] = B[(5 + qbuf) & 0x3f] ^ buf_shifts[ 7]; temp[6] = B[(6 + qbuf) & 0x3f] ^ buf_shifts[ 8]; temp[7] = B[(7 + qbuf) & 0x3f] ^ buf_shifts[ 9]; temp[8] = B[(8 + qbuf) & 0x3f] ^ buf_shifts[10]; uint32_t a = c_data[qbuf & 0x3f], b; #pragma unroll 8 for (uint32_t k = 0; k<16; k += 2){ b = c_data[(qbuf + k + 1) & 0x3f]; input[ k] = __byte_perm(a,b,bitbuf); a = c_data[(qbuf + k + 2) & 0x3f]; input[k+1] = __byte_perm(b,a,bitbuf); key[(k>>1)] = __byte_perm(temp[(k>>1)],temp[(k>>1)+1],bitbuf); } if(qbuf < 60){ const uint32_t noncepos = 19 - qbuf % 20; if (noncepos <= 16){ if (noncepos) input[noncepos - 1] = __byte_perm(data18,nonce,bitbuf); if (noncepos != 16U) input[noncepos] = __byte_perm(nonce,data20,bitbuf); } } Blake2S_v2(input, input, key); #pragma unroll 9 for (uint32_t k = 0; k < 9; k++) B[(k + qbuf) & 0x3f] = temp[k]; #pragma unroll 31 for (uint32_t i = 1; i < 31; i++) { uint8_t bufidx = 0; #pragma unroll 4 for (uint32_t x = 0; x < 8; x+=2){ bufidx+= __vsadu4(input[x],0) + __vsadu4(input[x+1],0); } qbuf = bufidx >> 2; rbuf = bufidx & 3; bitbuf = rbuf << 3; uint32_t temp[9]; uint32_t shift = 32 - bitbuf; const uint32_t byte_perm_shift = 0x76543210ULL >> (shift>>1); const uint32_t byte_perm_bitbf = 0x76543210ULL >> (bitbuf>>1); shift = (input[7]>>shift); temp[8] = B[(8 + qbuf) & 0x3f] ^ shift; shift = __byte_perm(input[ 6],input[ 7],byte_perm_shift); temp[7] = B[(7 + qbuf) & 0x3f] ^ shift; shift = __byte_perm(input[ 5],input[ 6],byte_perm_shift); temp[6] = B[(6 + qbuf) & 0x3f] ^ shift; shift = __byte_perm(input[ 4],input[ 5],byte_perm_shift); temp[5] = B[(5 
+ qbuf) & 0x3f] ^ shift; shift = __byte_perm(input[ 3],input[ 4],byte_perm_shift); temp[4] = B[(4 + qbuf) & 0x3f] ^ shift; shift = __byte_perm(input[ 2],input[ 3],byte_perm_shift); temp[3] = B[(3 + qbuf) & 0x3f] ^ shift; shift = __byte_perm(input[ 1],input[ 2],byte_perm_shift); temp[2] = B[(2 + qbuf) & 0x3f] ^ shift; shift = __byte_perm(input[ 0],input[ 1],byte_perm_shift); temp[1] = B[(1 + qbuf) & 0x3f] ^ shift; temp[0] = B[(0 + qbuf) & 0x3f] ^ (input[ 0]<<bitbuf); uint32_t a = c_data[qbuf & 0x3f]; #pragma unroll 8 for (int k = 0; k<16; k += 2) { const uint32_t b = c_data[(qbuf + k + 1) & 0x3f]; input[k] = __byte_perm(a,b,byte_perm_bitbf); a = c_data[(qbuf + k + 2) & 0x3f]; input[k+1] = __byte_perm(b,a,byte_perm_bitbf); key[(k>>1)] = __byte_perm(temp[(k>>1)],temp[(k>>1)+1],byte_perm_bitbf); } if(qbuf < 60){ const uint32_t noncepos = 19 - qbuf % 20U; if (noncepos <= 16){ if (noncepos) input[noncepos - 1] = __byte_perm(data18,nonce,byte_perm_bitbf); if (noncepos != 16U) input[noncepos] = __byte_perm(nonce,data20,byte_perm_bitbf); } } Blake2S_v2(input, input, key); #pragma unroll 9 for (int k = 0; k < 9; k++) B[(k + qbuf) & 0x3f] = temp[k]; } uint8_t bufidx = 0; #pragma unroll 4 for (uint32_t x = 0; x < 8; x+=2){ bufidx+= __vsadu4(input[x],0) + __vsadu4(input[x+1],0); } qbuf = bufidx >> 2; rbuf = bufidx & 3; uint32_t byte_perm_bitbf = 0x76543210ULL >> (rbuf<<2); uint2x4 output[8]; #pragma unroll for (uint32_t i = 0; i<64; i++) { const uint32_t a = (qbuf + i) & 0x3f, b = (qbuf + i + 1) & 0x3f; ((uint32_t*)output)[i] = __byte_perm(B[a],B[b],byte_perm_bitbf); } output[0] ^= ((uint2x4*)input)[0]; ((uint32_t*)output)[19] ^= nonce; ((uint32_t*)output)[39] ^= nonce; ((uint32_t*)output)[59] ^= nonce; #pragma unroll 8 for(uint32_t i=0;i<8;i++){ Input[i*threads+thread] = output[i] ^ ((uint2x4*)c_data)[i]; } } } __global__ #if __CUDA_ARCH__ > 500 __launch_bounds__(TPBchacha152, 6) #else __launch_bounds__(TPBchacha150, 6) #endif void neoscrypt_gpu_hash_chacha1_stream1(const uint32_t threads,const uint2x4* Input, uint2x4 *const __restrict__ W, uint2x4 *const __restrict__ Tr){ const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); if(thread<threads){ uint2x4 X[8]; uint16* XV = (uint16*)X; #pragma unroll 8 for(int i = 0; i<8; i++) X[i] = __ldg4(&Input[i*threads+thread]); #pragma nounroll for(uint32_t i = 0; i < 128; ++i){ #if __CUDA_ARCH__ > 500 #pragma unroll 8 for(uint32_t j = 0; j<8; j++) W[(thread<<10) + (i<<3) + j] = X[j]; #else #pragma unroll 8 for(uint32_t j = 0; j<8; j++) W[(i*8+j)*threads+thread] = X[j]; #endif const uint16 temp = XV[2]; XV[0] = chacha_small_parallel_rnd(XV[0] ^ XV[3]); XV[2] = chacha_small_parallel_rnd(XV[1] ^ XV[0]); XV[1] = chacha_small_parallel_rnd(XV[2] ^ temp); XV[3] = chacha_small_parallel_rnd(XV[3] ^ XV[1]); } #pragma unroll 8 for(uint32_t i = 0; i<8; i++) Tr[i*threads+thread] = X[i]; } } __global__ #if __CUDA_ARCH__ > 500 __launch_bounds__(TPBchacha252, 1) #else __launch_bounds__(TPBchacha250, 1) #endif void neoscrypt_gpu_hash_chacha2_stream1(const uint32_t threads, const uint2x4 *const __restrict__ W, uint2x4 *const __restrict__ Tr){ const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); uint2x4 X[8]; uint16* XV = (uint16*)X; if(thread<threads){ #pragma unroll for(int i = 0; i<8; i++) X[i] = __ldg4(&Tr[i*threads+thread]); #pragma unroll 128 for(int t = 0; t < 128; t++){ const uint32_t idx = (X[6].x.x & 0x7F) << 3; #if __CUDA_ARCH__ > 500 #pragma unroll 8 for(uint32_t j = 0; j<8; j++) X[j] ^= __ldg4(&W[(thread<<10) + idx + j]); #else #pragma nounroll 
for(uint32_t j = 0; j<8; j++) X[j] ^= __ldg4(&W[(idx+j)*threads + thread]); #endif const uint16 temp = XV[2]; XV[0] = chacha_small_parallel_rnd(XV[0] ^ XV[3]); XV[2] = chacha_small_parallel_rnd(XV[1] ^ XV[0]); XV[1] = chacha_small_parallel_rnd(XV[2] ^ temp); XV[3] = chacha_small_parallel_rnd(XV[3] ^ XV[1]); } #pragma unroll 8 for(uint32_t i = 0; i<8; i++) Tr[i*threads+thread] = X[i]; } } __global__ #if __CUDA_ARCH__ > 500 __launch_bounds__(TPBsalsa152, 1) #else __launch_bounds__(TPBsalsa150, 1) #endif void neoscrypt_gpu_hash_salsa1_stream1(const uint32_t threads,const uint2x4* Input, uint2x4 *const __restrict__ W2, uint2x4 *const __restrict__ Tr2){ const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); uint2x4 Z[8]; uint16* XV = (uint16*)Z; if(thread<threads){ #pragma unroll 8 for(uint32_t i = 0; i<8; i++) Z[i] = Input[i*threads+thread]; #pragma nounroll for(uint32_t i = 0; i < 128; ++i){ #if __CUDA_ARCH__ > 500 #pragma unroll 8 for(uint32_t j = 0; j<8; j++) W2[(thread<<10) + (i<<3) + j] = Z[j]; #else #pragma unroll 8 for(uint32_t j = 0; j<8; j++) W2[((i<<3)+j)*threads+thread] = Z[j]; #endif const uint16 temp = XV[ 2]; XV[0] = salsa_small_scalar_rnd(XV[0] ^ XV[3]); XV[2] = salsa_small_scalar_rnd(XV[1] ^ XV[0]); XV[1] = salsa_small_scalar_rnd(XV[2] ^ temp); XV[3] = salsa_small_scalar_rnd(XV[3] ^ XV[1]); } #pragma unroll 8 for(uint32_t i = 0; i<8; i++) Tr2[i*threads+thread] = Z[i]; } } __global__ #if __CUDA_ARCH__ > 500 __launch_bounds__(TPBsalsa252, 1) #else __launch_bounds__(TPBsalsa250, 1) #endif void neoscrypt_gpu_hash_salsa2_stream1(const uint32_t threads, const uint2x4 *const __restrict__ W2, uint2x4 *const __restrict__ Tr2){ const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); uint2x4 X[8]; uint16* XV = (uint16*)X; if(thread<threads){ #pragma unroll 8 for(uint32_t i = 0; i<8; i++) X[i] = __ldg4(&Tr2[i*threads+thread]); #pragma unroll 128 for(uint32_t t = 0; t < 128; t++) { const uint32_t idx = (X[6].x.x & 0x7F) << 3; #if __CUDA_ARCH__ > 500 #pragma unroll 8 for(uint32_t j = 0; j<8; j++) X[j] ^= __ldg4(&W2[(thread<<10) + idx + j]); #else uint2x4 tmp[8]; #pragma nounroll for(uint32_t j = 0; j<8; j++) tmp[j] = __ldg4(&W2[(idx+j)*threads + thread]); #pragma nounroll for(uint32_t j = 0; j<8; j++) X[j] ^= tmp[j]; #endif const uint16 temp = XV[ 2]; XV[0] = salsa_small_scalar_rnd(XV[0] ^ XV[3]); XV[2] = salsa_small_scalar_rnd(XV[1] ^ XV[0]); XV[1] = salsa_small_scalar_rnd(XV[2] ^ temp); XV[3] = salsa_small_scalar_rnd(XV[3] ^ XV[1]); } #pragma unroll 8 for(uint32_t i = 0; i<8; i++) Tr2[i*threads+thread] = X[i]; } } __global__ __launch_bounds__(TPB, 3) void neoscrypt_gpu_hash_ending(int stratum, uint32_t threads, uint32_t startNonce,const uint2x4 *const __restrict__ Tr,const uint2x4 *const __restrict__ Tr2, uint32_t *resNonces,const uint32_t target){ __shared__ uint32_t s_data[64 * TPB]; const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); if(thread<threads){ const uint32_t cdata7 = c_data[7]; const uint32_t data18 = c_data[18]; const uint32_t data20 = c_data[0]; const uint32_t Nonce = startNonce + thread; const uint32_t nonce = (stratum) ? 
cuda_swab32(Nonce) : Nonce; uint32_t* B0 = (uint32_t*)&s_data[threadIdx.x * 64]; uint32_t input[16]; #pragma unroll 8 for (int i = 0; i<8; i++){ *(uint2x4*)&B0[i<<3] = __ldg4(&Tr2[i*threads+thread]) ^ __ldg4(&Tr[i*threads+thread]); } *(uint2x4*)&input[ 0] = *(uint2x4*)&c_data[ 0]; *(uint2x4*)&input[ 8] = *(uint2x4*)&c_data[ 8]; uint32_t key[16]; *(uint2x4*)&key[0] = *(uint2x4*)&B0[0]; *(uint4*)&key[ 8] = make_uint4(0, 0, 0, 0); *(uint4*)&key[12] = make_uint4(0, 0, 0, 0); uint32_t qbuf, bitbuf; uint32_t temp[9]; #pragma unroll for (int i = 0; i < 31; i++){ Blake2S_v2(input, input, key); uint8_t bufidx = 0; #pragma unroll 4 for (uint32_t x = 0; x < 8; x+=2){ bufidx+= __vsadu4(input[x],0) + __vsadu4(input[x+1],0); } qbuf = bufidx >> 2; bitbuf = (bufidx & 3) << 3; uint32_t shift = 32U - bitbuf; const uint32_t byte_perm_shift = 0x76543210ULL >> (shift>>1); const uint32_t byte_perm_bitbf = 0x76543210ULL >> (bitbuf>>1); shift = (input[7]>>shift); B0[(8 + qbuf) & 0x3f] = (temp[8] = B0[(8 + qbuf) & 0x3f] ^ shift); shift = __byte_perm(input[ 6],input[ 7],byte_perm_shift); B0[(7 + qbuf) & 0x3f] = (temp[7] = B0[(7 + qbuf) & 0x3f] ^ shift); shift = __byte_perm(input[ 5],input[ 6],byte_perm_shift); B0[(6 + qbuf) & 0x3f] = (temp[6] = B0[(6 + qbuf) & 0x3f] ^ shift); shift = __byte_perm(input[ 4],input[ 5],byte_perm_shift); B0[(5 + qbuf) & 0x3f] = (temp[5] = B0[(5 + qbuf) & 0x3f] ^ shift); shift = __byte_perm(input[ 3],input[ 4],byte_perm_shift); B0[(4 + qbuf) & 0x3f] = (temp[4] = B0[(4 + qbuf) & 0x3f] ^ shift); shift = __byte_perm(input[ 2],input[ 3],byte_perm_shift); B0[(3 + qbuf) & 0x3f] = (temp[3] = B0[(3 + qbuf) & 0x3f] ^ shift); shift = __byte_perm(input[ 1],input[ 2],byte_perm_shift); B0[(2 + qbuf) & 0x3f] = (temp[2] = B0[(2 + qbuf) & 0x3f] ^ shift); shift = __byte_perm(input[ 0],input[ 1],byte_perm_shift); B0[(1 + qbuf) & 0x3f] = (temp[1] = B0[(1 + qbuf) & 0x3f] ^ shift); B0[(0 + qbuf) & 0x3f] = (temp[0] = B0[(0 + qbuf) & 0x3f] ^ (input[ 0]<<bitbuf)); uint32_t a = c_data[qbuf & 0x3f]; #pragma unroll 8 for (int k = 0; k<16; k += 2) { const uint32_t b = c_data[(qbuf + k + 1) & 0x3f]; input[k] = __byte_perm(a,b,byte_perm_bitbf); a = c_data[(qbuf + k + 2) & 0x3f]; input[k+1] = __byte_perm(b,a,byte_perm_bitbf); key[(k>>1)] = __byte_perm(temp[(k>>1)],temp[(k>>1)+1],byte_perm_bitbf); } if(qbuf<60){ const uint32_t noncepos = 19 - qbuf % 20; if (noncepos <= 16){ if (noncepos != 0) input[noncepos - 1] = __byte_perm(data18,nonce,byte_perm_bitbf); if (noncepos != 16) input[noncepos] = __byte_perm(nonce,data20,byte_perm_bitbf); } } } Blake2S_v2(input, input, key); uint8_t bufidx = 0; #pragma unroll 4 for (uint32_t x = 0; x < 8; x+=2){ bufidx+= __vsadu4(input[x],0) + __vsadu4(input[x+1],0); } qbuf = bufidx >> 2; const uint32_t byte_perm_bitbf = 0x76543210ULL >> ((bufidx & 3)<<2); const uint32_t output = input[ 7] ^ cdata7 ^ __byte_perm(B0[(qbuf + 7) & 0x3f],B0[(qbuf + 8) & 0x3f],byte_perm_bitbf); if (output <= target){ // resNonces[0] = nonce; uint32_t tmp = atomicExch(resNonces, Nonce); if(tmp != UINT32_MAX) resNonces[1] = tmp; } } } uint2x4* W[MAX_GPUS]; uint2x4* W2[MAX_GPUS]; // 2 streams uint2x4* Trans1[MAX_GPUS]; uint2x4* Trans2[MAX_GPUS]; // 2 streams uint2x4 *Input[MAX_GPUS]; // 2 streams void neoscrypt_cpu_init(int thr_id, uint32_t threads) { CUDA_SAFE_CALL(cudaStreamCreate(&stream[thr_id][0])); CUDA_SAFE_CALL(cudaStreamCreate(&stream[thr_id][1])); CUDA_SAFE_CALL(cudaMalloc(&W[thr_id], 32 * 128 * sizeof(uint64_t) * threads)); CUDA_SAFE_CALL(cudaMalloc(&W2[thr_id], 32 * 128 * sizeof(uint64_t) * threads)); 
CUDA_SAFE_CALL(cudaMalloc(&Trans1[thr_id], 32 * sizeof(uint64_t) * threads)); CUDA_SAFE_CALL(cudaMalloc(&Trans2[thr_id], 32 * sizeof(uint64_t) * threads)); CUDA_SAFE_CALL(cudaMalloc(&Input[thr_id], 32 * sizeof(uint64_t) * threads)); } __host__ void neoscrypt_free(int thr_id){ cudaFree(W[thr_id]); cudaFree(W2[thr_id]); cudaFree(Trans1[thr_id]); cudaFree(Trans2[thr_id]); cudaFree(Input[thr_id]); cudaStreamDestroy(stream[thr_id][0]); cudaStreamDestroy(stream[thr_id][1]); } __host__ void neoscrypt_cpu_hash_k4(bool stratum, int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_resNonce,const uint32_t target7){ const uint32_t threadsperblock2 = TPB; dim3 grid((threads + threadsperblock2 - 1) / threadsperblock2); dim3 block(threadsperblock2); const int threadsperblock3 = device_sm[device_map[thr_id]]>500 ? TPBchacha152 : TPBchacha150; dim3 grid3((threads + threadsperblock3 - 1) / threadsperblock3); dim3 block3(threadsperblock3); const int threadsperblock4 = device_sm[device_map[thr_id]]>500 ? TPBsalsa152 : TPBsalsa150; dim3 grid4((threads + threadsperblock4 - 1) / threadsperblock4); dim3 block4(threadsperblock4); const int threadsperblock5 = device_sm[device_map[thr_id]]>500 ? TPBchacha252 : TPBchacha250; dim3 grid5((threads + threadsperblock5 - 1) / threadsperblock5); dim3 block5(threadsperblock5); const int threadsperblock6 = device_sm[device_map[thr_id]]>500 ? TPBsalsa252 : TPBsalsa250; dim3 grid6((threads + threadsperblock6 - 1) / threadsperblock6); dim3 block6(threadsperblock6); neoscrypt_gpu_hash_start <<< grid, block >>>(stratum, threads, startNounce,Input[thr_id]); //fastkdf cudaThreadSynchronize(); neoscrypt_gpu_hash_salsa1_stream1 <<< grid4, block4, 0, stream[thr_id][0] >>>(threads,Input[thr_id], W2[thr_id], Trans2[thr_id]); //salsa neoscrypt_gpu_hash_chacha1_stream1 <<< grid3, block3, 0, stream[thr_id][1] >>>(threads,Input[thr_id], W[thr_id], Trans1[thr_id]); //chacha neoscrypt_gpu_hash_salsa2_stream1 <<< grid6, block6, 0, stream[thr_id][0] >>>(threads, W2[thr_id], Trans2[thr_id]);//salsa neoscrypt_gpu_hash_chacha2_stream1 <<< grid5, block5, 0, stream[thr_id][1] >>>(threads, W[thr_id], Trans1[thr_id]); //chacha cudaStreamSynchronize(stream[thr_id][0]); cudaStreamSynchronize(stream[thr_id][1]); neoscrypt_gpu_hash_ending << <grid, block >> >(stratum, threads, startNounce, Trans1[thr_id], Trans2[thr_id], d_resNonce,target7); //fastkdf+end } __host__ void neoscrypt_setBlockTarget(uint32_t* pdata) { uint32_t PaddedMessage[64]; uint32_t input[16], key[16] = { 0 }; for (int i = 0; i < 19; i++) { PaddedMessage[i] = pdata[i]; PaddedMessage[i + 20] = pdata[i]; PaddedMessage[i + 40] = pdata[i]; } for (int i = 0; i<4; i++) PaddedMessage[i + 60] = pdata[i]; PaddedMessage[19] = 0; PaddedMessage[39] = 0; PaddedMessage[59] = 0; ((uint16*)input)[0] = ((uint16*)pdata)[0]; ((uint8*)key)[0] = ((uint8*)pdata)[0]; Blake2Shost(input, key); // cudaMemcpyToSymbol(input_init, input, 64, 0, cudaMemcpyHostToDevice); uint8_t bufidx = 0; for (int x = 0; x < 8; x++){ uint32_t bufhelper = (input[x] & 0x00ff00ff) + ((input[x] & 0xff00ff00) >> 8); bufhelper = bufhelper + (bufhelper >> 16); bufidx += bufhelper; } uint32_t qbuf, rbuf, bitbuf; qbuf = bufidx >> 2; rbuf = bufidx & 3; bitbuf = rbuf << 3; uint32_t shifted[ 9]; uint32_t shift = 32 - bitbuf; #define LONGLONG(LO,HI) ((uint64_t)LO | (((uint64_t)HI) << 32)) shifted[ 0] = input[ 0] << bitbuf; shifted[ 1] = LONGLONG(input[ 0],input[ 1]) >> shift; shifted[ 2] = LONGLONG(input[ 1],input[ 2]) >> shift; shifted[ 3] = LONGLONG(input[ 2],input[ 3]) >> shift; 
shifted[ 4] = LONGLONG(input[ 3],input[ 4]) >> shift; shifted[ 5] = LONGLONG(input[ 4],input[ 5]) >> shift; shifted[ 6] = LONGLONG(input[ 5],input[ 6]) >> shift; shifted[ 7] = LONGLONG(input[ 6],input[ 7]) >> shift; shifted[ 8] = LONGLONG(input[ 7], 0) >> shift; uint32_t values[11];//qbuf,bitbuf,shiftedvalues values[ 0] = qbuf; values[ 1] = 0x76543210ULL >> (bitbuf >> 1); memcpy(&values[2],shifted,9*sizeof(uint32_t)); cudaMemcpyToSymbol(buf_shifts,values,11*sizeof(uint32_t),0,cudaMemcpyHostToDevice); cudaMemcpyToSymbol(key_init, key, 64, 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(c_data, PaddedMessage, 64 * sizeof(uint32_t), 0, cudaMemcpyHostToDevice); CUDA_SAFE_CALL(cudaGetLastError()); }
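// --- Illustrative host-side sketch (not part of the original miner) ---------------------
// Why: the kernels above repeatedly derive a byte offset into the 256-byte FastKDF buffer
// from a 32-byte Blake2s output, using __vsadu4(input[x], 0) (sum of the four bytes of
// input[x]) on the device and the masked-add trick in neoscrypt_setBlockTarget() on the
// host.  The reference below mirrors that host logic so the qbuf/rbuf/bitbuf selectors
// used with __byte_perm can be checked on the CPU.  The struct and function names are
// illustrative only.
#include <cstdint>

struct FastKdfOffset { uint32_t qbuf, rbuf, bitbuf; };

static FastKdfOffset fastkdf_offset_host(const uint32_t input[8])
{
    uint8_t bufidx = 0;                                  // modulo-256 sum of the 32 output bytes
    for (int x = 0; x < 8; ++x) {
        uint32_t bufhelper = (input[x] & 0x00ff00ffu) + ((input[x] & 0xff00ff00u) >> 8);
        bufhelper += bufhelper >> 16;                    // low byte now holds the byte sum of input[x]
        bufidx   += (uint8_t)bufhelper;
    }
    FastKdfOffset o;
    o.qbuf   = bufidx >> 2;                              // 32-bit word offset into the buffer
    o.rbuf   = bufidx & 3;                               // byte offset inside that word
    o.bitbuf = o.rbuf << 3;                              // same offset expressed in bits
    return o;
}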

#include <atomic> #include <iostream> #include <string> #include <torch/csrc/utils/python_numbers.h> #include <unordered_map> namespace quiver { #define CHECK_CPU(x) AT_ASSERTM(!x.device().is_cuda(), #x " must be CPU tensor") class ShardTensorItem { public: int device; cudaIpcMemHandle_t mem_handle; std::vector<int> shape; // for now we assume it is all float int dtype; ShardTensorItem(int device_, cudaIpcMemHandle_t mem_handle_, std::vector<int> shape_) : device(device_), mem_handle(mem_handle_), shape(shape_) { } ShardTensorItem(){ }; std::tuple<int, py::bytes, std::vector<int>> share_ipc() { auto _handle = PyBytes_FromStringAndSize((char *)&mem_handle, CUDA_IPC_HANDLE_SIZE); auto bytes_obj = py::reinterpret_steal<py::object>((PyObject *)_handle); return std::make_tuple(device, bytes_obj, shape); } void from_ipc(std::tuple<int, std::string, std::vector<int>> ipc_data) { device = std::get<0>(ipc_data); shape = std::get<2>(ipc_data); auto handle = std::get<1>(ipc_data); auto ipc_handle = reinterpret_cast<const cudaIpcMemHandle_t *>(handle.c_str()); mem_handle = *ipc_handle; } }; class ShardTensor { public: ShardTensor(int device) : device_(device), inited_(false), device_count_(0) { offset_list_.push_back(0); } size_t get_tensor_bytes(torch::Tensor tensor) { // assume it's float int dim = tensor.dim(); size_t total_bytes = 4; for (int index = 0; index < dim; index++) { total_bytes *= tensor.sizes()[index]; } return total_bytes; } std::vector<int> get_tensor_shape(torch::Tensor tensor) { std::vector<int> shape; int dim = tensor.dim(); for (int index = 0; index < dim; index++) { shape.push_back(tensor.sizes()[index]); } return shape; } void append(ShardTensorItem item) { cudaSetDevice(device_); if (!inited_) { shape_.resize(item.shape.size()); shape_[0] = 0; auto tensor_sizes = item.shape; for (int index = 1; index < shape_.size(); index++) { shape_[index] = tensor_sizes[index]; } inited_ = true; } offset_list_.push_back(offset_list_[offset_list_.size() - 1] + item.shape[0]); // Check accessbility if (item.device >= 0) { // TODO int access_i_j, access_j_i; cudaDeviceCanAccessPeer(&access_i_j, device_, item.device); cudaDeviceCanAccessPeer(&access_j_i, item.device, device_); if ((access_i_j && access_j_i) || device_ == item.device) { access_book.push_back(1); // printf("%d <-> %d support peer access \n", device_, // item.device); } else { access_book.push_back(0); // printf("%d <-> %d dont support peer access \n", device_, // item.device); } } else { access_book.push_back(1); // printf("%d <-> CPU support peer access \n", device_); } // get dev_ptr that can be accessed from this process void *ptr = NULL; tensor_devices_.push_back(item.device); if (!access_book[access_book.size() - 1]) { cudaSetDevice(item.device); cudaIpcOpenMemHandle(&ptr, item.mem_handle, cudaIpcMemLazyEnablePeerAccess); cudaSetDevice(device_); // printf("WARNING: Tensor from device %d can NOT be accessed in // kernel launched on device %d \n", item.device, device_); } else { cudaIpcOpenMemHandle(&ptr, item.mem_handle, cudaIpcMemLazyEnablePeerAccess); } // dev_ptrs_.push_back((float *)ptr); shape_[0] += item.shape[0]; device_count_ += 1; cudaCheckError(); } void append(torch::Tensor &tensor, int target_device) { CHECK_CPU(tensor); // for now, we assume tensor is added ordered if (!inited_) { shape_.resize(tensor.dim()); shape_[0] = 0; auto tensor_sizes = tensor.sizes(); for (int index = 1; index < shape_.size(); index++) { shape_[index] = tensor_sizes[index]; } inited_ = true; } 
tensor_shapes_.push_back(get_tensor_shape(tensor)); offset_list_.push_back(offset_list_[offset_list_.size() - 1] + tensor.sizes()[0]); void *ptr = NULL; size_t data_size = get_tensor_bytes(tensor); tensor_devices_.push_back(target_device); if (target_device >= 0) { // if target_device >= 0, it means we use p2p // printf("LOG >>> Malloc Data On Device %d With %ulld Bytes\n", // target_device, data_size); cudaSetDevice(target_device); cudaMalloc(&ptr, data_size); cudaMemcpy(ptr, tensor.data_ptr<float>(), data_size, cudaMemcpyHostToDevice); cudaSetDevice(device_); // decide access book int access_i_j, access_j_i; cudaDeviceCanAccessPeer(&access_i_j, device_, target_device); cudaDeviceCanAccessPeer(&access_j_i, target_device, device_); if ((access_i_j && access_j_i) || device_ == target_device) { access_book.push_back(1); // printf("%d <-> %d support peer access \n", device_, // target_device); } else { access_book.push_back(0); // printf("%d <-> %d dont support peer access \n", device_, // target_device); } } else { cudaSetDevice(device_); // if target_device < 0, it means we use Zero-Copy quiverRegister(tensor.data_ptr<float>(), data_size, cudaHostRegisterMapped); cudaHostGetDevicePointer(&ptr, (void *)tensor.data_ptr<float>(), 0); access_book.push_back(1); // printf("%d <-> CPU support peer access \n", device_); } dev_ptrs_.push_back((float *)ptr); shape_[0] += tensor.size(0); device_count_ += 1; } std::tuple<float **, int64_t *, int *> get_device_pointers(int device) { auto iter = device_pointers_map.find(device); if (iter == device_pointers_map.end()) { float **buffers_device; int64_t *offset_device; int *access_book_device; // Copy buffers Device cudaMalloc((void ***)&buffers_device, sizeof(float *) * device_count_); cudaMemcpy(buffers_device, &dev_ptrs_[0], sizeof(float *) * dev_ptrs_.size(), cudaMemcpyHostToDevice); cudaCheckError(); // copy offset cudaMalloc((void **)&offset_device, sizeof(int64_t) * offset_list_.size()); cudaMemcpy(offset_device, &offset_list_[0], sizeof(int64_t) * offset_list_.size(), cudaMemcpyHostToDevice); cudaCheckError(); cudaMalloc((void **)&access_book_device, sizeof(int) * access_book.size()); cudaMemcpy(access_book_device, &access_book[0], sizeof(int) * access_book.size(), cudaMemcpyHostToDevice); cudaCheckError(); device_pointers_map.emplace( device, std::make_tuple(buffers_device, offset_device, access_book_device)); iter = device_pointers_map.find(device); } return iter->second; } torch::Tensor operator[](torch::Tensor &indices) { /* __global__ void quiver_tensor_gather(const int64_t** dev_ptrs, const int64_t* offsets, const int device_count, const int64_t* indices, int indice_length, const float* res, const int item_byte_size){ torch::zeros((100,100),torch::KF32); */ int current_device = 0; cudaGetDevice(&current_device); auto stream = at::cuda::getCurrentCUDAStream(); std::vector<int64_t> res_shape(shape_); res_shape[0] = indices.numel(); // decide Tensor auto options = torch::TensorOptions() .dtype(at::kFloat) .device(torch::kCUDA, current_device); auto res = torch::empty(res_shape, options); cudaCheckError(); // Device Data // for(int index = 0; index < offset_list_.size(); index++){ // std::cout<<"offset " << offset_list_[index]<<std::endl; // std::cout<<"access_book[index] " << access_book[index]<<std::endl; //} float **buffers_device; int64_t *offset_device; int *access_book_device; auto val = get_device_pointers(current_device); buffers_device = std::get<0>(val); offset_device = std::get<1>(val); access_book_device = std::get<2>(val); int 
blockSize = 0; int numBlocks = 0; cudaOccupancyMaxPotentialBlockSize(&numBlocks, &blockSize, quiver_tensor_gather); // std::cout<<"LOG >>> "<<" numBlocks "<< numBlocks <<" blockSize // "<<blockSize<<std::endl; int ignore_access_book = 0; if (current_device != device_) { ignore_access_book = 1; } quiver_tensor_gather<<<numBlocks, blockSize, 0, stream>>>( buffers_device, offset_device, offset_list_.size(), indices.data_ptr<int64_t>(), indices.numel(), res.data_ptr<float>(), stride(0), access_book_device, ignore_access_book); cudaCheckError(); return res; } std::vector<int64_t> shape() const { return shape_; } int device() const { return device_; } int size(int dim) const { if (shape_.size() == 0) return 0; return shape_[dim]; } int64_t stride(int dim) const { int64_t res = 1; for (int index = dim + 1; index < shape_.size(); index++) { res *= shape_[index]; } return res; } int64_t numel() const { int64_t res = 1; for (int index = 0; index < shape_.size(); index++) { res *= shape_[index]; } return res; } std::vector<ShardTensorItem> share_ipc() { std::vector<ShardTensorItem> res; for (int index = 0; index < dev_ptrs_.size(); index++) { if (tensor_devices_[index] >= 0) { cudaSetDevice(tensor_devices_[index]); ShardTensorItem *item = new ShardTensorItem(); item->device = tensor_devices_[index]; item->shape = tensor_shapes_[index]; cudaIpcGetMemHandle(&(item->mem_handle), dev_ptrs_[index]); res.push_back(*item); } } return res; } int device_count() const { return device_count_; } void unregister(torch::Tensor &cpu_tensor) { std::cout << "begin unregister" << std::endl; cudaHostUnregister((void *)cpu_tensor.data_ptr<float>()); std::cout << "end unregister" << std::endl; } private: std::vector<int64_t> offset_list_; std::vector<float *> dev_ptrs_; std::vector<int> tensor_devices_; std::vector<int> access_book; std::vector<std::vector<int>> tensor_shapes_; std::vector<int64_t> shape_; std::unordered_map<int, std::tuple<float **, int64_t *, int *>> device_pointers_map; int numa_broker_device; int device_; int device_count_; bool inited_; }; void init_p2p(std::vector<int> devices) { std::cout << "LOG>>> P2P Access Initilization" << std::endl; for (int i = 0; i < devices.size(); i++) { int src = devices[i]; cudaSetDevice(src); cudaDeviceProp prop; cudaGetDeviceProperties(&prop, src); // CUDA IPC is only supported on devices with unified addressing if (!prop.unifiedAddressing) { printf( "Device %d does not support unified addressing, skipping...\n", i); continue; } // This sample requires two processes accessing each device, so we need // to ensure exclusive or prohibited mode is not set if (prop.computeMode != cudaComputeModeDefault) { printf( "Device %d is in an unsupported compute mode for this sample\n", i); continue; } for (int j = i + 1; j < devices.size(); j++) { int dst = devices[j]; int access_i_j = 0; int access_j_i = 0; cudaDeviceCanAccessPeer(&access_i_j, src, dst); cudaDeviceCanAccessPeer(&access_j_i, dst, src); if (access_i_j && access_j_i) { printf("Enable P2P Access Between %d <---> %d \n", src, dst); cudaSetDevice(src); cudaDeviceEnablePeerAccess(dst, 0); cudaCheckError(); cudaSetDevice(dst); cudaDeviceEnablePeerAccess(src, 0); cudaCheckError(); } } } } bool can_device_access_peer(int src_device_index, int dst_device_index) { int access_i_j = 0, access_j_i = 0; cudaDeviceCanAccessPeer(&access_i_j, src_device_index, dst_device_index); cudaDeviceCanAccessPeer(&access_j_i, dst_device_index, src_device_index); return (access_i_j == 1) && (access_j_i == 1); } } // namespace quiver void 
register_cuda_quiver_feature(pybind11::module &m) { m.def("init_p2p", &quiver::init_p2p, py::call_guard<py::gil_scoped_release>()); m.def("can_device_access_peer", &quiver::can_device_access_peer, py::call_guard<py::gil_scoped_release>()); py::class_<quiver::ShardTensorItem>(m, "ShardTensorItem") .def(py::init<>()) .def("share_ipc", &quiver::ShardTensorItem::share_ipc) .def("from_ipc", &quiver::ShardTensorItem::from_ipc); py::class_<quiver::ShardTensor>(m, "ShardTensor") //.def(py::init<std::vector<torch::Tensor>, int>()) .def(py::init<int>()) .def("__getitem__", &quiver::ShardTensor::operator[], py::call_guard<py::gil_scoped_release>()) .def("unregister", &quiver::ShardTensor::unregister, py::call_guard<py::gil_scoped_release>()) .def("shape", &quiver::ShardTensor::shape, py::call_guard<py::gil_scoped_release>()) .def("numel", &quiver::ShardTensor::numel, py::call_guard<py::gil_scoped_release>()) .def("device", &quiver::ShardTensor::device, py::call_guard<py::gil_scoped_release>()) .def("stride", &quiver::ShardTensor::stride, py::call_guard<py::gil_scoped_release>()) .def("size", &quiver::ShardTensor::size, py::call_guard<py::gil_scoped_release>()) .def("device_count", &quiver::ShardTensor::device_count, py::call_guard<py::gil_scoped_release>()) .def("append", py::overload_cast<torch::Tensor &, int>( &quiver::ShardTensor::append), py::call_guard<py::gil_scoped_release>()) .def("append", py::overload_cast<quiver::ShardTensorItem>( &quiver::ShardTensor::append), py::call_guard<py::gil_scoped_release>()) .def("share_ipc", &quiver::ShardTensor::share_ipc, py::call_guard<py::gil_scoped_release>()); }
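// --- Illustrative device-side sketch (not the original quiver_tensor_gather kernel, whose
// body is not shown above) ---------------------------------------------------------------
// Why: ShardTensor::append() builds a prefix-sum offset_list_ (offsets[0] == 0, and
// offsets[i+1] - offsets[i] rows live on shard i).  The gather launched in operator[] must
// map each requested global row to a (shard, local row) pair; this helper shows that
// mapping under those assumptions.  The function name is hypothetical.
#include <cstdint>

__device__ inline int resolve_shard(const int64_t *offsets, int device_count,
                                    int64_t row, int64_t *local_row)
{
    for (int shard = 0; shard < device_count; ++shard) {
        if (row < offsets[shard + 1]) {                  // first shard whose upper bound exceeds row
            *local_row = row - offsets[shard];
            return shard;
        }
    }
    return -1;                                           // out of range; caller should never hit this
}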
// check if A+B is faster than just B // check if loading affineBias into shared memory is faster than loading // multiple times (if not try 64,16 backwards case) template <typename T, Int K, Int V> __global__ void dAffineReluTrivialConvolution_forwardA( T *inFeatures, T *outFeatures, T *affineWeight, T *affineBias, T *convWeight, Int input_nPlanes, Int input_stride, Int output_nPlanes, Int output_stride, Int nActive) { // nActive must be a multiple of K!! // Input x Weight -> Output // blockDim=(K,K/V,1), gridDim=(nBlocks,N,1) Volkov-blocks // K is a multiple of V, // nActive x KM -> nActive x KN - parallel over N,nActive - loop over M Int M = input_nPlanes / K; // N = gridDim.y == output_nPlanes/K Int n = blockIdx.y; outFeatures += n * K; convWeight += n * K; T O[V]; __shared__ T I[K][K]; __shared__ T AW[K]; __shared__ T AB[K]; __shared__ T CW[K][K]; const Int tx = threadIdx.x; int ty[V]; #pragma unroll for (int v = 0; v < V; v++) ty[v] = threadIdx.y + v * (K / V); for (int m = 0; m < M; m++) { // Read affineWeight, affineBias and convWeight if (ty[0] == 0) { AW[tx] = affineWeight[tx]; AB[tx] = affineBias[tx]; } #pragma unroll for (int v = 0; v < V; v++) CW[ty[v]][tx] = convWeight[ty[v] * output_nPlanes + tx]; __syncthreads(); for (Int s = blockIdx.x * K; s < nActive; s += K * gridDim.x) { // Read input, do affine + relu, set O[] #pragma unroll for (int v = 0; v < V; v++) { T i = inFeatures[(s + ty[v]) * input_stride + tx] * AW[tx] + AB[tx]; I[ty[v]][tx] = (i > 0) ? i : 0; if (m == 0) { O[v] = 0; } else { O[v] = outFeatures[(s + ty[v]) * output_stride + tx]; } } __syncthreads(); #pragma unroll for (int k = 0; k < K; k++) #pragma unroll for (int v = 0; v < V; v++) O[v] += I[ty[v]][k] * CW[k][tx]; #pragma unroll for (int v = 0; v < V; v++) outFeatures[(s + ty[v]) * output_stride + tx] = O[v]; __syncthreads(); } affineWeight += K; affineBias += K; convWeight += K * output_nPlanes; inFeatures += K; } } template <typename T, Int K, Int V> __global__ void dAffineReluTrivialConvolution_forwardB( T *inFeatures, T *outFeatures, T *affineWeight, T *affineBias, T *convWeight, Int input_nPlanes, Int input_stride, Int output_nPlanes, Int output_stride, Int nActive) { // Input x Weight -> Output // blockDim=(K,K/V,1), gridDim=(nBlocks,N,1) Volkov-blocks // K is a multiple of V, // nActive x KM -> nActive x KN - parallel over N,nActive - loop over M Int M = input_nPlanes / K; // N = gridDim.y == output_nPlanes/K Int n = blockIdx.y; outFeatures += n * K; convWeight += n * K; T O[V]; __shared__ T I[K][K]; // zz try K+1 trick A+B+backwards __shared__ T AW[K]; __shared__ T AB[K]; __shared__ T CW[K][K]; const Int tx = threadIdx.x; int ty[V]; #pragma unroll for (int v = 0; v < V; v++) ty[v] = threadIdx.y + v * (K / V); for (int m = 0; m < M; m++) { // Read affineWeight, affineBias and convWeight if (ty[0] == 0) { AW[tx] = affineWeight[tx]; AB[tx] = affineBias[tx]; } #pragma unroll for (int v = 0; v < V; v++) CW[ty[v]][tx] = convWeight[ty[v] * output_nPlanes + tx]; __syncthreads(); for (Int s = blockIdx.x * K; s < nActive; s += K * gridDim.x) { // Read input, do affine + relu, set O[] #pragma unroll for (int v = 0; v < V; v++) { if (s + ty[v] < nActive) { T i = inFeatures[(s + ty[v]) * input_stride + tx] * AW[tx] + AB[tx]; I[ty[v]][tx] = (i > 0) ? 
i : 0; if (m == 0) { O[v] = 0; } else { O[v] = outFeatures[(s + ty[v]) * output_stride + tx]; } } } __syncthreads(); #pragma unroll for (int k = 0; k < K; k++) #pragma unroll for (int v = 0; v < V; v++) O[v] += I[ty[v]][k] * CW[k][tx]; #pragma unroll for (int v = 0; v < V; v++) if (s + ty[v] < nActive) outFeatures[(s + ty[v]) * output_stride + tx] = O[v]; __syncthreads(); } affineWeight += K; affineBias += K; convWeight += K * output_nPlanes; inFeatures += K; } } #define FOO(T, K, V) \ { \ if (input_nPlanes % K == 0 and output_nPlanes % K == 0) { \ Int o = (nActive / K) * K; \ if (o > 0) \ dAffineReluTrivialConvolution_forwardA< \ T, K, V><<<dim3(std::min(o / K, (Int)512), output_nPlanes / K), \ dim3(K, K / V)>>>( \ inFeatures, outFeatures, affineWeight, affineBias, convWeight, \ input_nPlanes, input_stride, output_nPlanes, output_stride, o); \ if (nActive > o) \ dAffineReluTrivialConvolution_forwardB< \ T, K, V><<<dim3(1, output_nPlanes / K), dim3(K, K / V)>>>( \ inFeatures + o * input_stride, outFeatures + o * output_stride, \ affineWeight, affineBias, convWeight, input_nPlanes, input_stride, \ output_nPlanes, output_stride, nActive - o); \ return; \ } \ } template <typename T> void dAffineReluTrivialConvolution_forward(T *inFeatures, T *outFeatures, T *affineWeight, T *affineBias, T *convWeight, Int input_nPlanes, Int input_stride, Int output_nPlanes, Int output_stride, Int nActive) { FOO(T, 64, 16) FOO(T, 32, 8) FOO(T, 16, 4) FOO(T, 8, 2) assert(false); } template <> void dAffineReluTrivialConvolution_forward<double>( double *inFeatures, double *outFeatures, double *affineWeight, double *affineBias, double *convWeight, Int input_nPlanes, Int input_stride, Int output_nPlanes, Int output_stride, Int nActive) { FOO(double, 32, 8) FOO(double, 16, 4) FOO(double, 8, 2) assert(false); } #undef FOO // dOutput x W^T -> dInput and // Input^T x dOutput -> dW // blockDim=(K,K/V,1), gridDim=(nBlocks,M,1) template <typename T, Int K, Int V> __global__ void dAffineReluTrivialConvolution_backward_dW_A( T *inFeatures, T *dInFeatures, T *dOutFeatures, T *affineWeight, T *dAffineWeight, T *affineBias, T *dAffineBias, T *convWeight, T *dConvWeight, Int input_nPlanes, Int input_stride, Int output_nPlanes, Int output_stride, Int nActive, bool additiveGrad) { // M = gridDim.y == input_nPlanes / K Int N = output_nPlanes / K; Int m = blockIdx.y; inFeatures += m * K; dInFeatures += m * K; convWeight += m * K * output_nPlanes; dConvWeight += m * K * output_nPlanes; affineWeight += m * K; dAffineWeight += m * K; affineBias += m * K; dAffineBias += m * K; T dI[V]; T dCW[V]; T i[V]; T dAW = 0; T dAB = 0; __shared__ T I[K][K]; __shared__ T dO[K][K]; __shared__ T AW[K]; __shared__ T AB[K]; __shared__ T CW[K][K]; const Int tx = threadIdx.x; int ty[V]; #pragma unroll for (int v = 0; v < V; v++) ty[v] = threadIdx.y + v * (K / V); if (ty[0] == 0) { AW[tx] = affineWeight[tx]; AB[tx] = affineBias[tx]; } for (int n = 0; n < N; n++) { // Read w, reset dW #pragma unroll for (int v = 0; v < V; v++) { CW[ty[v]][tx] = convWeight[ty[v] * output_nPlanes + tx]; dCW[v] = 0; } __syncthreads(); for (Int s = blockIdx.x * K; s < nActive; s += K * gridDim.x) { #pragma unroll for (int v = 0; v < V; v++) dI[v] = 0; __syncthreads(); // Read input and dOutput #pragma unroll for (int v = 0; v < V; v++) { T i_ = inFeatures[(s + ty[v]) * input_stride + tx]; i[v] = i_; i_ = i_ * AW[tx] + AB[tx]; I[ty[v]][tx] = (i_ > 0) ? 
i_ : 0; dO[ty[v]][tx] = dOutFeatures[(s + ty[v]) * output_stride + tx]; } __syncthreads(); #pragma unroll for (int k = 0; k < K; k++) #pragma unroll for (int v = 0; v < V; v++) { dI[v] += dO[ty[v]][k] * CW[tx][k]; dCW[v] += I[k][ty[v]] * dO[k][tx]; } #pragma unroll for (int v = 0; v < V; v++) { dI[v] = (I[ty[v]][tx] > 0) ? dI[v] : 0; dAW += i[v] * dI[v]; dAB += dI[v]; if (additiveGrad) dInFeatures[(s + ty[v]) * input_stride + tx] += dI[v]; else dInFeatures[(s + ty[v]) * input_stride + tx] = dI[v]; } __syncthreads(); } #pragma unroll for (int v = 0; v < V; v++) atomicAdd(&dConvWeight[ty[v] * output_nPlanes + tx], dCW[v]); convWeight += K; dConvWeight += K; dOutFeatures += K; __syncthreads(); } atomicAdd(&dAffineWeight[tx], dAW); atomicAdd(&dAffineBias[tx], dAB); } // dOutput x W^T -> dInput and // Input^T x dOutput -> dW // blockDim=(K,K/V,1), gridDim=(nBlocks,M,1) template <typename T, Int K, Int V> __global__ void dAffineReluTrivialConvolution_backward_dW_B( T *inFeatures, T *dInFeatures, T *dOutFeatures, T *affineWeight, T *dAffineWeight, T *affineBias, T *dAffineBias, T *convWeight, T *dConvWeight, Int input_nPlanes, Int input_stride, Int output_nPlanes, Int output_stride, Int nActive, bool additiveGrad) { // M = gridDim.y == input_nPlanes / K Int N = output_nPlanes / K; Int m = blockIdx.y; inFeatures += m * K; dInFeatures += m * K; convWeight += m * K * output_nPlanes; dConvWeight += m * K * output_nPlanes; affineWeight += m * K; dAffineWeight += m * K; affineBias += m * K; dAffineBias += m * K; T dI[V]; T dCW[V]; T i[V]; T dAW = 0; T dAB = 0; __shared__ T I[K][K]; __shared__ T dO[K][K]; __shared__ T AW[K]; __shared__ T AB[K]; __shared__ T CW[K][K]; const Int tx = threadIdx.x; int ty[V]; #pragma unroll for (int v = 0; v < V; v++) ty[v] = threadIdx.y + v * (K / V); if (ty[0] == 0) { AW[tx] = affineWeight[tx]; AB[tx] = affineBias[tx]; } for (int n = 0; n < N; n++) { // Read w, reset dW #pragma unroll for (int v = 0; v < V; v++) { CW[ty[v]][tx] = convWeight[ty[v] * output_nPlanes + tx]; dCW[v] = 0; } __syncthreads(); for (Int s = blockIdx.x * K; s < nActive; s += K * gridDim.x) { #pragma unroll for (int v = 0; v < V; v++) dI[v] = 0; __syncthreads(); // Read input and dOutput #pragma unroll for (int v = 0; v < V; v++) if (s + ty[v] < nActive) { T i_ = inFeatures[(s + ty[v]) * input_stride + tx]; i[v] = i_; i_ = i_ * AW[tx] + AB[tx]; I[ty[v]][tx] = (i_ > 0) ? i_ : 0; dO[ty[v]][tx] = dOutFeatures[(s + ty[v]) * output_stride + tx]; } else { i[v] = 0; I[ty[v]][tx] = 0; dO[ty[v]][tx] = 0; } __syncthreads(); #pragma unroll for (int k = 0; k < K; k++) #pragma unroll for (int v = 0; v < V; v++) { dI[v] += dO[ty[v]][k] * CW[tx][k]; dCW[v] += I[k][ty[v]] * dO[k][tx]; } #pragma unroll for (int v = 0; v < V; v++) if (s + ty[v] < nActive) { dI[v] = (I[ty[v]][tx] > 0) ? 
dI[v] : 0; dAW += i[v] * dI[v]; dAB += dI[v]; if (additiveGrad) dInFeatures[(s + ty[v]) * input_stride + tx] += dI[v]; else dInFeatures[(s + ty[v]) * input_stride + tx] = dI[v]; } __syncthreads(); } #pragma unroll for (int v = 0; v < V; v++) atomicAdd(&dConvWeight[ty[v] * output_nPlanes + tx], dCW[v]); convWeight += K; dConvWeight += K; dOutFeatures += K; __syncthreads(); } atomicAdd(&dAffineWeight[tx], dAW); atomicAdd(&dAffineBias[tx], dAB); } #define FOO(T, K, V) \ { \ if (input_nPlanes % K == 0 and output_nPlanes % K == 0) { \ Int o = (nActive / K) * K; \ if (o > 0) \ dAffineReluTrivialConvolution_backward_dW_A< \ T, K, V><<<dim3(std::min(o / K, (Int)512), input_nPlanes / K), \ dim3(K, K / V)>>>( \ inFeatures, dInFeatures, dOutFeatures, affineWeight, \ dAffineWeight, affineBias, dAffineBias, convWeight, dConvWeight, \ input_nPlanes, input_stride, output_nPlanes, output_stride, o, \ additiveGrad); \ if (nActive > o) \ dAffineReluTrivialConvolution_backward_dW_B< \ T, K, V><<<dim3(1, input_nPlanes / K), dim3(K, K / V)>>>( \ inFeatures + o * input_stride, dInFeatures + o * input_stride, \ dOutFeatures + o * output_stride, affineWeight, dAffineWeight, \ affineBias, dAffineBias, convWeight, dConvWeight, input_nPlanes, \ input_stride, output_nPlanes, output_stride, nActive - o, \ additiveGrad); \ return; \ } \ } template <typename T> void dAffineReluTrivialConvolution_backward_dW( T *inFeatures, T *dInFeatures, T *dOutFeatures, T *affineWeight, T *dAffineWeight, T *affineBias, T *dAffineBias, T *convWeight, T *dConvWeight, Int input_nPlanes, Int input_stride, Int output_nPlanes, Int output_stride, Int nActive, bool additiveGrad) { FOO(T, 32, 8) FOO(T, 16, 4) FOO(T, 8, 2) } #undef FOO
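// --- CPU reference sketch (for checking the fused forward kernels above; not part of the
// original file) -------------------------------------------------------------------------
// What it shows: out[s][n] = sum_m relu(in[s][m] * affineWeight[m] + affineBias[m])
//                            * convWeight[m][n],
// with the same row strides as the kernel arguments (only the first input_nPlanes /
// output_nPlanes columns of each stride are used).  Plain int replaces Int here.
template <typename T>
void affineReluTrivialConvolution_reference(const T *inFeatures, T *outFeatures,
                                            const T *affineWeight, const T *affineBias,
                                            const T *convWeight, int input_nPlanes,
                                            int input_stride, int output_nPlanes,
                                            int output_stride, int nActive)
{
    for (int s = 0; s < nActive; ++s)
        for (int n = 0; n < output_nPlanes; ++n) {
            T acc = 0;
            for (int m = 0; m < input_nPlanes; ++m) {
                // affine + ReLU on the input plane, then accumulate against the conv weight
                T a = inFeatures[s * input_stride + m] * affineWeight[m] + affineBias[m];
                if (a > 0) acc += a * convWeight[m * output_nPlanes + n];
            }
            outFeatures[s * output_stride + n] = acc;
        }
}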
#include "caffe/filler.hpp" #include "caffe/layers/class_distance_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { ////////////////////////////////////////// template <typename Dtype> static __global__ void compute_top_l2(const int nthreads, const int N_, const int K_, const Dtype *bottom_data, Dtype *top_data, const Dtype *weight, const Dtype *sigma, Dtype *dist, bool ignore_zero, bool isotropic) { if (ignore_zero) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / N_; const int j = index % N_; Dtype t = 0; for (int k = 0; k < K_; ++k) if (bottom_data[i*K_ + k]) { Dtype d = weight[j*K_ + k] - bottom_data[i*K_ + k]; t += d*d; } dist[index] = t; top_data[index] = Dtype(-0.5) * t / (max(sigma[j], Dtype(0)) + Dtype(0.0001)); } } else { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / N_; const int j = index % N_; Dtype t = 0; for (int k = 0; k < K_; ++k) { Dtype d = weight[j*K_ + k] - bottom_data[i*K_ + k]; if (isotropic) t += d*d; else t += d*d/max(sigma[j*K_ + k], Dtype(0) + Dtype(0.00000001)); } dist[index] = t; // only useful for 'isotropic' if (isotropic) top_data[index] = Dtype(-0.5) * t / (max(sigma[j], Dtype(0)) + Dtype(0.0001)); else top_data[index] = Dtype(-0.5) * t; } } } template <typename Dtype> static __global__ void compute_top_ip(const int nthreads, const int N_, const int K_, const Dtype *bottom_data, Dtype *top_data, const Dtype *weight) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / N_; const int j = index % N_; Dtype t = 0; for (int k = 0; k < K_; ++k) { t += weight[j*K_ + k] * bottom_data[i*K_ + k]; } top_data[index] = t; } } template <typename Dtype> static __global__ void compute_top_l1(const int nthreads, const int N_, const int K_, const Dtype *bottom_data, Dtype *top_data, const Dtype *weight, bool ignore_zero) { if (ignore_zero) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / N_; const int j = index % N_; Dtype t = 0; for (int k = 0; k < K_; ++k) { Dtype d = weight[j*K_ + k] - bottom_data[i*K_ + k]; t += abs(d); } top_data[index] = -t; } } else { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / N_; const int j = index % N_; Dtype t = 0; for (int k = 0; k < K_; ++k) { Dtype d = weight[j*K_ + k] - bottom_data[i*K_ + k]; t += abs(d); } top_data[index] = -t; } } } ////////////////////////////////////////// template <typename Dtype> static __global__ void margin_top(const int M_, const int N_, Dtype *top_data, const Dtype *label, const Dtype margin_mul, const Dtype margin_add) { CUDA_KERNEL_LOOP(i, M_) { const int y = (int)label[i]; top_data[i*N_ + y] += top_data[i*N_ + y] * margin_mul - margin_add; } } ////////////////////////////////////////// template <typename Dtype> void ClassDistanceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); const Dtype* sigma = this->blobs_[1]->gpu_data(); Dtype* dist = dist_.mutable_gpu_data(); const ClassDistanceParameter& param = this->layer_param_.class_distance_param(); bool isotropic = param.isotropic(); switch (param.metric()) { case ClassDistanceParameter_Metric_L2: compute_top_l2<Dtype> << <CAFFE_GET_BLOCKS(M_*N_), CAFFE_CUDA_NUM_THREADS >> >( M_*N_, N_, K_, bottom_data, top_data, weight, sigma, dist, param.ignore_zero() & (this->phase_ == TRAIN), isotropic); break; case ClassDistanceParameter_Metric_IP: compute_top_ip<Dtype> << <CAFFE_GET_BLOCKS(M_*N_), 
CAFFE_CUDA_NUM_THREADS >> >( M_*N_, N_, K_, bottom_data, top_data, weight); break; case ClassDistanceParameter_Metric_L1: compute_top_l1<Dtype> << <CAFFE_GET_BLOCKS(M_*N_), CAFFE_CUDA_NUM_THREADS >> >( M_*N_, N_, K_, bottom_data, top_data, weight, param.ignore_zero() & (this->phase_ == TRAIN)); break; } if (bottom.size() == 2 && this->phase_ == TRAIN) { Dtype margin_mul_ = this->m_mul_.get_iter("mul_margin"); Dtype margin_add_ = this->m_add_.get_iter("add_margin"); const Dtype* label = bottom[1]->gpu_data(); margin_top<Dtype> << <CAFFE_GET_BLOCKS(M_), CAFFE_CUDA_NUM_THREADS >> >( M_, N_, top_data, label, margin_mul_, margin_add_); } // validate that sigma > 0 const Dtype *sigma_cpu = this->blobs_[1]->cpu_data(); const int sigma_number = isotropic?N_:(N_*K_); for(int i=0; i<sigma_number; i++) if (sigma_cpu[i] <= eps_) { LOG(INFO) << "Dangerous sigma value, sigma[" << i << "]=" << sigma_cpu[i]; break; } /*if (bias_term_) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1., bias_multiplier_.gpu_data(), this->blobs_[1]->gpu_data(), (Dtype)1., top_data); }*/ } //========================================== template <typename Dtype> static __global__ void compute_gradient_bottom_label_l2(const int nthreads, const int K_, const int N_, const Dtype* top_diff, const Dtype *bottom_data, Dtype *bottom_diff, const Dtype *weight, const Dtype *label, const Dtype margin_mul, const Dtype center_coef, const Dtype *sigma, bool ignore_zero) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int k = index % K_; Dtype t = 0; for (int j = 0; j < N_; ++j) if (!ignore_zero || bottom_data[index]) { if (j == (int)label[i]) t += (weight[j*K_ + k] - bottom_data[index]) * (margin_mul / (max(sigma[j], Dtype(0)) + Dtype(0.0001)) * top_diff[i*N_ + j] - center_coef); else t += (weight[j*K_ + k] - bottom_data[index]) / (max(sigma[j], Dtype(0)) + Dtype(0.0001)) * top_diff[i*N_ + j]; } bottom_diff[index] = t; } } template <typename Dtype> static __global__ void compute_gradient_weight_label_l2(const int nthreads, const int K_, const int M_, const Dtype* top_diff, const Dtype *bottom_data, const Dtype *weight, Dtype* weight_diff, const Dtype* label, const Dtype margin_mul, const Dtype center_coef, const Dtype *sigma, Dtype *sigma_diff, const Dtype *dist, bool update_sigma, bool ignore_zero) { const int N_ = nthreads / K_; CUDA_KERNEL_LOOP(index, nthreads) { const int j = index / K_; const int k = index % K_; Dtype t = 0; Dtype t_sigma = 0; for (int i = 0; i < M_; ++i) if (!ignore_zero || bottom_data[i*K_ + k]) { if (j == (int)label[i]){ t += (bottom_data[i*K_ + k] - weight[index]) * (margin_mul / (max(sigma[j], Dtype(0)) + Dtype(0.0001)) * top_diff[i*N_ + j] - center_coef); if (update_sigma && k==0) t_sigma += dist[i * N_ + j] * margin_mul / (Dtype(2.0) * (max(sigma[j], Dtype(0)) + Dtype(0.0001)) * sigma[j]) * top_diff[i*N_ + j]; } else{ t += (bottom_data[i*K_ + k] - weight[index]) / (max(sigma[j], Dtype(0)) + Dtype(0.0001)) * top_diff[i*N_ + j]; if (update_sigma && k==0) t_sigma += dist[i * N_ + j] / (Dtype(2.0) * (max(sigma[j], Dtype(0)) + Dtype(0.0001)) * sigma[j]) * top_diff[i*N_ + j]; } } weight_diff[index] += t; if (update_sigma && k == 0) sigma_diff[j] += t_sigma; } } template <typename Dtype> static __global__ void compute_gradient_bottom_label_l2_diag(const int nthreads, const int K_, const int N_, const Dtype* top_diff, const Dtype *bottom_data, Dtype *bottom_diff, const Dtype *weight, const Dtype *label, const Dtype margin_mul, const Dtype center_coef, const Dtype *sigma, bool 
ignore_zero) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int k = index % K_; Dtype t = 0; for (int j = 0; j < N_; ++j) if (!ignore_zero || bottom_data[index]) { if (j == (int)label[i]) t += (weight[j*K_ + k] - bottom_data[index]) * (margin_mul / (max(sigma[j*K_ + k] , Dtype(0)) + Dtype(0.00000001)) * top_diff[i*N_ + j] - center_coef); else t += (weight[j*K_ + k] - bottom_data[index]) / (max(sigma[j*K_ + k], Dtype(0)) + Dtype(0.00000001)) * top_diff[i*N_ + j]; } bottom_diff[index] = t; } } template <typename Dtype> static __global__ void compute_gradient_weight_label_l2_diag(const int nthreads, const int K_, const int M_, const Dtype* top_diff, const Dtype *bottom_data, const Dtype *weight, Dtype* weight_diff, const Dtype* label, const Dtype margin_mul, const Dtype center_coef, const Dtype *sigma, Dtype *sigma_diff, bool update_sigma, bool ignore_zero) { const int N_ = nthreads / K_; CUDA_KERNEL_LOOP(index, nthreads) { const int j = index / K_; const int k = index % K_; Dtype t = 0; Dtype t_sigma = 0; Dtype d = 0; Dtype safe_sigma = max(sigma[index], Dtype(0)) + Dtype(0.0001); for (int i = 0; i < M_; ++i) if (!ignore_zero || bottom_data[i*K_ + k]) { d = bottom_data[i*K_ + k] - weight[index]; if (j == (int)label[i]){ t += d * (margin_mul / safe_sigma * top_diff[i*N_ + j] - center_coef); if (update_sigma) t_sigma += d * d * margin_mul / (Dtype(2.0) * safe_sigma * safe_sigma) * top_diff[i*N_ + j]; } else{ t += d / safe_sigma * top_diff[i*N_ + j]; if (update_sigma) t_sigma += d * d / (Dtype(2.0) * safe_sigma * safe_sigma) * top_diff[i*N_ + j]; } } weight_diff[index] += t; if (update_sigma) sigma_diff[index] += t_sigma; } } template <typename Dtype> static __global__ void compute_gradient_bottom_label_ip(const int nthreads, const int K_, const int N_, const Dtype* top_diff, const Dtype *bottom_data, Dtype *bottom_diff, const Dtype *weight, const Dtype *label, const Dtype margin_mul, const Dtype center_coef) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int k = index % K_; Dtype t = 0; for (int j = 0; j < N_; ++j) { if (j == (int)label[i]) t += weight[j*K_ + k] * margin_mul * top_diff[i*N_ + j] + (bottom_data[index] - weight[j*K_ + k]) * center_coef; else t += weight[j*K_ + k] * top_diff[i*N_ + j]; } bottom_diff[index] = t; } } template <typename Dtype> static __global__ void compute_gradient_weight_label_ip(const int nthreads, const int K_, const int M_, const Dtype* top_diff, const Dtype *bottom_data, const Dtype *weight, Dtype* weight_diff, const Dtype* label, const Dtype margin_mul, const Dtype center_coef) { const int N_ = nthreads / K_; CUDA_KERNEL_LOOP(index, nthreads) { const int j = index / K_; const int k = index % K_; Dtype t = 0; for (int i = 0; i < M_; ++i) { if (j == (int)label[i]) t += margin_mul * bottom_data[i*K_ + k] * top_diff[i*N_ + j] + (weight[index] - bottom_data[i*K_ + k]) * center_coef; else t += bottom_data[i*K_ + k] * top_diff[i*N_ + j]; } weight_diff[index] += t; } } template <typename Dtype> static __global__ void compute_gradient_bottom_label_l1(const int nthreads, const int K_, const int N_, const Dtype* top_diff, const Dtype *bottom_data, Dtype *bottom_diff, const Dtype *weight, const Dtype *label, const Dtype margin_mul, const Dtype center_coef, bool ignore_zero) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int k = index % K_; Dtype t = 0; for (int j = 0; j < N_; ++j) if (!ignore_zero || bottom_data[index]) { if (j == (int)label[i]) t += ((weight[j*K_ + k] > bottom_data[index]) - 
(bottom_data[index] > weight[j*K_ + k])) * (margin_mul * top_diff[i*N_ + j] - center_coef); else t += ((weight[j*K_ + k] > bottom_data[index]) - (bottom_data[index] > weight[j*K_ + k])) * top_diff[i*N_ + j]; } bottom_diff[index] = t; } } template <typename Dtype> static __global__ void compute_gradient_weight_label_l1(const int nthreads, const int K_, const int M_, const Dtype* top_diff, const Dtype *bottom_data, const Dtype *weight, Dtype* weight_diff, const Dtype* label, const Dtype margin_mul, const Dtype center_coef, bool ignore_zero) { const int N_ = nthreads / K_; CUDA_KERNEL_LOOP(index, nthreads) { const int j = index / K_; const int k = index % K_; Dtype t = 0; for (int i = 0; i < M_; ++i) if (!ignore_zero || bottom_data[i*K_ + k]) { if (j == (int)label[i]) t += ((bottom_data[i*K_ + k] - weight[index]) - (weight[index] > bottom_data[i*K_ + k]) - (bottom_data[index] > weight[j*K_ + k])) * (margin_mul * top_diff[i*N_ + j] - center_coef); else t += ((bottom_data[i*K_ + k] - weight[index]) - (weight[index] > bottom_data[i*K_ + k]) - (bottom_data[index] > weight[j*K_ + k])) * top_diff[i*N_ + j]; } weight_diff[index] += t; } } template <typename Dtype> static __global__ void compute_gradient_bottom_l2(const int nthreads, const int K_, const int N_, const Dtype* top_diff, const Dtype *bottom_data, Dtype *bottom_diff, const Dtype *weight, bool ignore_zero) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int k = index % K_; Dtype t = 0; for (int j = 0; j < N_; ++j) if (!ignore_zero || bottom_data[index]) t += (weight[j*K_ + k] - bottom_data[index]) * top_diff[i*N_ + j]; bottom_diff[index] = t; } } template <typename Dtype> static __global__ void compute_gradient_weight_l2(const int nthreads, const int K_, const int M_, const Dtype* top_diff, const Dtype *bottom_data, const Dtype *weight, Dtype* weight_diff, bool ignore_zero) { const int N_ = nthreads / K_; CUDA_KERNEL_LOOP(index, nthreads) { const int j = index / K_; const int k = index % K_; Dtype t = 0; for (int i = 0; i < M_; ++i) if (!ignore_zero || bottom_data[i*K_ + k]) t += (bottom_data[i*K_ + k] - weight[index]) * top_diff[i*N_ + j]; weight_diff[index] += t; } } template <typename Dtype> static __global__ void compute_gradient_bottom_ip(const int nthreads, const int K_, const int N_, const Dtype* top_diff, const Dtype *bottom_data, Dtype *bottom_diff, const Dtype *weight) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int k = index % K_; Dtype t = 0; for (int j = 0; j < N_; ++j) t += weight[j*K_ + k] * top_diff[i*N_ + j]; bottom_diff[index] = t; } } template <typename Dtype> static __global__ void compute_gradient_weight_ip(const int nthreads, const int K_, const int M_, const Dtype* top_diff, const Dtype *bottom_data, const Dtype *weight, Dtype* weight_diff) { const int N_ = nthreads / K_; CUDA_KERNEL_LOOP(index, nthreads) { const int j = index / K_; const int k = index % K_; Dtype t = 0; for (int i = 0; i < M_; ++i) t += bottom_data[i*K_ + k] * top_diff[i*N_ + j]; weight_diff[index] += t; } } template <typename Dtype> static __global__ void compute_gradient_bottom_l1(const int nthreads, const int K_, const int N_, const Dtype* top_diff, const Dtype *bottom_data, Dtype *bottom_diff, const Dtype *weight, bool ignore_zero) { CUDA_KERNEL_LOOP(index, nthreads) { const int i = index / K_; const int k = index % K_; Dtype t = 0; for (int j = 0; j < N_; ++j) if (!ignore_zero || bottom_data[index]) t += ((weight[j*K_ + k] > bottom_data[index]) - (bottom_data[index] > weight[j*K_ + k])) * 
top_diff[i*N_ + j]; bottom_diff[index] = t; } } template <typename Dtype> static __global__ void compute_gradient_weight_l1(const int nthreads, const int K_, const int M_, const Dtype* top_diff, const Dtype *bottom_data, const Dtype *weight, Dtype* weight_diff, bool ignore_zero) { const int N_ = nthreads / K_; CUDA_KERNEL_LOOP(index, nthreads) { const int j = index / K_; const int k = index % K_; Dtype t = 0; for (int i = 0; i < M_; ++i) if (!ignore_zero || bottom_data[i*K_ + k]) t += ((bottom_data[i*K_ + k] - weight[index]) - (weight[index] > bottom_data[i*K_ + k]) - (bottom_data[index] > weight[j*K_ + k])) * top_diff[i*N_ + j]; weight_diff[index] += t; } } template <typename Dtype> void ClassDistanceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); /*const*/ Dtype* weight = this->blobs_[0]->mutable_gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); const Dtype* sigma = this->blobs_[1]->gpu_data(); const Dtype* dist = dist_.gpu_data(); Dtype* sigma_diff = this->blobs_[1]->mutable_gpu_diff(); const ClassDistanceParameter& param = this->layer_param_.class_distance_param(); bool ignore_zero = param.ignore_zero(); bool update_sigma = param.update_sigma(); bool isotropic = param.isotropic(); if (isotropic) caffe_gpu_set(N_, (Dtype)0, sigma_diff); else caffe_gpu_set(N_*K_, (Dtype)0, sigma_diff); if (bottom.size() == 2) { const Dtype* label = bottom[1]->gpu_data(); const Dtype center_coef_ = param.center_coef() / M_; const Dtype margin_mul_1 = 1 + (param.block_mul_grad() ? 0 : m_mul_.get()); switch (param.metric()) { case ClassDistanceParameter_Metric_L2: if (isotropic) { compute_gradient_bottom_label_l2<Dtype> << <CAFFE_GET_BLOCKS(M_*K_), CAFFE_CUDA_NUM_THREADS >> >( M_*K_, K_, N_, top_diff, bottom_data, bottom_diff, weight, label, margin_mul_1, center_coef_, sigma, ignore_zero); compute_gradient_weight_label_l2<Dtype> << <CAFFE_GET_BLOCKS(N_*K_), CAFFE_CUDA_NUM_THREADS >> >( N_*K_, K_, M_, top_diff, bottom_data, weight, weight_diff, label, margin_mul_1, center_coef_, sigma, sigma_diff, dist, update_sigma, ignore_zero); } else { compute_gradient_bottom_label_l2_diag<Dtype> << <CAFFE_GET_BLOCKS(M_*K_), CAFFE_CUDA_NUM_THREADS >> >( M_*K_, K_, N_, top_diff, bottom_data, bottom_diff, weight, label, margin_mul_1, center_coef_, sigma, ignore_zero); compute_gradient_weight_label_l2_diag<Dtype> << <CAFFE_GET_BLOCKS(N_*K_), CAFFE_CUDA_NUM_THREADS >> >( N_*K_, K_, M_, top_diff, bottom_data, weight, weight_diff, label, margin_mul_1, center_coef_, sigma, sigma_diff, update_sigma, ignore_zero); } break; case ClassDistanceParameter_Metric_IP: compute_gradient_bottom_label_ip<Dtype> << <CAFFE_GET_BLOCKS(M_*K_), CAFFE_CUDA_NUM_THREADS >> >( M_*K_, K_, N_, top_diff, bottom_data, bottom_diff, weight, label, margin_mul_1, center_coef_); compute_gradient_weight_label_ip<Dtype> << <CAFFE_GET_BLOCKS(N_*K_), CAFFE_CUDA_NUM_THREADS >> >( N_*K_, K_, M_, top_diff, bottom_data, weight, weight_diff, label, margin_mul_1, center_coef_); break; case ClassDistanceParameter_Metric_L1: compute_gradient_bottom_label_l1<Dtype> << <CAFFE_GET_BLOCKS(M_*K_), CAFFE_CUDA_NUM_THREADS >> >( M_*K_, K_, N_, top_diff, bottom_data, bottom_diff, weight, label, margin_mul_1, center_coef_, ignore_zero); compute_gradient_weight_label_l1<Dtype> << <CAFFE_GET_BLOCKS(N_*K_), CAFFE_CUDA_NUM_THREADS >> >( 
N_*K_, K_, M_, top_diff, bottom_data, weight, weight_diff, label, margin_mul_1, center_coef_, ignore_zero); break; } } else { switch (param.metric()) { case ClassDistanceParameter_Metric_L2: compute_gradient_bottom_l2<Dtype> << <CAFFE_GET_BLOCKS(M_*K_), CAFFE_CUDA_NUM_THREADS >> >( M_*K_, K_, N_, top_diff, bottom_data, bottom_diff, weight, ignore_zero); compute_gradient_weight_l2<Dtype> << <CAFFE_GET_BLOCKS(N_*K_), CAFFE_CUDA_NUM_THREADS >> >( N_*K_, K_, M_, top_diff, bottom_data, weight, weight_diff, ignore_zero); break; case ClassDistanceParameter_Metric_IP: compute_gradient_bottom_ip<Dtype> << <CAFFE_GET_BLOCKS(M_*K_), CAFFE_CUDA_NUM_THREADS >> >( M_*K_, K_, N_, top_diff, bottom_data, bottom_diff, weight); compute_gradient_weight_ip<Dtype> << <CAFFE_GET_BLOCKS(N_*K_), CAFFE_CUDA_NUM_THREADS >> >( N_*K_, K_, M_, top_diff, bottom_data, weight, weight_diff); break; case ClassDistanceParameter_Metric_L1: compute_gradient_bottom_l1<Dtype> << <CAFFE_GET_BLOCKS(M_*K_), CAFFE_CUDA_NUM_THREADS >> >( M_*K_, K_, N_, top_diff, bottom_data, bottom_diff, weight, ignore_zero); compute_gradient_weight_l1<Dtype> << <CAFFE_GET_BLOCKS(N_*K_), CAFFE_CUDA_NUM_THREADS >> >( N_*K_, K_, M_, top_diff, bottom_data, weight, weight_diff, ignore_zero); break; } } } INSTANTIATE_LAYER_GPU_FUNCS(ClassDistanceLayer); } // namespace caffe
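// --- CPU reference sketch (illustration only; mirrors compute_top_l2 plus margin_top
// above for the isotropic L2 metric) -----------------------------------------------------
// What it shows: top[i][j] = -0.5 * ||x_i - w_j||^2 / (max(sigma_j, 0) + 1e-4), and during
// training the true-class score is additionally scaled and shifted:
// top[i][y] += top[i][y] * margin_mul - margin_add.  The function name is hypothetical.
template <typename Dtype>
void class_distance_l2_forward_reference(const Dtype *bottom, const Dtype *weight,
                                         const Dtype *sigma, const int *label,
                                         Dtype *top, int M, int N, int K,
                                         Dtype margin_mul, Dtype margin_add)
{
    for (int i = 0; i < M; ++i) {
        for (int j = 0; j < N; ++j) {
            Dtype d2 = 0;
            for (int k = 0; k < K; ++k) {
                Dtype d = weight[j * K + k] - bottom[i * K + k];
                d2 += d * d;
            }
            Dtype s = sigma[j] > Dtype(0) ? sigma[j] : Dtype(0);
            top[i * N + j] = Dtype(-0.5) * d2 / (s + Dtype(1e-4));
        }
        const int y = label[i];
        top[i * N + y] += top[i * N + y] * margin_mul - margin_add;
    }
}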
namespace tensorflow { #define FINAL_MASK 0xffffffff template <typename T> __inline__ __device__ T gelu(T x) { float cdf = 0.5f * (1.0f + tanhf((0.7978845608028654f * (x + 0.044715f * x * x * x)))); return x * cdf; } template <typename T> __inline__ __device__ T warpReduceSum(T val) { for(int mask = 16; mask > 0; mask >>= 1) val += __shfl_xor_sync(FINAL_MASK, val, mask, 32); return val; } /* Calculate the sum of all elements in a block */ template <typename T> __inline__ __device__ T blockReduceSum(T val) { static __shared__ T shared[32]; int lane = threadIdx.x & 0x1f; int wid = threadIdx.x >> 5; val = warpReduceSum<T>(val); if(lane == 0) shared[wid] = val; __syncthreads(); val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)(0.0f); val = warpReduceSum<T>(val); return val; } template <typename T> __global__ void add_bias_act(T* out, const T* bias, int m, int n) { T val, reg_bias; int row_id = blockIdx.x; int ite = n / blockDim.x; int tid = threadIdx.x; for(int i = 0; i < ite; ++i) { reg_bias = __ldg(&bias[i * blockDim.x + tid]); row_id = blockIdx.x; while(row_id < m){ val = out[tid + i * blockDim.x + row_id * n]+ reg_bias; out[tid + i * blockDim.x + row_id * n] = gelu<T>(val); row_id += gridDim.x; } } } template <typename T> void add_bias_act_kernelLauncher(DType<T>* out, const DType<T>* bias, int m, int n, cudaStream_t stream) { dim3 grid(m / 4); dim3 block(n / 4); assert(block.x <= 1024); add_bias_act<T><<<grid, block, 0, stream>>>(out, bias, m, n); } template void add_bias_act_kernelLauncher<float>(DType<float>* out, const DType<float>* bias, int m, int n, cudaStream_t stream); template <typename T> __global__ void add_bias_input_layernorm(T* out, const T* input, const T* bias, const T* gamma, const T* beta, int m, int n) { int tid = threadIdx.x; __shared__ float s_mean; __shared__ float s_variance; float mean = 0.0f; float variance = 0.0f; float local_out = 0.0f; for(int i = tid; i < n; i += blockDim.x) local_out += (float)(out[blockIdx.x * n + i] + input[blockIdx.x * n + i] + __ldg(&bias[i])); mean = blockReduceSum<float>(local_out); if(threadIdx.x == 0) s_mean = mean / n; __syncthreads(); variance = blockReduceSum<float>((local_out - s_mean) * (local_out - s_mean)); if(threadIdx.x == 0) s_variance = variance / n + 1e-30f; __syncthreads(); for(int i = tid; i < n; i += blockDim.x) out[blockIdx.x * n + i] = (T)(((local_out - s_mean) * rsqrtf(s_variance)) * (float)(__ldg(&gamma[i])) + (float)(__ldg(&beta[i]))); } template <typename T> void add_bias_input_layernorm_kernelLauncher(DType<T>* out, const DType<T>* input_tensor, const DType<T>* bias, const DType<T>* gamma, const DType<T>* beta, int m, int n, cudaStream_t stream) { dim3 grid(m); dim3 block(n); assert(block.x <= 1024); add_bias_input_layernorm<T><<<grid, block, 0, stream>>>(out, input_tensor, bias, gamma, beta, m, n); } template void add_bias_input_layernorm_kernelLauncher<float>(DType<float>* out, const DType<float>* input_tensor, const DType<float>* bias, const DType<float>* gamma, const DType<float>* beta, int m, int n, cudaStream_t stream); template <typename T> __global__ void add_bias_input_jump_layernorm_kernel(T* out, const T* input, const T* jump, const T* bias, const T* gamma, const T* beta, int m, int n) { int tid = threadIdx.x; __shared__ float s_mean; __shared__ float s_variance; float mean = 0.0f; float variance = 0.0f; float local_out = 0.0f; for(int i = tid; i < n; i += blockDim.x) local_out += (float)(/*out[blockIdx.x * n + i] +*/ input[blockIdx.x * n + i] + jump[blockIdx.x * n + i] + __ldg(&bias[i])); 
mean = blockReduceSum<float>(local_out); if(threadIdx.x == 0) s_mean = mean / n; __syncthreads(); variance = blockReduceSum<float>((local_out - s_mean) * (local_out - s_mean)); if(threadIdx.x == 0) s_variance = variance / n + 1e-30f; __syncthreads(); for(int i = tid; i < n; i += blockDim.x) out[blockIdx.x * n + i] = (T)(((local_out - s_mean) * rsqrtf(s_variance)) * (float)(__ldg(&gamma[i])) + (float)(__ldg(&beta[i]))); } template <typename T> void add_bias_input_jump_layernorm(DType<T>* out, const DType<T>* input_tensor, const DType<T>* jump, const DType<T>* bias, const DType<T>* gamma, const DType<T>* beta, int m, int n, cudaStream_t stream) { dim3 grid(m); dim3 block(n); assert(block.x <= 1024); add_bias_input_jump_layernorm_kernel<T><<<grid, block, 0, stream>>>(out, input_tensor, jump, bias, gamma, beta, m, n); } template void add_bias_input_jump_layernorm<float>(DType<float>* out, const DType<float>* input_tensor, const DType<float>* jump, const DType<float>* bias, const DType<float>* gamma, const DType<float>* beta, int m, int n, cudaStream_t stream); template <typename T> __global__ void rel_shift_kernel(T* bd, T* bd_buf, int sqlen, int klen) { T* start_addr = bd + blockIdx.x * sqlen * klen; T* start_addr_buf = bd_buf + blockIdx.x * sqlen * klen; T fetch_val; int ini_num = klen+1-sqlen; for(int i = threadIdx.x; i < sqlen*klen; i += blockDim.x) { int q_id = i / klen; int k_id = i % klen; int step = (ini_num + q_id) % klen; if(step != k_id) { start_addr_buf[i] = start_addr[i + sqlen -1 - q_id]; } else { start_addr_buf[i] = 0.0f; } } } template<typename T> void rel_shift(DType<T>* BD, DType<T>* BD_BUF, int sqlen, int klen, int batch_size, int n_head, cudaStream_t stream) { int n = batch_size * n_head; dim3 grid(n); dim3 block(256); //printf("sqlen(%d), klen(%d), batch_size(%d), n_head(%d)\n ", sqlen, klen, batch_size, n_head); rel_shift_kernel<<<grid, block, 0, stream>>>(BD, BD_BUF, sqlen, klen); } template void rel_shift<float>(DType<float>* BD, DType<float>* BD_BUF, int sqlen, int klen, int batch_size, int n_head, cudaStream_t stream); template <typename T> __global__ void add_bias_and_relu_kernel(T* attn_out_buff, const T* bias, int m, int n) { for(int i = threadIdx.x; i < n; i += blockDim.x) { T val = (float)(attn_out_buff[blockIdx.x * n + i] + __ldg(&bias[i])); attn_out_buff[blockIdx.x * n + i] = val < 0.0f ? 
0.0f : val; } } template <typename T> void add_bias_and_relu(DType<T>* attn_out_buff1, const DType<T>* ff_layer_1_bias, int m, int n, cudaStream_t stream) { dim3 grid(m); dim3 block(1024); assert(block.x <= 1024); add_bias_and_relu_kernel<<<grid, block, 0, stream>>>(attn_out_buff1, ff_layer_1_bias, m, n); } template void add_bias_and_relu<float>(DType<float>* attn_out_buff1, const DType<float>* ff_layer_1_bias, int m, int n, cudaStream_t stream); template <typename T> __global__ void add_input_layernorm_kernel(T* out, const T* input, const T* gamma, const T* beta, int m, int n) { int tid = threadIdx.x; __shared__ float s_mean; __shared__ float s_variance; float mean = 0.0f; float variance = 0.0f; float local_out = 0.0f; for(int i = tid; i < n; i += blockDim.x) { local_out += (float)(out[blockIdx.x * n + i] + input[blockIdx.x * n + i]); } mean = blockReduceSum<float>(local_out); if(tid == 0) s_mean = mean / n; __syncthreads(); variance = blockReduceSum<float>((local_out - s_mean) * (local_out - s_mean)); if(tid == 0) s_variance = variance / n + 1e-30f; __syncthreads(); for(int i = tid; i < n; i += blockDim.x) out[blockIdx.x * n + i] = (T)(((local_out - s_mean) * rsqrtf(s_variance)) * (float)(__ldg(&gamma[i])) + (float)(__ldg(&beta[i]))); } template <typename T> void add_input_layernorm(DType<T>* out, const DType<T>* input_tensor, const DType<T>* gamma, const DType<T>* beta, int m, int n, cudaStream_t stream) { dim3 grid(m); dim3 block(n); assert(block.x <= 1024); add_input_layernorm_kernel<T><<<grid, block, 0, stream>>>(out, input_tensor, gamma, beta, m, n); } template void add_input_layernorm<float>(DType<float>* out, const DType<float>* bias, const DType<float>* gamma, const DType<float>* beta, int m, int n, cudaStream_t stream); template <typename T> __global__ void combine_bias_kernel(const T* bias1, const T* bias2, int n, T* out) { for(int i=threadIdx.x; i<n; i+=blockDim.x) { T bias1_val = __ldg(&bias1[i]); T bias2_val = __ldg(&bias2[i]); out[i] = bias1_val + bias2_val; } } template<typename T> void combine_bias(const DType<T>* bias1_tmp, const DType<T>* ff_layer_2_bias, int d_model, DType<T>* ff_kernel_bias_combine) { dim3 grid(1); dim3 block(256); combine_bias_kernel<<<grid, block>>>(bias1_tmp, ff_layer_2_bias, d_model, ff_kernel_bias_combine); } template void combine_bias<float>(const DType<float>* bias1_tmp, const DType<float>* ff_layer_2_bias, int d_model, DType<float>* ff_kernel_bias_combine); } /* namespace tensorflow */
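/*
 * Illustrative sketch (not part of the file above): gelu<T>() at the top of
 * this file uses the tanh approximation
 *   GELU(x) ~= 0.5 * x * (1 + tanh( sqrt(2/pi) * (x + 0.044715 * x^3) )),
 * where 0.7978845608028654 == sqrt(2/pi). gelu_ref below is a hypothetical
 * host-side reference that can be used to sanity-check the kernel output.
 */
#include <cmath>
#include <cstdio>

static float gelu_ref(float x) {
  const float c = 0.7978845608028654f;  // sqrt(2/pi)
  return 0.5f * x * (1.0f + std::tanh(c * (x + 0.044715f * x * x * x)));
}

int main() {
  for (float x : {-2.0f, -0.5f, 0.0f, 0.5f, 2.0f})
    std::printf("gelu(%g) = %g\n", x, gelu_ref(x));
  return 0;
}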
namespace timemachine { template <int D> double reduce_born_forces_jvp( const std::vector<Surreal<double>> &coords, const std::vector<double> &params, const std::vector<int> &atomic_radii_idxs, const std::vector<Surreal<double>> &born_radii, const std::vector<Surreal<double>> &obc_chain, const std::vector<Surreal<double>> &obc_chain_ri, const double surface_tension, // surface area factor const double probe_radius, std::vector<Surreal<double>> &bornForces, // dU/Ri std::vector<double> &out_MvP) { // constants const int numberOfAtoms = atomic_radii_idxs.size(); const int N = numberOfAtoms; for (int atomI = 0; atomI < numberOfAtoms; atomI++) { Surreal<double> radii_derivs(0, 0); if (born_radii[atomI].real > 0.0) { double atomic_radii = params[atomic_radii_idxs[atomI]]; double r = atomic_radii + probe_radius; Surreal<double> ar = atomic_radii / born_radii[atomI]; Surreal<double> ar2 = ar * ar; Surreal<double> ar4 = ar2 * ar2; Surreal<double> ratio6 = ar4 * ar2; Surreal<double> saTerm = surface_tension * r * r * ratio6; bornForces[atomI] -= 6.0 * saTerm / born_radii[atomI]; Surreal<double> br2 = born_radii[atomI] * born_radii[atomI]; Surreal<double> br4 = br2 * br2; Surreal<double> br6 = br4 * br2; radii_derivs += 2 * pow(atomic_radii, 5) * surface_tension * (probe_radius + atomic_radii) * (3 * probe_radius + 4 * atomic_radii) / br6; } radii_derivs += bornForces[atomI] * obc_chain_ri[atomI]; out_MvP[atomic_radii_idxs[atomI]] += radii_derivs.imag; bornForces[atomI] *= obc_chain[atomI]; } } // ported over from OpenMM with minor corrections template <int D> void compute_born_radii_jvp( const std::vector<Surreal<double>> &coords, const std::vector<double> &params, const std::vector<int> &atomic_radii_idxs, const std::vector<int> &scale_factor_idxs, const double dielectric_offset, const double alpha_obc, const double beta_obc, const double gamma_obc, const double cutoff, std::vector<Surreal<double>> &born_radii, std::vector<Surreal<double>> &obc_chain, std::vector<Surreal<double>> &obc_chain_ri) { int numberOfAtoms = atomic_radii_idxs.size(); int N = numberOfAtoms; if (coords.size() / D != numberOfAtoms) { throw std::runtime_error("compute born radii number of atoms are inconsistent"); } for (int i_idx = 0; i_idx < numberOfAtoms; i_idx++) { double radiusI = params[atomic_radii_idxs[i_idx]]; double offsetRadiusI = radiusI - dielectric_offset; double radiusIInverse = 1.0 / offsetRadiusI; Surreal<double> sum(0, 0); // HCT code for (int j_idx = 0; j_idx < numberOfAtoms; j_idx++) { if (j_idx != i_idx) { Surreal<double> r(0, 0); for (int d = 0; d < D; d++) { Surreal<double> dx = coords[i_idx * D + d] - coords[j_idx * D + d]; r += dx * dx; } r = sqrt(r); double offsetRadiusJ = params[atomic_radii_idxs[j_idx]] - dielectric_offset; double scaledRadiusJ = offsetRadiusJ * params[scale_factor_idxs[j_idx]]; Surreal<double> rScaledRadiusJ = r + scaledRadiusJ; Surreal<double> rSubScaledRadiusJ = r - scaledRadiusJ; if (offsetRadiusI < rScaledRadiusJ.real) { Surreal<double> rInverse = 1.0 / r; Surreal<double> l_ij(0, 0); if (offsetRadiusI > abs(rSubScaledRadiusJ).real) { l_ij.real = offsetRadiusI; l_ij.imag = 0; } else { l_ij = abs(rSubScaledRadiusJ); } l_ij = 1.0 / l_ij; Surreal<double> u_ij = 1.0 / rScaledRadiusJ; Surreal<double> l_ij2 = l_ij * l_ij; Surreal<double> u_ij2 = u_ij * u_ij; Surreal<double> ratio = log((u_ij / l_ij)); Surreal<double> term = l_ij - u_ij + 0.25 * r * (u_ij2 - l_ij2) + (0.5 * rInverse * ratio) + (0.25 * scaledRadiusJ * scaledRadiusJ * rInverse) * (l_ij2 - u_ij2); // this case (atom i 
completely inside atom j) is not considered in the original paper // Jay Ponder and the authors of Tinker recognized this and // worked out the details if (offsetRadiusI < (scaledRadiusJ - r).real) { term += 2.0 * (radiusIInverse - l_ij); } sum += term; } } } sum *= 0.5 * offsetRadiusI; Surreal<double> sum2 = sum * sum; Surreal<double> sum3 = sum * sum2; Surreal<double> tanhSum = tanh(alpha_obc * sum - beta_obc * sum2 + gamma_obc * sum3); born_radii[i_idx] = 1.0 / (1.0 / offsetRadiusI - tanhSum / radiusI); // dRi/dPsi obc_chain[i_idx] = (alpha_obc - 2.0 * beta_obc * sum + 3.0 * gamma_obc * sum2); // !@#$ why did you move it here! obc_chain[i_idx] = (1.0 - tanhSum * tanhSum) * obc_chain[i_idx] / radiusI; // this takes care of the radiusI prefactor obc_chain[i_idx] *= born_radii[i_idx] * born_radii[i_idx]; // dRi/dri obc_chain_ri[i_idx] = 1.0 / (offsetRadiusI * offsetRadiusI) - tanhSum / (radiusI * radiusI); obc_chain_ri[i_idx] *= born_radii[i_idx] * born_radii[i_idx]; } } template <int D> double compute_born_first_loop_jvp( const std::vector<Surreal<double>> &coords, const std::vector<double> &params, const std::vector<int> &charge_param_idxs, const std::vector<Surreal<double>> &born_radii, const double prefactor, const double cutoff, std::vector<Surreal<double>> &bornForces, std::vector<double> &out_forces, std::vector<double> &dU_dp) { const int numberOfAtoms = charge_param_idxs.size(); const int N = numberOfAtoms; // const double soluteDielectric = solute_dielectric; // const double solventDielectric = solvent_dielectric; // double preFactor; // if (soluteDielectric != 0.0 && solventDielectric != 0.0) { // preFactor = -screening*((1.0/soluteDielectric) - (1.0/solventDielectric)); // } else { // preFactor = 0.0; // } // printf("preFactor %f\n", preFactor); std::vector<Surreal<double>> charge_derivs(N, Surreal<double>(0, 0)); for (int atomI = 0; atomI < numberOfAtoms; atomI++) { double partialChargeI = params[charge_param_idxs[atomI]]; for (int atomJ = atomI; atomJ < numberOfAtoms; atomJ++) { Surreal<double> r2(0, 0); Surreal<double> dxs[D] = {Surreal<double>(0, 0)}; for (int d = 0; d < D; d++) { Surreal<double> dx = coords[atomI * D + d] - coords[atomJ * D + d]; dxs[d] = dx; r2 += dx * dx; } Surreal<double> r = sqrt(r2); Surreal<double> alpha2_ij = born_radii[atomI] * born_radii[atomJ]; Surreal<double> D_ij = r2 / (4.0 * alpha2_ij); Surreal<double> expTerm = exp(-D_ij); Surreal<double> denominator2 = r2 + alpha2_ij * expTerm; Surreal<double> denominator = sqrt(denominator2); double partialChargeJ = params[charge_param_idxs[atomJ]]; Surreal<double> Gpol = (prefactor * partialChargeI * partialChargeJ) / denominator; Surreal<double> dGpol_dr = -Gpol * (1.0 - 0.25 * expTerm) / denominator2; Surreal<double> dGpol_dalpha2_ij = -0.5 * Gpol * expTerm * (1.0 + D_ij) / denominator2; // printf("%d %d dGpol_dalpha2_ij %f\n", atomI, atomJ, dGpol_dalpha2_ij); Surreal<double> energy = Gpol; Surreal<double> dE_dqi = prefactor * partialChargeJ / denominator; Surreal<double> dE_dqj = prefactor * partialChargeI / denominator; if (atomI != atomJ) { // TBD: determine what we should do with cutoff // energy -= partialChargeI*partialCharges[atomJ]/cutoff; bornForces[atomJ] += dGpol_dalpha2_ij * born_radii[atomI]; for (int d = 0; d < D; d++) { out_forces[atomI * D + d] += (dxs[d] * dGpol_dr).imag; out_forces[atomJ * D + d] -= (dxs[d] * dGpol_dr).imag; } } else { dE_dqi *= 0.5; dE_dqj *= 0.5; energy *= 0.5; } charge_derivs[atomI] += dE_dqi; charge_derivs[atomJ] += dE_dqj; // obcEnergy += energy; bornForces[atomI] 
+= dGpol_dalpha2_ij * born_radii[atomJ]; } } for (int i = 0; i < charge_derivs.size(); i++) { // std::cout << "???" << charge_derivs[i] << std::endl; dU_dp[charge_param_idxs[i]] += charge_derivs[i].imag; } }; template <int D> double compute_born_energy_and_forces_jvp( const std::vector<Surreal<double>> &coords, const std::vector<double> &params, const std::vector<int> &atomic_radii_idxs, const std::vector<int> &scale_factor_idxs, const std::vector<Surreal<double>> &born_radii, const std::vector<Surreal<double>> &obc_chain, const std::vector<Surreal<double>> &obc_chain_ri, const double dielectric_offset, // const double screening, // const double surface_tension, // surface area factor // const double solute_dielectric, // const double solvent_dielectric, // const double probe_radius, const double cutoff, std::vector<Surreal<double>> &bornForces, std::vector<double> &out_HvP, std::vector<double> &out_MvP) { // constants const int numberOfAtoms = atomic_radii_idxs.size(); const int N = numberOfAtoms; const double dielectricOffset = dielectric_offset; const double cutoffDistance = cutoff; std::vector<Surreal<double>> dPsi_dx(N * D, Surreal<double>(0, 0)); std::vector<Surreal<double>> dPsi_dri(N, Surreal<double>(0, 0)); std::vector<Surreal<double>> dPsi_dsi(N, Surreal<double>(0, 0)); for (int atomI = 0; atomI < numberOfAtoms; atomI++) { // radius w/ dielectric offset applied double radiusI = params[atomic_radii_idxs[atomI]]; double offsetRadiusI = radiusI - dielectricOffset; double offsetRadiusI2 = offsetRadiusI * offsetRadiusI; double offsetRadiusI3 = offsetRadiusI2 * offsetRadiusI; for (int atomJ = 0; atomJ < numberOfAtoms; atomJ++) { if (atomJ != atomI) { Surreal<double> r2(0, 0); Surreal<double> dxs[D] = {Surreal<double>(0, 0)}; for (int d = 0; d < D; d++) { Surreal<double> dx = coords[atomI * D + d] - coords[atomJ * D + d]; dxs[d] = dx; r2 += dx * dx; } Surreal<double> r = sqrt(r2); // radius w/ dielectric offset applied double radiusJ = params[atomic_radii_idxs[atomJ]]; double offsetRadiusJ = radiusJ - dielectricOffset; double offsetRadiusJ2 = offsetRadiusJ * offsetRadiusJ; double scaleFactorJ = params[scale_factor_idxs[atomJ]]; double scaleFactorJ2 = scaleFactorJ * scaleFactorJ; double scaleFactorJ3 = scaleFactorJ2 * scaleFactorJ; double scaledRadiusJ = offsetRadiusJ * scaleFactorJ; double scaledRadiusJ2 = scaledRadiusJ * scaledRadiusJ; Surreal<double> rScaledRadiusJ = r + scaledRadiusJ; Surreal<double> rScaledRadiusJ2 = rScaledRadiusJ * rScaledRadiusJ; Surreal<double> rScaledRadiusJ3 = rScaledRadiusJ2 * rScaledRadiusJ; // dL/dr & dU/dr are zero (this can be shown analytically) // removed from calculation if (offsetRadiusI < rScaledRadiusJ.real) { // double l_ij = offsetRadiusI > abs(rSubScaledRadiusJ) ? 
offsetRadiusI : abs(rSubScaledRadiusJ); // l_ij = 1.0/l_ij; // double u_ij = 1.0/rScaledRadiusJ; // double l_ij2 = l_ij*l_ij; // double u_ij2 = u_ij*u_ij; // double rInverse = 1.0/r; // double r2Inverse = rInverse*rInverse; // double t3 = 0.125*(1.0 + scaledRadiusJ2*r2Inverse)*(l_ij2 - u_ij2) + 0.25*log(u_ij/l_ij)*r2Inverse; // printf("%d t3 RHS: %.8f\n", atomI, t3); // double de = bornForces[atomI]*t3*rInverse; // for(int d=0; d < D; d++) { // dPsi_dx[atomI*D+d] -= dxs[d]*de; // dPsi_dx[atomJ*D+d] += dxs[d]*de; // } // start manual derivative Surreal<double> de(0, 0); // derivative of Psi wrt the distance Surreal<double> dpsi_dri(0, 0); Surreal<double> dpsi_drj(0, 0); Surreal<double> dpsi_dsj(0, 0); Surreal<double> rSubScaledRadiusJ = r - scaledRadiusJ; Surreal<double> rSubScaledRadiusJ2 = rSubScaledRadiusJ * rSubScaledRadiusJ; Surreal<double> rSubScaledRadiusJ3 = rSubScaledRadiusJ2 * rSubScaledRadiusJ; // factor out as much as we can to outside of the conditional for reduce convergence if (offsetRadiusI > abs(rSubScaledRadiusJ).real) { Surreal<double> term = 0.5 * (-offsetRadiusI) * (-0.25 * r * (1 / rScaledRadiusJ2 - 1 / offsetRadiusI2) + 1.0 / rScaledRadiusJ + 1.0 / (-offsetRadiusI) + 0.25 * scaleFactorJ2 * offsetRadiusJ2 * (1 / rScaledRadiusJ2 - 1 / offsetRadiusI2) / r - 0.5 * log(offsetRadiusI / rScaledRadiusJ) / r); de = -0.5 * r / rScaledRadiusJ3 + (5.0 / 4.0) / rScaledRadiusJ2 - 0.25 / offsetRadiusI2 + 0.5 * scaleFactorJ2 * offsetRadiusJ2 / (r * rScaledRadiusJ3) - 0.5 / (r * rScaledRadiusJ) - 0.25 * scaleFactorJ2 * offsetRadiusJ2 * (-1 / rScaledRadiusJ2 + 1 / offsetRadiusI2) / r2 - 0.5 * log(offsetRadiusI / rScaledRadiusJ) / r2; dpsi_dri = 0.25 * r * (1 / rScaledRadiusJ2 - 1 / offsetRadiusI2) + offsetRadiusI * (0.5 * r / offsetRadiusI3 - 1 / offsetRadiusI2 - 0.5 * scaleFactorJ2 * offsetRadiusJ2 / (r * offsetRadiusI3) + 0.5 / (r * offsetRadiusI)) - 1 / rScaledRadiusJ + 1.0 / offsetRadiusI + 0.25 * scaleFactorJ2 * offsetRadiusJ2 * (-1 / rScaledRadiusJ2 + 1 / offsetRadiusI2) / r + 0.5 * log(offsetRadiusI / rScaledRadiusJ) / r; dpsi_drj = offsetRadiusI * (-0.5 * r * scaleFactorJ / rScaledRadiusJ3 + scaleFactorJ / rScaledRadiusJ2 + 0.5 * scaleFactorJ3 * offsetRadiusJ2 / (r * rScaledRadiusJ3) + 0.25 * scaleFactorJ2 * (-2 * dielectricOffset + 2 * radiusJ) * (-1 / rScaledRadiusJ2 + 1 / offsetRadiusI2) / r - 0.5 * scaleFactorJ / (r * rScaledRadiusJ)); dpsi_dsj = offsetRadiusI * (0.25 * r * (2 * dielectricOffset - 2 * radiusJ) / rScaledRadiusJ3 + offsetRadiusJ / rScaledRadiusJ2 - 0.25 * scaleFactorJ2 * offsetRadiusJ2 * (2 * dielectricOffset - 2 * radiusJ) / (r * rScaledRadiusJ3) + 0.5 * scaleFactorJ * offsetRadiusJ2 * (-1 / rScaledRadiusJ2 + 1 / offsetRadiusI2) / r + 0.5 * (-offsetRadiusJ) / (r * rScaledRadiusJ)); if (offsetRadiusI < (scaledRadiusJ - r).real) { de += 0; dpsi_dri += 0; dpsi_drj += 0; dpsi_dsj += 0; } } else { Surreal<double> term = -0.5 * (-offsetRadiusI) * (-0.25 * r * (1 / rSubScaledRadiusJ2 - 1 / rScaledRadiusJ2) + 1.0 / fabs(rSubScaledRadiusJ) - 1 / rScaledRadiusJ - 0.25 * scaleFactorJ2 * offsetRadiusJ2 * (-1 / rSubScaledRadiusJ2 + 1 / rScaledRadiusJ2) / r + 0.5 * log(fabs(rSubScaledRadiusJ) / rScaledRadiusJ) / r); de = 0.25 * r * (-2 / rScaledRadiusJ3 + 2 / rSubScaledRadiusJ3) + (5.0 / 4.0) / rScaledRadiusJ2 - sign(rSubScaledRadiusJ) / rSubScaledRadiusJ2 - 0.25 / rSubScaledRadiusJ2 + 0.25 * scaleFactorJ2 * offsetRadiusJ2 * (2 / rScaledRadiusJ3 - 2 / rSubScaledRadiusJ3) / r + 0.5 * rScaledRadiusJ * (sign(rSubScaledRadiusJ) / rScaledRadiusJ - fabs(rSubScaledRadiusJ) / 
rScaledRadiusJ2) / (r * fabs(rSubScaledRadiusJ)) - 0.25 * scaleFactorJ2 * offsetRadiusJ2 * (-1 / rScaledRadiusJ2 + 1 / rSubScaledRadiusJ2) / r2 - 0.5 * log(fabs(rSubScaledRadiusJ) / rScaledRadiusJ) / r2; dpsi_dri = 0.25 * r * (1 / rScaledRadiusJ2 - 1 / rSubScaledRadiusJ2) + 1.0 / fabs(rSubScaledRadiusJ) - 1 / rScaledRadiusJ + 0.25 * scaleFactorJ2 * offsetRadiusJ2 * (-1 / rScaledRadiusJ2 + 1 / rSubScaledRadiusJ2) / r + 0.5 * log(fabs(rSubScaledRadiusJ) / rScaledRadiusJ) / r; dpsi_drj = offsetRadiusI * (0.25 * r * (-2 * scaleFactorJ / rScaledRadiusJ3 - 2 * scaleFactorJ / rSubScaledRadiusJ3) + scaleFactorJ / rScaledRadiusJ2 + scaleFactorJ * sign(rSubScaledRadiusJ) / rSubScaledRadiusJ2 + 0.25 * scaleFactorJ2 * (-2 * dielectricOffset + 2 * radiusJ) * (-1 / rScaledRadiusJ2 + 1 / rSubScaledRadiusJ2) / r + 0.25 * scaleFactorJ2 * offsetRadiusJ2 * (2 * scaleFactorJ / rScaledRadiusJ3 + 2 * scaleFactorJ / rSubScaledRadiusJ3) / r + 0.5 * rScaledRadiusJ * (-scaleFactorJ * sign(rSubScaledRadiusJ) / rScaledRadiusJ - scaleFactorJ * fabs(rSubScaledRadiusJ) / rScaledRadiusJ2) / (r * fabs(rSubScaledRadiusJ))); dpsi_dsj = offsetRadiusI * (0.25 * r * (-(-2 * dielectricOffset + 2 * radiusJ) / rSubScaledRadiusJ3 + (2 * dielectricOffset - 2 * radiusJ) / rScaledRadiusJ3) + offsetRadiusJ / rScaledRadiusJ2 + offsetRadiusJ * sign(rSubScaledRadiusJ) / rSubScaledRadiusJ2 + 0.25 * scaleFactorJ2 * offsetRadiusJ2 * ((-2 * dielectricOffset + 2 * radiusJ) / rSubScaledRadiusJ3 - (2 * dielectricOffset - 2 * radiusJ) / rScaledRadiusJ3) / r + 0.5 * scaleFactorJ * offsetRadiusJ2 * (-1 / rScaledRadiusJ2 + 1 / rSubScaledRadiusJ2) / r + 0.5 * rScaledRadiusJ * ((-offsetRadiusJ) * sign(rSubScaledRadiusJ) / rScaledRadiusJ + (-offsetRadiusJ) * fabs(rSubScaledRadiusJ) / rScaledRadiusJ2) / (r * fabs(rSubScaledRadiusJ))); if (offsetRadiusI < (scaledRadiusJ - r).real) { de += 2.0 * sign(rSubScaledRadiusJ) / rSubScaledRadiusJ2; dpsi_dri += -2.0 / fabs(rSubScaledRadiusJ); dpsi_drj += -2.0 * scaleFactorJ * offsetRadiusI * sign(rSubScaledRadiusJ) / rSubScaledRadiusJ2; dpsi_dsj += 2.0 * offsetRadiusI * (-offsetRadiusJ) * sign(rSubScaledRadiusJ) / rSubScaledRadiusJ2; } } // is bornForces de *= 0.5 * bornForces[atomI] * offsetRadiusI; dpsi_dri *= 0.5 * bornForces[atomI]; dpsi_drj *= 0.5 * bornForces[atomI]; dpsi_dsj *= 0.5 * bornForces[atomI]; dPsi_dri[atomI] += dpsi_dri; dPsi_dri[atomJ] += dpsi_drj; dPsi_dsi[atomJ] += dpsi_dsj; for (int d = 0; d < D; d++) { dPsi_dx[atomI * D + d] += (dxs[d] / r) * de; dPsi_dx[atomJ * D + d] -= (dxs[d] / r) * de; } } } } } // std::vector<double> &out_HvP, // std::vector<double> &out_MvP for (int atomI = 0; atomI < numberOfAtoms; atomI++) { for (int d = 0; d < D; d++) { out_HvP[atomI * D + d] += dPsi_dx[atomI * D + d].imag; } } // for(int i=0; i < dPsi_dri.size(); i++) { // std::cout << "dPsi_dri: " << dPsi_dri[i]+atomic_radii_derivatives[i] << std::endl; // } for (int i = 0; i < dPsi_dri.size(); i++) { // std::cout << "dPsi_dri parts: " << dPsi_dri[i] << " " << atomic_radii_derivatives[i] << std::endl; // out_MvP[atomic_radii_idxs[i]] += dPsi_dri[i].imag + atomic_radii_derivatives[i].imag; out_MvP[atomic_radii_idxs[i]] += dPsi_dri[i].imag; } for (int i = 0; i < dPsi_dsi.size(); i++) { out_MvP[scale_factor_idxs[i]] += dPsi_dsi[i].imag; } // for(int i=0; i < charge_derivs.size(); i++) { // // std::cout << "???" << charge_derivs[i] << std::endl; // out_MvP[charge_param_idxs[i]] += charge_derivs[i].imag; // } // std::cout << "energy" << obcEnergy.real << std::endl; // return obcEnergy; } } // namespace timemachine
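/*
 * Illustrative sketch (not part of the file above): the Surreal<double> values
 * threaded through the *_jvp routines behave like forward-mode dual numbers --
 * .real holds the value and .imag holds a directional derivative, so seeding
 * .imag on the inputs and reading .imag on the outputs yields a
 * Jacobian-vector product. Dual below is a hypothetical, minimal stand-in for
 * that idea (only the operations needed here are defined); it is not the
 * timemachine Surreal implementation.
 */
#include <cmath>
#include <cstdio>

struct Dual {
  double real;  // value
  double imag;  // tangent (directional derivative)
};

static Dual operator*(Dual a, Dual b) {
  return {a.real * b.real, a.real * b.imag + a.imag * b.real};  // product rule
}

static Dual tanh(Dual a) {
  const double t = std::tanh(a.real);
  return {t, (1.0 - t * t) * a.imag};  // d/dx tanh(x) = 1 - tanh(x)^2
}

int main() {
  // Forward-mode derivative of f(x) = tanh(x*x) at x = 0.7 in a single pass,
  // the same propagation-through-tanh pattern used by compute_born_radii_jvp
  // for the OBC rescaling term.
  Dual x{0.7, 1.0};     // seed the tangent direction
  Dual y = tanh(x * x);
  std::printf("f = %f, df/dx = %f\n", y.real, y.imag);
  return 0;
}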
#include "ConvSingleInputExecution.hpp" namespace MNN { namespace CUDA { template <typename T> __global__ void Pad(const size_t size, const T* input, const int old_height, const int old_width, const int padded_height, const int padded_width, const int pad_top, const int pad_left, float pad_value, T* output) { T pad_value_ = static_cast<T>(pad_value); for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) { int block_num = pos / (padded_width*padded_height); int left = pos % (padded_width*padded_height); const int padded_w = left % padded_width; const int padded_h = left / padded_width % padded_height; if (padded_h - pad_top < 0 || padded_w - pad_left < 0 || padded_h - pad_top >= old_height || padded_w - pad_left >= old_width) { output[pos] = pad_value_; } else { output[pos] = input[(block_num * old_height + padded_h - pad_top) * old_width + padded_w - pad_left]; } } return; } ConvSingleInputExecution::ConvSingleInputExecution(Backend* backend, const MNN::Op* op) : Execution(backend), mOp(op) { //MNN_PRINT("cuda convSingleInput onInit in\n"); auto conv = op->main_as_Convolution2D(); auto common = conv->common(); mKernelInfo.groups = common->group(); mKernelInfo.kernelX = common->kernelX(); mKernelInfo.kernelY = common->kernelY(); mKernelInfo.padMode = common->padMode(); mKernelInfo.padX = common->padX(); mKernelInfo.padY = common->padY(); if (nullptr != common->pads()) { mKernelInfo.padX = common->pads()->data()[1]; mKernelInfo.padY = common->pads()->data()[0]; } pad_left_ = mKernelInfo.padX; pad_right_ = mKernelInfo.padX; pad_top_ = mKernelInfo.padY; pad_bottom_ = mKernelInfo.padY; mKernelInfo.strideX = common->strideX(); mKernelInfo.strideY = common->strideY(); mKernelInfo.dilateX = common->dilateX(); mKernelInfo.dilateY = common->dilateY(); mKernelInfo.activationType = common->relu() ? 1 : (common->relu6() ? 
2 : 0); use_relu_ = (mKernelInfo.activationType == 1); use_relu6_ = (mKernelInfo.activationType == 2); cudnn_handle_ = nullptr; input_desc_ = nullptr; output_desc_ = nullptr; filter_desc_ = nullptr; conv_desc_ = nullptr; padded_desc_ = nullptr; cudnn_data_type_ = CUDNN_DATA_FLOAT; cudnn_data_type_len_ = 0; auto runtime = static_cast<CUDABackend*>(backend)->getCUDARuntime(); cudnn_handle_ = runtime->cudnn_handle(); cudnn_check(cudnnCreateTensorDescriptor(&input_desc_)); cudnn_check(cudnnCreateTensorDescriptor(&output_desc_)); cudnn_check(cudnnCreateTensorDescriptor(&padded_desc_)); cudnn_check(cudnnCreateTensorDescriptor(&bias_desc_)); cudnn_check(cudnnCreateFilterDescriptor(&filter_desc_)); cudnn_check(cudnnCreateConvolutionDescriptor(&conv_desc_)); cudnn_check(cudnnCreateActivationDescriptor(&act_desc_)); //weight host->device const float* filterDataPtr = nullptr; int weightSize = 0; std::shared_ptr<ConvolutionCommon::Int8Common> quanCommon; ConvolutionCommon::getConvParameters(&quanCommon, conv, &filterDataPtr, &weightSize); weightTensor.reset(Tensor::createDevice<float>({weightSize})); backend->onAcquireBuffer(weightTensor.get(), Backend::STATIC); mFilter = (void *)weightTensor.get()->buffer().device; cuda_check(cudaMemcpy(mFilter, filterDataPtr, weightSize*sizeof(float), cudaMemcpyHostToDevice)); if(conv->bias()->size() != 0) { int biasSize = conv->bias()->size(); biasTensor.reset(Tensor::createDevice<float>({biasSize})); backend->onAcquireBuffer(biasTensor.get(), Backend::STATIC); mBias = (void *)biasTensor.get()->buffer().device; cuda_check(cudaMemcpy(mBias, conv->bias()->data(), conv->bias()->size()*sizeof(float), cudaMemcpyHostToDevice)); int bias_size = conv->bias()->size(); int dim_bias[] = {1, bias_size, 1, 1}; int stride_bias[] = {bias_size, 1, 1, 1}; if(cudnn_data_type_ == CUDNN_DATA_FLOAT) { cudnn_check(cudnnSetTensorNdDescriptor(bias_desc_, CUDNN_DATA_FLOAT, 4, dim_bias, stride_bias)); } else if(cudnn_data_type_ == CUDNN_DATA_HALF) { cudnn_check(cudnnSetTensorNdDescriptor(bias_desc_, CUDNN_DATA_HALF, 4, dim_bias, stride_bias)); } else { MNN_PRINT("only supports fp32/fp16 data type!!!\n"); } use_bias_ = true; } mKernelInfo.kernelN = common->outputCount(); mKernelInfo.kernelC = weightSize / (mKernelInfo.kernelN * mKernelInfo.kernelY * mKernelInfo.kernelX); std::vector<int> filter_shape = {mKernelInfo.kernelN, mKernelInfo.kernelC, mKernelInfo.kernelY, mKernelInfo.kernelX}; cudnn_check(cudnnSetFilter4dDescriptor(filter_desc_, cudnn_data_type_, CUDNN_TENSOR_NCHW, filter_shape[0], filter_shape[1], filter_shape[2], filter_shape[3])); cudnn_check(cudnnSetConvolution2dDescriptor(conv_desc_, 0, 0, mKernelInfo.strideY, mKernelInfo.strideX, mKernelInfo.dilateY, mKernelInfo.dilateX, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); if (cudnn_data_type_ == CUDNN_DATA_HALF) { cudnn_check(cudnnSetConvolutionMathType(conv_desc_, CUDNN_TENSOR_OP_MATH)); } //set group num cudnn_check(cudnnSetConvolutionGroupCount(conv_desc_, mKernelInfo.groups)); } ConvSingleInputExecution::~ConvSingleInputExecution() { cudnn_check(cudnnDestroyConvolutionDescriptor(conv_desc_)); cudnn_check(cudnnDestroyFilterDescriptor(filter_desc_)); cudnn_check(cudnnDestroyTensorDescriptor(padded_desc_)); cudnn_check(cudnnDestroyTensorDescriptor(output_desc_)); cudnn_check(cudnnDestroyTensorDescriptor(input_desc_)); cudnn_check(cudnnDestroyTensorDescriptor(bias_desc_)); cudnn_check(cudnnDestroyActivationDescriptor(act_desc_)); if (nullptr != weightTensor) { backend()->onReleaseBuffer(weightTensor.get(), Backend::STATIC); } 
if(use_bias_ && nullptr != biasTensor) { backend()->onReleaseBuffer(biasTensor.get(), Backend::STATIC); } if(workspace_size_!=0 && nullptr != workspaceTensor) { backend()->onReleaseBuffer(workspaceTensor.get(), Backend::DYNAMIC_SEPERATE); } } ErrorCode ConvSingleInputExecution::onResize(const std::vector<Tensor*> &inputs, const std::vector<Tensor*> &outputs) { // prepare //MNN_PRINT("cuda convSingleInput onResize in, pad:%d\n", mKernelInfo.padX); auto input = inputs[0], output = outputs[0]; mIOInfo.iw = input->width(); mIOInfo.ih = input->height(); mIOInfo.ic = input->channel(); mIOInfo.ib = input->batch(); mIOInfo.ow = output->width(); mIOInfo.oh = output->height(); mIOInfo.oc = output->channel(); mIOInfo.ob = output->batch(); mKernelInfo.kernelN = output->channel(); mKernelInfo.kernelC = input->channel() / mKernelInfo.groups; if(mIOInfo.iw==0) { mIOInfo.iw = 1; } if(mIOInfo.ih==0) { mIOInfo.ih = 1; } if(mIOInfo.ic==0) { mIOInfo.ic = 1; } if(mIOInfo.ib==0) { mIOInfo.ib = 1; } if(mIOInfo.ow==0) { mIOInfo.ow = 1; } if(mIOInfo.oh==0) { mIOInfo.oh = 1; } if(mIOInfo.oc==0) { mIOInfo.oc = 1; } if(mIOInfo.ob==0) { mIOInfo.ob = 1; } std::vector<int> in_shape = {mIOInfo.ib, mIOInfo.ic, mIOInfo.ih, mIOInfo.iw}; std::vector<int> output_shape = {mIOInfo.ob, mIOInfo.oc, mIOInfo.oh, mIOInfo.ow}; // printf("filter:%d %d %d %d\n", filter_shape[0], filter_shape[1], filter_shape[2], filter_shape[3]); // printf("input:%d %d %d %d\n", in_shape[0], in_shape[1], in_shape[2], in_shape[3]); // printf("output:%d %d %d %d\n", output_shape[0], output_shape[1], output_shape[2], output_shape[3]); cudnn_check(cudnnSetTensor4dDescriptor(input_desc_, CUDNN_TENSOR_NCHW, cudnn_data_type_, in_shape[0], in_shape[1], in_shape[2], in_shape[3])); cudnn_check(cudnnSetTensor4dDescriptor(output_desc_, CUDNN_TENSOR_NCHW, cudnn_data_type_, output_shape[0], output_shape[1], output_shape[2], output_shape[3])); cudnnTensorDescriptor_t input_descriptor_real = nullptr; if (mKernelInfo.padMode == PadMode_SAME) { int kernelWidthSize = (mKernelInfo.kernelX - 1) * mKernelInfo.dilateX + 1; int kernelHeightSize = (mKernelInfo.kernelY - 1) * mKernelInfo.dilateY + 1; int pw = (mIOInfo.ow - 1) * mKernelInfo.strideX + kernelWidthSize - mIOInfo.iw; int ph = (mIOInfo.oh - 1) * mKernelInfo.strideY + kernelHeightSize - mIOInfo.ih; pad_left_ = pw/2; pad_right_ = pw - pad_left_; pad_top_ = ph/2; pad_bottom_ = ph - pad_top_; } else { if (mKernelInfo.padMode == PadMode_VALID) { pad_left_ = 0; pad_right_ = 0; pad_top_ = 0; pad_bottom_ = 0; } } use_pad_ = (pad_left_!=0 || pad_right_!=0 || pad_top_!=0 || pad_bottom_!=0 ) ? true : false; if(use_pad_) { int totalSize = in_shape[0]*in_shape[1]*(in_shape[2]+pad_top_+pad_bottom_)*(in_shape[3]+pad_left_+pad_right_); padTensor.reset(Tensor::createDevice<float>({totalSize})); backend()->onAcquireBuffer(padTensor.get(), Backend::DYNAMIC); mPadPtr = (void *)padTensor.get()->buffer().device; //dynamic memory release backend()->onReleaseBuffer(padTensor.get(), Backend::DYNAMIC); cudnn_check(cudnnSetTensor4dDescriptor(padded_desc_, CUDNN_TENSOR_NCHW, cudnn_data_type_, in_shape[0], in_shape[1], in_shape[2] + +pad_top_+pad_bottom_, in_shape[3] + pad_left_+pad_right_)); } input_descriptor_real = use_pad_ ? 
padded_desc_ : input_desc_; // algorithm constexpr int requested_algo_count = 1; int returned_algo_count; cudnnConvolutionFwdAlgoPerf_t perf_results; cudnn_check(cudnnGetConvolutionForwardAlgorithm_v7(cudnn_handle_, input_descriptor_real, filter_desc_, conv_desc_, output_desc_, requested_algo_count, &returned_algo_count, &perf_results)); conv_algorithm_ = perf_results.algo; if(mIOInfo.iw==1 && mIOInfo.ih==1 && mKernelInfo.kernelY==1 && mKernelInfo.kernelX==1) { conv_algorithm_ = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM; } // workspace cudnn_check(cudnnGetConvolutionForwardWorkspaceSize(cudnn_handle_, input_descriptor_real, filter_desc_, conv_desc_, output_desc_, conv_algorithm_, &workspace_size_)); if (workspace_size_ != 0) { int workspaceSize = workspace_size_; workspaceTensor.reset(Tensor::createDevice<float>({workspaceSize})); //cudnn not support workspace memory reuse backend()->onAcquireBuffer(workspaceTensor.get(), Backend::DYNAMIC_SEPERATE); mWorkSpace = (void *)workspaceTensor.get()->buffer().device; } if(use_relu_) { cudnn_check(cudnnSetActivationDescriptor(act_desc_, CUDNN_ACTIVATION_RELU, CUDNN_NOT_PROPAGATE_NAN, 0.0)); } else if(use_relu6_) { cudnn_check(cudnnSetActivationDescriptor(act_desc_, CUDNN_ACTIVATION_CLIPPED_RELU, CUDNN_NOT_PROPAGATE_NAN, 6.0)); } else { //do nothing } //MNN_PRINT("cuda convSingleInput onResize out\n"); return NO_ERROR; } ErrorCode ConvSingleInputExecution::onExecute(const std::vector<Tensor*> &inputs, const std::vector<Tensor*> &outputs) { //MNN_PRINT("cuda convSingleInput onExecute in, inputsize:%d %d\n", (int)inputs.size(), workspace_size_); MNN_ASSERT(inputs.size() == 1); MNN_ASSERT(outputs.size() == 1); auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime(); const void *input_addr = (const void*)inputs[0]->deviceId(); const void *filter_addr = mFilter; const void *bias_addr = mBias; void *output_addr = (void*)outputs[0]->deviceId(); void *workspace_addr = nullptr; if (workspace_size_ != 0) { workspace_addr = mWorkSpace; } const float alpha = 1; const float beta = 0; if(use_pad_) { std::vector<int> in_shape = {mIOInfo.ib, mIOInfo.ic, mIOInfo.ih, mIOInfo.iw}; int size = in_shape[0] * in_shape[1] * (in_shape[2]+pad_top_+pad_bottom_) * (in_shape[3]+pad_left_+pad_right_); int block_num = runtime->blocks_num(size); int threads_num = runtime->threads_num(); Pad<<<block_num, threads_num>>>(size, (float*)input_addr, in_shape[2], in_shape[3], in_shape[2]+pad_top_+pad_bottom_, in_shape[3]+pad_left_+pad_right_, pad_top_, pad_left_, 0.0, (float*)mPadPtr); cudnn_check(cudnnConvolutionForward(cudnn_handle_, &alpha, padded_desc_, mPadPtr, filter_desc_, filter_addr, conv_desc_, conv_algorithm_, workspace_addr, workspace_size_, &beta, output_desc_, output_addr)); } else { cudnn_check(cudnnConvolutionForward(cudnn_handle_, &alpha, input_desc_, input_addr, filter_desc_, filter_addr, conv_desc_, conv_algorithm_, workspace_addr, workspace_size_, &beta, output_desc_, output_addr)); } if(use_bias_) { cudnn_check(cudnnAddTensor(cudnn_handle_, &alpha, bias_desc_, bias_addr, &alpha, output_desc_, output_addr)); } if(use_relu_ || use_relu6_) { cudnn_check(cudnnActivationForward(cudnn_handle_, act_desc_, &alpha, output_desc_, output_addr, &beta, output_desc_, output_addr)); } return NO_ERROR; } class CUDAConvolutionCreator : public CUDABackend::Creator { public: virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs, const MNN::Op* op, Backend* backend) const override { if (nullptr != 
op->main_as_Convolution2D()->quanParameter()) { auto quan = op->main_as_Convolution2D()->quanParameter(); if (1 == quan->type() || 2 == quan->type()) { MNN_PRINT("cuda conv quant type 1 or 2 not support\n"); return nullptr; } } if(inputs.size() > 1) { MNN_PRINT("multi conv inputs size: not support\n"); return nullptr; } else if(inputs.size() == 1) { return new ConvSingleInputExecution(backend, op); } else { MNN_PRINT("conv inputs size:%d not support", (int)inputs.size()); return nullptr; } } }; CUDACreatorRegister<CUDAConvolutionCreator> __ConvExecution(OpType_Convolution); }// namespace CUDA }// namespace MNN
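/*
 * Illustrative sketch (not part of the execution above): for PadMode_SAME,
 * ConvSingleInputExecution::onResize computes the total padding along one axis
 * as (out - 1) * stride + (kernel - 1) * dilation + 1 - in and splits it with
 * the smaller half in front (pad_left_/pad_top_). same_pad below is a
 * hypothetical standalone helper reproducing that arithmetic; the std::max
 * clamp against a negative total is an addition in this sketch, not taken from
 * the file.
 */
#include <algorithm>
#include <cstdio>

struct Pad1D {
  int before;
  int after;
};

static Pad1D same_pad(int in, int out, int stride, int kernel, int dilation) {
  const int eff_kernel = (kernel - 1) * dilation + 1;                   // dilated kernel extent
  const int total = std::max(0, (out - 1) * stride + eff_kernel - in);  // total padding needed
  return {total / 2, total - total / 2};
}

int main() {
  // Example: 224-wide input, 3x3 kernel, stride 2, no dilation, 112-wide output.
  const Pad1D p = same_pad(224, 112, 2, 3, 1);
  std::printf("pad before = %d, pad after = %d\n", p.before, p.after);
  return 0;
}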
#include <stdio.h> #include <unistd.h> #include <stdlib.h> #include <sys/time.h> #include "determinant_update.h" #include "../../CUDA/gpu_misc.h" template<typename T, int BS> __global__ void update_inverse_cuda1 (updateJob *updateList, int N, int rowstride) { __shared__ T *A, *Ainv, *u, *Ainv_delta, *Ainv_colk; __shared__ int k; if (threadIdx.x==0) { updateJob job = updateList[blockIdx.y]; A = (T*)job.A; Ainv = (T*)job.Ainv; u = (T*)job.newRow; Ainv_delta = (T*)job.AinvDelta; Ainv_colk = (T*)job.AinvColk; k = job.iat; } __syncthreads(); // Store the product Ainv * u in shared memory T Ainv_delta_tid; __shared__ T Ainv_colk_shared[BS], delta[BS]; Ainv_delta_tid = 0.0f; __syncthreads(); int col = blockIdx.x*BS + threadIdx.x; int numblocks = (N+BS-1) / BS; int kBlock = k/BS; // If the column I need to pull from Ainv is in this thread block // domain, do the following __syncthreads(); for (int block=0; block<numblocks; block++) { delta[threadIdx.x] = u[block*BS+threadIdx.x] - A[k*rowstride + block*BS + threadIdx.x]; __syncthreads(); int istop = min(BS, N-block*BS); for (int i=0; i<istop; i++) { int row = block*BS + i; T a = Ainv[row*rowstride+col]; if (col == k) Ainv_colk_shared[i] = a; Ainv_delta_tid += a*delta[i]; __syncthreads(); } if (blockIdx.x == kBlock) if (block*BS+threadIdx.x < N) Ainv_colk[block*BS+threadIdx.x] = Ainv_colk_shared[threadIdx.x]; __syncthreads(); } // Write the data back to global memory if (col < N) Ainv_delta[col] = Ainv_delta_tid; __syncthreads(); } template<typename T, int BS> __global__ void update_inverse_cuda2 (updateJob *updateList, int N, int rowstride) { __shared__ T *A, *u, *Ainv, *Ainv_delta, *Ainv_colk; int tid = threadIdx.x; __shared__ int k; if (threadIdx.x==0) { updateJob job = updateList[blockIdx.y]; A = (T*)job.A; u = (T*)job.newRow; Ainv = (T*)job.Ainv; Ainv_delta = (T*)job.AinvDelta; Ainv_colk = (T*)job.AinvColk; k = job.iat; } __syncthreads(); T Ainv_delta_tid; __shared__ T Ainv_colk_shared[BS]; int col = blockIdx.x*BS + threadIdx.x; // Read the data back from global memory Ainv_delta_tid = Ainv_delta[col]; Ainv_colk_shared[threadIdx.x] = Ainv_colk[col]; if (col < N) A[k*rowstride + col] = u[col]; __syncthreads(); __shared__ T prefact; if (threadIdx.x == 0) prefact = -1.0f/(1.0f+Ainv_delta[k]); int numblocks = N / BS + ((N % BS) ? 1 : 0); __syncthreads(); for (int block=0; block<numblocks; block++) { Ainv_colk_shared[tid] = prefact*Ainv_colk[block*BS+threadIdx.x]; __syncthreads(); T *Ainv_row = Ainv+block*BS*rowstride + col; int istop = min (BS, N-block*BS); if (col < N) for (int i=0; i<istop; i++, Ainv_row+=rowstride) *Ainv_row += Ainv_delta_tid*Ainv_colk_shared[i]; __syncthreads(); } } void update_inverse_cuda(updateJob updateList[], float dummy, int N, int rowstride, int numWalkers) { const int BS1 = 64; const int BS2 = 64; int NB1 = (N+BS1-1)/BS1; int NB2 = (N+BS2-1)/BS2; dim3 dimBlock1(BS1); dim3 dimGrid1(NB1, numWalkers); dim3 dimBlock2(BS2); dim3 dimGrid2(NB2, numWalkers); update_inverse_cuda1<float,BS1><<<dimGrid1,dimBlock1>>> (updateList, N, rowstride); update_inverse_cuda2<float,BS2><<<dimGrid2,dimBlock2>>> (updateList, N, rowstride); } void update_inverse_cuda(updateJob updateList[], double dummy, int N, int rowstride, int numWalkers) { const int BS1 = 32; const int BS2 = 32; int NB1 = N/BS1 + ((N%BS1) ? 1 : 0); int NB2 = N/BS2 + ((N%BS2) ? 
1 : 0); dim3 dimBlock1(BS1); dim3 dimGrid1(NB1, numWalkers); dim3 dimBlock2(BS2); dim3 dimGrid2(NB2, numWalkers); update_inverse_cuda1<double,BS1><<<dimGrid1,dimBlock1>>> (updateList, N, rowstride); update_inverse_cuda2<double,BS2><<<dimGrid2,dimBlock2>>> (updateList, N, rowstride); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { fprintf (stderr, "CUDA error in update_inverse_cuda:\n %s\n", cudaGetErrorString(err)); abort(); } } /* Update compensated dot product using algorithm CompDot from: S. Graillat, Ph. Langlois, and N. Louvet: Accurate dot products with FMA. RNC 7, pp. 141-142. See also: http://rnc7.loria.fr/louvet_poster.pdf. The product of a and b is added to the dot product stored in sum and corr, where sum represents the head and corr represents the tail. The result is written back to the locations pointed to by new_sum and new_corr. */ template<typename T> __device__ __forceinline__ void update_dot (T a, T b, T sum, T corr, T *new_sum, T *new_corr) { T h, l, t, r, s; // 2ProdFMA: h + l = a * b h = a * b; l = fma (a, b, -h); // 2Sum: s + r = sum + h s = sum + h; t = s - h; r = s - t; t = sum - t; r = h - r; r = t + r; *new_sum = s; *new_corr = (l + r) + corr; } /* Update of Ainv after rank-1 update to A, using Sherman-Morrison algorithm, part 1. See also K. P. Esler, J. Kim, D. M. Ceperley, and L. Shulenburger: Accelerating Quantum Monte Carlo Simulations of Real Materials on GPU Clusters. Computing in Science & Engineering, Vol. 14, No. 1, Jan/Feb 2012, pp. 40-51 (section "Inverses and Updates"). Given a new row vector u to replace the k-th row of matrix A, compute a row vector delta as the difference between u and the k-th row of A. Then compute the row vector delta * Ainv and return it via delta_Ainv. Copy out the k-th column of Ainv as a contiguous vector Ainv_colk for use by the second part of the update process. The dimension of the square matrices A and Ainv is N, and the stride (in elements) between consecutive rows of Ainv (and A) is rowstride. All matrices are stored in row-major order. 
*/ template<typename T, int BS> __device__ __forceinline__ void update_inverse_core1 (const T * __restrict__ A, const T * __restrict__ Ainv, const T * __restrict__ u, T * __restrict__ delta_Ainv, T * __restrict__ Ainv_colk, int k, int N, int rowstride) { __shared__ T Ainv_colk_shared[BS], delta[BS]; T sum = (T)0, corr = (T)0;// compensated dot-product delta*Ainv(*,col_Ainv) int tidx = threadIdx.x; int col_Ainv = blockIdx.x*BS + tidx; int numBlocks = (N + BS - 1) / BS; int kBlock = k/BS; // thread block handling the k-th column of Ainv if (blockIdx.x == kBlock) { // this thread block needs to copy out the k-th column of Ainv for (int block = 0; block < numBlocks; block++) { int blockStart = block * BS; int col_A = blockStart + tidx; int row_Ainv; if (col_A < N) { delta[tidx] = u[col_A] - A[k*rowstride + col_A]; } __syncthreads(); for (int i = 0; i < min(BS, N-blockStart); i++) { row_Ainv = blockStart + i; T Ainv_elem = Ainv[row_Ainv*rowstride + col_Ainv]; update_dot<T> (delta[i], Ainv_elem, sum, corr, &sum, &corr); if (col_Ainv == k) { Ainv_colk_shared[i] = Ainv_elem; } } __syncthreads(); // Write segment of k-th column of Ainv back to global memory row_Ainv = blockStart + tidx; if (row_Ainv < N) { Ainv_colk[row_Ainv] = Ainv_colk_shared[tidx]; } } } else { for (int block = 0; block < numBlocks; block++) { int blockStart = block * BS; int col_A = blockStart + tidx; int row_Ainv; if (col_A < N) { delta[tidx] = u[col_A] - A[k*rowstride + col_A]; } __syncthreads(); for (int i = 0; i < min(BS, N-blockStart); i++) { row_Ainv = blockStart + i; T Ainv_elem = Ainv[row_Ainv*rowstride + col_Ainv]; update_dot<T> (delta[i], Ainv_elem, sum, corr, &sum, &corr); } __syncthreads(); } } // Write segment of row vector delta*Ainv back to global memory if (col_Ainv < N) { delta_Ainv[col_Ainv] = sum + corr; } } /* Update of Ainv after rank-1 update to A, using Sherman-Morrison algorithm, part 2. See also K. P. Esler, J. Kim, D. M. Ceperley, and L. Shulenburger: Accelerating Quantum Monte Carlo Simulations of Real Materials on GPU Clusters. Computing in Science & Engineering, Vol. 14, No. 1, Jan/Feb 2012, pp. 40-51 (section "Inverses and Updates"). Ainv * ek, the k-th column of Ainv, has been extracted in the first step, and is passed in as column vector Ainv_colk. Step 1 also computed the row vector delta*Ainv, passed in via delta_Ainv. We need to multiply Ainv_colk with delta_Ainv, scale the result by -1/(1+delta*Ainv*ek), then add this to Ainv. We also need to replace the k-th row of A with the new row vector u. delta*Ainv*ek is simply the k-th element of delta_Ainv. The dimension of the square matrices A and Ainv is N, and the stride (in elements) between consecutive rows of Ainv and A is rowstride. All matrices are stored in row-major order. 
*/ template<typename T, int BS> __device__ __forceinline__ void update_inverse_core2 (T * __restrict__ A, T * __restrict__ Ainv, const T * __restrict__ u, const T * __restrict__ delta_Ainv, const T * __restrict__ Ainv_colk, int k, int N, int rowstride) { __shared__ T delta_Ainv_shared[BS]; __shared__ T Ainv_colk_shared[BS]; T prefact; int tidx = threadIdx.x; int col_Ainv = blockIdx.x*BS + tidx; int col_A = col_Ainv; int numBlocks = (N + BS - 1) / BS; // Cache one segment of row vector delta*Ainv, and replace one segment of // the k-th row of A with the corresponding segment from row vector u if (col_Ainv < N) { delta_Ainv_shared[tidx] = delta_Ainv[col_Ainv]; A[k*rowstride + col_A] = u[col_A]; } prefact = -1.0f / (1.0f + delta_Ainv[k]); for (int block = 0; block < numBlocks; block++) { int blockStart = block * BS; int row_Ainv; row_Ainv = blockStart + tidx; // cache and scale next segment of k-th column of Ainv __syncthreads(); if (row_Ainv < N) { Ainv_colk_shared[tidx] = prefact * Ainv_colk[row_Ainv]; } __syncthreads(); if (col_Ainv < N) { for (int i = 0; i < min(BS, N-blockStart); i++) { row_Ainv = blockStart + i; // update one segment of current row of Ainv Ainv[row_Ainv*rowstride + col_Ainv] += delta_Ainv_shared[tidx] * Ainv_colk_shared[i]; } } } } ///////////////////////////////////////////////// // New version with fewer PCI transfers needed // ///////////////////////////////////////////////// template<typename T, int BS> __global__ void update_inverse_kernel1 (T **data, int *iat, int A_off, int Ainv_off, int newRow_off, int AinvDelta_off, int AinvColk_off, int N, int rowstride) { T* const sdata = data[blockIdx.y]; const T *A = sdata + A_off; // A const T *Ainv = sdata + Ainv_off; // Ainv const T *u = sdata + newRow_off; // new k-th row of A T *delta_Ainv = sdata + AinvDelta_off; // delta * Ainv T *Ainv_colk = sdata + AinvColk_off; // k-th column of orig. Ainv int k = iat[blockIdx.y]; update_inverse_core1<T,BS> (A, Ainv, u, delta_Ainv, Ainv_colk, k, N, rowstride); } template<typename T, int BS> __global__ void update_inverse_kernel2 (T **data, int *iat, int A_off, int Ainv_off, int newRow_off, int AinvDelta_off, int AinvColk_off, int N, int rowstride) { T * const sdata = data[blockIdx.y]; T *A = sdata + A_off; // A T *Ainv = sdata + Ainv_off; // Ainv const T *u = sdata + newRow_off; // new k-th row of A const T *delta_Ainv = sdata + AinvDelta_off; // delta * Ainv const T *Ainv_colk = sdata + AinvColk_off; // k-th column of orig. 
Ainv int k = iat[blockIdx.y]; update_inverse_core2<T,BS> (A, Ainv, u, delta_Ainv, Ainv_colk, k, N, rowstride); } void update_inverse_cuda(float **data, int iat[], int A_off, int Ainv_off, int newRow_off, int AinvDelta_off, int AinvColk_off, int N, int rowstride, int numWalkers) { const int BS1 = 128; const int BS2 = 128; int NB1 = (N+BS1-1)/BS1; int NB2 = (N+BS2-1)/BS2; dim3 dimBlock1(BS1); dim3 dimGrid1(NB1, numWalkers); dim3 dimBlock2(BS2); dim3 dimGrid2(NB2, numWalkers); update_inverse_kernel1<float,BS1><<<dimGrid1,dimBlock1>>> (data, iat, A_off, Ainv_off, newRow_off, AinvDelta_off, AinvColk_off, N, rowstride); update_inverse_kernel2<float,BS2><<<dimGrid2,dimBlock2>>> (data, iat, A_off, Ainv_off, newRow_off, AinvDelta_off, AinvColk_off, N, rowstride); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { fprintf (stderr, "CUDA error in update_inverse_cuda:\n %s\n", cudaGetErrorString(err)); abort(); } } void update_inverse_cuda(double **data, int iat[], int A_off, int Ainv_off, int newRow_off, int AinvDelta_off, int AinvColk_off, int N, int rowstride, int numWalkers) { const int BS1 = 32; const int BS2 = 32; int NB1 = (N+BS1-1)/BS1; int NB2 = (N+BS2-1)/BS2; dim3 dimBlock1(BS1); dim3 dimGrid1(NB1, numWalkers); dim3 dimBlock2(BS2); dim3 dimGrid2(NB2, numWalkers); update_inverse_kernel1<double,BS1><<<dimGrid1,dimBlock1>>> (data, iat, A_off, Ainv_off, newRow_off, AinvDelta_off, AinvColk_off, N, rowstride); update_inverse_kernel2<double,BS2><<<dimGrid2,dimBlock2>>> (data, iat, A_off, Ainv_off, newRow_off, AinvDelta_off, AinvColk_off, N, rowstride); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { fprintf (stderr, "CUDA error in update_inverse_cuda:\n %s\n", cudaGetErrorString(err)); abort(); } } template<typename T, int BS> __global__ void update_inverse_kernel1 (T **data, int k, int A_off, int Ainv_off, int newRow_off, int AinvDelta_off, int AinvColk_off, int N, int rowstride) { T* const sdata = data[blockIdx.y]; const T *A = sdata + A_off; // A const T *Ainv = sdata + Ainv_off; // Ainv const T *u = sdata + newRow_off; // new k-th row of A T *delta_Ainv = sdata + AinvDelta_off; // delta * Ainv T *Ainv_colk = sdata + AinvColk_off; // k-th column of orig. Ainv update_inverse_core1<T,BS> (A, Ainv, u, delta_Ainv, Ainv_colk, k, N, rowstride); } template<typename T, int BS> __global__ void update_inverse_kernel2 (T **data, int k, int A_off, int Ainv_off, int newRow_off, int AinvDelta_off, int AinvColk_off, int N, int rowstride) { T * const sdata = data[blockIdx.y]; T *A = sdata + A_off; // A T *Ainv = sdata + Ainv_off; // Ainv const T *u = sdata + newRow_off; // new k-th row of A const T *delta_Ainv = sdata + AinvDelta_off; // delta * Ainv const T *Ainv_colk = sdata + AinvColk_off; // k-th column of orig. 
Ainv update_inverse_core2<T,BS> (A, Ainv, u, delta_Ainv, Ainv_colk, k, N, rowstride); } void update_inverse_cuda(float **data, int iat, int A_off, int Ainv_off, int newRow_off, int AinvDelta_off, int AinvColk_off, int N, int rowstride, int numWalkers) { const int BS1 = 128; const int BS2 = 128; int NB1 = (N+BS1-1)/BS1; int NB2 = (N+BS2-1)/BS2; dim3 dimBlock1(BS1); dim3 dimGrid1(NB1, numWalkers); dim3 dimBlock2(BS2); dim3 dimGrid2(NB2, numWalkers); update_inverse_kernel1<float,BS1><<<dimGrid1,dimBlock1>>> (data, iat, A_off, Ainv_off, newRow_off, AinvDelta_off, AinvColk_off, N, rowstride); update_inverse_kernel2<float,BS2><<<dimGrid2,dimBlock2>>> (data, iat, A_off, Ainv_off, newRow_off, AinvDelta_off, AinvColk_off, N, rowstride); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { fprintf (stderr, "CUDA error in update_inverse_cuda:\n %s\n", cudaGetErrorString(err)); abort(); } } void update_inverse_cuda(double **data, int iat, int A_off, int Ainv_off, int newRow_off, int AinvDelta_off, int AinvColk_off, int N, int rowstride, int numWalkers) { const int BS1 = 32; const int BS2 = 32; int NB1 = (N+BS1-1)/BS1; int NB2 = (N+BS2-1)/BS2; dim3 dimBlock1(BS1); dim3 dimGrid1(NB1, numWalkers); dim3 dimBlock2(BS2); dim3 dimGrid2(NB2, numWalkers); update_inverse_kernel1<double,BS1><<<dimGrid1,dimBlock1>>> (data, iat, A_off, Ainv_off, newRow_off, AinvDelta_off, AinvColk_off, N, rowstride); update_inverse_kernel2<double,BS2><<<dimGrid2,dimBlock2>>> (data, iat, A_off, Ainv_off, newRow_off, AinvDelta_off, AinvColk_off, N, rowstride); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { fprintf (stderr, "CUDA error in update_inverse_cuda:\n %s\n", cudaGetErrorString(err)); abort(); } } // The first kernel just computes AinvT * u and also stores the kth // col of Ainv in global memory template<typename T, int BS> __global__ void update_inverse_cuda1 (T **A_g, T **Ainv_g, T **u_g, T **Ainv_delta_g, T **Ainv_colk_g, int N, int rowstride, int k) { __shared__ T *A, *Ainv, *u, *Ainv_delta, *Ainv_colk; if (threadIdx.x==0) { A = A_g[blockIdx.y]; Ainv = Ainv_g[blockIdx.y]; u = u_g[blockIdx.y]; Ainv_delta = Ainv_delta_g[blockIdx.y]; Ainv_colk = Ainv_colk_g[blockIdx.y]; } __syncthreads(); // Store the product Ainv * u in shared memory T Ainv_delta_tid; __shared__ T Ainv_colk_shared[BS], delta[BS]; Ainv_delta_tid = 0.0f; __syncthreads(); int col = blockIdx.x*BS + threadIdx.x; int numblocks = N / BS + ((N%BS) ? 
1 : 0); int kBlock = k/BS; // If the column I need to pull from Ainv is in this thread block // domain, do the following __syncthreads(); for (int block=0; block<numblocks; block++) { delta[threadIdx.x] = u[block*BS+threadIdx.x] - A[k*rowstride + block*BS + threadIdx.x]; __syncthreads(); for (int i=0; i<BS; i++) { int row = block*BS + i; T a = Ainv[row*rowstride+col]; if (col == k) Ainv_colk_shared[i] = a; Ainv_delta_tid += a*delta[i]; __syncthreads(); } if (blockIdx.x == kBlock) Ainv_colk[block*BS+threadIdx.x] = Ainv_colk_shared[threadIdx.x]; __syncthreads(); } __syncthreads(); // Write the data back to global memory Ainv_delta[col] = Ainv_delta_tid; __syncthreads(); } template<typename T, int BS> __global__ void update_inverse_cuda2 (T **A_g, T **Ainv_g, T **u_g, T **Ainv_delta_g, T **Ainv_colk_g, int N, int rowstride, int k) { __shared__ T *A, *u, *Ainv, *Ainv_delta, *Ainv_colk; int tid = threadIdx.x; if (threadIdx.x==0) { A = A_g[blockIdx.y]; u = u_g[blockIdx.y]; Ainv = Ainv_g[blockIdx.y]; Ainv_delta = Ainv_delta_g[blockIdx.y]; Ainv_colk = Ainv_colk_g[blockIdx.y]; } __syncthreads(); T Ainv_delta_tid; __shared__ T Ainv_colk_shared[BS]; int col = blockIdx.x*BS + threadIdx.x; // Read the data back from global memory Ainv_delta_tid = Ainv_delta[col]; Ainv_colk_shared[threadIdx.x] = Ainv_colk[col]; if (col < N) A[k*rowstride + col] = u[col]; __syncthreads(); __shared__ T prefact; if (threadIdx.x == 0) prefact = -1.0f/(1.0f+Ainv_delta[k]); int numblocks = N / BS + ((N % BS) ? 1 : 0); __syncthreads(); for (int block=0; block<numblocks; block++) { Ainv_colk_shared[tid] = prefact*Ainv_colk[block*BS+threadIdx.x]; __syncthreads(); T *Ainv_row = Ainv+block*BS*rowstride + col; for (int i=0; i<BS; i++, Ainv_row+=rowstride) *Ainv_row += Ainv_delta_tid*Ainv_colk_shared[i]; __syncthreads(); } } void update_inverse_cuda(float *A_g[], float *Ainv_g[], float *u_g[], float *Ainv_delta_g[], float *Ainv_colk_g[], int N, int rowstride, int iat, int numWalkers) { const int BS1 = 64; const int BS2 = 64; int NB1 = N/BS1 + ((N%BS1) ? 1 : 0); int NB2 = N/BS2 + ((N%BS2) ? 1 : 0); dim3 dimBlock1(BS1); dim3 dimGrid1(NB1, numWalkers); dim3 dimBlock2(BS2); dim3 dimGrid2(NB2, numWalkers); update_inverse_cuda1<float,BS1><<<dimGrid1,dimBlock1>>> (A_g, Ainv_g, u_g, Ainv_delta_g, Ainv_colk_g, N, rowstride, iat); update_inverse_cuda2<float,BS2><<<dimGrid2,dimBlock2>>> (A_g, Ainv_g, u_g, Ainv_delta_g, Ainv_colk_g, N, rowstride, iat); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { fprintf (stderr, "CUDA error in update_inverse_cuda:\n %s\n", cudaGetErrorString(err)); abort(); } } void update_inverse_cuda(double *A_g[], double *Ainv_g[], double *u_g[], double *Ainv_delta_g[], double *Ainv_colk_g[], int N, int rowstride, int iat, int numWalkers) { const int BS1 = 32; const int BS2 = 32; int NB1 = N/BS1 + ((N%BS1) ? 1 : 0); int NB2 = N/BS2 + ((N%BS2) ? 1 : 0); dim3 dimBlock1(BS1); dim3 dimGrid1(NB1, numWalkers); dim3 dimBlock2(BS2); dim3 dimGrid2(NB2, numWalkers); update_inverse_cuda1<double,BS1><<<dimGrid1,dimBlock1>>> (A_g, Ainv_g, u_g, Ainv_delta_g, Ainv_colk_g, N, rowstride, iat); update_inverse_cuda2<double,BS2><<<dimGrid2,dimBlock2>>> (A_g, Ainv_g, u_g, Ainv_delta_g, Ainv_colk_g, N, rowstride, iat); } template<typename T, int BS, int MAXN> __global__ void update_inverse_transpose_cuda(T **A_g, T **AinvT_g, T **u_g, int N, int row_stride, int elec) { __shared__ float AinvT_row[MAXN], Ainv_colk[MAXN], delta[MAXN]; int numBlocks = N/blockDim.x + ((N%blockDim.x) ? 
1 : 0); //int numBlocks = 4; __shared__ T *A, *AinvT, *u; if (threadIdx.x == 0) { A = A_g[blockIdx.x]; AinvT = AinvT_g[blockIdx.x]; u = u_g[blockIdx.x]; } __syncthreads(); T prefactor; __shared__ T sum[BS]; sum[threadIdx.x] = 0.0f; for (int block=0; block<numBlocks; block++) { int off = block *BS + threadIdx.x; T u1 = u[off]; delta[off] = u1 - A[elec*row_stride + off]; Ainv_colk[off] = AinvT[elec*row_stride + off]; A[elec*row_stride + off] = u1; sum[threadIdx.x] += (Ainv_colk[off] * delta[off]); } __syncthreads(); for (int s=(BS>>1); s>0; s>>=1) { __syncthreads(); if (threadIdx.x < s) sum[threadIdx.x] += sum[threadIdx.x+s]; } __syncthreads(); prefactor = -1.0f/(1.0f+sum[0]); for (int row=0; row<N; row++) { // First load row into shared memory sum[threadIdx.x] = 0.0; for (int block=0; block<numBlocks; block++) { int off = block*BS + threadIdx.x; AinvT_row[off] = AinvT[row*row_stride+off]; sum[threadIdx.x] += (AinvT_row[off] * delta[off]); } // Now sum across row to get Ainv_delta for (int s=BS>>1; s>0; s>>=1) { __syncthreads(); if (threadIdx.x < s) sum[threadIdx.x] += sum[threadIdx.x+s]; } __syncthreads(); // sum[0] now has the AinvT * delta // Add on outer product for (int block=0; block<numBlocks; block++) { int off = BS*block + threadIdx.x; AinvT[row*row_stride + off] = AinvT_row[off] + prefactor*sum[0] *Ainv_colk[off]; } __syncthreads(); } } template<typename T, int BS, int MAXN> __global__ void update_inverse_transpose_cuda_2pass(T **A_g, T **AinvT_g, T **u_g, int N, int row_stride, int elec) { __shared__ float Ainv_colk[MAXN], delta[MAXN]; int numBlocks = N/blockDim.x + ((N%blockDim.x) ? 1 : 0); //int numBlocks = 4; __shared__ T *A, *AinvT, *u; if (threadIdx.x == 0) { A = A_g[blockIdx.x]; AinvT = AinvT_g[blockIdx.x]; u = u_g[blockIdx.x]; } __syncthreads(); T prefactor; __shared__ T sum[BS]; sum[threadIdx.x] = 0.0f; for (int block=0; block<numBlocks; block++) { int off = block *BS + threadIdx.x; T u1 = u[off]; delta[off] = u1 - A[elec*row_stride + off]; Ainv_colk[off] = AinvT[elec*row_stride + off]; A[elec*row_stride + off] = u1; sum[threadIdx.x] += (Ainv_colk[off] * delta[off]); } __syncthreads(); for (int s=(BS>>1); s>0; s>>=1) { __syncthreads(); if (threadIdx.x < s && threadIdx.y == 0) sum[threadIdx.x] += sum[threadIdx.x+s]; } __syncthreads(); prefactor = -1.0f/(1.0f+sum[0]); __shared__ T sum2[BS][BS+1]; for (int b1=0; b1 < numBlocks; b1++) { sum[threadIdx.x] = 0.0f; for (int i=0; i<BS; i++) sum2[i][threadIdx.x] = 0.0f; // Compute Ainv * delta; for (int i=0; i<BS; i++) { int row = b1*BS +i; for (int b2=0; b2 < numBlocks; b2++) { int col = b2*BS + threadIdx.x; sum2[i][threadIdx.x] += AinvT[row*row_stride + col] * delta[col]; } } __syncthreads(); for (int i=0; i<BS; i++) sum[threadIdx.x] += prefactor*sum2[threadIdx.x][i]; // Outer product for (int i=0; i<BS; i++) { int row = b1*BS +i; for (int b2=0; b2 < numBlocks; b2++) { int col = b2*BS + threadIdx.x; AinvT[row*row_stride + col] += sum[i] *Ainv_colk[col]; } } } } template<typename T, int BS> __global__ void calc_ratios_transpose (T **AinvT_list, T **new_row_list, T *ratio_out, int N, int row_stride, int elec, int numMats) { __shared__ float *AinvT[BS], *new_row[BS]; int matNum = blockIdx.x*BS + threadIdx.x; if (matNum < numMats) { AinvT[threadIdx.x] = AinvT_list[matNum] + row_stride * BS; new_row[threadIdx.x] = new_row_list[matNum]; } __shared__ float AinvT_phi[BS][BS+1]; __shared__ float ratio[BS]; ratio[threadIdx.x] = 0.0; int numBlocks = N / BS; if (numBlocks*BS < N) numBlocks++; for (int block=0; block<numBlocks; block++) { int 
col = block*BS + threadIdx.x; // First, read the data into shared memory for (int i=0; i<BS; i++) AinvT_phi[i][threadIdx.x] = (AinvT[i])[col] * (new_row[i])[col]; __syncthreads(); // Now sum for (int i=0; i<BS; i++) ratio[threadIdx.x] += AinvT_phi[threadIdx.x][i]; } if (matNum < numMats) ratio_out[matNum] = ratio[threadIdx.x]; } template<typename T, int BS> __global__ void calc_ratios (T **Ainv_list, T **new_row_list, T *ratio, int N, int row_stride, int elec) { int tid = threadIdx.x; int col = /*blockIdx.x*BS * */tid; __shared__ T *Ainv, *new_row; if (tid == 0) { Ainv = Ainv_list[blockIdx.x]; new_row = new_row_list[blockIdx.x]; } __syncthreads(); __shared__ T new_row_shared[BS]; if (col < N) new_row_shared[tid] = new_row[tid]; __shared__ T Ainv_colk_shared[BS]; // This is *highly* uncoallesced, but we just have to eat it to allow // other kernels to operate quickly. if (col < N) Ainv_colk_shared[tid] = Ainv[col*row_stride + elec]; __syncthreads(); __shared__ T Ainv_new_row[BS]; if (col < N) Ainv_new_row[tid] = Ainv_colk_shared[tid] * new_row_shared[tid]; __syncthreads(); // Now, we have to dot for (unsigned int s=BS/2; s>0; s>>=1) { if (tid < s && (tid+s) < N) Ainv_new_row[tid] += Ainv_new_row[tid + s]; __syncthreads(); } if (tid == 0) ratio[blockIdx.x] = Ainv_new_row[0]; } void determinant_ratios_cuda (float *Ainv_list[], float *new_row_list[], float *ratios, int N, int row_stride, int iat, int numWalkers) { dim3 dimBlock(N); dim3 dimGrid(numWalkers); if (N <= 32) calc_ratios<float,32><<<dimGrid,dimBlock>>>(Ainv_list, new_row_list, ratios, N, row_stride, iat); else if (N <= 64) calc_ratios<float,64><<<dimGrid,dimBlock>>>(Ainv_list, new_row_list, ratios, N, row_stride, iat); else if (N <= 128) calc_ratios<float,128><<<dimGrid,dimBlock>>>(Ainv_list, new_row_list, ratios, N, row_stride, iat); else if (N <= 256) calc_ratios<float,256><<<dimGrid,dimBlock>>>(Ainv_list, new_row_list, ratios, N, row_stride, iat); else if (N <= 512) calc_ratios<float,512><<<dimGrid,dimBlock>>>(Ainv_list, new_row_list, ratios, N, row_stride, iat); else if (N <= 1024) calc_ratios<float,1024><<<dimGrid,dimBlock>>>(Ainv_list, new_row_list, ratios, N, row_stride, iat); else { fprintf (stdout, "Error: N too large for CUDA evaluation.\n"); abort(); } } void determinant_ratios_cuda (double *Ainv_list[], double *new_row_list[], double *ratios, int N, int row_stride, int iat, int numWalkers) { dim3 dimBlock(N); dim3 dimGrid(numWalkers); if (N <= 32) calc_ratios<double,32><<<dimGrid,dimBlock>>>(Ainv_list, new_row_list, ratios, N, row_stride, iat); else if (N <= 64) calc_ratios<double,64><<<dimGrid,dimBlock>>>(Ainv_list, new_row_list, ratios, N, row_stride, iat); else if (N <= 128) calc_ratios<double,128><<<dimGrid,dimBlock>>>(Ainv_list, new_row_list, ratios, N, row_stride, iat); else if (N <= 256) calc_ratios<double,256><<<dimGrid,dimBlock>>>(Ainv_list, new_row_list, ratios, N, row_stride, iat); else if (N <= 512) calc_ratios<double,512><<<dimGrid,dimBlock>>>(Ainv_list, new_row_list, ratios, N, row_stride, iat); else { fprintf (stdout, "Error: N too large for CUDA evaluation.\n"); abort(); } } template<typename T, int BS> __global__ void calc_ratio_grad_lapl (T **Ainv_list, T **new_row_list, T **grad_lapl_list, T *ratio_grad_lapl, int N, int row_stride, int elec) { int tid = threadIdx.x; int NB = N/BS + ((N % BS) ? 
1 : 0); __shared__ T *Ainv, *new_row, *grad_lapl; if (tid == 0) { Ainv = Ainv_list[blockIdx.x]; new_row = new_row_list[blockIdx.x]; grad_lapl = grad_lapl_list[blockIdx.x]; } __syncthreads(); __shared__ T Ainv_colk_shared[BS]; __shared__ T ratio_prod[5][BS+1]; ratio_prod[0][tid] = 0.0f; ratio_prod[1][tid] = 0.0f; ratio_prod[2][tid] = 0.0f; ratio_prod[3][tid] = 0.0f; ratio_prod[4][tid] = 0.0f; // This is *highly* uncoallesced, but we just have to eat it to allow // other kernels to operate quickly. __syncthreads(); for (int block=0; block<NB; block++) { int col = block*BS + tid; if (col < N) Ainv_colk_shared[tid] = Ainv[col*row_stride + elec]; __syncthreads(); if (col < N) { ratio_prod[0][tid] += Ainv_colk_shared[tid] * new_row[col]; ratio_prod[1][tid] += Ainv_colk_shared[tid] * grad_lapl[0*row_stride+col]; ratio_prod[2][tid] += Ainv_colk_shared[tid] * grad_lapl[1*row_stride+col]; ratio_prod[3][tid] += Ainv_colk_shared[tid] * grad_lapl[2*row_stride+col]; ratio_prod[4][tid] += Ainv_colk_shared[tid] * grad_lapl[3*row_stride+col]; } __syncthreads(); } // Now, we have to sum for (unsigned int s=BS/2; s>0; s>>=1) { if (tid < s) { ratio_prod[0][tid] += ratio_prod[0][tid + s]; // Value ratio_prod[1][tid] += ratio_prod[1][tid + s]; // grad_x ratio_prod[2][tid] += ratio_prod[2][tid + s]; // grad_y ratio_prod[3][tid] += ratio_prod[3][tid + s]; // grad_z ratio_prod[4][tid] += ratio_prod[4][tid + s]; // lapl } __syncthreads(); } // Subtract off gradient^2 from laplacian if (tid == 0) { ratio_prod[4][0] -= (ratio_prod[1][0]*ratio_prod[1][0] + ratio_prod[2][0]*ratio_prod[2][0] + ratio_prod[3][0]*ratio_prod[3][0]); } __syncthreads(); // Present gradient and laplacian are w.r.t old position. Divide by // ratio to make it w.r.t. new position if (tid < 4) ratio_prod[tid+1][0] /= ratio_prod[0][0]; if (tid < 5) ratio_grad_lapl[5*blockIdx.x+tid] = ratio_prod[tid][0]; } template<typename T, int BS> __global__ void calc_ratio_grad_lapl (T **Ainv_list, T **new_row_list, T **grad_lapl_list, T *ratio_grad_lapl, int N, int row_stride, int *elec_list) { int tid = threadIdx.x; int NB = N/BS + ((N % BS) ? 1 : 0); __shared__ T *Ainv, *new_row, *grad_lapl; __shared__ int elec; if (tid == 0) { Ainv = Ainv_list[blockIdx.x]; new_row = new_row_list[blockIdx.x]; grad_lapl = grad_lapl_list[blockIdx.x]; elec = elec_list[blockIdx.x]; } __syncthreads(); __shared__ T Ainv_colk_shared[BS]; __shared__ T ratio_prod[5][BS+1]; ratio_prod[0][tid] = 0.0f; ratio_prod[1][tid] = 0.0f; ratio_prod[2][tid] = 0.0f; ratio_prod[3][tid] = 0.0f; ratio_prod[4][tid] = 0.0f; // This is *highly* uncoallesced, but we just have to eat it to allow // other kernels to operate quickly. 
__syncthreads(); for (int block=0; block<NB; block++) { int col = block*BS + tid; if (col < N) Ainv_colk_shared[tid] = Ainv[col*row_stride + elec]; __syncthreads(); if (col < N) { ratio_prod[0][tid] += Ainv_colk_shared[tid] * new_row[col]; ratio_prod[1][tid] += Ainv_colk_shared[tid] * grad_lapl[0*row_stride+col]; ratio_prod[2][tid] += Ainv_colk_shared[tid] * grad_lapl[1*row_stride+col]; ratio_prod[3][tid] += Ainv_colk_shared[tid] * grad_lapl[2*row_stride+col]; ratio_prod[4][tid] += Ainv_colk_shared[tid] * grad_lapl[3*row_stride+col]; } __syncthreads(); } // Now, we have to sum for (unsigned int s=BS/2; s>0; s>>=1) { if (tid < s) { ratio_prod[0][tid] += ratio_prod[0][tid + s]; // Value ratio_prod[1][tid] += ratio_prod[1][tid + s]; // grad_x ratio_prod[2][tid] += ratio_prod[2][tid + s]; // grad_y ratio_prod[3][tid] += ratio_prod[3][tid + s]; // grad_z ratio_prod[4][tid] += ratio_prod[4][tid + s]; // lapl } __syncthreads(); } // Subtract off gradient^2 from laplacian if (tid == 0) { ratio_prod[4][0] -= (ratio_prod[1][0]*ratio_prod[1][0] + ratio_prod[2][0]*ratio_prod[2][0] + ratio_prod[3][0]*ratio_prod[3][0]); } __syncthreads(); // Present gradient and laplacian are w.r.t old position. Divide by // ratio to make it w.r.t. new position if (tid < 4) ratio_prod[tid+1][0] /= ratio_prod[0][0]; if (tid < 5) ratio_grad_lapl[5*blockIdx.x+tid] = ratio_prod[tid][0]; } void determinant_ratios_grad_lapl_cuda (float *Ainv_list[], float *new_row_list[], float *grad_lapl_list[], float ratios_grad_lapl[], int N, int row_stride, int iat, int numWalkers) { const int BS = 32; dim3 dimBlock(BS); dim3 dimGrid(numWalkers); calc_ratio_grad_lapl<float,BS><<<dimGrid,dimBlock, 0, gpu::kernelStream>>> (Ainv_list, new_row_list, grad_lapl_list, ratios_grad_lapl, N, row_stride, iat); } void determinant_ratios_grad_lapl_cuda (double *Ainv_list[], double *new_row_list[], double *grad_lapl_list[], double ratios_grad_lapl[], int N, int row_stride, int iat, int numWalkers) { const int BS = 32; dim3 dimBlock(BS); dim3 dimGrid(numWalkers); calc_ratio_grad_lapl<double,BS><<<dimGrid,dimBlock, 0, gpu::kernelStream>>> (Ainv_list, new_row_list, grad_lapl_list, ratios_grad_lapl, N, row_stride, iat); } void determinant_ratios_grad_lapl_cuda (float *Ainv_list[], float *new_row_list[], float *grad_lapl_list[], float ratios_grad_lapl[], int N, int row_stride, int iat_list[], int numWalkers) { const int BS = 32; dim3 dimBlock(BS); dim3 dimGrid(numWalkers); calc_ratio_grad_lapl<float,BS><<<dimGrid,dimBlock, 0, gpu::kernelStream>>> (Ainv_list, new_row_list, grad_lapl_list, ratios_grad_lapl, N, row_stride, iat_list); } void determinant_ratios_grad_lapl_cuda (double *Ainv_list[], double *new_row_list[], double *grad_lapl_list[], double ratios_grad_lapl[], int N, int row_stride, int iat_list[], int numWalkers) { const int BS = 32; dim3 dimBlock(BS); dim3 dimGrid(numWalkers); calc_ratio_grad_lapl<double,BS><<<dimGrid,dimBlock, 0, gpu::kernelStream>>> (Ainv_list, new_row_list, grad_lapl_list, ratios_grad_lapl, N, row_stride, iat_list); } template<typename T, int BS> __global__ void calc_grad_kernel (T **Ainv_list, T **grad_lapl_list, T *grad, int N, int row_stride, int elec) { int tid = threadIdx.x; int NB = N/BS + ((N % BS) ? 
1 : 0); __shared__ T *Ainv, *grad_lapl; if (tid == 0) { Ainv = Ainv_list[blockIdx.x]; grad_lapl = grad_lapl_list[blockIdx.x] + 4*elec*row_stride; } __syncthreads(); __shared__ T Ainv_colk_shared[BS]; __shared__ T ratio_prod[3][BS+1]; ratio_prod[0][tid] = 0.0f; ratio_prod[1][tid] = 0.0f; ratio_prod[2][tid] = 0.0f; // This is *highly* uncoallesced, but we just have to eat it to allow // other kernels to operate quickly. __syncthreads(); for (int block=0; block<NB; block++) { int col = block*BS + tid; if (col < N) Ainv_colk_shared[tid] = Ainv[col*row_stride + elec]; __syncthreads(); if (col < N) { ratio_prod[0][tid] += Ainv_colk_shared[tid] * grad_lapl[0*row_stride+col]; ratio_prod[1][tid] += Ainv_colk_shared[tid] * grad_lapl[1*row_stride+col]; ratio_prod[2][tid] += Ainv_colk_shared[tid] * grad_lapl[2*row_stride+col]; } __syncthreads(); } // Now, we have to sum for (unsigned int s=BS/2; s>0; s>>=1) { if (tid < s) { ratio_prod[0][tid] += ratio_prod[0][tid + s]; // grad_x ratio_prod[1][tid] += ratio_prod[1][tid + s]; // grad_y ratio_prod[2][tid] += ratio_prod[2][tid + s]; // grad_z } __syncthreads(); } if (tid < 3) grad[3*blockIdx.x+tid] = ratio_prod[tid][0]; } void calc_gradient (float *Ainv_list[], float *grad_lapl_list[], float grad[], int N, int row_stride, int elec, int numWalkers) { const int BS = 32; dim3 dimBlock(BS); dim3 dimGrid(numWalkers); calc_grad_kernel<float,BS><<<dimGrid,dimBlock, 0, gpu::kernelStream>>> (Ainv_list, grad_lapl_list, grad, N, row_stride, elec); } void calc_gradient (double *Ainv_list[], double *grad_lapl_list[], double grad[], int N, int row_stride, int elec, int numWalkers) { const int BS = 32; dim3 dimBlock(BS); dim3 dimGrid(numWalkers); calc_grad_kernel<double,BS><<<dimGrid,dimBlock, 0, gpu::kernelStream>>> (Ainv_list, grad_lapl_list, grad, N, row_stride, elec); } #define RATIO_BS 16 template<typename T> __global__ void all_ratios_kernel (T **Ainv_list, T **new_mat_list, T **ratio_list, int N, int row_stride) { __shared__ T *Ainv, *new_mat, *ratio; if (threadIdx.x == 0 && threadIdx.y == 0) { Ainv = Ainv_list[blockIdx.x]; new_mat = new_mat_list[blockIdx.x]; ratio = ratio_list[blockIdx.x]; } __shared__ float Ainv_block[RATIO_BS][RATIO_BS+1]; // __shared__ float new_block[RATIO_BS][RATIO_BS+1]; __shared__ float ratio_block[RATIO_BS][RATIO_BS+1]; unsigned int numBlocks = N >> 4; if (N & 15) numBlocks++; for (unsigned int yBlock=0; yBlock<numBlocks; yBlock++) { ratio_block[threadIdx.y][threadIdx.x] = 0.0f; __syncthreads(); for (unsigned int xBlock=0; xBlock<numBlocks; xBlock++) { unsigned int xIndex = yBlock * RATIO_BS + threadIdx.x; unsigned int yIndex = xBlock * RATIO_BS + threadIdx.y; unsigned int index = yIndex*row_stride + xIndex; if ((xIndex < N) && (yIndex < N)) Ainv_block[threadIdx.x][threadIdx.y] = Ainv[index]; __syncthreads(); xIndex = xBlock * RATIO_BS + threadIdx.x; yIndex = yBlock * RATIO_BS + threadIdx.y; index = yIndex*row_stride + xIndex; if ((xIndex < N) && (yIndex < N)) ratio_block[threadIdx.y][threadIdx.x] += new_mat[index] * Ainv_block[threadIdx.y][threadIdx.x]; __syncthreads(); } __syncthreads(); // Now, we have to do the reduction across the ratio_blocks if (threadIdx.x < 8) ratio_block[threadIdx.y][threadIdx.x] += ratio_block[threadIdx.y][threadIdx.x+8]; if (threadIdx.x < 4) ratio_block[threadIdx.y][threadIdx.x] += ratio_block[threadIdx.y][threadIdx.x+4]; if (threadIdx.x < 2) ratio_block[threadIdx.y][threadIdx.x] += ratio_block[threadIdx.y][threadIdx.x+2]; if (threadIdx.x < 1) ratio_block[threadIdx.y][threadIdx.x] += 
ratio_block[threadIdx.y][threadIdx.x+1]; __syncthreads(); if (threadIdx.y == 0 && (yBlock * RATIO_BS + threadIdx.x) < N) ratio[yBlock * RATIO_BS + threadIdx.x] = ratio_block[threadIdx.x][0]; } } void calc_all_ratios (float *Ainv_list[], float *new_mat_list[], float *ratio_list[], int N, int row_stride, int num_mats) { dim3 dimBlock(RATIO_BS, RATIO_BS); dim3 dimGrid (num_mats); all_ratios_kernel<float><<<dimGrid,dimBlock>>> (Ainv_list, new_mat_list, ratio_list, N, row_stride); } const int MAX_RATIO_ROWS = 20; template<typename T, int BS> __global__ void calc_many_ratios_kernel (T **Ainv_list, T **new_row_list, T **ratio_list, int *num_ratio_list, int N, int row_stride, int *elec_list) { int tid = threadIdx.x; __shared__ T *Ainv, *new_rows, *ratios; __shared__ int num_ratios, elec; if (tid == 0) { Ainv = Ainv_list[blockIdx.x]; new_rows = new_row_list[blockIdx.x]; num_ratios = num_ratio_list[blockIdx.x]; ratios = ratio_list[blockIdx.x]; elec = elec_list[blockIdx.x]; } __syncthreads(); int NB = N/BS + ((N%BS) ? 1 : 0); __shared__ T Ainv_shared[BS], row[BS]; // We use BS+1 to avoid bank conflicts in the writing. __shared__ T ratio_sum[MAX_RATIO_ROWS][BS+1]; for (int iratio=0; iratio<num_ratios; iratio++) ratio_sum[iratio][tid] = (T)0.0; __syncthreads(); for (int block=0; block<NB; block++) { int off = block*BS+tid; bool mask = off < N; if (mask) Ainv_shared[tid] = Ainv[off*row_stride+elec]; __syncthreads(); for (int iratio=0; iratio<num_ratios; iratio++) if (mask) ratio_sum[iratio][tid] += Ainv_shared[tid] * new_rows[iratio*row_stride + off]; __syncthreads(); } // now, sum up ratios for (int iratio = 0; iratio<num_ratios; iratio++) { for (int s=BS>>1; s>0; s>>=1) { if (tid < s) ratio_sum[iratio][tid] += ratio_sum[iratio][tid+s]; __syncthreads(); } } // Store sums in parallel if (tid < num_ratios) ratios[tid] = ratio_sum[tid][0]; } void calc_many_ratios (float *Ainv_list[], float *new_row_list[], float* ratio_list[], int num_ratio_list[], int N, int row_stride, int elec_list[], int numWalkers) { const int BS=32; dim3 dimBlock(BS); dim3 dimGrid (numWalkers); calc_many_ratios_kernel<float,BS><<<dimGrid,dimBlock>>> (Ainv_list, new_row_list, ratio_list, num_ratio_list, N, row_stride, elec_list); } void calc_many_ratios (double *Ainv_list[], double *new_row_list[], double* ratio_list[], int num_ratio_list[], int N, int row_stride, int elec_list[], int numWalkers) { const int BS=32; dim3 dimBlock(BS); dim3 dimGrid (numWalkers); calc_many_ratios_kernel<double,BS><<<dimGrid,dimBlock>>> (Ainv_list, new_row_list, ratio_list, num_ratio_list, N, row_stride, elec_list); } #define SCALE_BS 64 __constant__ float GGt[3][3]; template<typename T> __global__ void scale_grad_lapl_kernel (T **grad_list, T **hess_list, T **grad_lapl_list, T *Linv, int N) { __shared__ float gradBlock[3][SCALE_BS]; __shared__ float hessBlock[6][SCALE_BS]; // __shared__ float outBlock [4][SCALE_BS]; __shared__ float G[3][3], GGt[3][3]; __shared__ float *grad, *hess, *out; if (threadIdx.x == 0) { grad = grad_list[blockIdx.y]; hess = hess_list[blockIdx.y]; out = grad_lapl_list[blockIdx.y]; } int i = threadIdx.x/3; int j = threadIdx.x%3; if (threadIdx.x < 9) G[i][j] = Linv[threadIdx.x]; __syncthreads(); if (threadIdx.x < 9) { GGt[i][j] = (G[i][0] * G[0][j] + G[i][1] * G[1][j] + G[i][2] * G[2][j]); } // Load the gradients into shared memory for (int i=0; i<3; i++) { unsigned int gIndex = (3 * blockIdx.x+i) * SCALE_BS + threadIdx.x; if (gIndex < 3*N) gradBlock[i][threadIdx.x] = grad[gIndex]; } // Load the hessian into shared memory for (int 
i=0; i<6; i++) { unsigned int hIndex = (6 * blockIdx.x+i) * SCALE_BS + threadIdx.x; if (hIndex < 6*N) hessBlock[i][threadIdx.x] = grad[hIndex]; } // Now, loop through the rows that I own and compute the // dimensioned gradients and laplacians from the // dimensionless gradients and Hessians. int row = blockIdx.x*SCALE_BS; float val; // x component of gradient val = (G[0][0]*gradBlock[0][threadIdx.x] + G[0][1]*gradBlock[1][threadIdx.x] + G[0][2]*gradBlock[2][threadIdx.x]); out[row + 0*N + threadIdx.x] = val; // y component of gradient val = (G[1][0]*gradBlock[0][threadIdx.x] + G[1][1]*gradBlock[1][threadIdx.x] + G[1][2]*gradBlock[2][threadIdx.x]); out[row + 1*N + threadIdx.x] = val; // z component of gradient val = (G[2][0]*gradBlock[0][threadIdx.x] + G[2][1]*gradBlock[1][threadIdx.x] + G[2][2]*gradBlock[2][threadIdx.x]); out[row + 2*N + threadIdx.x] = val; // Hessian = H00 H01 H02 H11 H12 H22 // Matrix = [0 1 2] // [1 3 4] // [2 4 5] // laplacian = Trace(GGt*Hessian) val = (GGt[0][0]*hessBlock[0][threadIdx.x] + GGt[0][1]*hessBlock[1][threadIdx.x] + GGt[0][2]*hessBlock[2][threadIdx.x] + GGt[1][0]*hessBlock[1][threadIdx.x] + GGt[1][1]*hessBlock[3][threadIdx.x] + GGt[1][2]*hessBlock[4][threadIdx.x] + GGt[2][0]*hessBlock[2][threadIdx.x] + GGt[2][1]*hessBlock[4][threadIdx.x] + GGt[2][2]*hessBlock[5][threadIdx.x]); out[row + 3*N + threadIdx.x] = val; } // This function reads the vectors pointed to by grad_list and // hess_list. These are in memory as // [grad0_x grad0_y grad0_z grad1_x grad1_y ... ] and // [hess0_xx hess0_xy hess0_xy hess0_yy hess0_yz hess0_zz ...] // It the writes the data into memory as // [grad0_x grad1_x ... grad(N-1)_x grad0_y ... grad(N-1)_x lapl0 // lapl1...] void scale_grad_lapl(float *grad_list[], float *hess_list[], float *grad_lapl_list[], float Linv[], int N, int num_walkers) { dim3 dimBlock(SCALE_BS); dim3 dimGrid(N/SCALE_BS, num_walkers); if (N%SCALE_BS) dimGrid.x++; scale_grad_lapl_kernel<float><<<dimGrid,dimBlock>>> (grad_list, hess_list, grad_lapl_list, Linv, N); } template<typename T> __global__ void all_ratios_grad_lapl_kernel (T **Ainv_list, T **grad_lapl_list, T **out_list, int N, int row_stride) { __shared__ T *Ainv, *gl_array, *out; if (threadIdx.x == 0 && threadIdx.y == 0) { Ainv = Ainv_list[blockIdx.x]; gl_array = grad_lapl_list[blockIdx.x]; out = out_list[blockIdx.x]; } __syncthreads(); __shared__ float Ainv_block[RATIO_BS][RATIO_BS+1]; __shared__ float grad_lapl_block[4][RATIO_BS][RATIO_BS+1]; unsigned int numBlocks = N >> 4; if (N & 15) numBlocks++; __syncthreads(); for (unsigned int yBlock=0; yBlock<numBlocks; yBlock++) { __syncthreads(); grad_lapl_block[0][threadIdx.y][threadIdx.x] = 0.0f; grad_lapl_block[1][threadIdx.y][threadIdx.x] = 0.0f; grad_lapl_block[2][threadIdx.y][threadIdx.x] = 0.0f; grad_lapl_block[3][threadIdx.y][threadIdx.x] = 0.0f; __syncthreads(); for (unsigned int xBlock=0; xBlock<numBlocks; xBlock++) { unsigned int xIndex = yBlock * RATIO_BS + threadIdx.x; unsigned int yIndex = xBlock * RATIO_BS + threadIdx.y; unsigned int index = yIndex*row_stride + xIndex; if ((xIndex < N) && (yIndex < N)) Ainv_block[threadIdx.x][threadIdx.y] = Ainv[index]; __syncthreads(); xIndex = xBlock * RATIO_BS + threadIdx.x; yIndex = yBlock * RATIO_BS + threadIdx.y; index = 4*yIndex*row_stride + xIndex; __syncthreads(); if ((xIndex < N) && (yIndex < N)) { grad_lapl_block[0][threadIdx.y][threadIdx.x] += gl_array[index+0*row_stride] * Ainv_block[threadIdx.y][threadIdx.x]; grad_lapl_block[1][threadIdx.y][threadIdx.x] += gl_array[index+1*row_stride] * 
Ainv_block[threadIdx.y][threadIdx.x]; grad_lapl_block[2][threadIdx.y][threadIdx.x] += gl_array[index+2*row_stride] * Ainv_block[threadIdx.y][threadIdx.x]; grad_lapl_block[3][threadIdx.y][threadIdx.x] += gl_array[index+3*row_stride] * Ainv_block[threadIdx.y][threadIdx.x]; } __syncthreads(); } // Now, we have to do the reduction across the lapl_blocks if (threadIdx.x < 8) { grad_lapl_block[0][threadIdx.y][threadIdx.x] += grad_lapl_block[0][threadIdx.y][threadIdx.x+8]; grad_lapl_block[1][threadIdx.y][threadIdx.x] += grad_lapl_block[1][threadIdx.y][threadIdx.x+8]; grad_lapl_block[2][threadIdx.y][threadIdx.x] += grad_lapl_block[2][threadIdx.y][threadIdx.x+8]; grad_lapl_block[3][threadIdx.y][threadIdx.x] += grad_lapl_block[3][threadIdx.y][threadIdx.x+8]; } __syncthreads(); if (threadIdx.x < 4) { grad_lapl_block[0][threadIdx.y][threadIdx.x] += grad_lapl_block[0][threadIdx.y][threadIdx.x+4]; grad_lapl_block[1][threadIdx.y][threadIdx.x] += grad_lapl_block[1][threadIdx.y][threadIdx.x+4]; grad_lapl_block[2][threadIdx.y][threadIdx.x] += grad_lapl_block[2][threadIdx.y][threadIdx.x+4]; grad_lapl_block[3][threadIdx.y][threadIdx.x] += grad_lapl_block[3][threadIdx.y][threadIdx.x+4]; } __syncthreads(); if (threadIdx.x < 2) { grad_lapl_block[0][threadIdx.y][threadIdx.x] += grad_lapl_block[0][threadIdx.y][threadIdx.x+2]; grad_lapl_block[1][threadIdx.y][threadIdx.x] += grad_lapl_block[1][threadIdx.y][threadIdx.x+2]; grad_lapl_block[2][threadIdx.y][threadIdx.x] += grad_lapl_block[2][threadIdx.y][threadIdx.x+2]; grad_lapl_block[3][threadIdx.y][threadIdx.x] += grad_lapl_block[3][threadIdx.y][threadIdx.x+2]; } __syncthreads(); if (threadIdx.x < 1) { grad_lapl_block[0][threadIdx.y][threadIdx.x] += grad_lapl_block[0][threadIdx.y][threadIdx.x+1]; grad_lapl_block[1][threadIdx.y][threadIdx.x] += grad_lapl_block[1][threadIdx.y][threadIdx.x+1]; grad_lapl_block[2][threadIdx.y][threadIdx.x] += grad_lapl_block[2][threadIdx.y][threadIdx.x+1]; grad_lapl_block[3][threadIdx.y][threadIdx.x] += grad_lapl_block[3][threadIdx.y][threadIdx.x+1]; } __syncthreads(); // unsigned int yIndex = yBlock * RATIO_BS + threadIdx.x; // if (threadIdx.y == 0 && yIndex < N) { // out[4*yIndex+0] = grad_lapl_block[0][threadIdx.x][0]; // out[4*yIndex+1] = grad_lapl_block[1][threadIdx.x][0]; // out[4*yIndex+2] = grad_lapl_block[2][threadIdx.x][0]; // out[4*yIndex+3] = grad_lapl_block[3][threadIdx.x][0]; // } //unsigned int yIndex = 4*yBlock*RATIO_BS + 4*threadIdx.y + threadIdx.x; unsigned int ix = 16*threadIdx.y + threadIdx.x; unsigned int yIndex = RATIO_BS * yBlock + (ix >> 2); if (ix < 64 && yIndex < N) out[64*yBlock + ix] = grad_lapl_block[ix&3][ix>>2][0]; // IMPORTANT!!! 
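      // Without the barrier below, a fast thread could begin the next yBlock
      // iteration and zero grad_lapl_block while other threads are still
      // reading column 0 of the shared array to write out this block's results.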
__syncthreads(); } } void calc_grad_lapl (float *Ainv_list[], float *grad_lapl_list[], float *out_list[], int N, int row_stride, int num_mats) { dim3 dimBlock(RATIO_BS, RATIO_BS); dim3 dimGrid (num_mats); all_ratios_grad_lapl_kernel<float><<<dimGrid,dimBlock>>> (Ainv_list, grad_lapl_list, out_list, N, row_stride); } void calc_grad_lapl (double *Ainv_list[], double *grad_lapl_list[], double *out_list[], int N, int row_stride, int num_mats) { dim3 dimBlock(RATIO_BS, RATIO_BS); dim3 dimGrid (num_mats); all_ratios_grad_lapl_kernel<double><<<dimGrid,dimBlock>>> (Ainv_list, grad_lapl_list, out_list, N, row_stride); } #define COPY_BS 256 template<typename T> __global__ void multi_copy (T **dest, T **src, int len) { __shared__ T *mysrc, *mydest; if (threadIdx.x ==0) { mysrc = src[blockIdx.y]; mydest = dest[blockIdx.y]; } __syncthreads(); int i = blockIdx.x * COPY_BS + threadIdx.x; if (i < len) mydest[i] = mysrc[i]; } template<typename T> __global__ void multi_copy (T **buff, int dest_off, int src_off, int len) { __shared__ T *mysrc, *mydest; if (threadIdx.x ==0) { T* ptr = buff[blockIdx.y]; mysrc = ptr + src_off; mydest = ptr + dest_off; } __syncthreads(); int i = blockIdx.x * COPY_BS + threadIdx.x; if (i < len) mydest[i] = mysrc[i]; } void multi_copy (float *dest[], float *src[], int len, int num) { dim3 dimBlock(COPY_BS); dim3 dimGrid ((len+COPY_BS-1)/COPY_BS, num); multi_copy<float><<<dimGrid,dimBlock>>>(dest, src, len); } void multi_copy (double *dest[], double *src[], int len, int num) { dim3 dimBlock(COPY_BS); dim3 dimGrid (len/COPY_BS, num); if (len % COPY_BS) dimGrid.x++; multi_copy<double><<<dimGrid,dimBlock>>>(dest, src, len); } void multi_copy (float *buff[], int dest_off, int src_off, int len, int num) { dim3 dimBlock(COPY_BS); dim3 dimGrid ((len+COPY_BS-1)/COPY_BS, num); multi_copy<float><<<dimGrid,dimBlock>>>(buff, dest_off, src_off, len); } void multi_copy (double *buff[], int dest_off, int src_off, int len, int num) { dim3 dimBlock(COPY_BS); dim3 dimGrid (len/COPY_BS, num); if (len % COPY_BS) dimGrid.x++; multi_copy<double><<<dimGrid,dimBlock>>>(buff, dest_off, src_off, len); } #include <stdlib.h> #include <time.h> void test_all_ratios_kernel() { int N = 128; float *A, *A_d, *Ainv, *Ainv_d, *ratio, *ratio_d; cudaMalloc ((void**)&A_d, N*N*sizeof(float)); cudaMalloc ((void**)&Ainv_d, N*N*sizeof(float)); cudaMalloc ((void**)&ratio_d, 1*N*sizeof(float)); A = (float *)malloc (N*N*sizeof(float)); Ainv = (float *)malloc (N*N*sizeof(float)); ratio = (float *)malloc (1*N*sizeof(float)); float ratio2[N]; for (int i=0; i<N; i++) for (int j=0; j<N; j++) { A[i*N+j] = 1.0f+drand48(); Ainv[i*N+j] = 1.0f+drand48(); } cudaMemcpyAsync (A_d, A, N*N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpyAsync (Ainv_d, Ainv, N*N*sizeof(float), cudaMemcpyHostToDevice); float **A_list, **A_list_d, **Ainv_list, **Ainv_list_d, **ratio_list, **ratio_list_d; int numMats = 2000; cudaMalloc ((void**)&A_list_d, numMats*sizeof(float*)); cudaMalloc ((void**)&Ainv_list_d, numMats*sizeof(float*)); cudaMalloc ((void**)&ratio_list_d, numMats*sizeof(float*)); A_list = (float **)malloc (numMats*sizeof(float*)); Ainv_list = (float **)malloc (numMats*sizeof(float*)); ratio_list = (float **)malloc (numMats*sizeof(float*)); for (int i=0; i<numMats; i++) { A_list[i] = A_d; Ainv_list[i] = Ainv_d; ratio_list[i] = ratio_d; } cudaMemcpyAsync (A_list_d, A_list, numMats*sizeof(float*), cudaMemcpyHostToDevice); cudaMemcpyAsync (Ainv_list_d, Ainv_list, numMats*sizeof(float*), cudaMemcpyHostToDevice); cudaMemcpyAsync (ratio_list_d, 
ratio_list, numMats*sizeof(float*), cudaMemcpyHostToDevice); clock_t start = clock(); for (int i=0; i<1000; i++) calc_all_ratios (Ainv_list_d, A_list_d, ratio_list_d, N, N, numMats); clock_t end = clock(); double time = (double)(end-start)/(double)CLOCKS_PER_SEC; fprintf (stderr, "start = %d\n", start); fprintf (stderr, "end = %d\n", end); double rate = 1000.0/time; fprintf (stderr, "Rate = %1.2f generations per second.\n", rate); cudaMemcpy (ratio, ratio_d, N*sizeof(float), cudaMemcpyDeviceToHost); // for (int i=0; i<N; i++) { // ratio2[i] = 0.0f; // for (int j=0; j<N; j++) // ratio2[i] += A[i*N+j]*Ainv[j*N+i]; // fprintf (stderr, "%3d %10.6f %10.6f\n", i, ratio2[i], ratio[i]); // } } void test_all_grad_lapl_kernel() { int N = 128; float *A, *A_d, *Ainv, *Ainv_d, *ratio, *ratio_d; cudaMalloc ((void**)&A_d, 4*N*N*sizeof(float)); cudaMalloc ((void**)&Ainv_d, N*N*sizeof(float)); cudaMalloc ((void**)&ratio_d, 4*N*sizeof(float)); A = (float *)malloc (4*N*N*sizeof(float)); Ainv = (float *)malloc (1*N*N*sizeof(float)); ratio = (float *)malloc (4*N*sizeof(float)); float ratio2[4*N]; for (int i=0; i<N; i++) for (int j=0; j<N; j++) { Ainv[i*N+j] = 1.0f+drand48(); for (int k=0; k<4; k++) A[4*(i*N+j)+k] = 1.0f+drand48(); } cudaMemcpyAsync (A_d, A, 4*N*N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpyAsync (Ainv_d, Ainv, 1*N*N*sizeof(float), cudaMemcpyHostToDevice); float **A_list, **A_list_d, **Ainv_list, **Ainv_list_d, **ratio_list, **ratio_list_d; int numMats = 2000; cudaMalloc ((void**)&A_list_d, numMats*sizeof(float*)); cudaMalloc ((void**)&Ainv_list_d, numMats*sizeof(float*)); cudaMalloc ((void**)&ratio_list_d, numMats*sizeof(float*)); A_list = (float **)malloc (numMats*sizeof(float*)); Ainv_list = (float **)malloc (numMats*sizeof(float*)); ratio_list = (float **)malloc (numMats*sizeof(float*)); for (int i=0; i<numMats; i++) { A_list[i] = A_d; Ainv_list[i] = Ainv_d; ratio_list[i] = ratio_d; } cudaMemcpyAsync (A_list_d, A_list, numMats*sizeof(float*), cudaMemcpyHostToDevice); cudaMemcpyAsync (Ainv_list_d, Ainv_list, numMats*sizeof(float*), cudaMemcpyHostToDevice); cudaMemcpyAsync (ratio_list_d, ratio_list, numMats*sizeof(float*), cudaMemcpyHostToDevice); struct timeval tstart, tend; gettimeofday(&tstart, NULL); for (int i=0; i<1; i++) calc_grad_lapl (Ainv_list_d, A_list_d, ratio_list_d, N, N, numMats); cudaMemcpy (ratio, ratio_d, 4*N*sizeof(float), cudaMemcpyDeviceToHost); gettimeofday(&tend, NULL); double start = (double)tstart.tv_sec + 1.0e-6 * (double)tstart.tv_usec; double end = (double)tend.tv_sec + 1.0e-6 * (double)tend.tv_usec; fprintf (stderr, "start = %f\n", start); fprintf (stderr, "end = %f\n", end); double rate = 100.0/(end-start); fprintf (stderr, "Rate = %1.2f generations per second.\n", rate); for (int i=0; i<N; i++) { for (int k=0; k<4; k++) ratio2[4*i+k] = 0.0f; for (int j=0; j<N; j++) for (int k=0; k<4; k++) ratio2[4*i+k] += A[(4*i+k)*N+j]*Ainv[j*N+i]; for (int k=0; k<4; k++) fprintf (stderr, "%3d %10.6f %10.6f\n", 4*i+k, ratio2[4*i+k], ratio[4*i+k]); } } template<typename T> __global__ void woodbury_update_16 (T** Ainv_trans, T** delta, T** Ainv_delta, int N, int rowstride) { T *myAinv, *mydelta, *myAinv_delta; int tid = threadIdx.x; myAinv = Ainv_trans[blockIdx.y]; myAinv_delta = Ainv_delta[blockIdx.y]; mydelta = delta[blockIdx.y]; int first_row = blockIdx.x*16; __shared__ T Ainv_s[16][17], delta_s[2][17], Ainv_delta_s[16][17]; int nb = (N+15)/16; for (int row=0; row<16; row++) if (tid < 16) Ainv_delta_s[row][tid] = 0.0f; __syncthreads(); int col = tid & 15; for (int 
block=0; block<nb; block++) { int nend = N - block*16; int c = block*16+tid; if (tid < 16) for (int row=0; row<16; row++) { Ainv_s[row][col] = myAinv[(first_row+row)*rowstride+c]; } __syncthreads(); for (int irow=0; irow<8; irow++) { int odd = tid>15; int row = 2*irow + odd; delta_s[odd][tid] = mydelta[row*rowstride+c]; if (row+first_row < N && col < nend) for (int k=0; k<16; k++) Ainv_delta_s[row][col] += Ainv_s[col][k] * delta_s[odd][k]; } __syncthreads(); } int mycol = blockIdx.x*16+tid; if (tid < 16 && mycol < N) for (int row=0; row<16; row++) myAinv_delta[row*rowstride+mycol] = Ainv_delta_s[row][tid]; } // Require 64 threads per block template<typename T> __device__ inline void block_inverse_16(T A[16][17]) { int tid = threadIdx.x; __shared__ T Acolk[16]; // if (blockIdx.y == 0 && tid == 0) { // printf ("Ablock:\n"); // for (int i=0; i<16; i++) { // for (int j=0; j<16; j++) // printf ("%14.8e ", A[i][j]); // printf ("\n"); // } // } for (int k=0; k<16; k++) { T pivotInv = 1.0f/A[k][k]; if (tid < 16) { T tmp = (tid==k) ? 0.0f : -pivotInv*A[tid][k]; A[tid][k] = tmp; Acolk[tid] = tmp; } __syncthreads(); int row= tid >> 4; int col = tid & 0x0f; T Arowk = A[k][col]; A[row+ 0][col] += Arowk*Acolk[row+ 0]; A[row+ 4][col] += Arowk*Acolk[row+ 4]; A[row+ 8][col] += Arowk*Acolk[row+ 8]; A[row+12][col] += Arowk*Acolk[row+12]; __syncthreads(); if (tid < 16) { if (tid == k) A[k][tid] = pivotInv; else A[k][tid] *= pivotInv; } __syncthreads(); } // if (blockIdx.y == 0 && tid == 0) { // printf ("Ainvblock:\n"); // for (int i=0; i<16; i++) { // for (int j=0; j<16; j++) // printf ("%14.8e ", A[i][j]); // printf ("\n"); // } // } } // This routine performs the first half of a Woodbury formula update // for updating 16 rows at a time of the A matrix. It assumes that // the inverse matrix is stored in transposed form. This kernel // computes transpose(Ainv) * delta, where delta is 16xN matrix of the // changes in A. kblock indicates which block of 16 rows has been // changed. Each blockIdx.x computes a different 16x16 block for the // product. 
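// For reference, a sketch (in loose notation; the storage layout is transposed
// as noted above) of the identity these two kernels implement.  Writing the
// change to a 16-row block of A as  A' = A + E_k * delta,  where E_k is the
// N x 16 selector of the changed rows and delta is the 16 x N matrix of row
// changes, the Woodbury formula gives
//
//   inv(A') = inv(A)
//           - inv(A) * E_k * inv( I_16 + delta * inv(A) * E_k ) * delta * inv(A)
//
// woodbury_update_16a forms delta * inv(A) (stored in Ainv_delta) and inverts
// the 16 x 16 block  I_16 + delta * inv(A) * E_k  via block_inverse_16 (stored
// in inv_block); woodbury_update_16b then applies the rank-16 correction to Ainv.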
template<typename T> __global__ void woodbury_update_16a (T** Ainv_trans, T** delta, T** Ainv_delta, T** inv_block, int N, int rowstride, int kblock) { T *myAinv, *mydelta, *myAinv_delta, *myinvblock; int tid = threadIdx.x; myAinv = Ainv_trans[blockIdx.y]; myAinv_delta = Ainv_delta[blockIdx.y]; mydelta = delta[blockIdx.y]; myinvblock = inv_block[blockIdx.y]; int first_row = blockIdx.x*32; __shared__ T Ainv1[16][17], Ainv2[16][17], delta_s[4][17], Ainv_delta1[16][17], Ainv_delta2[16][17]; int nb = (N+15)/16; int row = tid >> 4; int col = tid & 0x0f; Ainv_delta1[row+ 0][col] = Ainv_delta2[row+ 0][col] = 0.0f; Ainv_delta1[row+ 4][col] = Ainv_delta2[row+ 4][col] = 0.0f; Ainv_delta1[row+ 8][col] = Ainv_delta2[row+ 8][col] = 0.0f; Ainv_delta1[row+12][col] = Ainv_delta2[row+12][col] = 0.0f; __syncthreads(); for (int block=0; block<nb; block++) { int c = block*16+ col; int row = tid >> 4; for (int irow=0; irow<4; irow++,row+=4) { Ainv1[row][col] = myAinv[(first_row+row )*rowstride+c]; Ainv2[row][col] = myAinv[(first_row+row+16)*rowstride+c]; } __syncthreads(); row = tid >> 4; int row2 = row; for (int irow=0; irow<4; irow++, row+=4) { delta_s[row2][col] = mydelta[row*rowstride+c]; T mysum1 = Ainv_delta1[row][col]; T mysum2 = Ainv_delta2[row][col]; if (row+first_row < N && c < N) for (int k=0; k<16; k++) { mysum1 += Ainv1[col][k] * delta_s[row2][k]; mysum2 += Ainv2[col][k] * delta_s[row2][k]; } Ainv_delta1[row][col] = mysum1; Ainv_delta2[row][col] = mysum2; } __syncthreads(); } int mycol = blockIdx.x*32+col; row = tid >> 4; if (mycol < N) { for (int irow=0; irow<4; irow++,row+=4) { myAinv_delta[row*rowstride+mycol ] = Ainv_delta1[row][col]; myAinv_delta[row*rowstride+mycol+16] = Ainv_delta2[row][col]; } } __syncthreads(); row = tid >> 4; col = tid & 0x0f; if (2*blockIdx.x+1 == kblock) { if (tid < 16) Ainv_delta2[tid][tid] += 1.0f; __syncthreads(); block_inverse_16<T> (Ainv_delta2); for (int irow=0; irow<4; irow++,row+=4) myinvblock[row*16+col] = Ainv_delta2[row][col]; } if (2*blockIdx.x == kblock) { if (tid < 16) Ainv_delta1[tid][tid] += 1.0f; __syncthreads(); block_inverse_16<T> (Ainv_delta1); for (int irow=0; irow<4; irow++,row+=4) myinvblock[row*16+col] = Ainv_delta1[row][col]; } } template<typename T> __global__ void woodbury_update_16b (T** Ainv_trans, T** delta, T** Ainv_delta, T** inv_block, int N, int rowstride, int kblock) { int tid = threadIdx.x; __shared__ T B1[16][17], B2[16][17], B3[16][17], B4[16][17]; __shared__ T *myAinv, *myAinv_delta, *myinv_block; if (tid == 0) { myAinv = Ainv_trans[blockIdx.y]; myAinv_delta = Ainv_delta[blockIdx.y]; myinv_block = inv_block[blockIdx.y]; } __syncthreads(); int row = tid >> 4; int col = tid & 0x0f; int c = blockIdx.x*32+col; for (int i=0; i<4; i++, row+=4) { B1[row][col] = myinv_block[16*row+col]; B2[row][col] = myAinv[(16*kblock+row)*rowstride + c]; } __syncthreads(); row = tid >> 4; // Now, multiply Ainv block by inv_block for (int i=0; i<4; i++, row+=4) { T mysum = 0.0f; for (int j=0; j<16; j++) mysum += B2[j][row] * B1[j][col]; B3[row][col] = mysum; } __syncthreads(); row = tid >> 4; for (int i=0; i<4; i++, row+=4) B2[row][col] = myAinv[(16*kblock+row)*rowstride + c + 16]; __syncthreads(); row = tid >> 4; // Now, multiply Ainv block by inv_block for (int i=0; i<4; i++, row+=4) { T mysum = 0.0f; for (int j=0; j<16; j++) mysum += B2[j][row] * B1[j][col]; B4[row][col] = mysum; } // Now do outer product int nb = (N+15)>>4; for (int block=0; block<nb; block++) { row = tid >> 4; col = tid & 0x0f; for (int i=0; i<4; i++, row+=4) B1[row][col] = 
myAinv_delta[row*rowstride+col+16*block]; __syncthreads(); row = tid >> 4; col = tid & 0x0f; for (int irow=0; irow<4; irow++,row+=4) { T mysum3 = myAinv[(16*block+row)*rowstride + c]; T mysum4 = myAinv[(16*block+row)*rowstride + c + 16]; for (int k=0; k<16; k++) { mysum3 -= B3[col][k] * B1[k][row]; mysum4 -= B4[col][k] * B1[k][row]; } myAinv[(16*block+row)*rowstride + c ] = mysum3; myAinv[(16*block+row)*rowstride + c + 16] = mysum4; } } } template<typename T> __global__ void woodbury_update_32 (T** Ainv_trans, T** delta, T** Ainv_delta, int N, int rowstride) { T *myAinv, *mydelta, *myAinv_delta; int tid = threadIdx.x; myAinv = Ainv_trans[blockIdx.y]; myAinv_delta = Ainv_delta[blockIdx.y]; mydelta = delta[blockIdx.y]; int first_row = blockIdx.x*32; __shared__ T Ainv_s[32][33], delta_s[32][33], Ainv_delta_s[32][33]; int nb = (N+31)/32; for (int row=0; row<32; row++) if (tid < 32) Ainv_delta_s[row][tid] = 0.0f; __syncthreads(); int col = tid; for (int block=0; block<nb; block++) { int nend = N - block*32; int c = block*32+tid; for (int row=0; row<32; row++) { Ainv_s[row][tid] = myAinv[(first_row+row)*rowstride+c]; delta_s[row][tid] = mydelta[row*rowstride+c]; } __syncthreads(); for (int row=0; row<32; row++) { if (row+first_row < N && col < nend) for (int k=0; k<32; k++) Ainv_delta_s[row][col] += Ainv_s[row][k] * delta_s[col][k]; } __syncthreads(); } int mycol = blockIdx.x*32+tid; if (mycol < N) for (int row=0; row<32; row++) myAinv_delta[row*rowstride+mycol] = Ainv_delta_s[row][tid]; } #ifdef CUDA_TEST_MAIN // Replaces A with its inverse by gauss-jordan elimination with full pivoting // Adapted from Numerical Recipes in C void GJInverse (double *A, int n) { const int maxSize = 2000; if (n == 2) // Special case for 2x2 { double a=A[0]; double b=A[1]; double c=A[2]; double d=A[3]; double detInv = 1.0/(a*d-b*c); A[0] = d*detInv; A[1] = -b*detInv; A[2] = -c*detInv; A[3] = a*detInv; return; } int colIndex[maxSize], rowIndex[maxSize], ipiv[maxSize]; double big, pivInv; int icol, irow; for (int j=0; j<n; j++) ipiv[j] = -1; for (int i=0; i<n; i++) { big = 0.0; for (int j=0; j<n; j++) if (ipiv[j] != 0) for (int k=0; k<n; k++) { if (ipiv[k] == -1) { if (fabs(A[n*j+k]) >= big) { big = fabs(A[n*j+k]); irow = j; icol = k; } } else if (ipiv[k] > 0) { fprintf (stderr, "GJInverse: Singular matrix!\n"); exit(1); } } ++(ipiv[icol]); if (irow != icol) for (int l=0; l<n; l++) { double tmp = A[n*irow+l]; A[n*irow+l] = A[n*icol+l]; A[n*icol+l] = tmp; // swap (A[n*irow+l], A[n*icol+l]); } rowIndex[i] = irow; colIndex[i] = icol; if (A[n*icol+icol] == 0.0) { fprintf (stderr, "GJInverse: Singular matrix!\n"); exit(1); } pivInv = 1.0/A[n*icol+icol]; A[n*icol+icol] = 1.0; for (int l=0; l<n; l++) A[n*icol+l] *= pivInv; for (int ll=0; ll<n; ll++) if (ll != icol) { double dum = A[n*ll+icol]; A[n*ll+icol] = 0.0; for (int l=0; l<n; l++) A[n*ll+l] -= A[n*icol+l]*dum; } } // Now unscramble the permutations for (int l=n-1; l>=0; l--) { if (rowIndex[l] != colIndex[l]) for (int k=0; k<n ; k++) { double tmp = A[n*k+rowIndex[l]]; A[n*k+rowIndex[l]] = A[n*k+colIndex[l]]; A[n*k+colIndex[l]] = tmp; // swap (A(k,rowIndex[l]),A(k, colIndex[l])); } } } #include <omp.h> #define MAT_SIZE 256 #define NUM_MATS 512 void test_update() { int const N = MAT_SIZE; double *A, *Ainv; int numMats = NUM_MATS; float *A_h, *Ainv_h, *u_h; float *Ainv_d, *Ainv_u_d, *Ainv_colk_d, *u_d; A = (double*)malloc (N*N*sizeof(double)); Ainv = (double*)malloc (N*N*sizeof(double)); Ainv_h = (float*) malloc (N*N*sizeof(float)); A_h = (float*) malloc 
(N*N*sizeof(float)); u_h = (float*) malloc (N*sizeof(float)); cudaMalloc((void**)&Ainv_d, N*N*sizeof(float)); cudaMalloc((void**)&Ainv_d, N*N*sizeof(float)); cudaMalloc((void**)&u_d, N*sizeof(float)); cudaMalloc((void**)&Ainv_u_d, N*sizeof(float)); cudaMalloc((void**)&Ainv_colk_d, N*sizeof(float)); float **AinvList, **Ainv_uList, **AList, **Ainv_colkList, **uList; AList = (float**)malloc(NUM_MATS*sizeof(float*)); AinvList = (float**)malloc(NUM_MATS*sizeof(float*)); Ainv_uList = (float**)malloc(NUM_MATS*sizeof(float*)); Ainv_colkList = (float**)malloc(NUM_MATS*sizeof(float*)); uList = (float**)malloc(NUM_MATS*sizeof(float*)); float **AList_d, **AinvList_d, **Ainv_uList_d, **Ainv_colkList_d, **uList_d; cudaMalloc((void**)&AList_d, numMats*sizeof(float*)); cudaMalloc((void**)&AinvList_d, numMats*sizeof(float*)); cudaMalloc((void**)&Ainv_uList_d, numMats*sizeof(float*)); cudaMalloc((void**)&Ainv_colkList_d, numMats*sizeof(float*)); cudaMalloc((void**)&uList_d, numMats*sizeof(float*)); for (int mat=0; mat<numMats; mat++) { cudaMalloc((void**)&(AList[mat]) , N*N*sizeof(float)+1000); cudaMalloc((void**)&(AinvList[mat]) , N*N*sizeof(float)+1000); cudaMalloc((void**)&(Ainv_uList[mat]) , N*sizeof(float)+1000); cudaMalloc((void**)&(Ainv_colkList[mat]), N*sizeof(float)+1000); cudaMalloc((void**)&(uList[mat]) , N*sizeof(float)+1000); } cudaMemcpyAsync (AList_d, AList, numMats*sizeof(float*), cudaMemcpyHostToDevice); cudaMemcpyAsync (AinvList_d, AinvList, numMats*sizeof(float*), cudaMemcpyHostToDevice); cudaMemcpyAsync (Ainv_uList_d, Ainv_uList, numMats*sizeof(float*), cudaMemcpyHostToDevice); cudaMemcpyAsync (Ainv_colkList_d, Ainv_colkList, numMats*sizeof(float*), cudaMemcpyHostToDevice); cudaMemcpyAsync (uList_d, uList, numMats*sizeof(float*), cudaMemcpyHostToDevice); srand48((long int) 12341313); int row = 0; for (int mat=0; mat<numMats; mat++) { if (mat == 0 ) { for (int i=0; i<N; i++) { u_h[i] = drand48(); for (int j=0; j<N; j++) A[i*N+j] = Ainv[i*N+j] = A_h[i*N+j] = drand48(); } // for (int i=0; i<N; i++) // u_h[i] = A_h[row*N+i]; GJInverse(Ainv, N); for (int i=0; i<N; i++) for (int j=0; j<N; j++) Ainv_h[i*N+j] = (float)Ainv[i*N+j]; } // for (int i=0; i<N; i++) // u_h[i] = A_h[row*N+i]; cudaMemcpyAsync (AList[mat], A_h, N*N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpyAsync (AinvList[mat], Ainv_h, N*N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpyAsync (uList[mat], u_h, N*sizeof(float), cudaMemcpyHostToDevice); } dim3 dimBlock2(64); dim3 dimGrid2((N+63)/64, NUM_MATS); double start = omp_get_wtime(); for (int i=0; i<100; i++) { update_inverse_cuda1<float,64><<<dimGrid2,dimBlock2>>> (AList_d, AinvList_d, uList_d, Ainv_uList_d, Ainv_colkList_d, N, N, row); update_inverse_cuda2<float,64><<<dimGrid2,dimBlock2>>> (AList_d, AinvList_d, uList_d, Ainv_uList_d, Ainv_colkList_d, N, N, row); } cudaThreadSynchronize(); double end = omp_get_wtime(); fprintf (stderr, "Rate = %12.8f updates per second.\n", (double)(100*NUM_MATS)/(end - start)); cudaMemcpy (Ainv_h, AinvList[0], N*N*sizeof(float),cudaMemcpyDeviceToHost); /* for (int j=0; j<16; j++) for (int i=0; i<N; i++) A[(row+j)*N+i] += delta_h[j*N+i]; for (int i=0; i<N; i++) for (int j=0; j<N; j++) { double ident = 0.0; for (int k=0; k<N; k++) ident += Ainv_h[i*N+k]*A[k*N+j]; if ((i==j && fabs(ident - 1.0) > 1.0e-4) || (i!=j && fabs(ident) > 1.0e-4)) fprintf (stderr, "Error in matrix inverse, (%d, %d) = %1.8f\n", i, j, ident); }*/ fprintf (stderr, "Finished.\n"); } void test_update_transpose() { const int N = MAT_SIZE; double *A, *Ainv; int numMats = 
NUM_MATS; float *A_h, *Ainv_h, *u_h; float *Ainv_d, *Ainv_u_d, *Ainv_colk_d, *u_d; A = (double*)malloc (N*N*sizeof(double)); Ainv = (double*)malloc (N*N*sizeof(double)); Ainv_h = (float*) malloc (N*N*sizeof(float)); A_h = (float*) malloc (N*N*sizeof(float)); u_h = (float*) malloc (N*sizeof(float)); cudaMalloc((void**)&Ainv_d, N*N*sizeof(float)); cudaMalloc((void**)&Ainv_d, N*N*sizeof(float)); cudaMalloc((void**)&u_d, N*sizeof(float)); cudaMalloc((void**)&Ainv_u_d, N*sizeof(float)); cudaMalloc((void**)&Ainv_colk_d, N*sizeof(float)); float **AinvList, **Ainv_uList, **AList, **Ainv_colkList, **uList; AList = (float**)malloc(NUM_MATS*sizeof(float*)); AinvList = (float**)malloc(NUM_MATS*sizeof(float*)); Ainv_uList = (float**)malloc(NUM_MATS*sizeof(float*)); Ainv_colkList = (float**)malloc(NUM_MATS*sizeof(float*)); uList = (float**)malloc(NUM_MATS*sizeof(float*)); float **AList_d, **AinvList_d, **Ainv_uList_d, **Ainv_colkList_d, **uList_d; cudaMalloc((void**)&AList_d, numMats*sizeof(float*)); cudaMalloc((void**)&AinvList_d, numMats*sizeof(float*)); cudaMalloc((void**)&Ainv_uList_d, numMats*sizeof(float*)); cudaMalloc((void**)&Ainv_colkList_d, numMats*sizeof(float*)); cudaMalloc((void**)&uList_d, numMats*sizeof(float*)); for (int mat=0; mat<numMats; mat++) { cudaMalloc((void**)&(AList[mat]) , N*N*sizeof(float)); cudaMalloc((void**)&(AinvList[mat]) , N*N*sizeof(float)); cudaMalloc((void**)&(Ainv_uList[mat]) , N*sizeof(float)); cudaMalloc((void**)&(Ainv_colkList[mat]), N*sizeof(float)); cudaMalloc((void**)&(uList[mat]) , N*sizeof(float)); } cudaMemcpyAsync (AList_d, AList, numMats*sizeof(float*), cudaMemcpyHostToDevice); cudaMemcpyAsync (AinvList_d, AinvList, numMats*sizeof(float*), cudaMemcpyHostToDevice); cudaMemcpyAsync (Ainv_uList_d, Ainv_uList, numMats*sizeof(float*), cudaMemcpyHostToDevice); cudaMemcpyAsync (Ainv_colkList_d, Ainv_colkList, numMats*sizeof(float*), cudaMemcpyHostToDevice); cudaMemcpyAsync (uList_d, uList, numMats*sizeof(float*), cudaMemcpyHostToDevice); srand48((long int) 12341313); int row = 1; for (int mat=0; mat<numMats; mat++) { if (mat == 0 ) { for (int i=0; i<N; i++) { for (int j=0; j<N; j++) A[i*N+j] = Ainv[i*N+j] = A_h[i*N+j] = drand48(); // u_h[i] = drand48(); } for (int j=0; j<N; j++) u_h[j] = drand48();//A[N*row+j]; GJInverse(Ainv, N); for (int i=0; i<N; i++) for (int j=0; j<N; j++) Ainv_h[j*N+i] = (float)Ainv[i*N+j]; } // for (int i=0; i<N; i++) // u_h[i] = A_h[row*N+i]; cudaMemcpyAsync (AList[mat], A_h, N*N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpyAsync (AinvList[mat], Ainv_h, N*N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpyAsync (uList[mat], u_h, N*sizeof(float), cudaMemcpyHostToDevice); } dim3 dimBlock(DET_BLOCK_SIZE); dim3 dimGrid(NUM_MATS); clock_t start = clock(); for (int i=0; i<1000; i++) { update_inverse_transpose_cuda<float,DET_BLOCK_SIZE,N><<<dimGrid,dimBlock>>> (AList_d, AinvList_d, uList_d, N, N, row); // update_inverse_transpose_cuda_2pass<float,DET_BLOCK_SIZE,N><<<dimGrid,dimBlock>>> // (AList_d, AinvList_d, uList_d, N, N, row); } cudaThreadSynchronize(); clock_t end = clock(); fprintf (stderr, "Rate = %12.8f updates per second.\n", (double)(1000*NUM_MATS)/((double)(end - start)/(double)CLOCKS_PER_SEC)); cudaMemcpy (Ainv_h, AinvList[1], N*N*sizeof(float),cudaMemcpyDeviceToHost); for (int i=0; i<N; i++) A[row*N+i] = u_h[i]; for (int i=0; i<N; i++) for (int j=0; j<N; j++) { double ident = 0.0; for (int k=0; k<N; k++) ident += Ainv_h[k*N+i]*A[k*N+j]; if ((i==j && fabs(ident - 1.0) > 1.0e-4) || (i!=j && fabs(ident) > 1.0e-4)) fprintf 
(stderr, "Error in matrix inverse, (%d, %d) = %1.8f\n", i, j, ident); } fprintf (stderr, "Finished.\n"); } void test_woodbury() { int const N = MAT_SIZE; int M = 16; int updateBlock = 3; double *A, *Ainv, *Anew, *Anew_inv; int numMats = NUM_MATS; float *A_h, *Ainv_h, *delta_h, *Ainv_delta_h, *Anew_h, *Anew_inv_h;; float *Ainv_d, *Ainv_delta_d, *Ainv_colk_d, *delta_d; A = (double*)malloc (N*N*sizeof(double)); Anew = (double*)malloc (N*N*sizeof(double)); Ainv = (double*)malloc (N*N*sizeof(double)); Anew_inv = (double*)malloc (N*N*sizeof(double)); Ainv_h = (float*) malloc (N*N*sizeof(float)); A_h = (float*) malloc (N*N*sizeof(float)); delta_h = (float*) malloc (N*M*sizeof(float)); Ainv_delta_h = (float*) malloc (N*M*sizeof(float)); Anew_h = (float*) malloc (N*N*sizeof(float)); Anew_inv_h = (float*) malloc (N*N*sizeof(float)); cudaMalloc((void**)&Ainv_d, N*N*sizeof(float)); cudaMalloc((void**)&delta_d, N*M*sizeof(float)); cudaMalloc((void**)&Ainv_delta_d, N*M*sizeof(float)); cudaMalloc((void**)&Ainv_colk_d, N *sizeof(float)); float **AinvList, **Ainv_deltaList, **AList, **Ainv_colkList, **deltaList, **invBlockList; AList = (float**)malloc(NUM_MATS*sizeof(float*)); AinvList = (float**)malloc(NUM_MATS*sizeof(float*)); Ainv_deltaList = (float**)malloc(NUM_MATS*sizeof(float*)); Ainv_colkList = (float**)malloc(NUM_MATS*sizeof(float*)); deltaList = (float**)malloc(NUM_MATS*sizeof(float*)); invBlockList = (float**)malloc(NUM_MATS*sizeof(float*)); float **AList_d, **AinvList_d, **Ainv_deltaList_d, **Ainv_colkList_d, **deltaList_d, **invBlockList_d; cudaMalloc((void**)&AinvList_d, numMats*sizeof(float*)); cudaMalloc((void**)&Ainv_deltaList_d, numMats*sizeof(float*)); cudaMalloc((void**)&deltaList_d, numMats*sizeof(float*)); cudaMalloc((void**)&invBlockList_d, numMats*sizeof(float*)); for (int mat=0; mat<numMats; mat++) { // cudaMalloc((void**)&(AList[mat]) , N*N*sizeof(float)+1000); cudaMalloc((void**)&(AinvList[mat]) , N*N*sizeof(float)+1000); cudaMalloc((void**)&(Ainv_deltaList[mat]), N*M*sizeof(float)+1000); cudaMalloc((void**)&(deltaList[mat]) , N*M*sizeof(float)+1000); cudaMalloc((void**)&(invBlockList[mat]) , M*M*sizeof(float)+1000); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { fprintf (stderr, "CUDA error in test_woodbury malloc:\n %s\n", cudaGetErrorString(err)); abort(); } } cudaMemcpyAsync (AinvList_d, AinvList, numMats*sizeof(float*), cudaMemcpyHostToDevice); cudaMemcpyAsync (Ainv_deltaList_d, Ainv_deltaList, numMats*sizeof(float*), cudaMemcpyHostToDevice); cudaMemcpyAsync (deltaList_d, deltaList, numMats*sizeof(float*), cudaMemcpyHostToDevice); cudaMemcpyAsync (invBlockList_d, invBlockList, numMats*sizeof(float*), cudaMemcpyHostToDevice); srand48((long int) 12341313); int row = 0; for (int mat=0; mat<numMats; mat++) { if (mat == 0 ) { for (int i=0; i<N; i++) { for (int j=0; j<N; j++) { A[i*N+j] = Ainv[i*N+j] = A_h[i*N+j] = drand48(); Anew[i*N+j] = Anew_inv[i*N+j] = A[i*N+j]; } } for (int i=0; i<16; i++) for (int j=0; j<N; j++) { delta_h[i*N+j] = drand48()-0.5; Anew[(updateBlock*16+i)*N + j] = Anew_inv[(updateBlock*16+i)*N + j] = A[(updateBlock*16+i)*N + j] + delta_h[i*N+j]; } // for (int i=0; i<N; i++) // delta_h[i] = A_h[row*N+i]; GJInverse(Ainv, N); GJInverse(Anew_inv, N); for (int i=0; i<N; i++) for (int j=0; j<N; j++) { Ainv_h[i*N+j] = (float)Ainv[j*N+i]; Anew_inv_h[i*N+j] = (float)Anew_inv[j*N+i]; } } // for (int i=0; i<N; i++) // delta_h[i] = A_h[row*N+i]; // cudaMemcpyAsync (AList[mat], A_h, N*N*sizeof(float), // cudaMemcpyHostToDevice); cudaMemcpyAsync 
(AinvList[mat], Ainv_h, N*N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpyAsync (deltaList[mat], delta_h, N*M*sizeof(float), cudaMemcpyHostToDevice); } cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { fprintf (stderr, "CUDA error in test_woodbury memcopy's:\n %s\n", cudaGetErrorString(err)); abort(); } dim3 dimBlock2(64); dim3 dimGrida((N+15)/16, numMats); dim3 dimGridb((N+31)/32, numMats); //dim3 dimGrid2((N/32), numMats); double start = omp_get_wtime(); for (int i=0; i<100; i++) { woodbury_update_16a<float><<<dimGridb,dimBlock2>>> (AinvList_d, deltaList_d, Ainv_deltaList_d, invBlockList_d, N, N, updateBlock); woodbury_update_16b<float><<<dimGridb,dimBlock2>>> (AinvList_d, deltaList_d, Ainv_deltaList_d, invBlockList_d, N, N, updateBlock); } cudaThreadSynchronize(); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf (stderr, "CUDA error in woodbury_update_16:\n %s\n", cudaGetErrorString(err)); abort(); } double end = omp_get_wtime(); fprintf (stderr, "Rate = %12.8f updates per second.\n", (double)(100*NUM_MATS)/(end - start)); fprintf (stderr, "About to copy %ld back\n", N*M*sizeof(float)); cudaMemcpy (Ainv_delta_h, Ainv_deltaList[0], N*M*sizeof(float), cudaMemcpyDeviceToHost); fprintf (stderr, "About to copy %ld back\n", N*N*sizeof(float)); cudaMemcpy (Anew_inv_h, AinvList[0], N*N*sizeof(float), cudaMemcpyDeviceToHost); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf (stderr, "CUDA error in test_woodbury memcopy back:\n %s\n", cudaGetErrorString(err)); abort(); } fprintf(stderr, "Copied result back.\n"); float Ainv_delta[N*M]; for (int i=0; i<N*M; i++) Ainv_delta[i] = 0.0; for (int i=0; i<16; i++) for (int j=0; j<N; j++) for (int k=0; k<N; k++) Ainv_delta[i*N+j] += Ainv_h[j*N+k]*delta_h[i*N+k]; fprintf (stderr, "Ainv_delta_cpu = %1.8e\n", Ainv_delta[51]); fprintf (stderr, "Ainv_delta_gpu = %1.8e\n", Ainv_delta_h[51]); int i = 10; int j = 17; FILE *fcpu = fopen("Ainv_cpu.dat", "w"); FILE *fgpu = fopen("Ainv_gpu.dat", "w"); for (int i=0; i<N; i++) { for (int j=0; j<N; j++) { fprintf (fcpu, "%16.8e ", Anew_inv[i*N+j]); fprintf (fgpu, "%16.8e ", Anew_inv_h[j*N+i]); } fprintf (fcpu, "\n"); fprintf (fgpu, "\n"); } fprintf (stderr, "Anew_inv cpu = %1.8e\n", Anew_inv[i*N+j]); fprintf (stderr, "Anew_inv_gpu = %1.8e\n", Anew_inv_h[j*N+i]); // cudaMemcpy (Ainv_h, AinvList[0], N*N*sizeof(float),cudaMemcpyDeviceToHost); // for (int i=0; i<N; i++) // A[row*N+i] = delta_h[i]; // for (int i=0; i<N; i++) // for (int j=0; j<N; j++) { // double ident = 0.0; // for (int k=0; k<N; k++) // ident += Ainv_h[i*N+k]*A[k*N+j]; // if ((i==j && fabs(ident - 1.0) > 1.0e-4) || // (i!=j && fabs(ident) > 1.0e-4)) // fprintf (stderr, "Error in matrix inverse, (%d, %d) = %1.8f\n", i, j, ident); // } fprintf (stderr, "Finished.\n"); } // Compile with: // nvcc -o test_all_ratios -DCUDA_TEST_MAIN ../src/QMCWaveFunctions/Fermion/determinant_update.cu main() { //test_all_ratios_kernel(); // test_all_grad_lapl_kernel(); test_update(); // test_update_transpose(); test_woodbury(); } #endif
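// Host-side reference for the rank-1 update performed by update_inverse_cuda1/2
// above.  This is a minimal sketch for checking kernel output on small N; the
// helper name is ours and it is not used elsewhere in this file.  It replaces
// row k of A by u and applies the Sherman-Morrison formula in place:
//   Ainv' = Ainv - (Ainv e_k)(delta^T Ainv) / (1 + (delta^T Ainv)_k),
//   delta = u - A[k,:].
template<typename T>
void sherman_morrison_update_host (T *A, T *Ainv, const T *u,
                                   int N, int rowstride, int k)
{
  T *delta      = (T*)malloc (N*sizeof(T));
  T *Ainv_delta = (T*)malloc (N*sizeof(T));
  T *Ainv_colk  = (T*)malloc (N*sizeof(T));
  for (int j=0; j<N; j++) {
    delta[j]      = u[j] - A[k*rowstride+j];   // change in row k
    Ainv_colk[j]  = Ainv[j*rowstride+k];       // k-th column of Ainv
    Ainv_delta[j] = T(0);
  }
  // Ainv_delta = delta^T * Ainv  (row vector of length N)
  for (int i=0; i<N; i++)
    for (int j=0; j<N; j++)
      Ainv_delta[j] += delta[i] * Ainv[i*rowstride+j];
  T prefact = T(-1) / (T(1) + Ainv_delta[k]);
  // Commit the new row to A, then apply the rank-1 correction to Ainv
  for (int j=0; j<N; j++)
    A[k*rowstride+j] = u[j];
  for (int i=0; i<N; i++)
    for (int j=0; j<N; j++)
      Ainv[i*rowstride+j] += prefact * Ainv_colk[i] * Ainv_delta[j];
  free (delta);  free (Ainv_delta);  free (Ainv_colk);
}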
#include <algorithm> #include <cstdint> #include <mutex> #include <type_traits> #include "chainerx/cuda/cuda.h" #include "chainerx/cuda/cuda_runtime.h" #include "chainerx/cuda/data_type.cuh" #include "chainerx/macro.h" #include "chainerx/reduction_kernel_arg.h" namespace chainerx { namespace cuda { namespace reduce_detail { static constexpr int kMaxReductionBlockSize{512}; static constexpr int64_t kMaxGridSize{0x7fffffff}; inline int64_t RoundUpToPowerOf2(int64_t x) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; x |= x >> 32; return x + 1; } template <typename In, typename Out, typename ReductionImpl, int8_t InNdim = kDynamicNdim, int8_t OutNdim = kDynamicNdim> __global__ void ReductionKernel( ReductionKernelArg<In, Out, InNdim, OutNdim> arg, int out_block_size, int reduce_block_size, ReductionImpl impl) { using AccumType = decltype(impl.Identity()); extern __shared__ __align__(8) uint8_t work_bytes[]; AccumType* work = reinterpret_cast<AccumType*>(work_bytes); // NOLINT(cppcoreguidelines-pro-type-reinterpret-cast) int tid = threadIdx.x; int64_t reduce_block_offset = tid / out_block_size; int64_t reduce_offset = reduce_block_offset * arg.out_indexer.total_size(); int64_t reduce_stride = reduce_block_size * arg.out_indexer.total_size(); int64_t out_offset = tid % out_block_size; int64_t out_base = blockIdx.x * out_block_size; int64_t out_stride = gridDim.x * out_block_size; auto it_in = arg.in_indexer.It(0, reduce_stride); for (auto it_out = arg.out_indexer.It(out_base + out_offset, out_stride); it_out; ++it_out) { AccumType accum = impl.Identity(); int64_t i_reduce = reduce_block_offset; for (it_in.Restart(it_out.raw_index() + reduce_offset); it_in; ++it_in, i_reduce += reduce_block_size) { impl.Reduce(impl.MapIn(cuda_internal::StorageToDataType<const In>(arg.in[it_in]), i_reduce), accum); } if (out_block_size <= kMaxReductionBlockSize / 2) { work[tid] = accum; __syncthreads(); // NOTE: Compiler optimizes to unroll this loop for (int stride = kMaxReductionBlockSize / 2; stride > 0; stride >>= 1) { if (out_block_size <= stride) { if (tid < stride) { impl.Reduce(work[tid + stride], work[tid]); } __syncthreads(); } } accum = work[tid]; __syncthreads(); } if (reduce_block_offset == 0 && it_out) { arg.out[it_out] = cuda_internal::DataToStorageType<Out>(impl.MapOut(accum)); } } } template <typename In, typename Out, typename ReductionImpl, int8_t InNdim = kDynamicNdim, int8_t OutNdim = kDynamicNdim> __global__ void ScanKernel( ReductionKernelArg<In, Out, InNdim, OutNdim> arg, int out_block_size, int reduce_block_size, ReductionImpl impl, int64_t reduce_len) { int tid = threadIdx.x; int64_t len = arg.out_indexer.total_size() / reduce_len; int64_t reduce_block_offset = tid / out_block_size; int64_t reduce_offset = reduce_block_offset * len; int64_t reduce_stride = reduce_block_size * len; int64_t out_offset = tid % out_block_size; int64_t out_base = blockIdx.x * out_block_size; int64_t out_stride = gridDim.x * out_block_size; auto reduce = [&impl, &arg](auto& it_from, auto& it_to) { auto from = cuda_internal::StorageToDataType<Out>(arg.out[it_from]); auto& to = cuda_internal::StorageToDataType<Out>(arg.out[it_to]); impl.Reduce(from, to); ++it_from; ++it_to; }; for (int64_t i = out_base + out_offset; i < len; i += out_stride) { // Copy input array to output array auto it_in = arg.in_indexer.It(i + reduce_offset, reduce_stride); auto it_out = arg.out_indexer.It(i + reduce_offset, reduce_stride); for (int64_t j = reduce_block_offset; j < reduce_len; j += reduce_block_size, 
++it_in, ++it_out) { auto value = cuda_internal::StorageToDataType<const In>(arg.in[it_in]); arg.out[it_out] = cuda_internal::DataToStorageType<Out>(impl.MapIn(value, j)); } __syncthreads(); int64_t stride = 1; // Up-Sweep Phase for (stride = 1; stride * 2 <= reduce_len; stride <<= 1) { int64_t index_from = reduce_block_offset * stride * 2 + stride - 1; int64_t index_to = index_from + stride; auto it_from = arg.out_indexer.It(i + index_from * len, reduce_stride * stride * 2); auto it_to = arg.out_indexer.It(i + index_to * len, reduce_stride * stride * 2); for (int64_t j = index_to; j < reduce_len; j += reduce_block_size * stride * 2) { reduce(it_from, it_to); } __syncthreads(); } // Down-Sweep Phase for (; stride >= 1; stride >>= 1) { int64_t index_from = reduce_block_offset * stride * 2 + stride * 2 - 1; int64_t index_to = index_from + stride; auto it_from = arg.out_indexer.It(i + index_from * len, reduce_stride * stride * 2); auto it_to = arg.out_indexer.It(i + index_to * len, reduce_stride * stride * 2); for (int64_t j = index_to; j < reduce_len; j += reduce_block_size * stride * 2) { reduce(it_from, it_to); } __syncthreads(); } } } } // namespace reduce_detail // Computes the reduction of the input and stores into the output array. // // `ReductionImpl` is required to provide the following device member function. // T can be arbitrary but should be common between these functions. // // - T Identity(); // Returns the initial value of reduction. // - T MapIn(In in, int64_t index); // Applies pre-reduction mapping of the input and its index. // - void Reduce(T next, T& accum); // Accumulates the iterated value to accum. // - Out MapOut(T accum); // Applies post-reduction mapping of the output. // // Example: // Simple summation over a float array can be implemented as the following reduction impl. // // struct SumImpl { // __device__ float Identity() { return 0; } // __device__ float MapIn(float in, int64_t /*index*/) { return in; } // __device__ void Reduce(float next, float& accum) { accum += next; } // __device__ float MapOut(float accum) { return accum; } // }; // // Then, it can be passed to Reduce like: Reduce(input, axis, output, SumImpl{}); template <typename In, typename Out, typename ReductionImpl> void Reduce(const Array& in, const Axes& axis, const Array& out, ReductionImpl&& impl) { if (out.GetTotalSize() == 0) { return; } ReductionArg arg{in, axis, out}; // TODO(niboshi): Calculate kMaxBlockSize per device std::lock_guard<std::mutex> lock{*cuda_internal::g_mutex}; static const int64_t kMaxBlockSize = std::min( reduce_detail::kMaxReductionBlockSize, CudaOccupancyMaxPotentialBlockSize(&reduce_detail::ReductionKernel<In, Out, ReductionImpl>).block_size); int64_t reduce_total_size_pow2 = reduce_detail::RoundUpToPowerOf2(std::max(int64_t{1}, arg.in_shape().GetTotalSize() / arg.out_shape().GetTotalSize())); int64_t reduce_block_size = std::min(kMaxBlockSize, reduce_total_size_pow2); int64_t out_block_size = kMaxBlockSize / reduce_block_size; int64_t out_block_num = (arg.out_shape().GetTotalSize() + out_block_size - 1) / out_block_size; int64_t block_size = kMaxBlockSize; int64_t grid_size = std::min(reduce_detail::kMaxGridSize, out_block_num); int64_t shared_mem_size = sizeof(decltype(impl.Identity())) * block_size; #ifdef NDEBUG // Optimize only in Release build to save time on development // TODO(sonots): Reconsider the number of statically-optimized kernels in terms of speed and binary size trade-offs. // Currently, only contiguous output arrays are optimized. 
switch (arg.in_strides().ndim()) { case 1: switch (arg.out_strides().ndim()) { case 0: reduce_detail::ReductionKernel<<<grid_size, block_size, shared_mem_size>>>( MakeReductionKernelArg<In, Out, 1, 0>(arg), out_block_size, reduce_block_size, impl); return; case 1: reduce_detail::ReductionKernel<<<grid_size, block_size, shared_mem_size>>>( MakeReductionKernelArg<In, Out, 1, 1>(arg), out_block_size, reduce_block_size, impl); return; } break; case 2: switch (arg.out_strides().ndim()) { case 0: reduce_detail::ReductionKernel<<<grid_size, block_size, shared_mem_size>>>( MakeReductionKernelArg<In, Out, 2, 0>(arg), out_block_size, reduce_block_size, impl); return; case 1: reduce_detail::ReductionKernel<<<grid_size, block_size, shared_mem_size>>>( MakeReductionKernelArg<In, Out, 2, 1>(arg), out_block_size, reduce_block_size, impl); return; } break; case 3: switch (arg.out_strides().ndim()) { case 0: reduce_detail::ReductionKernel<<<grid_size, block_size, shared_mem_size>>>( MakeReductionKernelArg<In, Out, 3, 0>(arg), out_block_size, reduce_block_size, impl); return; case 1: reduce_detail::ReductionKernel<<<grid_size, block_size, shared_mem_size>>>( MakeReductionKernelArg<In, Out, 3, 1>(arg), out_block_size, reduce_block_size, impl); return; } break; case 4: switch (arg.out_strides().ndim()) { case 0: reduce_detail::ReductionKernel<<<grid_size, block_size, shared_mem_size>>>( MakeReductionKernelArg<In, Out, 4, 0>(arg), out_block_size, reduce_block_size, impl); return; case 1: reduce_detail::ReductionKernel<<<grid_size, block_size, shared_mem_size>>>( MakeReductionKernelArg<In, Out, 4, 1>(arg), out_block_size, reduce_block_size, impl); return; } break; } #endif // NDEBUG reduce_detail::ReductionKernel<<<grid_size, block_size, shared_mem_size>>>( MakeReductionKernelArg<In, Out>(arg), out_block_size, reduce_block_size, impl); } template <typename In, typename Out, typename ReductionImpl> void Scan(const Array& in, int8_t axis, const Array& out, ReductionImpl&& impl) { if (out.GetTotalSize() == 0) { return; } ReductionArg arg{in, Axes{axis}, out}; int64_t reduce_len = in.shape()[axis]; // TODO(niboshi): Calculate kMaxBlockSize per device std::lock_guard<std::mutex> lock{*cuda_internal::g_mutex}; static const int64_t kMaxBlockSize = std::min( reduce_detail::kMaxReductionBlockSize, CudaOccupancyMaxPotentialBlockSize(&reduce_detail::ReductionKernel<In, Out, ReductionImpl>).block_size); int64_t reduce_total_size_pow2 = reduce_detail::RoundUpToPowerOf2(std::max(int64_t{1}, reduce_len)); int64_t reduce_block_size = std::min(kMaxBlockSize, reduce_total_size_pow2); int64_t out_block_size = kMaxBlockSize / reduce_block_size; int64_t out_block_num = (arg.in_shape().GetTotalSize() / reduce_len + out_block_size - 1) / out_block_size; int64_t block_size = kMaxBlockSize; int64_t grid_size = std::min(reduce_detail::kMaxGridSize, out_block_num); int64_t shared_mem_size = sizeof(decltype(impl.Identity())) * block_size; #ifdef NDEBUG // Optimize only in Release build to save time on development // TODO(sonots): Reconsider the number of statically-optimized kernels in terms of speed and binary size trade-offs. // Currently, only contiguous output arrays are optimized. 
if (arg.in_strides().ndim() == 1 && arg.out_strides().ndim() == 1) { reduce_detail::ScanKernel<<<grid_size, block_size, shared_mem_size>>>( MakeReductionKernelArg<In, Out, 1, 1>(arg), out_block_size, reduce_block_size, impl, reduce_len); return; } #endif // NDEBUG reduce_detail::ScanKernel<<<grid_size, block_size, shared_mem_size>>>( MakeReductionKernelArg<In, Out>(arg), out_block_size, reduce_block_size, impl, reduce_len); } } // namespace cuda } // namespace chainerx
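// ---------------------------------------------------------------------------
// Editorial sketch (not part of the original header): a second example of the
// ReductionImpl contract documented above -- a float maximum reduction. Only the
// four member functions required by ReductionKernel/Reduce are assumed; the
// struct name MaxImpl and the -FLT_MAX identity value are illustrative choices.
struct MaxImpl {
    __device__ float Identity() { return -3.402823466e+38f; }  // lowest finite float as the neutral element
    __device__ float MapIn(float in, int64_t /*index*/) { return in; }
    __device__ void Reduce(float next, float& accum) { if (next > accum) accum = next; }
    __device__ float MapOut(float accum) { return accum; }
};
// It would be passed to Reduce the same way as the SumImpl example in the
// documentation above, e.g. Reduce<float, float>(input, axis, output, MaxImpl{});
// ---------------------------------------------------------------------------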
#include "cudapoa_limits.hpp" #include "cudapoa_batch.cuh" #include <claraparabricks/genomeworks/cudapoa/batch.hpp> namespace claraparabricks { namespace genomeworks { namespace cudapoa { /// constructor- set other parameters based on a minimum set of input arguments BatchConfig::BatchConfig(int32_t max_seq_sz /*= 1024*/, int32_t max_seq_per_poa /*= 100*/, int32_t band_width /*= 256*/, BandMode banding /*= BandMode::full_band*/, float adapive_storage_factor /*= 2.0*/, float graph_length_factor /*= 3.0*/, int32_t max_pred_dist /*= 0*/) /// ensure a 4-byte boundary alignment for any allocated buffer : max_sequence_size(max_seq_sz) , max_consensus_size(2 * max_sequence_size) /// ensure 128-alignment for band_width size, 128 = CUDAPOA_MIN_BAND_WIDTH , alignment_band_width(cudautils::align<int32_t, CUDAPOA_MIN_BAND_WIDTH>(band_width)) , max_sequences_per_poa(max_seq_per_poa) , band_mode(banding) , max_banded_pred_distance(max_pred_dist > 0 ? max_pred_dist : 2 * cudautils::align<int32_t, CUDAPOA_MIN_BAND_WIDTH>(band_width)) { max_nodes_per_graph = cudautils::align<int32_t, CUDAPOA_CELLS_PER_THREAD>(graph_length_factor * max_sequence_size); if (banding == BandMode::full_band) { matrix_sequence_dimension = cudautils::align<int32_t, CUDAPOA_CELLS_PER_THREAD>(max_sequence_size); } else if (banding == BandMode::static_band || banding == BandMode::static_band_traceback) { matrix_sequence_dimension = cudautils::align<int32_t, CUDAPOA_CELLS_PER_THREAD>(alignment_band_width + CUDAPOA_BANDED_MATRIX_RIGHT_PADDING); } else // BandMode::adaptive_band || BandMode::adaptive_band_traceback { // adapive_storage_factor is to reserve extra memory for cases with extended band-width matrix_sequence_dimension = cudautils::align<int32_t, CUDAPOA_CELLS_PER_THREAD>(adapive_storage_factor * (alignment_band_width + CUDAPOA_BANDED_MATRIX_RIGHT_PADDING)); } throw_on_negative(max_seq_sz, "max_sequence_size cannot be negative."); throw_on_negative(max_seq_per_poa, "max_sequences_per_poa cannot be negative."); throw_on_negative(band_width, "alignment_band_width cannot be negative."); throw_on_negative(max_nodes_per_graph, "max_nodes_per_graph cannot be negative."); if (alignment_band_width != band_width) { std::cerr << "Band-width should be multiple of 128. 
The input was changed from " << band_width << " to " << alignment_band_width << std::endl; } } /// constructor- set all parameters separately BatchConfig::BatchConfig(int32_t max_seq_sz, int32_t max_consensus_sz, int32_t max_nodes_per_poa, int32_t band_width, int32_t max_seq_per_poa, int32_t matrix_seq_dim, BandMode banding, int32_t max_pred_distance) /// ensure a 4-byte boundary alignment for any allocated buffer : max_sequence_size(max_seq_sz) , max_consensus_size(max_consensus_sz) , max_nodes_per_graph(cudautils::align<int32_t, CUDAPOA_CELLS_PER_THREAD>(max_nodes_per_poa)) , matrix_sequence_dimension(cudautils::align<int32_t, CUDAPOA_CELLS_PER_THREAD>(matrix_seq_dim)) /// ensure 128-alignment for band_width size , alignment_band_width(cudautils::align<int32_t, CUDAPOA_MIN_BAND_WIDTH>(band_width)) , max_sequences_per_poa(max_seq_per_poa) , band_mode(banding) , max_banded_pred_distance(max_pred_distance) { throw_on_negative(max_seq_sz, "max_sequence_size cannot be negative."); throw_on_negative(max_consensus_sz, "max_consensus_size cannot be negative."); throw_on_negative(max_nodes_per_poa, "max_nodes_per_graph cannot be negative."); throw_on_negative(max_seq_per_poa, "max_sequences_per_poa cannot be negative."); throw_on_negative(band_width, "alignment_band_width cannot be negative."); throw_on_negative(max_pred_distance, "max_banded_pred_distance cannot be negative."); if (max_nodes_per_graph < max_sequence_size) throw std::invalid_argument("max_nodes_per_graph should be greater than or equal to max_sequence_size."); if (max_consensus_size < max_sequence_size) throw std::invalid_argument("max_consensus_size should be greater than or equal to max_sequence_size."); if (max_sequence_size < alignment_band_width) throw std::invalid_argument("alignment_band_width should not be greater than max_sequence_size."); if (alignment_band_width != band_width) { std::cerr << "Band-width should be multiple of 128. 
The input was changed from " << band_width << " to " << alignment_band_width << std::endl; } } std::unique_ptr<Batch> create_batch(int32_t device_id, cudaStream_t stream, DefaultDeviceAllocator allocator, int64_t max_mem, int8_t output_mask, const BatchConfig& batch_size, int16_t gap_score, int16_t mismatch_score, int16_t match_score) { if (use32bitScore(batch_size, gap_score, mismatch_score, match_score)) { if (use32bitSize(batch_size)) { if (use16bitTrace(batch_size)) { return std::make_unique<CudapoaBatch<int32_t, int32_t, int16_t>>(device_id, stream, allocator, max_mem, output_mask, batch_size, gap_score, mismatch_score, match_score); } else { return std::make_unique<CudapoaBatch<int32_t, int32_t, int8_t>>(device_id, stream, allocator, max_mem, output_mask, batch_size, gap_score, mismatch_score, match_score); } } else { if (use16bitTrace(batch_size)) { return std::make_unique<CudapoaBatch<int32_t, int16_t, int16_t>>(device_id, stream, allocator, max_mem, output_mask, batch_size, gap_score, mismatch_score, match_score); } else { return std::make_unique<CudapoaBatch<int32_t, int16_t, int8_t>>(device_id, stream, allocator, max_mem, output_mask, batch_size, gap_score, mismatch_score, match_score); } } } else { // if ScoreT is 16-bit, then it's safe to assume SizeT is 16-bit if (use16bitTrace(batch_size)) { return std::make_unique<CudapoaBatch<int16_t, int16_t, int16_t>>(device_id, stream, allocator, max_mem, output_mask, batch_size, gap_score, mismatch_score, match_score); } else { return std::make_unique<CudapoaBatch<int16_t, int16_t, int8_t>>(device_id, stream, allocator, max_mem, output_mask, batch_size, gap_score, mismatch_score, match_score); } } } std::unique_ptr<Batch> create_batch(int32_t device_id, cudaStream_t stream, int64_t max_mem, int8_t output_mask, const BatchConfig& batch_size, int16_t gap_score, int16_t mismatch_score, int16_t match_score) { if (max_mem < -1) { throw std::invalid_argument("max_mem has to be either -1 (=all available GPU memory) or greater or equal than 0."); } #ifdef GW_ENABLE_CACHING_ALLOCATOR // uses CachingDeviceAllocator if (max_mem == -1) { max_mem = claraparabricks::genomeworks::cudautils::find_largest_contiguous_device_memory_section(); if (max_mem == 0) { throw std::runtime_error("No memory available for caching"); } } claraparabricks::genomeworks::DefaultDeviceAllocator allocator(max_mem); #else // uses CudaMallocAllocator claraparabricks::genomeworks::DefaultDeviceAllocator allocator; #endif return create_batch(device_id, stream, allocator, max_mem, output_mask, batch_size, gap_score, mismatch_score, match_score); } } // namespace cudapoa } // namespace genomeworks } // namespace claraparabricks
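// ---------------------------------------------------------------------------
// Editorial sketch (not part of the original file): the rounding behaviour the
// constructors above rely on from cudautils::align<int32_t, boundary>() is assumed
// here to be "round up to the next multiple of the boundary"; the helper name
// round_up_to_multiple is illustrative only and the real GenomeWorks implementation
// may differ in detail.
#include <cstdint>
static inline int32_t round_up_to_multiple(int32_t value, int32_t boundary)
{
    // ceiling division followed by multiplication rounds value up to a multiple of boundary
    return ((value + boundary - 1) / boundary) * boundary;
}
// Example: a requested band_width of 200 would become
// round_up_to_multiple(200, 128) == 256, which is the situation the
// "Band-width should be multiple of 128" warning above reports.
// ---------------------------------------------------------------------------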
#include <torch/extension.h> #include <ATen/ATen.h> #include "fast_lsh_cumulation.h" #include "fast_lsh_cumulation_cuda.h" #include "common_cuda.h" #include "common.h" #include <vector> ////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////// std::vector<at::Tensor> fast_hash_ver1_kernel( at::Tensor query_mask, at::Tensor query_vector, at::Tensor key_mask, at::Tensor key_vector, int num_hash_f, int hash_code_len, bool use_cuda ) { int batch_size = query_vector.size(0); int num_query = query_vector.size(1); int num_key = key_vector.size(1); int vector_dim = query_vector.size(2); int num_hash_per_part = vector_dim / hash_code_len; int num_part = max(1, ceil_divide(num_hash_f, num_hash_per_part)); at::Tensor Dmat = 2 * at::randint(0, 2, {batch_size, 3, num_part, vector_dim}, query_mask.options()) - 1; at::Tensor query_hash_code = at::zeros({batch_size, num_query, num_hash_f}, query_mask.options()); at::Tensor key_hash_code = at::zeros({batch_size, num_key, num_hash_f}, key_mask.options()); int *query_mask_ptr = query_mask.data_ptr<int>(); float *query_vector_ptr = query_vector.data_ptr<float>(); int *key_mask_ptr = key_mask.data_ptr<int>(); float *key_vector_ptr = key_vector.data_ptr<float>(); int *Dmat_ptr = Dmat.data_ptr<int>(); int *query_hash_code_ptr = query_hash_code.data_ptr<int>(); int *key_hash_code_ptr = key_hash_code.data_ptr<int>(); if (use_cuda) { { dim3 threads(vector_dim); dim3 blocks(num_part, num_query, batch_size); int shared_mem = vector_dim * sizeof(float); fast_hash_ver1_cuda_kernel<<<blocks, threads, shared_mem>>>( query_mask_ptr, query_vector_ptr, Dmat_ptr, query_hash_code_ptr, batch_size, num_query, vector_dim, num_part, num_hash_f, hash_code_len ); } { dim3 threads(vector_dim); dim3 blocks(num_part, num_key, batch_size); int shared_mem = vector_dim * sizeof(float); fast_hash_ver1_cuda_kernel<<<blocks, threads, shared_mem>>>( key_mask_ptr, key_vector_ptr, Dmat_ptr, key_hash_code_ptr, batch_size, num_key, vector_dim, num_part, num_hash_f, hash_code_len ); } } return {query_hash_code, key_hash_code}; } at::Tensor lsh_cumulation_ver1_kernel( at::Tensor query_mask, at::Tensor query_hash_code, at::Tensor key_mask, at::Tensor key_hash_code, at::Tensor value, int hashtable_capacity, bool use_cuda ) { int batch_size = query_hash_code.size(0); int num_hash_f = query_hash_code.size(2); int num_query = query_hash_code.size(1); int num_key = key_hash_code.size(1); int value_dim = value.size(2); at::Tensor hashtable_value = at::empty({batch_size, num_hash_f, hashtable_capacity, WARP_SIZE}, value.options()); at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options()); if (use_cuda) { int threads_x = WARP_SIZE; int threads_y = OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE; int block_x_step1 = num_key / threads_y; int block_x_step2 = num_query / threads_y; int block_y = batch_size; dim3 threads(threads_x, threads_y); dim3 blocks_step1(block_x_step1, block_y); dim3 blocks_step2(block_x_step2, block_y); int *query_mask_ptr = query_mask.data_ptr<int>(); int *query_hash_code_ptr = query_hash_code.data_ptr<int>(); int *key_mask_ptr = key_mask.data_ptr<int>(); int *key_hash_code_ptr = key_hash_code.data_ptr<int>(); float *value_ptr = value.data_ptr<float>(); float *hashtable_value_ptr = hashtable_value.data_ptr<float>(); float *cumulation_value_ptr = cumulation_value.data_ptr<float>(); for (int value_offset = 0; value_offset < 
value_dim; value_offset = value_offset + WARP_SIZE) { cudaMemset(hashtable_value_ptr, 0, (batch_size * num_hash_f * hashtable_capacity * WARP_SIZE) * sizeof(float)); lsh_cumulation_ver1_step1_cuda_kernel<<<blocks_step1, threads>>>( key_mask_ptr, key_hash_code_ptr, value_ptr, hashtable_value_ptr, batch_size, num_hash_f, hashtable_capacity, num_key, value_dim, value_offset ); lsh_cumulation_ver1_step2_cuda_kernel<<<blocks_step2, threads>>>( query_mask_ptr, query_hash_code_ptr, hashtable_value_ptr, cumulation_value_ptr, batch_size, num_hash_f, hashtable_capacity, num_query, value_dim, value_offset ); } } return cumulation_value; } at::Tensor lsh_weighted_cumulation_ver1_kernel( at::Tensor query_mask, at::Tensor query_hash_code, at::Tensor query_weight, at::Tensor key_mask, at::Tensor key_hash_code, at::Tensor key_weight, at::Tensor value, int hashtable_capacity, bool use_cuda ) { int batch_size = query_hash_code.size(0); int num_hash_f = query_hash_code.size(2); int num_query = query_hash_code.size(1); int num_key = key_hash_code.size(1); int value_dim = value.size(2); int weight_dim = query_weight.size(2); at::Tensor hashtable_value = at::zeros({batch_size, num_hash_f, hashtable_capacity, WARP_SIZE}, value.options()); at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options()); if (use_cuda) { int threads_x = WARP_SIZE; int threads_y = OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE; int block_x_step1 = num_key / threads_y; int block_x_step2 = num_query / threads_y; int block_y = batch_size; dim3 threads(threads_x, threads_y); dim3 blocks_step1(block_x_step1, block_y); dim3 blocks_step2(block_x_step2, block_y); int *query_mask_ptr = query_mask.data_ptr<int>(); int *query_hash_code_ptr = query_hash_code.data_ptr<int>(); float *query_weight_ptr = query_weight.data_ptr<float>(); int *key_mask_ptr = key_mask.data_ptr<int>(); int *key_hash_code_ptr = key_hash_code.data_ptr<int>(); float *key_weight_ptr = key_weight.data_ptr<float>(); float *value_ptr = value.data_ptr<float>(); float *hashtable_value_ptr = hashtable_value.data_ptr<float>(); float *cumulation_value_ptr = cumulation_value.data_ptr<float>(); for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) { for (int weight_idx = 0; weight_idx < weight_dim; weight_idx++) { cudaMemset(hashtable_value_ptr, 0, (batch_size * num_hash_f * hashtable_capacity * WARP_SIZE) * sizeof(float)); lsh_weighted_cumulation_ver1_step1_cuda_kernel<<<blocks_step1, threads>>>( key_mask_ptr, key_hash_code_ptr, key_weight_ptr, value_ptr, hashtable_value_ptr, batch_size, num_hash_f, hashtable_capacity, num_key, value_dim, weight_dim, value_offset, weight_idx ); lsh_weighted_cumulation_ver1_step2_cuda_kernel<<<blocks_step2, threads>>>( query_mask_ptr, query_hash_code_ptr, query_weight_ptr, hashtable_value_ptr, cumulation_value_ptr, batch_size, num_hash_f, hashtable_capacity, num_query, value_dim, weight_dim, value_offset, weight_idx ); } } } return cumulation_value; } at::Tensor lsh_weighted_cumulation_ver2_kernel( at::Tensor query_mask, at::Tensor query_hash_code, at::Tensor query_weight, at::Tensor key_mask, at::Tensor key_hash_code, at::Tensor key_weight, at::Tensor value, int hashtable_capacity, bool use_cuda ) { int batch_size = query_hash_code.size(0); int num_hash_f = query_hash_code.size(2); int num_query = query_hash_code.size(1); int num_key = key_hash_code.size(1); int value_dim = value.size(2); int weight_dim = query_weight.size(2); at::Tensor count_sort_table = at::zeros({batch_size, num_hash_f, 
hashtable_capacity}, query_hash_code.options()); at::Tensor key_sorted_idxes = at::zeros({batch_size, num_hash_f, num_key}, query_hash_code.options()); at::Tensor query_info = at::zeros({batch_size, num_query, 2, num_hash_f}, query_hash_code.options()); at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options()); if (use_cuda) { int *query_mask_ptr = query_mask.data_ptr<int>(); int *query_hash_code_ptr = query_hash_code.data_ptr<int>(); float *query_weight_ptr = query_weight.data_ptr<float>(); int *key_mask_ptr = key_mask.data_ptr<int>(); int *key_hash_code_ptr = key_hash_code.data_ptr<int>(); float *key_weight_ptr = key_weight.data_ptr<float>(); float *value_ptr = value.data_ptr<float>(); int *count_sort_table_ptr = count_sort_table.data_ptr<int>(); int *key_sorted_idxes_ptr = key_sorted_idxes.data_ptr<int>(); int *query_info_ptr = query_info.data_ptr<int>(); float *cumulation_value_ptr = cumulation_value.data_ptr<float>(); { dim3 threads_step13(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f)); dim3 blocks_step13(num_key / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size); dim3 threads_step2(min(hashtable_capacity, OPTIMAL_THREADS_PER_BLOCK)); dim3 blocks_step2(num_hash_f, batch_size); int shared_mem = hashtable_capacity * sizeof(float); count_sort_step1_cuda_kernel<<<blocks_step13, threads_step13>>>( key_mask_ptr, key_hash_code_ptr, count_sort_table_ptr, batch_size, num_hash_f, hashtable_capacity, num_key ); count_sort_step2_cuda_kernel<<<blocks_step2, threads_step2, shared_mem>>>( count_sort_table_ptr, batch_size, num_hash_f, hashtable_capacity ); count_sort_step3_cuda_kernel<<<blocks_step13, threads_step13>>>( key_mask_ptr, key_hash_code_ptr, count_sort_table_ptr, key_sorted_idxes_ptr, batch_size, num_hash_f, hashtable_capacity, num_key ); } { dim3 threads(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f)); dim3 blocks(num_query / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size); extract_query_info_cuda_kernel<<<blocks, threads>>>( query_mask_ptr, query_hash_code_ptr, count_sort_table_ptr, query_info_ptr, batch_size, num_hash_f, hashtable_capacity, num_query ); } { dim3 threads(WARP_SIZE, OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE); dim3 blocks(num_query, num_hash_f, batch_size); int shared_mem = (weight_dim + WARP_SIZE) * sizeof(float); lsh_weighted_cumulation_ver2_step2_cuda_kernel<<<blocks, threads, shared_mem>>>( query_mask_ptr, query_info_ptr, key_sorted_idxes_ptr, query_weight_ptr, key_weight_ptr, value_ptr, cumulation_value_ptr, batch_size, num_hash_f, num_query, num_key, value_dim, weight_dim ); } } return cumulation_value; } at::Tensor lsh_weighted_cumulation_ver3_kernel( at::Tensor query_mask, at::Tensor query_hash_code, at::Tensor query_weight, at::Tensor key_mask, at::Tensor key_hash_code, at::Tensor key_weight, at::Tensor value, int hashtable_capacity, bool use_cuda ) { int batch_size = query_hash_code.size(0); int num_hash_f = query_hash_code.size(2); int num_query = query_hash_code.size(1); int num_key = key_hash_code.size(1); int value_dim = value.size(2); int weight_dim = query_weight.size(2); at::Tensor count_sort_table = at::zeros({batch_size, num_hash_f, hashtable_capacity}, query_hash_code.options()); at::Tensor query_sorted_idxes = at::zeros({batch_size, num_hash_f, num_query}, query_hash_code.options()); at::Tensor key_info = at::zeros({batch_size, num_key, 2, num_hash_f}, query_hash_code.options()); at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options()); if 
(use_cuda) { int *query_mask_ptr = query_mask.data_ptr<int>(); int *query_hash_code_ptr = query_hash_code.data_ptr<int>(); float *query_weight_ptr = query_weight.data_ptr<float>(); int *key_mask_ptr = key_mask.data_ptr<int>(); int *key_hash_code_ptr = key_hash_code.data_ptr<int>(); float *key_weight_ptr = key_weight.data_ptr<float>(); float *value_ptr = value.data_ptr<float>(); int *count_sort_table_ptr = count_sort_table.data_ptr<int>(); int *query_sorted_idxes_ptr = query_sorted_idxes.data_ptr<int>(); int *key_info_ptr = key_info.data_ptr<int>(); float *cumulation_value_ptr = cumulation_value.data_ptr<float>(); { dim3 threads_step13(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f)); dim3 blocks_step13(num_query / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size); dim3 threads_step2(min(hashtable_capacity, OPTIMAL_THREADS_PER_BLOCK)); dim3 blocks_step2(num_hash_f, batch_size); int shared_mem = hashtable_capacity * sizeof(float); count_sort_step1_cuda_kernel<<<blocks_step13, threads_step13>>>( query_mask_ptr, query_hash_code_ptr, count_sort_table_ptr, batch_size, num_hash_f, hashtable_capacity, num_query ); count_sort_step2_cuda_kernel<<<blocks_step2, threads_step2, shared_mem>>>( count_sort_table_ptr, batch_size, num_hash_f, hashtable_capacity ); count_sort_step3_cuda_kernel<<<blocks_step13, threads_step13>>>( query_mask_ptr, query_hash_code_ptr, count_sort_table_ptr, query_sorted_idxes_ptr, batch_size, num_hash_f, hashtable_capacity, num_query ); } { dim3 threads(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f)); dim3 blocks(num_key / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size); extract_query_info_cuda_kernel<<<blocks, threads>>>( key_mask_ptr, key_hash_code_ptr, count_sort_table_ptr, key_info_ptr, batch_size, num_hash_f, hashtable_capacity, num_key ); } { dim3 threads(WARP_SIZE, OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE); dim3 blocks(num_key, num_hash_f, batch_size); int shared_mem = (weight_dim + value_dim + WARP_SIZE) * sizeof(float); lsh_weighted_cumulation_ver3_step2_cuda_kernel<<<blocks, threads, shared_mem>>>( query_sorted_idxes_ptr, key_mask_ptr, key_info_ptr, query_weight_ptr, key_weight_ptr, value_ptr, cumulation_value_ptr, batch_size, num_hash_f, num_query, num_key, value_dim, weight_dim ); } } return cumulation_value; } at::Tensor lsh_weighted_cumulation_ver4_kernel( at::Tensor query_mask, at::Tensor query_hash_code, at::Tensor query_weight, at::Tensor key_mask, at::Tensor key_hash_code, at::Tensor key_weight, at::Tensor value, int hashtable_capacity, bool use_cuda ) { int batch_size = query_hash_code.size(0); int num_hash_f = query_hash_code.size(2); int num_query = query_hash_code.size(1); int num_key = key_hash_code.size(1); int value_dim = value.size(2); int weight_dim = query_weight.size(2); at::Tensor count_sort_table = at::zeros({batch_size, num_hash_f, hashtable_capacity}, query_hash_code.options()); at::Tensor query_sorted_idxes = at::zeros({batch_size, num_hash_f, num_query}, query_hash_code.options()); at::Tensor key_info = at::zeros({batch_size, num_key, 2, num_hash_f}, query_hash_code.options()); at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options()); if (use_cuda) { int *query_mask_ptr = query_mask.data_ptr<int>(); int *query_hash_code_ptr = query_hash_code.data_ptr<int>(); float *query_weight_ptr = query_weight.data_ptr<float>(); int *key_mask_ptr = key_mask.data_ptr<int>(); int *key_hash_code_ptr = key_hash_code.data_ptr<int>(); float *key_weight_ptr = key_weight.data_ptr<float>(); 
float *value_ptr = value.data_ptr<float>(); int *count_sort_table_ptr = count_sort_table.data_ptr<int>(); int *query_sorted_idxes_ptr = query_sorted_idxes.data_ptr<int>(); int *key_info_ptr = key_info.data_ptr<int>(); float *cumulation_value_ptr = cumulation_value.data_ptr<float>(); { dim3 threads_step13(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f)); dim3 blocks_step13(num_query / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size); dim3 threads_step2(min(hashtable_capacity, OPTIMAL_THREADS_PER_BLOCK)); dim3 blocks_step2(num_hash_f, batch_size); int shared_mem = hashtable_capacity * sizeof(float); count_sort_step1_cuda_kernel<<<blocks_step13, threads_step13>>>( query_mask_ptr, query_hash_code_ptr, count_sort_table_ptr, batch_size, num_hash_f, hashtable_capacity, num_query ); count_sort_step2_cuda_kernel<<<blocks_step2, threads_step2, shared_mem>>>( count_sort_table_ptr, batch_size, num_hash_f, hashtable_capacity ); count_sort_step3_cuda_kernel<<<blocks_step13, threads_step13>>>( query_mask_ptr, query_hash_code_ptr, count_sort_table_ptr, query_sorted_idxes_ptr, batch_size, num_hash_f, hashtable_capacity, num_query ); } { dim3 threads(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f)); dim3 blocks(num_key / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size); extract_query_info_cuda_kernel<<<blocks, threads>>>( key_mask_ptr, key_hash_code_ptr, count_sort_table_ptr, key_info_ptr, batch_size, num_hash_f, hashtable_capacity, num_key ); } { dim3 threads(WARP_SIZE, OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE); dim3 blocks(num_key, batch_size); int shared_mem = (weight_dim + value_dim + 2 * num_hash_f) * sizeof(float); lsh_weighted_cumulation_ver4_step2_cuda_kernel<<<blocks, threads, shared_mem>>>( query_sorted_idxes_ptr, key_mask_ptr, key_info_ptr, query_weight_ptr, key_weight_ptr, value_ptr, cumulation_value_ptr, batch_size, num_hash_f, num_query, num_key, value_dim, weight_dim ); } } return cumulation_value; }
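// ---------------------------------------------------------------------------
// Editorial sketch (not part of the original file): a host-side reference of the
// counting-sort pattern that the count_sort_step1/2/3_cuda_kernel calls above
// appear to implement for a single (batch, hash function) slice -- step 1 builds a
// histogram over hash buckets, step 2 turns it into running offsets, and step 3
// scatters the key (or query) indices into bucket order. The function name is
// illustrative only; it relies on the <vector> include at the top of this file.
static std::vector<int> sort_keys_by_hash_code_reference(const std::vector<int> &hash_code, int hashtable_capacity)
{
  std::vector<int> bucket_start(hashtable_capacity + 1, 0);
  for (int code : hash_code) {                                 // step 1: histogram of hash codes
    bucket_start[code + 1] += 1;
  }
  for (int b = 1; b <= hashtable_capacity; b++) {              // step 2: prefix sum -> start offset of each bucket
    bucket_start[b] += bucket_start[b - 1];
  }
  std::vector<int> sorted_idxes(hash_code.size());
  for (int key = 0; key < (int)hash_code.size(); key++) {      // step 3: scatter indices into bucket order
    sorted_idxes[bucket_start[hash_code[key]]++] = key;
  }
  return sorted_idxes;
}
// ---------------------------------------------------------------------------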
namespace amgx { template <typename index_type, typename value_type> __global__ void shift_diagonal(index_type num_rows, const index_type *row_offsets, const index_type *col_indices, value_type *values, value_type shift) { index_type tidx = blockDim.x * blockIdx.x + threadIdx.x; for (int r = tidx; r < num_rows; r += blockDim.x * gridDim.x) { index_type row_start = row_offsets[r]; index_type row_end = row_offsets[r + 1]; for (int j = row_start; j < row_end; j++) { if (col_indices[j] == r) { values[j] += shift; continue; } } } } // OBSOLETE compute a when the input is H only template <typename index_type, typename value_type> __global__ void dangling_nodes(index_type num_rows, const index_type *row_offsets, value_type *aa) { index_type tidx = blockDim.x * blockIdx.x + threadIdx.x; for (int r = tidx; r < num_rows; r += blockDim.x * gridDim.x) { index_type row_start = row_offsets[r]; index_type row_end = row_offsets[r + 1]; // NOTE 1 : a = alpha*a + (1-alpha)e // NOTE 2 : a is initialized to (1-alpha) if (row_start == row_end) { aa[r] = 1.0; // NOTE 3 : alpha*1 + (1-alpha)*1 = 1.0 } } } // used when a is given as input and the matrix is H^T template <typename index_type, typename value_type> __global__ void update_a(index_type num_rows, value_type *aa, value_type beta) { index_type tidx = blockDim.x * blockIdx.x + threadIdx.x; for (int r = tidx; r < num_rows; r += blockDim.x * gridDim.x) { // NOTE 1 : a = alpha*a + (1-alpha)e if (aa[r] == 0.0) { aa[r] = beta; // NOTE 2 : alpha*0 + (1-alpha)*1 = (1-alpha) } } } template <class TConfig> SingleIteration_EigenSolver<TConfig>::SingleIteration_EigenSolver(AMG_Config &cfg, const std::string &cfg_scope) : Base(cfg, cfg_scope), m_cfg(cfg), m_operator(NULL) { m_convergence_check_freq = cfg.getParameter<int>("eig_convergence_check_freq", cfg_scope); } template <class TConfig> SingleIteration_EigenSolver<TConfig>::~SingleIteration_EigenSolver() { /*if (m_operator) delete m_operator;*/ if (this->m_which == EIG_SMALLEST) { delete m_operator; } if (this->m_which == EIG_PAGERANK) { delete m_operator; } free_allocated(); } template <class TConfig> void SingleIteration_EigenSolver<TConfig>::free_allocated() { } template <class TConfig> void SingleIteration_EigenSolver<TConfig>::shift_matrix() { ValueTypeMat shift = this->m_shift; if (shift == 0) { return; } Matrix<TConfig> *pA = dynamic_cast< Matrix<TConfig>* > (this->m_A); Matrix<TConfig> &A = *pA; int num_threads = 128; int max_grid_size = 4096; int num_rows = A.get_num_rows(); int num_blocks = std::min(max_grid_size, (num_rows + num_threads - 1) / num_rows); shift_diagonal <<< num_blocks, num_threads>>>(num_rows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), -shift); cudaCheckError(); } // OBSOLETE compute a when the input is H only template <class TConfig> void SingleIteration_EigenSolver<TConfig>::get_dangling_nodes() { Matrix<TConfig> *pA = dynamic_cast< Matrix<TConfig>* > (this->m_A); Matrix<TConfig> &A = *pA; int num_threads = 128; int max_grid_size = 4096; int num_rows = A.get_num_rows(); int num_blocks = std::min(max_grid_size, (num_rows + num_threads - 1) / num_rows); dangling_nodes <<< num_blocks, num_threads>>>(num_rows, A.row_offsets.raw(), m_a.raw()); cudaCheckError(); // CPU CODE : you shouldn't use it, it is very slow /* for (int i = 0; i < num_rows; ++i) { if (A.row_offsets[i] == A.row_offsets[i+1]) m_a[i] = 1.0; // alpha*1 + (1-alpha)*1 = 1.0 else m_a[i] = beta; } */ } // 0 are replaced by 1-alpha template <class TConfig> void 
SingleIteration_EigenSolver<TConfig>::update_dangling_nodes() { Operator<TConfig> &A = *this->m_A; ValueTypeVec beta = 1.0 - this->m_damping_factor; int num_rows = A.get_num_rows(); // CPU if (TConfig::memSpace == AMGX_host) { for (int i = 0; i < num_rows; i++) { // NOTE 1 : a = alpha*a + (1-alpha)e if (m_a[i] == 0.0) { m_a[i] = beta; // NOTE 2 : alpha*0 + (1-alpha)*1 = (1-alpha) } } } //GPU else { int num_threads = 128; int max_grid_size = 4096; int num_blocks = std::min(max_grid_size, (num_rows + num_threads - 1) / num_rows); update_a <<< num_blocks, num_threads>>>(num_rows, m_a.raw(), beta ); cudaCheckError(); } } template <class TConfig> void SingleIteration_EigenSolver<TConfig>::solver_setup() { Operator<TConfig> &A = *this->m_A; ViewType oldView = A.currentView(); A.setViewExterior(); if (this->m_which == EIG_PAGERANK) { PagerankOperator<TConfig> *op = new PagerankOperator<TConfig>(A, &m_a, &m_b, this->m_damping_factor); m_operator = op; } else if (this->m_which == EIG_SMALLEST) { Solver<TConfig> *solver = SolverFactory<TConfig>::allocate(m_cfg, "default", "solver"); #ifdef AMGX_EXPLICIT_SHIFT shift_matrix(); SolveOperator<TConfig> *solve_op = new SolveOperator<TConfig>(A, *solver); #else ShiftedOperator<TConfig> *op = new ShiftedOperator<TConfig>(A, -this->m_shift); SolveOperator<TConfig> *solve_op = new SolveOperator<TConfig>(*op, *solver); #endif solve_op->setup(); m_operator = solve_op; } else { m_operator = &A; } const int N = static_cast<int>(A.get_num_cols() * A.get_block_dimy()); // Allocate two vectors. m_v.resize(N); m_x.resize(N); m_allocated_vectors.push_back(&m_v); m_allocated_vectors.push_back(&m_x); // Vectors "a" and "b" are needed only for PR if (this->m_which == EIG_PAGERANK) { m_a.resize(N); m_b.resize(N); m_allocated_vectors.push_back(&m_a); m_allocated_vectors.push_back(&m_b); } int start_tag = 100; for (int i = 0; i < m_allocated_vectors.size(); ++i) { VVector *v = m_allocated_vectors[i]; v->tag = start_tag + i; v->set_block_dimy(A.get_block_dimy()); v->set_block_dimx(1); v->dirtybit = 1; v->delayed_send = 1; } A.setView(oldView); } template <class TConfig> void SingleIteration_EigenSolver<TConfig>::solver_pagerank_setup(VVector &a) { if (this->m_which == EIG_PAGERANK) { Matrix<TConfig> *pA = dynamic_cast< Matrix<TConfig>* > (this->m_A); Matrix<TConfig> &A = *pA; ViewType oldView = A.currentView(); A.setViewExterior(); int offset, size; A.getOffsetAndSizeForView(A.getViewExterior(), &offset, &size); copy(a, m_a, offset, size); //Get the number of rows of the matrix int num_rows_loc = A.get_num_rows(); int num_rows_glob = num_rows_loc; // MPI? #ifdef AMGX_WITH_MPI int mpi_initialized = 0; MPI_Initialized(&mpi_initialized); if (mpi_initialized) { if (A.is_matrix_distributed()) { A.getManager()->global_reduce_sum(&num_rows_glob); } } #endif // Requiered to compute b // a = alpha*a + (1-alpha)e update_dangling_nodes(); // b is a constant and uniform vector ValueTypeMat tmp = 1.0 / num_rows_glob; fill(m_b, tmp); A.setView(oldView); } } template <class TConfig> void SingleIteration_EigenSolver<TConfig>::solve_init(VVector &x) { Operator<TConfig> &A = *this->m_A; ViewType oldView = A.currentView(); A.setViewExterior(); int offset, size; A.getOffsetAndSizeForView(A.getViewExterior(), &offset, &size); copy(x, m_x, offset, size); std::swap(m_x, m_v); A.setView(oldView); } // One iteration of the single iteration. 
// MATLAB code of the algorithm: // while true // V = X / norm(X); // X = linsolve(A, V); // lambda = V' * X; // R = X - lambda * V; // if norm(R) < tol * abs(lambda) // break; // end template <class TConfig> bool SingleIteration_EigenSolver<TConfig>::solve_iteration(VVector &x) { Operator<TConfig> &A = *this->m_A; ViewType oldView = A.currentView(); A.setViewExterior(); int offset, size; A.getOffsetAndSizeForView(A.getViewExterior(), &offset, &size); // V = X / norm(X) scal(m_v, ValueTypeVec(1) / get_norm(A, m_v, this->m_norm_type), offset, size); // X = linsolve(A, v) m_operator->apply(m_v, m_x); if ((this->m_curr_iter % this->m_convergence_check_freq) == 0) { ValueTypeVec lambda; if (this->m_which == EIG_PAGERANK) { // The maximum eigenvalue of G is 1.0 lambda = 1.0; // v = x - v // axpy(m_x, m_v, ValueTypeVec(-1),offset, size); axpby(m_v, m_x, m_v, ValueTypeVec(-1.0), ValueTypeVec(1), offset, size); } else { // lambda = v.x lambda = dot(A, m_v, m_x); // v = x - lambda * v axpby(m_v, m_x, m_v, -lambda, ValueTypeVec(1), offset, size); } ValueTypeVec residual_norm = get_norm(A, m_v, this->m_norm_type); this->m_residuals.push_back(residual_norm / fabs(lambda)); // Check convergence. if (residual_norm < this->m_tolerance * fabs(lambda)) { // Normalize eigenvector. if (this->m_which == EIG_PAGERANK) { //Norm L1 is more fited for the output of PageRank ValueTypeVec norm = get_norm(A, m_x, L1); scal(m_x, ValueTypeVec(1) / norm, offset, size); } else { ValueTypeVec norm = get_norm(A, m_x, this->m_norm_type); scal(m_x, ValueTypeVec(1) / norm, offset, size); } // With inverse iteration we need to scale the eigenvector by the inverse of the eigenvalue, // but this doesn't seems to be needed since we already normalized the eigenvector just above. this->m_eigenvectors.push_back(m_x); copy(m_x, x, offset, size); this->m_eigenvalues.push_back(lambda); this->postprocess_eigenpairs(); return true; } } std::swap(m_x, m_v); A.setView(oldView); return false; } template <class TConfig> void SingleIteration_EigenSolver<TConfig>::solve_finalize() { } // Explicit template instantiation. #define AMGX_CASE_LINE(CASE) template class SingleIteration_EigenSolver<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE };
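// ---------------------------------------------------------------------------
// Editorial sketch (not part of the original file): a dense, host-side version of
// the loop shown in the MATLAB comment above, with the operator application left
// abstract (A*v for the plain single iteration, a linear solve for inverse
// iteration, the PageRank operator for EIG_PAGERANK). All names are illustrative.
#include <vector>
#include <cmath>
#include <cstddef>
#include <functional>
static double single_iteration_reference(
    const std::function<void(const std::vector<double>&, std::vector<double>&)> &apply_operator,
    std::vector<double> &x, double tol, int max_iters)
{
    std::vector<double> v(x.size());
    double lambda = 0.0;
    for (int iter = 0; iter < max_iters; ++iter)
    {
        double norm_x = 0.0;                                          // V = X / norm(X)
        for (double xi : x) { norm_x += xi * xi; }
        norm_x = std::sqrt(norm_x);
        for (std::size_t i = 0; i < x.size(); ++i) { v[i] = x[i] / norm_x; }
        apply_operator(v, x);                                         // X = op(V)
        lambda = 0.0;                                                 // lambda = V' * X
        for (std::size_t i = 0; i < x.size(); ++i) { lambda += v[i] * x[i]; }
        double norm_r = 0.0;                                          // R = X - lambda * V
        for (std::size_t i = 0; i < x.size(); ++i)
        {
            const double r = x[i] - lambda * v[i];
            norm_r += r * r;
        }
        if (std::sqrt(norm_r) < tol * std::fabs(lambda)) { break; }   // convergence check
    }
    return lambda;
}
// ---------------------------------------------------------------------------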
#include <exceptions/cuda_exception.h> #include <helpers/PointersManager.h> #include <math/templatemath.h> #include <ops/declarable/helpers/convolutions.h> namespace sd { namespace ops { ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static SD_KERNEL void avgPooling2dCuda(const void *vx, const sd::LongType *xShapeInfo, void *vz, const sd::LongType *zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { // input is [bS, iC, iH, iW] // output is [bS, iC, oH, oW] const auto x = reinterpret_cast<const X *>(vx); auto z = reinterpret_cast<Z *>(vz); __shared__ int bS, iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX, length, kHEff, kWEff; if (threadIdx.x == 0) { bS = shape::sizeAt(xShapeInfo, 0); iC = shape::sizeAt(xShapeInfo, 1); oH = shape::sizeAt(zShapeInfo, 2); oW = shape::sizeAt(zShapeInfo, 3); iH = shape::sizeAt(xShapeInfo, 2); iW = shape::sizeAt(xShapeInfo, 3); strideB = shape::stride(xShapeInfo)[0]; strideC = shape::stride(xShapeInfo)[1]; strideY = shape::stride(xShapeInfo)[2]; strideX = shape::stride(xShapeInfo)[3]; strideOB = shape::stride(zShapeInfo)[0]; strideOC = shape::stride(zShapeInfo)[1]; strideOY = shape::stride(zShapeInfo)[2]; strideOX = shape::stride(zShapeInfo)[3]; length = shape::length(zShapeInfo); // Replace kernel H/W with *effective* kernel H/W accounting for dilatyon kHEff = kH + (kH - 1) * (dH - 1); kWEff = kW + (kW - 1) * (dW - 1); } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int index = tid; index < length; index += blockDim.x * gridDim.x) { const int pw = index % oW; const int ph = (index / oW) % oH; const int c = (index / oW / oH) % iC; const int n = index / oW / oH / iC; int hstart = sH * ph - pH; int wstart = sW * pw - pW; int hend = hstart + kHEff; int wend = wstart + kWEff; if (hstart < 0) { int f = sd::math::sd_ceil<Z, int>((Z)-hstart / (Z)dH); hstart += f * dH; } if (wstart < 0) { int f = sd::math::sd_ceil<Z, int>((Z)-wstart / (Z)dW); wstart += f * dW; } if (hend > iH) { int f = sd::math::sd_ceil<Z, int>((Z)(hend - iH) / (Z)dH); hend -= f * dH; } if (wend > iW) { int f = sd::math::sd_ceil<Z, int>((Z)(wend - iW) / (Z)dW); wend -= f * dW; } // Accounts for dilation int pool_size = sd::math::sd_ceil<double, int>((double)(hend - hstart) / (double)dH) * sd::math::sd_ceil<double, int>((double)(wend - wstart) / (double)dW); Z sum = 0.0f; const X *inSlice = x + (n * strideB + c * strideC); for (int h = hstart; h < hend; h += dH) for (int w = wstart; w < wend; w += dW) sum += static_cast<Z>(inSlice[h * strideY + w * strideX]); int divide_factor = pool_size; // Case 0: exclude padding if (extraParam0 == 1) // Case 1: include padding divide_factor = kH * kW; z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = sum / static_cast<Z>(divide_factor); } } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static void avgPooling2dCudaLauncher(sd::LaunchContext &block, const void *vx, const sd::LongType *vxShapeInfo, void *vz, const sd::LongType *vzShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { avgPooling2dCuda<X, Z><<<512, 512, 4192, *block.getCudaStream()>>>(vx, vxShapeInfo, vz, vzShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, extraParam0); } 
////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static SD_KERNEL void pnormPooling2dCuda(const void *vx, const sd::LongType *xShapeInfo, void *vz, const sd::LongType *zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { // input is [bS, iC, iH, iW] // output is [bS, iC, oH, oW] const auto x = reinterpret_cast<const X *>(vx); auto z = reinterpret_cast<Z *>(vz); __shared__ int bS, iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX, length, kHEff, kWEff; __shared__ bool fOrder; if (threadIdx.x == 0) { bS = shape::sizeAt(xShapeInfo, 0); iC = shape::sizeAt(xShapeInfo, 1); oH = shape::sizeAt(zShapeInfo, 2); oW = shape::sizeAt(zShapeInfo, 3); iH = shape::sizeAt(xShapeInfo, 2); iW = shape::sizeAt(xShapeInfo, 3); strideB = shape::stride(xShapeInfo)[0]; strideC = shape::stride(xShapeInfo)[1]; strideY = shape::stride(xShapeInfo)[2]; strideX = shape::stride(xShapeInfo)[3]; strideOB = shape::stride(zShapeInfo)[0]; strideOC = shape::stride(zShapeInfo)[1]; strideOY = shape::stride(zShapeInfo)[2]; strideOX = shape::stride(zShapeInfo)[3]; length = shape::length(zShapeInfo); // Replace kernel H/W with *effective* kernel H/W accounting for dilatyon kHEff = kH + (kH - 1) * (dH - 1); kWEff = kW + (kW - 1) * (dW - 1); } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int index = tid; index < length; index += blockDim.x * gridDim.x) { const int pw = index % oW; const int ph = (index / oW) % oH; const int c = (index / oW / oH) % iC; const int n = index / oW / oH / iC; int hstart = sH * ph - pH; int wstart = sW * pw - pW; int hend = hstart + kHEff; int wend = wstart + kWEff; if (hstart < 0) { int f = sd::math::sd_ceil<Z, int>((Z)-hstart / (Z)dH); hstart += f * dH; } if (wstart < 0) { int f = sd::math::sd_ceil<Z, int>((Z)-wstart / (Z)dW); wstart += f * dW; } if (hend > iH) { int f = sd::math::sd_ceil<Z, int>((Z)(hend - iH) / (Z)dH); hend -= f * dH; } if (wend > iW) { int f = sd::math::sd_ceil<Z, int>((Z)(wend - iW) / (Z)dW); wend -= f * dW; } // Accounts for dilation int pool_size = sd::math::sd_ceil<double, int>((double)(hend - hstart) / (double)dH) * sd::math::sd_ceil<double, int>((double)(wend - wstart) / (double)dW); Z sum = 0.f; const X *inSlice = x + (n * strideB + c * strideC); for (int h = hstart; h < hend; h += dH) for (int w = wstart; w < wend; w += dW) sum += sd::math::sd_pow<Z, Z, Z>(static_cast<Z>(sd::math::sd_abs<X>(inSlice[h * strideY + w * strideX])), extraParam0); z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = sd::math::sd_pow<Z, Z, Z>(sum, (Z)1.0f / extraParam0); } } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static void pnormPooling2dCudaLauncher(sd::LaunchContext &block, const void *vx, const sd::LongType *vxShapeInfo, void *vz, const sd::LongType *vzShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { pnormPooling2dCuda<X, Z><<<512, 512, 4192, *block.getCudaStream()>>>(vx, vxShapeInfo, vz, vzShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, extraParam0); } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static SD_KERNEL void maxPooling2dCuda(const void *vx, const sd::LongType *xShapeInfo, void *vz, const sd::LongType *zShapeInfo, const int kH, 
const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { // input is [bS, iC, iH, iW] // output is [bS, iC, oH, oW] const auto x = reinterpret_cast<const X *>(vx); auto z = reinterpret_cast<Z *>(vz); __shared__ int bS, iC, oH, oW, iH, iW, strideB, strideC, strideY, strideX, strideOB, strideOC, strideOY, strideOX, length, kHEff, kWEff; __shared__ bool fOrder; if (threadIdx.x == 0) { bS = shape::sizeAt(xShapeInfo, 0); iC = shape::sizeAt(xShapeInfo, 1); oH = shape::sizeAt(zShapeInfo, 2); oW = shape::sizeAt(zShapeInfo, 3); iH = shape::sizeAt(xShapeInfo, 2); iW = shape::sizeAt(xShapeInfo, 3); strideB = shape::stride(xShapeInfo)[0]; strideC = shape::stride(xShapeInfo)[1]; strideY = shape::stride(xShapeInfo)[2]; strideX = shape::stride(xShapeInfo)[3]; strideOB = shape::stride(zShapeInfo)[0]; strideOC = shape::stride(zShapeInfo)[1]; strideOY = shape::stride(zShapeInfo)[2]; strideOX = shape::stride(zShapeInfo)[3]; length = shape::length(zShapeInfo); // Replace kernel H/W with *effective* kernel H/W accounting for dilatyon kHEff = kH + (kH - 1) * (dH - 1); kWEff = kW + (kW - 1) * (dW - 1); } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int index = tid; index < length; index += blockDim.x * gridDim.x) { const int pw = index % oW; const int ph = (index / oW) % oH; const int c = (index / oW / oH) % iC; const int n = index / oW / oH / iC; int hstart = sH * ph - pH; int wstart = sW * pw - pW; int hend = hstart + kHEff; int wend = wstart + kWEff; if (hstart < 0) { int f = sd::math::sd_ceil<Z, int>((Z)-hstart / (Z)dH); hstart += f * dH; } if (wstart < 0) { int f = sd::math::sd_ceil<Z, int>((Z)-wstart / (Z)dW); wstart += f * dW; } if (hend > iH) { int f = sd::math::sd_ceil<Z, int>((Z)(hend - iH) / (Z)dH); hend -= f * dH; } if (wend > iW) { int f = sd::math::sd_ceil<Z, int>((Z)(wend - iW) / (Z)dW); wend -= f * dW; } // Accounts for dilation int pool_size = sd::math::sd_ceil<double, int>((double)(hend - hstart) / (double)dH) * sd::math::sd_ceil<double, int>((double)(wend - wstart) / (double)dW); Z max = -sd::DataTypeUtils::max<Z>(); const X *inSlice = x + (n * strideB + c * strideC); for (int h = hstart; h < hend; h += dH) { for (int w = wstart; w < wend; w += dW) { Z v = static_cast<Z>(inSlice[h * strideY + w * strideX]); if (v > max) max = v; } } z[n * strideOB + c * strideOC + pw * strideOX + ph * strideOY] = max; } } ////////////////////////////////////////////////////////////////////////// template <typename X, typename Z> static void maxPooling2dCudaLauncher(sd::LaunchContext &block, const void *vx, const sd::LongType *vxShapeInfo, void *vz, const sd::LongType *vzShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int extraParam0) { maxPooling2dCuda<X, Z><<<512, 512, 4192, *block.getCudaStream()>>>(vx, vxShapeInfo, vz, vzShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, extraParam0); } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::pooling2d(sd::graph::Context &block, const NDArray &input, NDArray &output, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const PoolingType poolingMode, const int extraParam0) { if (!input.isActualOnDeviceSide()) input.syncToDevice(); switch (poolingMode) { case MAX_POOL: { BUILD_SINGLE_SELECTOR_TWICE( input.dataType(), maxPooling2dCudaLauncher, (*block.launchContext(), input.specialBuffer(), 
input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0), SD_NUMERIC_TYPES); } break; case AVG_POOL: { BUILD_SINGLE_SELECTOR_TWICE( input.dataType(), avgPooling2dCudaLauncher, (*block.launchContext(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0), SD_NUMERIC_TYPES); } break; case PNORM_POOL: { BUILD_SINGLE_SELECTOR_TWICE( input.dataType(), pnormPooling2dCudaLauncher, (*block.launchContext(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, extraParam0), SD_FLOAT_TYPES); } break; default: throw std::runtime_error("Pooling2D: Unknown PoolingType used"); } output.tickWriteDevice(); input.tickReadDevice(); auto result = cudaStreamSynchronize(*block.launchContext()->getCudaStream()); if (result != 0) throw cuda_exception::build("Pooling2D failed", result); } } // namespace ops } // namespace sd
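// ---------------------------------------------------------------------------
// Editorial sketch (not part of the original file): a host-side reference of the
// per-output-cell window arithmetic shared by the three kernels above -- the
// effective (dilated) kernel size, the clamping of the window to the image, and
// the include/exclude-padding divisor used by AVG_POOL (extraParam0 == 1 means
// include padding). All names are illustrative; the input is assumed to be a
// single dense iH x iW channel in row-major order.
static float avg_pool_cell_reference(const float *in, int iH, int iW, int ph, int pw,
                                     int kH, int kW, int sH, int sW, int pH, int pW,
                                     int dH, int dW, bool include_padding)
{
  const int kHEff = kH + (kH - 1) * (dH - 1);   // e.g. kH = 3, dH = 2  ->  kHEff = 5
  const int kWEff = kW + (kW - 1) * (dW - 1);
  int hstart = sH * ph - pH, hend = hstart + kHEff;
  int wstart = sW * pw - pW, wend = wstart + kWEff;
  while (hstart < 0) hstart += dH;              // clamp the window into the image,
  while (wstart < 0) wstart += dW;              // stepping by the dilation so the
  while (hend > iH) hend -= dH;                 // visited grid stays aligned
  while (wend > iW) wend -= dW;
  float sum = 0.0f;
  int pool_size = 0;
  for (int h = hstart; h < hend; h += dH)
    for (int w = wstart; w < wend; w += dW) { sum += in[h * iW + w]; ++pool_size; }
  if (pool_size == 0) return 0.0f;              // empty window guard (sketch only)
  const int divisor = include_padding ? kH * kW : pool_size;
  return sum / (float)divisor;
}
// ---------------------------------------------------------------------------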
#include "cuda_helper.h" #include "miner.h" #ifdef __INTELLISENSE__ #define __CUDA_ARCH__ 500 #define __funnelshift_r(x,y,n) (x >> n) #define atomicExch(p,x) x #endif // 64 Registers Variant for Compute 3.0 #include "quark/groestl_functions_quad.h" #include "quark/groestl_transf_quad.h" // globaler Speicher für alle HeftyHashes aller Threads static uint32_t *d_outputHashes[MAX_GPUS]; __constant__ uint32_t _ALIGN(8) c_input[32]; // muss expandiert werden __constant__ const uint32_t sha256_constantTable[64] = { 0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5, 0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174, 0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA, 0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967, 0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85, 0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070, 0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3, 0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2 }; __constant__ const uint32_t sha256_constantTable2[64] = { 0xC28A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5, 0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF374, 0x649B69C1, 0xF0FE4786, 0x0FE1EDC6, 0x240CF254, 0x4FE9346F, 0x6CC984BE, 0x61B9411E, 0x16F988FA, 0xF2C65152, 0xA88E5A6D, 0xB019FC65, 0xB9D99EC7, 0x9A1231C3, 0xE70EEAA0, 0xFDB1232B, 0xC7353EB0, 0x3069BAD5, 0xCB976D5F, 0x5A0F118F, 0xDC1EEEFD, 0x0A35B689, 0xDE0B7A04, 0x58F4CA9D, 0xE15D5B16, 0x007F3E86, 0x37088980, 0xA507EA32, 0x6FAB9537, 0x17406110, 0x0D8CD6F1, 0xCDAA3B6D, 0xC0BBBE37, 0x83613BDA, 0xDB48A363, 0x0B02E931, 0x6FD15CA7, 0x521AFACA, 0x31338431, 0x6ED41A95, 0x6D437890, 0xC39C91F2, 0x9ECCABBD, 0xB5C9A0E6, 0x532FB63C, 0xD2C741C6, 0x07237EA3, 0xA4954B68, 0x4C191D76 }; #define Ch(a, b, c) (((b^c) & a) ^ c) #define Maj(x, y, z) ((x & (y | z)) | (y & z)) //((b) & (c)) | (((b) | (c)) & (a)); //andor32(a,b,c); #define xor3b(a,b,c) ((a ^ b) ^ c) __device__ __forceinline__ uint32_t bsg2_0(const uint32_t x) { return xor3b(ROTR32(x,2),ROTR32(x,13),ROTR32(x,22)); } __device__ __forceinline__ uint32_t bsg2_1(const uint32_t x) { return xor3b(ROTR32(x,6),ROTR32(x,11),ROTR32(x,25)); } __device__ __forceinline__ uint32_t ssg2_0(const uint32_t x) { return xor3b(ROTR32(x,7),ROTR32(x,18),(x>>3)); } __device__ __forceinline__ uint32_t ssg2_1(const uint32_t x) { return xor3b(ROTR32(x,17),ROTR32(x,19),(x>>10)); } __device__ __forceinline__ static void sha2_step1(const uint32_t a,const uint32_t b,const uint32_t c, uint32_t &d,const uint32_t e,const uint32_t f,const uint32_t g, uint32_t &h,const uint32_t in, const uint32_t Kshared) { const uint32_t t1 = h + bsg2_1(e) + Ch(e, f, g) + Kshared + in; h = t1 + bsg2_0(a) + Maj(a, b, c); d+= t1; } __device__ __forceinline__ static void sha2_step2(const uint32_t a,const uint32_t b,const uint32_t c, uint32_t &d,const uint32_t e,const uint32_t f,const uint32_t g, uint32_t &h, const uint32_t Kshared) { const uint32_t t1 = h + bsg2_1(e) + Ch(e, f, g) + Kshared; h = t1 + bsg2_0(a) + Maj(a, b, c); d+= t1; } __device__ __forceinline__ static void sha256_round_body(uint32_t* in, uint32_t* state,const uint32_t* __restrict__ Kshared) { uint32_t a = state[0]; uint32_t b = 
state[1]; uint32_t c = state[2]; uint32_t d = state[3]; uint32_t e = state[4]; uint32_t f = state[5]; uint32_t g = state[6]; uint32_t h = state[7]; sha2_step1(a,b,c,d,e,f,g,h,in[0], Kshared[0]); sha2_step1(h,a,b,c,d,e,f,g,in[1], Kshared[1]); sha2_step1(g,h,a,b,c,d,e,f,in[2], Kshared[2]); sha2_step1(f,g,h,a,b,c,d,e,in[3], Kshared[3]); sha2_step1(e,f,g,h,a,b,c,d,in[4], Kshared[4]); sha2_step1(d,e,f,g,h,a,b,c,in[5], Kshared[5]); sha2_step1(c,d,e,f,g,h,a,b,in[6], Kshared[6]); sha2_step1(b,c,d,e,f,g,h,a,in[7], Kshared[7]); sha2_step1(a,b,c,d,e,f,g,h,in[8], Kshared[8]); sha2_step1(h,a,b,c,d,e,f,g,in[9], Kshared[9]); sha2_step1(g,h,a,b,c,d,e,f,in[10],Kshared[10]); sha2_step1(f,g,h,a,b,c,d,e,in[11],Kshared[11]); sha2_step1(e,f,g,h,a,b,c,d,in[12],Kshared[12]); sha2_step1(d,e,f,g,h,a,b,c,in[13],Kshared[13]); sha2_step1(c,d,e,f,g,h,a,b,in[14],Kshared[14]); sha2_step1(b,c,d,e,f,g,h,a,in[15],Kshared[15]); #pragma unroll 3 for (int i=0; i<3; i++) { #pragma unroll 16 for (int j = 0; j < 16; j++){ in[j] = in[j] + in[(j + 9) & 15] + ssg2_0(in[(j + 1) & 15]) + ssg2_1(in[(j + 14) & 15]); } sha2_step1(a, b, c, d, e, f, g, h, in[0], Kshared[16 + 16 * i]); sha2_step1(h, a, b, c, d, e, f, g, in[1], Kshared[17 + 16 * i]); sha2_step1(g, h, a, b, c, d, e, f, in[2], Kshared[18 + 16 * i]); sha2_step1(f, g, h, a, b, c, d, e, in[3], Kshared[19 + 16 * i]); sha2_step1(e, f, g, h, a, b, c, d, in[4], Kshared[20 + 16 * i]); sha2_step1(d, e, f, g, h, a, b, c, in[5], Kshared[21 + 16 * i]); sha2_step1(c, d, e, f, g, h, a, b, in[6], Kshared[22 + 16 * i]); sha2_step1(b, c, d, e, f, g, h, a, in[7], Kshared[23 + 16 * i]); sha2_step1(a, b, c, d, e, f, g, h, in[8], Kshared[24 + 16 * i]); sha2_step1(h, a, b, c, d, e, f, g, in[9], Kshared[25 + 16 * i]); sha2_step1(g, h, a, b, c, d, e, f, in[10], Kshared[26 + 16 * i]); sha2_step1(f, g, h, a, b, c, d, e, in[11], Kshared[27 + 16 * i]); sha2_step1(e, f, g, h, a, b, c, d, in[12], Kshared[28 + 16 * i]); sha2_step1(d, e, f, g, h, a, b, c, in[13], Kshared[29 + 16 * i]); sha2_step1(c, d, e, f, g, h, a, b, in[14], Kshared[30 + 16 * i]); sha2_step1(b, c, d, e, f, g, h, a, in[15], Kshared[31 + 16 * i]); } state[0] += a; state[1] += b; state[2] += c; state[3] += d; state[4] += e; state[5] += f; state[6] += g; state[7] += h; } __device__ __forceinline__ static void sha256_round_body_final(uint32_t* state,const uint32_t* Kshared) { uint32_t a = state[0]; uint32_t b = state[1]; uint32_t c = state[2]; uint32_t d = state[3]; uint32_t e = state[4]; uint32_t f = state[5]; uint32_t g = state[6]; uint32_t h = state[7]; sha2_step2(a,b,c,d,e,f,g,h, Kshared[0]); sha2_step2(h,a,b,c,d,e,f,g, Kshared[1]); sha2_step2(g,h,a,b,c,d,e,f, Kshared[2]); sha2_step2(f,g,h,a,b,c,d,e, Kshared[3]); sha2_step2(e,f,g,h,a,b,c,d, Kshared[4]); sha2_step2(d,e,f,g,h,a,b,c, Kshared[5]); sha2_step2(c,d,e,f,g,h,a,b, Kshared[6]); sha2_step2(b,c,d,e,f,g,h,a, Kshared[7]); sha2_step2(a,b,c,d,e,f,g,h, Kshared[8]); sha2_step2(h,a,b,c,d,e,f,g, Kshared[9]); sha2_step2(g,h,a,b,c,d,e,f, Kshared[10]); sha2_step2(f,g,h,a,b,c,d,e, Kshared[11]); sha2_step2(e,f,g,h,a,b,c,d, Kshared[12]); sha2_step2(d,e,f,g,h,a,b,c, Kshared[13]); sha2_step2(c,d,e,f,g,h,a,b, Kshared[14]); sha2_step2(b,c,d,e,f,g,h,a, Kshared[15]); #pragma unroll for (int i=0; i<2; i++){ sha2_step2(a, b, c, d, e, f, g, h, Kshared[16 + 16 * i]); sha2_step2(h, a, b, c, d, e, f, g, Kshared[17 + 16 * i]); sha2_step2(g, h, a, b, c, d, e, f, Kshared[18 + 16 * i]); sha2_step2(f, g, h, a, b, c, d, e, Kshared[19 + 16 * i]); sha2_step2(e, f, g, h, a, b, c, d, Kshared[20 + 16 * i]); sha2_step2(d, 
		           e, f, g, h, a, b, c, Kshared[21 + 16 * i]);
		sha2_step2(c, d, e, f, g, h, a, b, Kshared[22 + 16 * i]);
		sha2_step2(b, c, d, e, f, g, h, a, Kshared[23 + 16 * i]);
		sha2_step2(a, b, c, d, e, f, g, h, Kshared[24 + 16 * i]);
		sha2_step2(h, a, b, c, d, e, f, g, Kshared[25 + 16 * i]);
		sha2_step2(g, h, a, b, c, d, e, f, Kshared[26 + 16 * i]);
		sha2_step2(f, g, h, a, b, c, d, e, Kshared[27 + 16 * i]);
		sha2_step2(e, f, g, h, a, b, c, d, Kshared[28 + 16 * i]);
		sha2_step2(d, e, f, g, h, a, b, c, Kshared[29 + 16 * i]);
		sha2_step2(c, d, e, f, g, h, a, b, Kshared[30 + 16 * i]);
		sha2_step2(b, c, d, e, f, g, h, a, Kshared[31 + 16 * i]);
	}

	sha2_step2(a, b, c, d, e, f, g, h, Kshared[16 + 16 * 2]);
	sha2_step2(h, a, b, c, d, e, f, g, Kshared[17 + 16 * 2]);
	sha2_step2(g, h, a, b, c, d, e, f, Kshared[18 + 16 * 2]);
	sha2_step2(f, g, h, a, b, c, d, e, Kshared[19 + 16 * 2]);
	sha2_step2(e, f, g, h, a, b, c, d, Kshared[20 + 16 * 2]);
	sha2_step2(d, e, f, g, h, a, b, c, Kshared[21 + 16 * 2]);
	sha2_step2(c, d, e, f, g, h, a, b, Kshared[22 + 16 * 2]);
	sha2_step2(b, c, d, e, f, g, h, a, Kshared[23 + 16 * 2]);
	sha2_step2(a, b, c, d, e, f, g, h, Kshared[24 + 16 * 2]);
	sha2_step2(h, a, b, c, d, e, f, g, Kshared[25 + 16 * 2]);
	sha2_step2(g, h, a, b, c, d, e, f, Kshared[26 + 16 * 2]);
	sha2_step2(f, g, h, a, b, c, d, e, Kshared[27 + 16 * 2]);
	sha2_step2(e, f, g, h, a, b, c, d, Kshared[28 + 16 * 2]);
	sha2_step2(d, e, f, g, h, a, b, c, Kshared[29 + 16 * 2]);

	state[6] += g;
	state[7] += h;
}

__global__
#if __CUDA_ARCH__ > 500
__launch_bounds__(1024,2) /* to force 32 regs */
#else
__launch_bounds__(768,2) /* to force 32 regs */
#endif
void myriadgroestl_gpu_hash_sha(uint32_t threads, uint32_t startNounce, uint32_t* hashBuffer, uint32_t *resNonces, const uint64_t target64)
{
	const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
	if (thread < threads)
	{
		uint32_t W[16];
		uint32_t *inpHash = &hashBuffer[thread<<4];

		*(uint2x4*)&W[ 0] = __ldg4((uint2x4*)&inpHash[ 0]);
		*(uint2x4*)&W[ 8] = __ldg4((uint2x4*)&inpHash[ 8]);

		uint32_t buf[ 8] = {
			0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
			0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
		};

		sha256_round_body(W,buf,sha256_constantTable);
		sha256_round_body_final(buf,sha256_constantTable2);

#if 0
		// Full sha hash
		#pragma unroll
		for(int k=0; k<8; k++)
			W[k] = cuda_swab32(buf[k]);
#else
		W[6] = cuda_swab32(buf[6]);
		W[7] = cuda_swab32(buf[7]);
#endif
		if (*(uint64_t*)&W[6] <= target64){
			uint32_t tmp = atomicExch(&resNonces[0], startNounce + thread);
			if (tmp != UINT32_MAX)
				resNonces[1] = tmp;
		}
	}
}

#define TPB52 512
#define TPB50 512
#define THF 4

__global__
#if __CUDA_ARCH__ > 500
__launch_bounds__(TPB52, 2)
#else
__launch_bounds__(TPB50, 2)
#endif
void myriadgroestl_gpu_hash_quad(uint32_t threads, uint32_t startNounce, uint32_t *d_hash)
{
	// divide by 4 because 4 threads together compute one hash
	const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x)>>2;
	if (thread < threads)
	{
		const uint32_t thr = threadIdx.x & 3;

		// GROESTL
		uint32_t input[8];
		uint32_t other[8];
		uint32_t msgBitsliced[8];
		uint32_t state[8];
		uint32_t output[16];

		*(uint2x4*)input = *(uint2x4*)&c_input[((threadIdx.x & 2)<<3)];
		*(uint2x4*)other = *(uint2x4*)&c_input[(((threadIdx.x+1)&3)<<3)];

		#pragma unroll 8
		for(int k=0; k<8; k++){
			// input[k] = c_input[k+((threadIdx.x & 2)<<3)];
			// other[k] = c_input[k+(((threadIdx.x+1)&3)<<3)];
			other[k] = __shfl(other[k], threadIdx.x & 2, 4);
		}

		if ((thr == 2) || (thr == 3))
			other[4] = cuda_swab32(startNounce + thread);

		uint32_t t;
		const uint32_t perm = (threadIdx.x & 1) ?
0x7362 : 0x5140; merge8(msgBitsliced[0], input[0], input[4], perm); merge8(msgBitsliced[1], other[0], other[4], perm); merge8(msgBitsliced[2], input[1], input[5], perm); merge8(msgBitsliced[3], other[1], other[5], perm); merge8(msgBitsliced[4], input[2], input[6], perm); merge8(msgBitsliced[5], other[2], other[6], perm); merge8(msgBitsliced[6], input[3], input[7], perm); merge8(msgBitsliced[7], other[3], other[7], perm); SWAP1(msgBitsliced[0], msgBitsliced[1]); SWAP1(msgBitsliced[2], msgBitsliced[3]); SWAP1(msgBitsliced[4], msgBitsliced[5]); SWAP1(msgBitsliced[6], msgBitsliced[7]); SWAP2(msgBitsliced[0], msgBitsliced[2]); SWAP2(msgBitsliced[1], msgBitsliced[3]); SWAP2(msgBitsliced[4], msgBitsliced[6]); SWAP2(msgBitsliced[5], msgBitsliced[7]); SWAP4(msgBitsliced[0], msgBitsliced[4]); SWAP4(msgBitsliced[1], msgBitsliced[5]); SWAP4(msgBitsliced[2], msgBitsliced[6]); SWAP4(msgBitsliced[3], msgBitsliced[7]); groestl512_progressMessage_quad(state, msgBitsliced,thr); from_bitslice_quad52(state, output); uint2x4* outHash = (uint2x4*)&d_hash[thread<<4]; #if __CUDA_ARCH__ <= 500 output[0] = __byte_perm(output[0], __shfl(output[0], (threadIdx.x + 1) & 3, 4), 0x0167); output[2] = __byte_perm(output[2], __shfl(output[2], (threadIdx.x + 1) & 3, 4), 0x0167); output[4] = __byte_perm(output[4], __shfl(output[4], (threadIdx.x + 1) & 3, 4), 0x2367); output[6] = __byte_perm(output[6], __shfl(output[6], (threadIdx.x + 1) & 3, 4), 0x2367); output[8] = __byte_perm(output[8], __shfl(output[8], (threadIdx.x + 1) & 3, 4), 0x0167); output[10] = __byte_perm(output[10], __shfl(output[10], (threadIdx.x + 1) & 3, 4), 0x0167); output[12] = __byte_perm(output[12], __shfl(output[12], (threadIdx.x + 1) & 3, 4), 0x2367); output[14] = __byte_perm(output[14], __shfl(output[14], (threadIdx.x + 1) & 3, 4), 0x2367); if (thr == 0 || thr == 2){ output[0 + 1] = __shfl(output[0], (threadIdx.x + 2) & 3, 4); output[2 + 1] = __shfl(output[2], (threadIdx.x + 2) & 3, 4); output[4 + 1] = __shfl(output[4], (threadIdx.x + 2) & 3, 4); output[6 + 1] = __shfl(output[6], (threadIdx.x + 2) & 3, 4); output[8 + 1] = __shfl(output[8], (threadIdx.x + 2) & 3, 4); output[10 + 1] = __shfl(output[10], (threadIdx.x + 2) & 3, 4); output[12 + 1] = __shfl(output[12], (threadIdx.x + 2) & 3, 4); output[14 + 1] = __shfl(output[14], (threadIdx.x + 2) & 3, 4); if(thr==0){ outHash[0] = *(uint2x4*)&output[0]; outHash[1] = *(uint2x4*)&output[8]; } } #else output[ 0] = __byte_perm(output[0], __shfl(output[0], (threadIdx.x + 1) & 3, 4), 0x0167); output[ 1] = __shfl(output[0], (threadIdx.x + 2) & 3, 4); output[ 2] = __byte_perm(output[2], __shfl(output[2], (threadIdx.x + 1) & 3, 4), 0x0167); output[ 3] = __shfl(output[2], (threadIdx.x + 2) & 3, 4); output[ 4] = __byte_perm(output[4], __shfl(output[4], (threadIdx.x + 1) & 3, 4), 0x2367); output[ 5] = __shfl(output[4], (threadIdx.x + 2) & 3, 4); output[ 6] = __byte_perm(output[6], __shfl(output[6], (threadIdx.x + 1) & 3, 4), 0x2367); output[ 7] = __shfl(output[6], (threadIdx.x + 2) & 3, 4); output[ 8] = __byte_perm(output[8], __shfl(output[8], (threadIdx.x + 1) & 3, 4), 0x0167); output[ 9] = __shfl(output[8], (threadIdx.x + 2) & 3, 4); output[10] = __byte_perm(output[10], __shfl(output[10], (threadIdx.x + 1) & 3, 4), 0x0167); output[11] = __shfl(output[10], (threadIdx.x + 2) & 3, 4); output[12] = __byte_perm(output[12], __shfl(output[12], (threadIdx.x + 1) & 3, 4), 0x2367); output[13] = __shfl(output[12], (threadIdx.x + 2) & 3, 4); output[14] = __byte_perm(output[14], __shfl(output[14], (threadIdx.x + 1) & 3, 4), 
			0x2367);
		output[15] = __shfl(output[14], (threadIdx.x + 2) & 3, 4);

		if(thr==0){
			outHash[0] = *(uint2x4*)&output[0];
			outHash[1] = *(uint2x4*)&output[8];
		}
#endif
	}
}

// Setup Function
__host__
void myriadgroestl_cpu_init(int thr_id, uint32_t threads)
{
	CUDA_SAFE_CALL(cudaMalloc(&d_outputHashes[thr_id], (size_t) 64 * threads));
}

__host__
void myriadgroestl_cpu_free(int thr_id)
{
	cudaFree(d_outputHashes[thr_id]);
}

__host__
void myriadgroestl_cpu_setBlock(int thr_id, void *data)
{
	uint32_t msgBlock[32] = { 0 };
	uint32_t paddedInput[32];

	memcpy(&msgBlock[0], data, 80);
	msgBlock[20] = 0x80;
	msgBlock[31] = 0x01000000;

	for(int thr=0;thr<4;thr++)
		for(int k=0; k<8; k++)
			paddedInput[k+(thr<<3)] = msgBlock[4*k+thr];

	for(int k=0;k<8;k++){
		uint32_t temp = paddedInput[k+(1<<3)];
		paddedInput[k+(1<<3)] = paddedInput[k+(2<<3)];
		paddedInput[k+(2<<3)] = temp;
	}

	cudaMemcpyToSymbol(c_input, paddedInput, 128);
}

__host__
void myriadgroestl_cpu_hash(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_resNounce, const uint64_t target)
{
	// Compute 3.0 uses the register-optimized quad variant with warp shuffle
	// with the quad functions we now need 4 threads per hash, hence the factor of 4 in the block count
	uint32_t tpb = TPB52;
	int dev_id = device_map[thr_id];
	if (device_sm[dev_id] <= 500) tpb = TPB50;

	const dim3 grid((THF*threads + tpb-1)/tpb);
	const dim3 block(tpb);
	myriadgroestl_gpu_hash_quad <<< grid, block >>> (threads, startNounce, d_outputHashes[thr_id]);

	tpb = (device_sm[dev_id] <= 500) ? 768 : 1024;
	dim3 grid2((threads + tpb - 1) / tpb);
	dim3 block2(tpb);
	myriadgroestl_gpu_hash_sha <<< grid2, block2 >>> (threads, startNounce, d_outputHashes[thr_id], d_resNounce, target);
}
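/* --------------------------------------------------------------------------
 * Illustrative sketch only (not part of the original file): host-side handling
 * of the two-slot result buffer used by myriadgroestl_gpu_hash_sha. The kernel
 * atomically exchanges resNonces[0] with a new candidate nonce and, if the
 * previous value was not the UINT32_MAX sentinel, pushes it into resNonces[1];
 * so the host must reset both slots to UINT32_MAX before a launch and treat
 * UINT32_MAX as "no candidate". The helper names below are hypothetical.
 * -------------------------------------------------------------------------- */
#include <cuda_runtime.h>
#include <cstdint>

// Reset both result slots to the "no candidate" sentinel before a launch.
static void reset_result_buffer(uint32_t *d_resNonces)
{
	const uint32_t empty[2] = { UINT32_MAX, UINT32_MAX };
	cudaMemcpy(d_resNonces, empty, sizeof(empty), cudaMemcpyHostToDevice);
}

// Read back the (at most two) candidate nonces reported by the search kernel.
static int read_candidates(const uint32_t *d_resNonces, uint32_t out[2])
{
	cudaMemcpy(out, d_resNonces, 2 * sizeof(uint32_t), cudaMemcpyDeviceToHost);
	int found = 0;
	if (out[0] != UINT32_MAX) found++;
	if (out[1] != UINT32_MAX) found++;
	return found;
}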
// CUDA libraries #include <hip/hip_runtime.h> /** Problem size along one side; total number of cells is this squared */ #define NUM 512 // block size #define BLOCK_SIZE 128 /** Double precision */ #define DOUBLE #ifdef DOUBLE #define Real double #define ZERO 0.0 #define ONE 1.0 #define TWO 2.0 #define FOUR 4.0 #define SMALL 1.0e-10; /** Reynolds number */ const Real Re_num = 1000.0; /** SOR relaxation parameter */ const Real omega = 1.7; /** Discretization mixture parameter (gamma) */ const Real mix_param = 0.9; /** Safety factor for time step modification */ const Real tau = 0.5; /** Body forces in x- and y- directions */ const Real gx = 0.0; const Real gy = 0.0; /** Domain size (non-dimensional) */ #define xLength 1.0 #define yLength 1.0 #else #define Real float // replace double functions with float versions #undef fmin #define fmin fminf #undef fmax #define fmax fmaxf #undef fabs #define fabs fabsf #undef sqrt #define sqrt sqrtf #define ZERO 0.0f #define ONE 1.0f #define TWO 2.0f #define FOUR 4.0f #define SMALL 1.0e-10f; /** Reynolds number */ const Real Re_num = 1000.0f; /** SOR relaxation parameter */ const Real omega = 1.7f; /** Discretization mixture parameter (gamma) */ const Real mix_param = 0.9f; /** Safety factor for time step modification */ const Real tau = 0.5f; /** Body forces in x- and y- directions */ const Real gx = 0.0f; const Real gy = 0.0f; /** Domain size (non-dimensional) */ #define xLength 1.0f #define yLength 1.0f #endif /** Mesh sizes */ const Real dx = xLength / NUM; const Real dy = yLength / NUM; /** Max macro (type safe, from GNU) */ //#define MAX(a,b) ({ __typeof__ (a) _a = (a); __typeof__ (b) _b = (b); _a > _b ? _a : _b; }) /** Min macro (type safe) */ //#define MIN(a,b) ({ __typeof__ (a) _a = (a); __typeof__ (b) _b = (b); _a < _b ? 
_a : _b; }) // map two-dimensional indices to one-dimensional memory #define u(I, J) u[((I) * ((NUM) + 2)) + (J)] #define v(I, J) v[((I) * ((NUM) + 2)) + (J)] #define F(I, J) F[((I) * ((NUM) + 2)) + (J)] #define G(I, J) G[((I) * ((NUM) + 2)) + (J)] #define pres_red(I, J) pres_red[((I) * ((NUM_2) + 2)) + (J)] #define pres_black(I, J) pres_black[((I) * ((NUM_2) + 2)) + (J)] /////////////////////////////////////////////////////////////////////////////// __host__ void set_BCs_host (Real* u, Real* v) { int ind; // loop through rows and columns for (ind = 0; ind < NUM + 2; ++ind) { // left boundary u(0, ind) = ZERO; v(0, ind) = -v(1, ind); // right boundary u(NUM, ind) = ZERO; v(NUM + 1, ind) = -v(NUM, ind); // bottom boundary u(ind, 0) = -u(ind, 1); v(ind, 0) = ZERO; // top boundary u(ind, NUM + 1) = TWO - u(ind, NUM); v(ind, NUM) = ZERO; if (ind == NUM) { // left boundary u(0, 0) = ZERO; v(0, 0) = -v(1, 0); u(0, NUM + 1) = ZERO; v(0, NUM + 1) = -v(1, NUM + 1); // right boundary u(NUM, 0) = ZERO; v(NUM + 1, 0) = -v(NUM, 0); u(NUM, NUM + 1) = ZERO; v(NUM + 1, NUM + 1) = -v(NUM, NUM + 1); // bottom boundary u(0, 0) = -u(0, 1); v(0, 0) = ZERO; u(NUM + 1, 0) = -u(NUM + 1, 1); v(NUM + 1, 0) = ZERO; // top boundary u(0, NUM + 1) = TWO - u(0, NUM); v(0, NUM) = ZERO; u(NUM + 1, NUM + 1) = TWO - u(NUM + 1, NUM); v(ind, NUM + 1) = ZERO; } // end if } // end for } // end set_BCs_host /////////////////////////////////////////////////////////////////////////////// __global__ void set_BCs (Real* u, Real* v) { int ind = (blockIdx.x * blockDim.x) + threadIdx.x + 1; // left boundary u(0, ind) = ZERO; v(0, ind) = -v(1, ind); // right boundary u(NUM, ind) = ZERO; v(NUM + 1, ind) = -v(NUM, ind); // bottom boundary u(ind, 0) = -u(ind, 1); v(ind, 0) = ZERO; // top boundary u(ind, NUM + 1) = TWO - u(ind, NUM); v(ind, NUM) = ZERO; if (ind == NUM) { // left boundary u(0, 0) = ZERO; v(0, 0) = -v(1, 0); u(0, NUM + 1) = ZERO; v(0, NUM + 1) = -v(1, NUM + 1); // right boundary u(NUM, 0) = ZERO; v(NUM + 1, 0) = -v(NUM, 0); u(NUM, NUM + 1) = ZERO; v(NUM + 1, NUM + 1) = -v(NUM, NUM + 1); // bottom boundary u(0, 0) = -u(0, 1); v(0, 0) = ZERO; u(NUM + 1, 0) = -u(NUM + 1, 1); v(NUM + 1, 0) = ZERO; // top boundary u(0, NUM + 1) = TWO - u(0, NUM); v(0, NUM) = ZERO; u(NUM + 1, NUM + 1) = TWO - u(NUM + 1, NUM); v(ind, NUM + 1) = ZERO; } // end if } // end set_BCs /////////////////////////////////////////////////////////////////////////////// __global__ void calculate_F (const Real dt, const Real* u, const Real* v, Real* F) { int row = (blockIdx.x * blockDim.x) + threadIdx.x + 1; int col = (blockIdx.y * blockDim.y) + threadIdx.y + 1; if (col == NUM) { // right boundary, F_ij = u_ij // also do left boundary F(0, row) = u(0, row); F(NUM, row) = u(NUM, row); } else { // u velocities Real u_ij = u(col, row); Real u_ip1j = u(col + 1, row); Real u_ijp1 = u(col, row + 1); Real u_im1j = u(col - 1, row); Real u_ijm1 = u(col, row - 1); // v velocities Real v_ij = v(col, row); Real v_ip1j = v(col + 1, row); Real v_ijm1 = v(col, row - 1); Real v_ip1jm1 = v(col + 1, row - 1); // finite differences Real du2dx, duvdy, d2udx2, d2udy2; du2dx = (((u_ij + u_ip1j) * (u_ij + u_ip1j) - (u_im1j + u_ij) * (u_im1j + u_ij)) + mix_param * (fabs(u_ij + u_ip1j) * (u_ij - u_ip1j) - fabs(u_im1j + u_ij) * (u_im1j - u_ij))) / (FOUR * dx); duvdy = ((v_ij + v_ip1j) * (u_ij + u_ijp1) - (v_ijm1 + v_ip1jm1) * (u_ijm1 + u_ij) + mix_param * (fabs(v_ij + v_ip1j) * (u_ij - u_ijp1) - fabs(v_ijm1 + v_ip1jm1) * (u_ijm1 - u_ij))) / (FOUR * dy); d2udx2 = (u_ip1j - (TWO * u_ij) + 
u_im1j) / (dx * dx); d2udy2 = (u_ijp1 - (TWO * u_ij) + u_ijm1) / (dy * dy); F(col, row) = u_ij + dt * (((d2udx2 + d2udy2) / Re_num) - du2dx - duvdy + gx); } // end if } // end calculate_F /////////////////////////////////////////////////////////////////////////////// __global__ void calculate_G (const Real dt, const Real* u, const Real* v, Real* G) { int row = (blockIdx.x * blockDim.x) + threadIdx.x + 1; int col = (blockIdx.y * blockDim.y) + threadIdx.y + 1; if (row == NUM) { // top and bottom boundaries G(col, 0) = v(col, 0); G(col, NUM) = v(col, NUM); } else { // u velocities Real u_ij = u(col, row); Real u_ijp1 = u(col, row + 1); Real u_im1j = u(col - 1, row); Real u_im1jp1 = u(col - 1, row + 1); // v velocities Real v_ij = v(col, row); Real v_ijp1 = v(col, row + 1); Real v_ip1j = v(col + 1, row); Real v_ijm1 = v(col, row - 1); Real v_im1j = v(col - 1, row); // finite differences Real dv2dy, duvdx, d2vdx2, d2vdy2; dv2dy = ((v_ij + v_ijp1) * (v_ij + v_ijp1) - (v_ijm1 + v_ij) * (v_ijm1 + v_ij) + mix_param * (fabs(v_ij + v_ijp1) * (v_ij - v_ijp1) - fabs(v_ijm1 + v_ij) * (v_ijm1 - v_ij))) / (FOUR * dy); duvdx = ((u_ij + u_ijp1) * (v_ij + v_ip1j) - (u_im1j + u_im1jp1) * (v_im1j + v_ij) + mix_param * (fabs(u_ij + u_ijp1) * (v_ij - v_ip1j) - fabs(u_im1j + u_im1jp1) * (v_im1j - v_ij))) / (FOUR * dx); d2vdx2 = (v_ip1j - (TWO * v_ij) + v_im1j) / (dx * dx); d2vdy2 = (v_ijp1 - (TWO * v_ij) + v_ijm1) / (dy * dy); G(col, row) = v_ij + dt * (((d2vdx2 + d2vdy2) / Re_num) - dv2dy - duvdx + gy); } // end if } // end calculate_G /////////////////////////////////////////////////////////////////////////////// __global__ void sum_pressure (const Real* pres_red, const Real* pres_black, Real* pres_sum) { int row = (blockIdx.x * blockDim.x) + threadIdx.x + 1; int col = (blockIdx.y * blockDim.y) + threadIdx.y + 1; // shared memory for block's sum __shared__ Real sum_cache[BLOCK_SIZE]; int NUM_2 = NUM >> 1; Real pres_r = pres_red(col, row); Real pres_b = pres_black(col, row); // add squared pressure sum_cache[threadIdx.x] = (pres_r * pres_r) + (pres_b * pres_b); // synchronize threads in block to ensure all thread values stored __syncthreads(); // add up values for block int i = BLOCK_SIZE >> 1; while (i != 0) { if (threadIdx.x < i) { sum_cache[threadIdx.x] += sum_cache[threadIdx.x + i]; } __syncthreads(); i >>= 1; } // store block's summed values if (threadIdx.x == 0) { pres_sum[blockIdx.y + (gridDim.y * blockIdx.x)] = sum_cache[0]; } } // end sum_pressure /////////////////////////////////////////////////////////////////////////////// __global__ void set_horz_pres_BCs (Real* pres_red, Real* pres_black) { int col = (blockIdx.x * blockDim.x) + threadIdx.x + 1; col = (col * 2) - 1; int NUM_2 = NUM >> 1; // p_i,0 = p_i,1 pres_black(col, 0) = pres_red(col, 1); pres_red(col + 1, 0) = pres_black(col + 1, 1); // p_i,jmax+1 = p_i,jmax pres_red(col, NUM_2 + 1) = pres_black(col, NUM_2); pres_black(col + 1, NUM_2 + 1) = pres_red(col + 1, NUM_2); } // end set_horz_pres_BCs ////////////////////////////////////////////////////////////////////////////// __global__ void set_vert_pres_BCs (Real* pres_red, Real* pres_black) { int row = (blockIdx.x * blockDim.x) + threadIdx.x + 1; int NUM_2 = NUM >> 1; // p_0,j = p_1,j pres_black(0, row) = pres_red(1, row); pres_red(0, row) = pres_black(1, row); // p_imax+1,j = p_imax,j pres_black(NUM + 1, row) = pres_red(NUM, row); pres_red(NUM + 1, row) = pres_black(NUM, row); } // end set_pressure_BCs /////////////////////////////////////////////////////////////////////////////// /** Function to 
update pressure for red cells * * \param[in] dt time-step size * \param[in] F array of discretized x-momentum eqn terms * \param[in] G array of discretized y-momentum eqn terms * \param[in] pres_black pressure values of black cells * \param[inout] pres_red pressure values of red cells */ __global__ void red_kernel (const Real dt, const Real* F, const Real* G, const Real* pres_black, Real* pres_red) { int row = (blockIdx.x * blockDim.x) + threadIdx.x + 1; int col = (blockIdx.y * blockDim.y) + threadIdx.y + 1; int NUM_2 = NUM >> 1; Real p_ij = pres_red(col, row); Real p_im1j = pres_black(col - 1, row); Real p_ip1j = pres_black(col + 1, row); Real p_ijm1 = pres_black(col, row - (col & 1)); Real p_ijp1 = pres_black(col, row + ((col + 1) & 1)); // right-hand side Real rhs = (((F(col, (2 * row) - (col & 1)) - F(col - 1, (2 * row) - (col & 1))) / dx) + ((G(col, (2 * row) - (col & 1)) - G(col, (2 * row) - (col & 1) - 1)) / dy)) / dt; pres_red(col, row) = p_ij * (ONE - omega) + omega * (((p_ip1j + p_im1j) / (dx * dx)) + ((p_ijp1 + p_ijm1) / (dy * dy)) - rhs) / ((TWO / (dx * dx)) + (TWO / (dy * dy))); } // end red_kernel /////////////////////////////////////////////////////////////////////////////// /** Function to update pressure for black cells * * \param[in] dt time-step size * \param[in] F array of discretized x-momentum eqn terms * \param[in] G array of discretized y-momentum eqn terms * \param[in] pres_red pressure values of red cells * \param[inout] pres_black pressure values of black cells */ __global__ void black_kernel (const Real dt, const Real* F, const Real* G, const Real* pres_red, Real* pres_black) { int row = (blockIdx.x * blockDim.x) + threadIdx.x + 1; int col = (blockIdx.y * blockDim.y) + threadIdx.y + 1; int NUM_2 = NUM >> 1; Real p_ij = pres_black(col, row); Real p_im1j = pres_red(col - 1, row); Real p_ip1j = pres_red(col + 1, row); Real p_ijm1 = pres_red(col, row - ((col + 1) & 1)); Real p_ijp1 = pres_red(col, row + (col & 1)); // right-hand side Real rhs = (((F(col, (2 * row) - ((col + 1) & 1)) - F(col - 1, (2 * row) - ((col + 1) & 1))) / dx) + ((G(col, (2 * row) - ((col + 1) & 1)) - G(col, (2 * row) - ((col + 1) & 1) - 1)) / dy)) / dt; pres_black(col, row) = p_ij * (ONE - omega) + omega * (((p_ip1j + p_im1j) / (dx * dx)) + ((p_ijp1 + p_ijm1) / (dy * dy)) - rhs) / ((TWO / (dx * dx)) + (TWO / (dy * dy))); } // end black_kernel /////////////////////////////////////////////////////////////////////////////// __global__ void calc_residual (const Real dt, const Real* F, const Real* G, const Real* pres_red, const Real* pres_black, Real* res_array) { int row = (blockIdx.x * blockDim.x) + threadIdx.x + 1; int col = (blockIdx.y * blockDim.y) + threadIdx.y + 1; int NUM_2 = NUM >> 1; Real p_ij, p_im1j, p_ip1j, p_ijm1, p_ijp1, rhs, res, res2; // red point p_ij = pres_red(col, row); p_im1j = pres_black(col - 1, row); p_ip1j = pres_black(col + 1, row); p_ijm1 = pres_black(col, row - (col & 1)); p_ijp1 = pres_black(col, row + ((col + 1) & 1)); rhs = (((F(col, (2 * row) - (col & 1)) - F(col - 1, (2 * row) - (col & 1))) / dx) + ((G(col, (2 * row) - (col & 1)) - G(col, (2 * row) - (col & 1) - 1)) / dy)) / dt; // calculate residual res = ((p_ip1j - (TWO * p_ij) + p_im1j) / (dx * dx)) + ((p_ijp1 - (TWO * p_ij) + p_ijm1) / (dy * dy)) - rhs; // black point p_ij = pres_black(col, row); p_im1j = pres_red(col - 1, row); p_ip1j = pres_red(col + 1, row); p_ijm1 = pres_red(col, row - ((col + 1) & 1)); p_ijp1 = pres_red(col, row + (col & 1)); // right-hand side rhs = (((F(col, (2 * row) - ((col + 1) & 1)) - 
F(col - 1, (2 * row) - ((col + 1) & 1))) / dx) + ((G(col, (2 * row) - ((col + 1) & 1)) - G(col, (2 * row) - ((col + 1) & 1) - 1)) / dy)) / dt; // calculate residual res2 = ((p_ip1j - (TWO * p_ij) + p_im1j) / (dx * dx)) + ((p_ijp1 - (TWO * p_ij) + p_ijm1) / (dy * dy)) - rhs; // shared memory for block's sum __shared__ Real sum_cache[BLOCK_SIZE]; sum_cache[threadIdx.x] = (res * res) + (res2 * res2); // synchronize threads in block to ensure all residuals stored __syncthreads(); // add up squared residuals for block int i = BLOCK_SIZE >> 1; while (i != 0) { if (threadIdx.x < i) { sum_cache[threadIdx.x] += sum_cache[threadIdx.x + i]; } __syncthreads(); i >>= 1; } // store block's summed residuals if (threadIdx.x == 0) { res_array[blockIdx.y + (gridDim.y * blockIdx.x)] = sum_cache[0]; } } /////////////////////////////////////////////////////////////////////////////// __global__ void calculate_u (const Real dt, const Real* F, const Real* pres_red, const Real* pres_black, Real* u, Real* max_u) { int row = (blockIdx.x * blockDim.x) + threadIdx.x + 1; int col = (blockIdx.y * blockDim.y) + threadIdx.y + 1; // allocate shared memory to store max velocities __shared__ Real max_cache[BLOCK_SIZE]; max_cache[threadIdx.x] = ZERO; int NUM_2 = NUM >> 1; Real new_u = ZERO; if (col != NUM) { Real p_ij, p_ip1j, new_u2; // red point p_ij = pres_red(col, row); p_ip1j = pres_black(col + 1, row); new_u = F(col, (2 * row) - (col & 1)) - (dt * (p_ip1j - p_ij) / dx); u(col, (2 * row) - (col & 1)) = new_u; // black point p_ij = pres_black(col, row); p_ip1j = pres_red(col + 1, row); new_u2 = F(col, (2 * row) - ((col + 1) & 1)) - (dt * (p_ip1j - p_ij) / dx); u(col, (2 * row) - ((col + 1) & 1)) = new_u2; // check for max of these two new_u = fmax(fabs(new_u), fabs(new_u2)); if ((2 * row) == NUM) { // also test for max velocity at vertical boundary new_u = fmax(new_u, fabs( u(col, NUM + 1) )); } } else { // check for maximum velocity in boundary cells also new_u = fmax(fabs( u(NUM, (2 * row)) ), fabs( u(0, (2 * row)) )); new_u = fmax(fabs( u(NUM, (2 * row) - 1) ), new_u); new_u = fmax(fabs( u(0, (2 * row) - 1) ), new_u); new_u = fmax(fabs( u(NUM + 1, (2 * row)) ), new_u); new_u = fmax(fabs( u(NUM + 1, (2 * row) - 1) ), new_u); } // end if // store maximum u for block from each thread max_cache[threadIdx.x] = new_u; // synchronize threads in block to ensure all velocities stored __syncthreads(); // calculate maximum for block int i = BLOCK_SIZE >> 1; while (i != 0) { if (threadIdx.x < i) { max_cache[threadIdx.x] = fmax(max_cache[threadIdx.x], max_cache[threadIdx.x + i]); } __syncthreads(); i >>= 1; } // store block's maximum if (threadIdx.x == 0) { max_u[blockIdx.y + (gridDim.y * blockIdx.x)] = max_cache[0]; } } // end calculate_u /////////////////////////////////////////////////////////////////////////////// __global__ void calculate_v (const Real dt, const Real* G, const Real* pres_red, const Real* pres_black, Real* v, Real* max_v) { int row = (blockIdx.x * blockDim.x) + threadIdx.x + 1; int col = (blockIdx.y * blockDim.y) + threadIdx.y + 1; // allocate shared memory to store maximum velocities __shared__ Real max_cache[BLOCK_SIZE]; max_cache[threadIdx.x] = ZERO; int NUM_2 = NUM >> 1; Real new_v = ZERO; if (row != NUM_2) { Real p_ij, p_ijp1, new_v2; // red pressure point p_ij = pres_red(col, row); p_ijp1 = pres_black(col, row + ((col + 1) & 1)); new_v = G(col, (2 * row) - (col & 1)) - (dt * (p_ijp1 - p_ij) / dy); v(col, (2 * row) - (col & 1)) = new_v; // black pressure point p_ij = pres_black(col, row); p_ijp1 = 
pres_red(col, row + (col & 1)); new_v2 = G(col, (2 * row) - ((col + 1) & 1)) - (dt * (p_ijp1 - p_ij) / dy); v(col, (2 * row) - ((col + 1) & 1)) = new_v2; // check for max of these two new_v = fmax(fabs(new_v), fabs(new_v2)); if (col == NUM) { // also test for max velocity at vertical boundary new_v = fmax(new_v, fabs( v(NUM + 1, (2 * row)) )); } } else { if ((col & 1) == 1) { // black point is on boundary, only calculate red point below it Real p_ij = pres_red(col, row); Real p_ijp1 = pres_black(col, row + ((col + 1) & 1)); new_v = G(col, (2 * row) - (col & 1)) - (dt * (p_ijp1 - p_ij) / dy); v(col, (2 * row) - (col & 1)) = new_v; } else { // red point is on boundary, only calculate black point below it Real p_ij = pres_black(col, row); Real p_ijp1 = pres_red(col, row + (col & 1)); new_v = G(col, (2 * row) - ((col + 1) & 1)) - (dt * (p_ijp1 - p_ij) / dy); v(col, (2 * row) - ((col + 1) & 1)) = new_v; } // get maximum v velocity new_v = fabs(new_v); // check for maximum velocity in boundary cells also new_v = fmax(fabs( v(col, NUM) ), new_v); new_v = fmax(fabs( v(col, 0) ), new_v); new_v = fmax(fabs( v(col, NUM + 1) ), new_v); } // end if // store absolute value of velocity max_cache[threadIdx.x] = new_v; // synchronize threads in block to ensure all velocities stored __syncthreads(); // calculate maximum for block int i = BLOCK_SIZE >> 1; while (i != 0) { if (threadIdx.x < i) { max_cache[threadIdx.x] = fmax(max_cache[threadIdx.x], max_cache[threadIdx.x + i]); } __syncthreads(); i >>= 1; } // store block's summed residuals if (threadIdx.x == 0) { max_v[blockIdx.y + (gridDim.y * blockIdx.x)] = max_cache[0]; } } // end calculate_v /////////////////////////////////////////////////////////////////////////////// int main (int argc, char *argv[]) { // iterations for Red-Black Gauss-Seidel with SOR int iter = 0; const int it_max = 1000000; // SOR iteration tolerance const Real tol = 0.001; // time range const Real time_start = 0.0; const Real time_end = 0.001; //20.0; // initial time step size Real dt = 0.02; int size = (NUM + 2) * (NUM + 2); int size_pres = ((NUM / 2) + 2) * (NUM + 2); // arrays for pressure and velocity Real* F; Real* u; Real* G; Real* v; F = (Real *) calloc (size, sizeof(Real)); u = (Real *) calloc (size, sizeof(Real)); G = (Real *) calloc (size, sizeof(Real)); v = (Real *) calloc (size, sizeof(Real)); for (int i = 0; i < size; ++i) { F[i] = ZERO; u[i] = ZERO; G[i] = ZERO; v[i] = ZERO; } // arrays for pressure Real* pres_red; Real* pres_black; pres_red = (Real *) calloc (size_pres, sizeof(Real)); pres_black = (Real *) calloc (size_pres, sizeof(Real)); for (int i = 0; i < size_pres; ++i) { pres_red[i] = ZERO; pres_black[i] = ZERO; } // print problem size printf("Problem size: %d x %d \n", NUM, NUM); //////////////////////////////////////// // block and grid dimensions // boundary conditions kernel dim3 block_bcs (BLOCK_SIZE, 1); dim3 grid_bcs (NUM / BLOCK_SIZE, 1); // pressure kernel dim3 block_pr (BLOCK_SIZE, 1); dim3 grid_pr (NUM / (2 * BLOCK_SIZE), NUM); // block and grid dimensions for F dim3 block_F (BLOCK_SIZE, 1); dim3 grid_F (NUM / BLOCK_SIZE, NUM); // block and grid dimensions for G dim3 block_G (BLOCK_SIZE, 1); dim3 grid_G (NUM / BLOCK_SIZE, NUM); // horizontal pressure boundary conditions dim3 block_hpbc (BLOCK_SIZE, 1); dim3 grid_hpbc (NUM / (2 * BLOCK_SIZE), 1); // vertical pressure boundary conditions dim3 block_vpbc (BLOCK_SIZE, 1); dim3 grid_vpbc (NUM / (2 * BLOCK_SIZE), 1); /////////////////////////////////////////// // residual variable Real* res; int size_res = 
grid_pr.x * grid_pr.y; res = (Real *) calloc (size_res, sizeof(Real)); // variables to store maximum velocities Real* max_u_arr; Real* max_v_arr; int size_max = grid_pr.x * grid_pr.y; max_u_arr = (Real *) calloc (size_max, sizeof(Real)); max_v_arr = (Real *) calloc (size_max, sizeof(Real)); // pressure sum Real* pres_sum; pres_sum = (Real *) calloc (size_res, sizeof(Real)); // set initial BCs set_BCs_host (u, v); Real max_u = SMALL; Real max_v = SMALL; // get max velocity for initial values (including BCs) #pragma unroll for (int col = 0; col < NUM + 2; ++col) { #pragma unroll for (int row = 1; row < NUM + 2; ++row) { max_u = fmax(max_u, fabs( u(col, row) )); } } #pragma unroll for (int col = 1; col < NUM + 2; ++col) { #pragma unroll for (int row = 0; row < NUM + 2; ++row) { max_v = fmax(max_v, fabs( v(col, row) )); } } //////////////////////////////////////// // allocate and transfer device memory Real* u_d; Real* F_d; Real* v_d; Real* G_d; Real* pres_red_d; Real* pres_black_d; Real* pres_sum_d; Real* res_d; Real* max_u_d; Real* max_v_d; hipMalloc ((void**) &u_d, size * sizeof(Real)); hipMalloc ((void**) &F_d, size * sizeof(Real)); hipMalloc ((void**) &v_d, size * sizeof(Real)); hipMalloc ((void**) &G_d, size * sizeof(Real)); hipMalloc ((void**) &pres_red_d, size_pres * sizeof(Real)); hipMalloc ((void**) &pres_black_d, size_pres * sizeof(Real)); hipMalloc ((void**) &pres_sum_d, size_res * sizeof(Real)); hipMalloc ((void**) &res_d, size_res * sizeof(Real)); hipMalloc ((void**) &max_u_d, size_max * sizeof(Real)); hipMalloc ((void**) &max_v_d, size_max * sizeof(Real)); // copy to device memory hipMemcpy (u_d, u, size * sizeof(Real), hipMemcpyHostToDevice); hipMemcpy (F_d, F, size * sizeof(Real), hipMemcpyHostToDevice); hipMemcpy (v_d, v, size * sizeof(Real), hipMemcpyHostToDevice); hipMemcpy (G_d, G, size * sizeof(Real), hipMemcpyHostToDevice); hipMemcpy (pres_red_d, pres_red, size_pres * sizeof(Real), hipMemcpyHostToDevice); hipMemcpy (pres_black_d, pres_black, size_pres * sizeof(Real), hipMemcpyHostToDevice); //////////////////////////////////////// Real time = time_start; // time-step size based on grid and Reynolds number Real dt_Re = 0.5 * Re_num / ((1.0 / (dx * dx)) + (1.0 / (dy * dy))); // time iteration loop while (time < time_end) { // calculate time step based on stability and CFL dt = fmin((dx / max_u), (dy / max_v)); dt = tau * fmin(dt_Re, dt); if ((time + dt) >= time_end) { dt = time_end - time; } // calculate F and G hipLaunchKernelGGL(calculate_F, dim3(grid_F), dim3(block_F), 0, 0, dt, u_d, v_d, F_d); hipLaunchKernelGGL(calculate_G, dim3(grid_G), dim3(block_G), 0, 0, dt, u_d, v_d, G_d); // get L2 norm of initial pressure hipLaunchKernelGGL(sum_pressure, dim3(grid_pr), dim3(block_pr), 0, 0, pres_red_d, pres_black_d, pres_sum_d); hipMemcpy (pres_sum, pres_sum_d, size_res * sizeof(Real), hipMemcpyDeviceToHost); Real p0_norm = ZERO; #pragma unroll for (int i = 0; i < size_res; ++i) { p0_norm += pres_sum[i]; } p0_norm = sqrt(p0_norm / ((Real)(NUM * NUM))); if (p0_norm < 0.0001) { p0_norm = 1.0; } // ensure all kernels are finished //hipDeviceSynchronize(); Real norm_L2; // calculate new pressure // red-black Gauss-Seidel with SOR iteration loop for (iter = 1; iter <= it_max; ++iter) { // set pressure boundary conditions hipLaunchKernelGGL(set_horz_pres_BCs, dim3(grid_hpbc), dim3(block_hpbc), 0, 0, pres_red_d, pres_black_d); hipLaunchKernelGGL(set_vert_pres_BCs, dim3(grid_vpbc), dim3(block_hpbc), 0, 0, pres_red_d, pres_black_d); // ensure kernel finished //hipDeviceSynchronize(); // 
update red cells hipLaunchKernelGGL(red_kernel, dim3(grid_pr), dim3(block_pr), 0, 0, dt, F_d, G_d, pres_black_d, pres_red_d); // ensure red kernel finished //hipDeviceSynchronize(); // update black cells hipLaunchKernelGGL(black_kernel, dim3(grid_pr), dim3(block_pr), 0, 0, dt, F_d, G_d, pres_red_d, pres_black_d); // ensure red kernel finished //hipDeviceSynchronize(); // calculate residual values hipLaunchKernelGGL(calc_residual, dim3(grid_pr), dim3(block_pr), 0, 0, dt, F_d, G_d, pres_red_d, pres_black_d, res_d); // transfer residual value(s) back to CPU hipMemcpy (res, res_d, size_res * sizeof(Real), hipMemcpyDeviceToHost); norm_L2 = ZERO; #pragma unroll for (int i = 0; i < size_res; ++i) { norm_L2 += res[i]; } // calculate residual norm_L2 = sqrt(norm_L2 / ((Real)(NUM * NUM))) / p0_norm; // if tolerance has been reached, end SOR iterations if (norm_L2 < tol) { break; } } // end for printf("Time = %f, delt = %e, iter = %i, res = %e\n", time + dt, dt, iter, norm_L2); // calculate new velocities and transfer maximums back hipLaunchKernelGGL(calculate_u, dim3(grid_pr), dim3(block_pr), 0, 0, dt, F_d, pres_red_d, pres_black_d, u_d, max_u_d); hipMemcpy (max_u_arr, max_u_d, size_max * sizeof(Real), hipMemcpyDeviceToHost); hipLaunchKernelGGL(calculate_v, dim3(grid_pr), dim3(block_pr), 0, 0, dt, G_d, pres_red_d, pres_black_d, v_d, max_v_d); hipMemcpy (max_v_arr, max_v_d, size_max * sizeof(Real), hipMemcpyDeviceToHost); // get maximum u- and v- velocities max_v = SMALL; max_u = SMALL; #pragma unroll for (int i = 0; i < size_max; ++i) { Real test_u = max_u_arr[i]; max_u = fmax(max_u, test_u); Real test_v = max_v_arr[i]; max_v = fmax(max_v, test_v); } // set velocity boundary conditions hipLaunchKernelGGL(set_BCs, dim3(grid_bcs), dim3(block_bcs), 0, 0, u_d, v_d); hipDeviceSynchronize(); // increase time time += dt; // single time step //break; } // end while // transfer final temperature values back hipMemcpy (u, u_d, size * sizeof(Real), hipMemcpyDeviceToHost); hipMemcpy (v, v_d, size * sizeof(Real), hipMemcpyDeviceToHost); hipMemcpy (pres_red, pres_red_d, size_pres * sizeof(Real), hipMemcpyDeviceToHost); hipMemcpy (pres_black, pres_black_d, size_pres * sizeof(Real), hipMemcpyDeviceToHost); // write data to file FILE * pfile; pfile = fopen("velocity_gpu.dat", "w"); fprintf(pfile, "#x\ty\tu\tv\n"); if (pfile != NULL) { for (int row = 0; row < NUM; ++row) { for (int col = 0; col < NUM; ++col) { Real u_ij = u[(col * NUM) + row]; Real u_im1j; if (col == 0) { u_im1j = 0.0; } else { u_im1j = u[(col - 1) * NUM + row]; } u_ij = (u_ij + u_im1j) / 2.0; Real v_ij = v[(col * NUM) + row]; Real v_ijm1; if (row == 0) { v_ijm1 = 0.0; } else { v_ijm1 = v[(col * NUM) + row - 1]; } v_ij = (v_ij + v_ijm1) / 2.0; fprintf(pfile, "%f\t%f\t%f\t%f\n", ((Real)col + 0.5) * dx, ((Real)row + 0.5) * dy, u_ij, v_ij); } } } fclose(pfile); // free device memory hipFree(u_d); hipFree(v_d); hipFree(F_d); hipFree(G_d); hipFree(pres_red_d); hipFree(pres_black_d); hipFree(max_u_d); hipFree(max_v_d); hipFree(pres_sum_d); hipFree(res_d); free(pres_red); free(pres_black); free(u); free(v); free(F); free(G); free(max_u_arr); free(max_v_arr); free(res); free(pres_sum); return 0; }
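/* --------------------------------------------------------------------------
 * Illustrative sketch only (not part of the original solver): the pressure
 * field is split into two half-sized "red" and "black" arrays of size
 * ((NUM/2) + 2) x (NUM + 2). Inside the kernels a half-grid index (col, row)
 * is mapped to the full-grid row as (2*row) - (col & 1) for red cells and
 * (2*row) - ((col + 1) & 1) for black cells. The helper below inverts that
 * mapping on the host for interior cells (1 <= col, fullRow <= NUM); the
 * struct and function names are hypothetical.
 * -------------------------------------------------------------------------- */
struct RedBlackIndex {
	bool red;     // true -> stored in pres_red, false -> in pres_black
	int  col;     // same column index as the full grid
	int  halfRow; // row index inside the half-sized array
};

static RedBlackIndex map_full_to_half(int col, int fullRow)
{
	RedBlackIndex m;
	// checkerboard colouring: a cell is red when (col + fullRow) is even
	m.red     = ((col + fullRow) % 2) == 0;
	m.col     = col;
	// full-grid rows 1,2 share half-row 1; rows 3,4 share half-row 2; ...
	m.halfRow = (fullRow + 1) / 2;
	return m;
}
// Example: full-grid cell (col=3, row=5) is red and lives at pres_red(3, 3),
// consistent with red_kernel's fullRow = 2*3 - (3 & 1) = 5.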
#define _SIZE_T_DEFINED #include <cuda.h> #include <device_launch_parameters.h> #include <texture_fetch_functions.h> #include "float.h" #include <builtin_types.h> #include <vector_functions.h> #include <math.h> #include "..\Activation\ActivationFunction.cu" #include <stdio.h> extern "C" { __device__ int indexFromXY(int x, int y, int width) { return y * width + x; } __global__ void ConvolutionForwardKernel( ActivationFunctionEnum activationFunction, float *inputPtr, float *filterPtr, float *biasPtr, float *outputPtr, float *outputWeightedPtr, int filterWidth, int filterHeight, int filterDepth, int filterSliceSize, // one layer of filter volume, fW * fH int filterSize, // one filter volume, fW * fH * inputDepth int inputSliceSize, // one layer of input data, e.g. one channel of an RGB image int inputWidth, int outputSize, // size of one resulting output layer = one learned filter, oW * oH (there are filterCount of these) int filtersPerRow, int horStride, int verStride, int thisLayerSize ) { int idx = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; if (idx < thisLayerSize) { int filterIdx = idx / outputSize; int inputTileX = (idx % outputSize) % filtersPerRow; int inputTileY = (idx % outputSize) / filtersPerRow; float result = 0; for (size_t z = 0; z < filterDepth; z++) // Z { int inputIndex = z * inputSliceSize; int y = inputTileY * verStride; for (size_t j = 0; j < filterHeight; j++) // Y { int x = inputTileX * horStride; int filterIndex = filterSize * filterIdx + z * filterSliceSize; for (size_t i = 0; i < filterWidth; i++) // X { result += inputPtr[inputIndex + indexFromXY(x, y, inputWidth)] * // input filterPtr[filterIndex + indexFromXY(i, j, filterWidth)]; // weight ++x; } ++y; } } result += biasPtr[filterIdx]; outputWeightedPtr[idx] = result; outputPtr[idx] = Evaluate(activationFunction, result); } } // computes deltas // launched size(prevDeltaPtr) times, i.e. separately for each delta to be computed __global__ void ConvolutionBackwardKernel( ActivationFunctionEnum inputActFunc, float *filterPtr, float *thisDeltaPtr, float *inputDeltaPtr, float *inputWeightedPtr, int filterCount, int inputSliceSize, // one layer of input data, e.g. 
one channel of an RGB image int inputPaddedSliceSize, // same, but accounting for possible padding int padding, int inputWidth, int inputHeight, int filterWidth, int filterHeight, int filterSliceSize, // one layer of filter volume, fW * fH int filterSize, // one filter volume, fW * fH * inputDepth int outputWidth, int outputHeight, int outputSliceSize, // size of one resulting output layer = one learned filter, oW * oH (there are filterCount of these) int horStride, int verStride, int prevLayerSize ) { int idx = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; if (idx < prevLayerSize) { float delta = 0; for (size_t filterIdx = 0; filterIdx < filterCount; filterIdx++) { int inputDepth = idx / inputSliceSize; // currentZ // shift to correct filter index by current index and size, then shift inside the correct filter to the correct depth int filterDepthShift = filterIdx * filterSize + inputDepth * filterSliceSize; int deltaDepthShift = filterIdx * outputSliceSize; // index in the current slice (ignoring depth), accounting for padding int rowIdx = (idx % inputSliceSize) / inputWidth; int currentIdx = (idx % inputSliceSize) + (2 * padding * padding) + (padding * inputWidth) + padding + (padding * padding * rowIdx); int paddedWidth = padding + inputWidth + padding; int paddedHeight = padding + inputHeight + padding; int currentX = currentIdx % paddedWidth; int currentY = currentIdx / paddedWidth; int filterY = 0; // cycle filter through the whole (virtually padded) image for (int j = 0; j < outputHeight; j++) { // check if the current neuron is in the filter's vertical receptive field if (filterY <= currentY && currentY < filterY + filterHeight) { int filterX = 0; for (int i = 0; i < outputWidth; i++) { // check if the current neuron is in the filter's horizontal receptive field if (filterX <= currentX && currentX < filterX + filterWidth) { // identify the proper filter part (weight) int filterIdx = filterDepthShift + indexFromXY(currentX - filterX, currentY - filterY, filterWidth); // identify the proper output neuron (delta) int deltaIdx = deltaDepthShift + indexFromXY(i, j, outputWidth); delta += filterPtr[filterIdx] * thisDeltaPtr[deltaIdx]; } filterX += horStride; } } filterY += verStride; } } delta *= EvaluateDerivative(inputActFunc, inputWeightedPtr[idx]); inputDeltaPtr[idx] += delta; } } __global__ void ConvolutionSGDUpdateWeightsKernel( float learningRate, float momentum, float *filterPtr, float *biasPtr, float *previousBiasDeltaPtr, float *thisDeltaPtr, float *previousWeightDeltaPtr, float *inputPaddedPtr, int inputPaddedWidth, int inputPaddedSliceSize, // needs to account for padding! 
int filterWidth, int filterSliceSize, // one layer of filter volume, fW * fH int filterSize, int outputWidth, int outputHeight, int outputSliceSize, // size of one resulting output layer = one learned filter, oW * oH (there are filterCount of these) int horStride, int verStride, //float *outputPtr, float L1Lambda, float L2Lambda, int batchSize, int weightCount // == filterSize * filterCount ) { int idx = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; if (idx < weightCount) { // determine the exact weight to be updated (one thread corresponds to exactly one weight) // index of the weight inside the filter: int filterX = (idx % filterSliceSize) % filterWidth; int filterY = (idx % filterSliceSize) / filterWidth; // filterZ: int inputDepth = (idx % filterSize) / filterSliceSize; int outputDepth = idx / filterSize; // index of the current filter int inputDepthShift = inputDepth * inputPaddedSliceSize; int outputDepthShift = outputDepth * outputSliceSize; int filterInputShift = filterX + filterY * inputPaddedWidth; // by how much is the current weight shifted from the upper-left corner of the filter IN THE INPUT IMAGE // apply the filter over the whole image (do convolution again) with this one weight float gradient = 0; float biasGradient = 0; for (size_t j = 0; j < outputHeight; j++) { for (size_t i = 0; i < outputWidth; i++) { gradient += thisDeltaPtr[outputDepthShift + i + j * outputWidth] * inputPaddedPtr[ inputDepthShift + j * verStride * inputPaddedWidth + i * horStride + filterInputShift ]; /*if (idx == 49) { thisDeltaPtr[outputDepthShift + i + j * outputWidth] = -100; inputPaddedPtr[ inputDepthShift + j * verStride * inputPaddedWidth + i * horStride + filterInputShift ] = -100; }*/ // update bias (one bias per filter, so only do it if we are in the first weight of any filter) // it seems to work better without the following condition though it shouldn't be the case if (idx % filterSize == 0) biasGradient += thisDeltaPtr[outputDepthShift + i + j * outputWidth]; } } // UPDATE WEIGHT ----------------------------- // add regularization gradient += L1Lambda * sign(filterPtr[idx]) + L2Lambda * filterPtr[idx]; float dx = -gradient * learningRate / batchSize; // add momentum if (momentum != 0) { dx += momentum * previousWeightDeltaPtr[idx]; previousWeightDeltaPtr[idx] = dx; } filterPtr[idx] += dx; // ----------------------------------------------- // UPDATE BIAS -------------------------------- if (idx % filterSize == 0) { // bias usually doesn't get regularised // biasDelta += L1Lambda * sign(biasPtr[idx / filterSize]) + L2Lambda * biasPtr[idx / filterSize]; float dx = -biasGradient * learningRate / batchSize; if (momentum != 0) { dx += momentum * previousBiasDeltaPtr[idx / filterSize]; previousBiasDeltaPtr[idx / filterSize] = dx; } biasPtr[idx / filterSize] += dx; } // ------------------------------------------- } } __global__ void ConvolutionRMSPropUpdateWeightsKernel( float learningRate, float momentum, float *filterPtr, float *biasPtr, float *previousBiasDeltaPtr, float *thisDeltaPtr, float *previousWeightDeltaPtr, float *inputPaddedPtr, int inputPaddedWidth, int inputPaddedSliceSize, // needs to account for padding! 
int filterWidth, int filterSliceSize, // one layer of filter volume, fW * fH int filterSize, int outputWidth, int outputHeight, int outputSliceSize, // size of one resulting output layer = one learned filter, oW * oH (there are filterCount of these) int horStride, int verStride, //float *outputPtr, float L1Lambda, float L2Lambda, float *meanSquareWeight, float *meanSquareBias, float smoothingFactor, int batchSize, int weightCount // == filterSize * filterCount ) { int idx = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; if (idx < weightCount) { // determine the exact weight to be updated (one thread corresponds to exactly one weight) // index of the weight inside the filter: int filterX = (idx % filterSliceSize) % filterWidth; int filterY = (idx % filterSliceSize) / filterWidth; // filterZ: int inputDepth = (idx % filterSize) / filterSliceSize; int outputDepth = idx / filterSize; // index of the current filter int inputDepthShift = inputDepth * inputPaddedSliceSize; int outputDepthShift = outputDepth * outputSliceSize; int filterInputShift = filterX + filterY * inputPaddedWidth; // by how much is the current weight shifted from the upper-left corner of the filter IN THE INPUT IMAGE // apply the filter over the whole image (do convolution again) with this one weight float gradient = 0; float biasGradient = 0; for (size_t j = 0; j < outputHeight; j++) { for (size_t i = 0; i < outputWidth; i++) { gradient += thisDeltaPtr[outputDepthShift + i + j * outputWidth] * inputPaddedPtr[ inputDepthShift + j * verStride * inputPaddedWidth + i * horStride + filterInputShift ]; // update bias (one bias per filter, so only do it if we are in the first weight of any filter) // it seems to work better without the following condition though it shouldn't be the case if (idx % filterSize == 0) biasGradient += thisDeltaPtr[outputDepthShift + i + j * outputWidth]; } } // UPDATE WEIGHT ----------------------------- // add regularization gradient += L1Lambda * sign(filterPtr[idx]) + L2Lambda * filterPtr[idx]; gradient /= batchSize; // calculate meansquare meanSquareWeight[idx] = smoothingFactor * meanSquareWeight[idx] + (1.0f - smoothingFactor) * gradient * gradient; if (meanSquareWeight[idx] != 0) gradient /= sqrtf(meanSquareWeight[idx]); float dx = -gradient * learningRate; // add momentum if (momentum != 0) { dx += momentum * previousWeightDeltaPtr[idx]; previousWeightDeltaPtr[idx] = dx; } filterPtr[idx] += dx; // ----------------------------------------- // UPDATE BIAS --------------------------- if (idx % filterSize == 0) { // bias usually doesn't get regularised //biasDelta += L1Lambda * sign(biasPtr[idx / filterSize]) + L2Lambda * biasPtr[idx / filterSize]; biasGradient /= batchSize; // calculate meansquare meanSquareBias[idx / filterSize] = smoothingFactor * meanSquareBias[idx / filterSize] + (1.0f - smoothingFactor) * biasGradient * biasGradient; if (meanSquareBias[idx / filterSize] != 0) biasGradient /= sqrtf(meanSquareBias[idx / filterSize]); float dx = -biasGradient * learningRate; if (momentum != 0) { dx += momentum * previousBiasDeltaPtr[idx / filterSize]; previousBiasDeltaPtr[idx / filterSize] = dx; } biasPtr[idx / filterSize] += dx; } // ---------------------------------------- } } __global__ void ConvolutionAdadeltaUpdateWeightsKernel( float *filterPtr, float *biasPtr, float *thisDeltaPtr, float *inputPaddedPtr, int inputPaddedWidth, int inputPaddedSliceSize, // needs to account for padding! 
int filterWidth, int filterSliceSize, // one layer of filter volume, fW * fH int filterSize, int outputWidth, int outputHeight, int outputSliceSize, // size of one resulting output layer = one learned filter, oW * oH (there are filterCount of these) int horStride, int verStride, //float *outputPtr, float L1Lambda, float L2Lambda, float *adaSquares, float *adaDeltas, float *adaBiasSquares, float *adaBiasDeltas, float ro, float epsilon, int batchSize, int weightCount // == filterSize * filterCount ) { int idx = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; if (idx < weightCount) { // determine the exact weight to be updated (one thread corresponds to exactly one weight) // index of the weight inside the filter: int filterX = (idx % filterSliceSize) % filterWidth; int filterY = (idx % filterSliceSize) / filterWidth; // filterZ: int inputDepth = (idx % filterSize) / filterSliceSize; int outputDepth = idx / filterSize; // index of the current filter int inputDepthShift = inputDepth * inputPaddedSliceSize; int outputDepthShift = outputDepth * outputSliceSize; int filterInputShift = filterX + filterY * inputPaddedWidth; // by how much is the current weight shifted from the upper-left corner of the filter IN THE INPUT IMAGE // apply the filter over the whole image (do convolution again) with this one weight float gradient = 0; float biasGradient = 0; for (size_t j = 0; j < outputHeight; j++) { for (size_t i = 0; i < outputWidth; i++) { gradient += thisDeltaPtr[outputDepthShift + i + j * outputWidth] * inputPaddedPtr[ inputDepthShift + j * verStride * inputPaddedWidth + i * horStride + filterInputShift ]; // update bias (one bias per filter, so only do it if we are in the first weight of any filter) // it seems to work better without the following condition though it shouldn't be the case if (idx % filterSize == 0) biasGradient += thisDeltaPtr[outputDepthShift + i + j * outputWidth]; } } // UPDATE WEIGHT ----------------------------- // add regularization gradient += L1Lambda * sign(filterPtr[idx]) + L2Lambda * filterPtr[idx]; gradient /= batchSize; // adadelta: adaSquares[idx] = ro * adaSquares[idx] + (1 - ro) * gradient * gradient; float dx = -sqrtf((adaDeltas[idx] + epsilon) / (adaSquares[idx] + epsilon)) * gradient; adaDeltas[idx] = ro * adaDeltas[idx] + (1 - ro) * dx * dx; filterPtr[idx] += dx; // ----------------------------------------- // UPDATE BIAS --------------------------- if (idx % filterSize == 0) { // bias usually doesn't get regularised //biasGradient += L1Lambda * sign(biasPtr[idx / filterSize]) + L2Lambda * biasPtr[idx / filterSize]; biasGradient /= batchSize; int biasIdx = idx / filterSize; adaBiasSquares[biasIdx] = ro * adaBiasSquares[biasIdx] + (1 - ro) * biasGradient * biasGradient; float dx = -sqrtf((adaBiasDeltas[biasIdx] + epsilon) / (adaBiasSquares[biasIdx] + epsilon)) * biasGradient; adaBiasDeltas[biasIdx] = ro * adaBiasDeltas[biasIdx] + (1 - ro) * dx * dx; biasPtr[biasIdx] += dx; } // ---------------------------------------- } } __global__ void PadImageKernel( float *inputPtr, float *outputPtr, int inputWidth, int pad, int inputSize, // one depth slice / one layer / one color channel int outputSize, int totalInputSize // whole image (all color channels combined) ) { int idx = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; if (idx < totalInputSize) { int depth = idx / 
inputSize; int rowIdx = (idx % inputSize) / inputWidth; int colIdx = (idx % inputSize) % inputWidth; outputPtr[indexFromXY(pad + colIdx, pad + rowIdx, pad + inputWidth + pad) + (depth * outputSize)] = inputPtr[idx]; } } }
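/* --------------------------------------------------------------------------
 * Illustrative sketch only (not from the original file): ConvolutionForwardKernel
 * is launched with one thread per output element and expects its size arguments
 * (filtersPerRow, outputSize, thisLayerSize) to be pre-computed on the host.
 * Assuming inputWidth/inputHeight already include any padding applied by
 * PadImageKernel, those sizes follow the standard convolution arithmetic below.
 * The struct and function names are hypothetical.
 * -------------------------------------------------------------------------- */
struct ConvDims {
	int outputWidth;    // number of filter positions per row ("filtersPerRow")
	int outputHeight;
	int outputSize;     // outputWidth * outputHeight, one slice per filter
	int thisLayerSize;  // outputSize * filterCount, total threads to launch
};

static ConvDims conv_output_dims(int inputWidth, int inputHeight,
                                 int filterWidth, int filterHeight,
                                 int horStride, int verStride,
                                 int filterCount)
{
	ConvDims d;
	d.outputWidth   = (inputWidth  - filterWidth)  / horStride + 1;
	d.outputHeight  = (inputHeight - filterHeight) / verStride + 1;
	d.outputSize    = d.outputWidth * d.outputHeight;
	d.thisLayerSize = d.outputSize * filterCount;
	return d;
}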
template <typename T, typename S> __global__ void ScatterNdUpdate(const size_t unit_size, const size_t index_depth, const size_t updates_size, const S *out_strides, const S *indices, const T *updates, T *input) { int i, j; for (size_t read_index = blockIdx.x * blockDim.x + threadIdx.x; read_index < (updates_size); read_index += blockDim.x * gridDim.x) { size_t write_index = 0; bool out_bound = false; i = read_index / unit_size; j = read_index % unit_size; for (size_t k = 0; k < index_depth; k++) { S indices_i = indices[i * index_depth + k]; out_bound |= indices_i < 0; write_index += indices_i * out_strides[k] * unit_size; } write_index += j; if (!out_bound) { input[write_index] = updates[read_index]; } } } template <typename T, typename S> __global__ void ScatterNdAdd(const size_t unit_size, const size_t index_depth, const size_t updates_size, const S *out_strides, const S *indices, const T *updates, T *input) { int i, j; for (size_t read_index = blockIdx.x * blockDim.x + threadIdx.x; read_index < (updates_size); read_index += blockDim.x * gridDim.x) { size_t write_index = 0; bool out_bound = false; i = read_index / unit_size; j = read_index % unit_size; for (size_t k = 0; k < index_depth; k++) { S indices_i = indices[i * index_depth + k]; out_bound |= indices_i < 0; write_index += indices_i * out_strides[k] * unit_size; } write_index += j; if (!out_bound) { MsAtomicAdd(&input[write_index], updates[read_index]); } } } template <typename T, typename S> __global__ void ScatterNdSub(const size_t unit_size, const size_t index_depth, const size_t updates_size, const S *out_strides, const S *indices, const T *updates, T *input) { int i, j; for (size_t read_index = blockIdx.x * blockDim.x + threadIdx.x; read_index < (updates_size); read_index += blockDim.x * gridDim.x) { size_t write_index = 0; bool out_bound = false; i = read_index / unit_size; j = read_index % unit_size; for (size_t k = 0; k < index_depth; k++) { S indices_i = indices[i * index_depth + k]; out_bound |= indices_i < 0; write_index += indices_i * out_strides[k] * unit_size; } write_index += j; if (!out_bound) { MsAtomicAdd(&input[write_index], -updates[read_index]); } } } template <typename T, typename S> void CalScatterNdFunctor(enum ScatterNdFunctorType func_type, const size_t &unit_size, const size_t &num_units, const size_t &index_depth, const S *out_strides, const S *indices, const T *updates, T *input, cudaStream_t cuda_stream) { const size_t updates_size = unit_size * num_units; switch (func_type) { case SCATTER_ND_FUNC_UPDATE: return ScatterNdUpdate<<<GET_BLOCKS(updates_size), GET_THREADS, 0, cuda_stream>>>( unit_size, index_depth, updates_size, out_strides, indices, updates, input); case SCATTER_ND_FUNC_ADD: return ScatterNdAdd<<<GET_BLOCKS(updates_size), GET_THREADS, 0, cuda_stream>>>( unit_size, index_depth, updates_size, out_strides, indices, updates, input); case SCATTER_ND_FUNC_SUB: return ScatterNdSub<<<GET_BLOCKS(updates_size), GET_THREADS, 0, cuda_stream>>>( unit_size, index_depth, updates_size, out_strides, indices, updates, input); default: break; } } template void CalScatterNdFunctor<double, int64_t>(enum ScatterNdFunctorType func_type, const size_t &unit_size, const size_t &num_units, const size_t &index_depth, const int64_t *out_strides, const int64_t *indices, const double *updates, double *input, cudaStream_t cuda_stream); template void CalScatterNdFunctor<double, int32_t>(enum ScatterNdFunctorType func_type, const size_t &unit_size, const size_t &num_units, const size_t &index_depth, const int32_t 
*out_strides, const int32_t *indices, const double *updates, double *input, cudaStream_t cuda_stream); template void CalScatterNdFunctor<float, int64_t>(enum ScatterNdFunctorType func_type, const size_t &unit_size, const size_t &num_units, const size_t &index_depth, const int64_t *out_strides, const int64_t *indices, const float *updates, float *input, cudaStream_t cuda_stream); template void CalScatterNdFunctor<float, int32_t>(enum ScatterNdFunctorType func_type, const size_t &unit_size, const size_t &num_units, const size_t &index_depth, const int32_t *out_strides, const int32_t *indices, const float *updates, float *input, cudaStream_t cuda_stream); template void CalScatterNdFunctor<half, int64_t>(enum ScatterNdFunctorType func_type, const size_t &unit_size, const size_t &num_units, const size_t &index_depth, const int64_t *out_strides, const int64_t *indices, const half *updates, half *input, cudaStream_t cuda_stream); template void CalScatterNdFunctor<half, int32_t>(enum ScatterNdFunctorType func_type, const size_t &unit_size, const size_t &num_units, const size_t &index_depth, const int32_t *out_strides, const int32_t *indices, const half *updates, half *input, cudaStream_t cuda_stream); template void CalScatterNdFunctor<int32_t, int64_t>(enum ScatterNdFunctorType func_type, const size_t &unit_size, const size_t &num_units, const size_t &index_depth, const int64_t *out_strides, const int64_t *indices, const int32_t *updates, int32_t *input, cudaStream_t cuda_stream); template void CalScatterNdFunctor<int32_t, int32_t>(enum ScatterNdFunctorType func_type, const size_t &unit_size, const size_t &num_units, const size_t &index_depth, const int32_t *out_strides, const int32_t *indices, const int32_t *updates, int32_t *input, cudaStream_t cuda_stream); template void CalScatterNdFunctor<int16_t, int64_t>(enum ScatterNdFunctorType func_type, const size_t &unit_size, const size_t &num_units, const size_t &index_depth, const int64_t *out_strides, const int64_t *indices, const int16_t *updates, int16_t *input, cudaStream_t cuda_stream); template void CalScatterNdFunctor<int16_t, int32_t>(enum ScatterNdFunctorType func_type, const size_t &unit_size, const size_t &num_units, const size_t &index_depth, const int32_t *out_strides, const int32_t *indices, const int16_t *updates, int16_t *input, cudaStream_t cuda_stream); template void CalScatterNdFunctor<uint8_t, int64_t>(enum ScatterNdFunctorType func_type, const size_t &unit_size, const size_t &num_units, const size_t &index_depth, const int64_t *out_strides, const int64_t *indices, const uint8_t *updates, uint8_t *input, cudaStream_t cuda_stream); template void CalScatterNdFunctor<uint8_t, int32_t>(enum ScatterNdFunctorType func_type, const size_t &unit_size, const size_t &num_units, const size_t &index_depth, const int32_t *out_strides, const int32_t *indices, const uint8_t *updates, uint8_t *input, cudaStream_t cuda_stream); template void CalScatterNdFunctor<int8_t, int64_t>(enum ScatterNdFunctorType func_type, const size_t &unit_size, const size_t &num_units, const size_t &index_depth, const int64_t *out_strides, const int64_t *indices, const int8_t *updates, int8_t *input, cudaStream_t cuda_stream); template void CalScatterNdFunctor<int8_t, int32_t>(enum ScatterNdFunctorType func_type, const size_t &unit_size, const size_t &num_units, const size_t &index_depth, const int32_t *out_strides, const int32_t *indices, const int8_t *updates, int8_t *input, cudaStream_t cuda_stream); template void CalScatterNdFunctor<bool, int64_t>(enum 
ScatterNdFunctorType func_type, const size_t &unit_size, const size_t &num_units, const size_t &index_depth, const int64_t *out_strides, const int64_t *indices, const bool *updates, bool *input, cudaStream_t cuda_stream); template void CalScatterNdFunctor<bool, int32_t>(enum ScatterNdFunctorType func_type, const size_t &unit_size, const size_t &num_units, const size_t &index_depth, const int32_t *out_strides, const int32_t *indices, const bool *updates, bool *input, cudaStream_t cuda_stream);
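// --------------------------------------------------------------------------------------------------------
// Illustrative usage sketch (not part of the library API): a minimal host-side driver showing how the
// CalScatterNdFunctor dispatcher above could be invoked. The helper name ExampleScatterNdAdd, the toy
// tensor shapes, and the use of the default stream are assumptions made for illustration only; it relies
// on the declarations already in scope in this translation unit (ScatterNdFunctorType, SCATTER_ND_FUNC_ADD).
// Indexing convention used by the kernels, for update row i and element j within a slice:
//   write_index = sum_k indices[i*index_depth + k] * out_strides[k] * unit_size + j
// so for a [4,3] float tensor indexed by row (index_depth = 1), unit_size = 3 and out_strides = {1}.
#include <cstdint>
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

void ExampleScatterNdAdd() {
  const size_t unit_size = 3, num_units = 2, index_depth = 1;
  std::vector<float>   h_input(4 * 3, 0.0f);              // destination tensor of shape [4,3]
  std::vector<float>   h_updates = {1, 1, 1, 2, 2, 2};    // two update rows of length unit_size
  std::vector<int32_t> h_indices = {1, 3};                // scatter into rows 1 and 3
  std::vector<int32_t> h_strides = {1};                   // strides over the indexed dims, in slices

  float *d_input = nullptr, *d_updates = nullptr;
  int32_t *d_indices = nullptr, *d_strides = nullptr;
  cudaMalloc(&d_input,   h_input.size()   * sizeof(float));
  cudaMalloc(&d_updates, h_updates.size() * sizeof(float));
  cudaMalloc(&d_indices, h_indices.size() * sizeof(int32_t));
  cudaMalloc(&d_strides, h_strides.size() * sizeof(int32_t));
  cudaMemcpy(d_input,   h_input.data(),   h_input.size()   * sizeof(float),   cudaMemcpyHostToDevice);
  cudaMemcpy(d_updates, h_updates.data(), h_updates.size() * sizeof(float),   cudaMemcpyHostToDevice);
  cudaMemcpy(d_indices, h_indices.data(), h_indices.size() * sizeof(int32_t), cudaMemcpyHostToDevice);
  cudaMemcpy(d_strides, h_strides.data(), h_strides.size() * sizeof(int32_t), cudaMemcpyHostToDevice);

  // input[1,:] += {1,1,1} and input[3,:] += {2,2,2}, dispatched on the default stream.
  CalScatterNdFunctor<float, int32_t>(SCATTER_ND_FUNC_ADD, unit_size, num_units, index_depth,
                                      d_strides, d_indices, d_updates, d_input, nullptr);

  cudaMemcpy(h_input.data(), d_input, h_input.size() * sizeof(float), cudaMemcpyDeviceToHost);
  for (size_t r = 0; r < 4; ++r)
    printf("row %zu: %g %g %g\n", r, h_input[r * 3 + 0], h_input[r * 3 + 1], h_input[r * 3 + 2]);
  cudaFree(d_input); cudaFree(d_updates); cudaFree(d_indices); cudaFree(d_strides);
}
// --------------------------------------------------------------------------------------------------------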
namespace AggMIS { namespace MergeSplitCPU { MergeSplitConditionerCPU::MergeSplitConditionerCPU(AggMIS::Types::Graph_h &graph, AggMIS::Types::IntVector_h &aggregation) { this->graph = &graph; this->aggregation.assign(aggregation.begin(), aggregation.end()); inducedGraph = GraphHelpers::GetInducedGraph(graph, aggregation); // Getting the sizes of each aggregate: AggMIS::Types::IntVector_h* ps = Aggregation::GetPartSizes(aggregation); partSizes.swap(*ps); delete ps; // Filling in FillAggAdjacency(); FillAggMap(); verbose = false; minSize = 20; maxSize = 30; outsizedParts = 0; merges = 0; mergeSplits = 0; splits = 0; } void MergeSplitConditionerCPU::SetSizeBounds(int min, int max) { minSize = min; maxSize = max; } void MergeSplitConditionerCPU::SetVerbose(bool v) { verbose = v; } void MergeSplitConditionerCPU::SetNodeWeights(AggMIS::Types::IntVector_h &input) { nodeWeights.swap(input); AggMIS::Types::IntVector_h *ws = Aggregation::GetPartSizes(aggregation, nodeWeights); weightedSizes.swap(*ws); ws->clear(); delete ws; } AggMIS::Types::IntVector_h* MergeSplitConditionerCPU::GetAggregation() { return &aggregation; } AggMIS::Types::IntVector_h* MergeSplitConditionerCPU::GetNodeWeights() { return &nodeWeights; } void MergeSplitConditionerCPU::CycleMerges(bool force) { // int count = 0; // while (MarkMerges(force)) // { // MakeMerges(false); // count++; // } // if (verbose) // printf("Finished cycling merges after %d cycles.\n", count); MakeMergesDirect(force); } void MergeSplitConditionerCPU::CycleSplits(bool force) { int count = 0; // while (MarkSplits(force)) // { // MakeSplits(); // count++; // } int splitsMade = 1; while (splitsMade > 0) { int startingSplits = splits; MakeSplitsDirect(force); splitsMade = splits - startingSplits; count++; } if (verbose) printf("Finished cycling splits after %d cycles.\n", count); } void MergeSplitConditionerCPU::CycleMergeSplits(float minImprove, int desiredSize) { // Start with an initial cycle MakeMergeSplits(desiredSize); // Choosing which sizes to use: AggMIS::Types::IntVector_h *sizes = &partSizes; if (nodeWeights.size() > 0) sizes = &weightedSizes; // Check to see how much improvement was made int after = thrust::count_if(sizes->begin(), sizes->end(), Functors::isOutSized(minSize, maxSize)); float improvement = (float)(outsizedParts - after) / outsizedParts; outsizedParts = after; // While good progress is being made continue cycling while (improvement > minImprove) { // Perform Cycle and check improvement MakeMergeSplits(desiredSize); after = thrust::count_if(sizes->begin(), sizes->end(), Functors::isOutSized(minSize, maxSize)); improvement = (float)(outsizedParts - after) / outsizedParts; outsizedParts = after; } } bool MergeSplitConditionerCPU::Condition(int desiredSize, bool respectUpper, float tolerance, float minImprove, int maxCycles) { if (verbose) PrintProgress(&std::cout, "Starting conditioning.", true, true, true); // Start by making any optimal merges and splits if (verbose) printf("Starting to CycleMerges\n"); CycleMerges(false); if (verbose) printf("Starting to CycleSplits\n"); CycleSplits(false); if (verbose) printf("Starting to CycleMergeSplits\n"); // Cycle MergeSplits too, to make sure outsizedParts has a value CycleMergeSplits(minImprove, desiredSize); // Find improvement ratio from initial cycle float currentRatio = (float)outsizedParts / partSizes.size(); if (verbose) printf("Initial outsized ratio is: %d / %d = %f\n", outsizedParts, partSizes.size(), currentRatio); // Starting main cycle phase int counter = 1; bool highCycle = 
false; while (currentRatio > tolerance && counter++ < maxCycles) { if (highCycle) CycleMerges(true); else CycleSplits(true); CycleMergeSplits(minImprove, desiredSize); // Checking the current improvement ratio if ((highCycle && !respectUpper) || (!highCycle && respectUpper)) currentRatio = (float)outsizedParts / partSizes.size(); // Switch cycle type highCycle = !highCycle; if (verbose) { std::stringstream ss; ss << "After condition cycle: " << counter++; PrintProgress(&std::cout, ss.str(), true, true, true); } } // Cleaning up if (respectUpper) { CycleSplits(true); CycleMerges(false); } else CycleMerges(true); // Checking if we match criteria given: int undersized = thrust::count_if(partSizes.begin(), partSizes.end(), Functors::lessThan(minSize)); int oversized = thrust::count_if(partSizes.begin(), partSizes.end(), Functors::greaterThan(maxSize)); if (verbose) PrintProgress(&std::cout, "After conditioning completed.", true, true, true); // Checking if the size constraints are met for the return if (respectUpper) return (oversized == 0 && (float)outsizedParts / partSizes.size() < tolerance); else return (undersized == 0 && (float)outsizedParts / partSizes.size() < tolerance); } void MergeSplitConditionerCPU::PrintProgress(std::ostream* output, std::string note, bool graphStat, bool progressStat, bool sizeStat) { *output << "\n------------------- Progress Check ------------------\n"; *output << "Note: " << note.c_str() << "\n"; if (graphStat) PrintGraphStats(output, false); if (progressStat) PrintProgressStats(output, false); if (sizeStat) PrintSizeStats(output, false); *output << "-----------------------------------------------------\n\n"; } void MergeSplitConditionerCPU::PrintSizeStats(std::ostream* output, bool makeHeader) { if (makeHeader) *output << "\n--------------------- Size Check --------------------\n"; // Choosing which sizes to use: AggMIS::Types::IntVector_h *sizes = &partSizes; if (nodeWeights.size() > 0) sizes = &weightedSizes; int undersized = thrust::count_if(sizes->begin(), sizes->end(), Functors::lessThan(minSize)); int oversized = thrust::count_if(sizes->begin(), sizes->end(), Functors::greaterThan(maxSize)); int largest = thrust::reduce(sizes->begin(), sizes->end(), 0, thrust::maximum<int>()); int smallest = thrust::reduce(sizes->begin(), sizes->end(), INT_MAX, thrust::minimum<int>()); *output << "Aggregate size statistics:"; *output << "\n\tUndersized(<" << minSize << "): " << undersized << " / " << (partSizes.size()) << " Total"; *output << "\n\tOversized(>" << maxSize << "): " << oversized << " / " << (partSizes.size()) << " Total"; *output << "\n\tSmallest: " << smallest; *output << " Largest: " << largest << "\n"; if (nodeWeights.size() > 0) { largest = thrust::reduce(partSizes.begin(), partSizes.end(), 0, thrust::maximum<int>()); smallest = thrust::reduce(partSizes.begin(), partSizes.end(), INT_MAX, thrust::minimum<int>()); *output << "\n\tUnweighted: Smallest: " << smallest; *output << " Largest: " << largest << "\n"; } if (makeHeader) *output << "-----------------------------------------------------\n\n"; } void MergeSplitConditionerCPU::PrintProgressStats(std::ostream* output, bool makeHeader) { if (makeHeader) *output << "\n------------------- Progress Check ------------------\n"; *output << "Processing done:"; *output << "\n\tMerges: " << merges; *output << "\tSplits: " << splits; *output << "\tMerge-Splits: " << mergeSplits << "\n"; if (makeHeader) *output << "-----------------------------------------------------\n\n"; } void 
MergeSplitConditionerCPU::PrintGraphStats(std::ostream* output, bool makeHeader) { if (makeHeader) *output << "\n----------------- Graph Information -----------------\n"; int totalWeight = thrust::reduce(nodeWeights.begin(), nodeWeights.end()); int minWeight = thrust::reduce(nodeWeights.begin(), nodeWeights.end(), INT_MAX, thrust::minimum<int>()); int maxWeight = thrust::reduce(nodeWeights.begin(), nodeWeights.end(), 0, thrust::maximum<int>()); AggMIS::Types::IntVector_h *valences = GraphHelpers::GetValences(*graph); int minValence = thrust::reduce(valences->begin(), valences->end(), INT_MAX, thrust::minimum<int>()); int maxValence = thrust::reduce(valences->begin(), valences->end(), 0, thrust::maximum<int>()); valences->clear(); delete valences; *output << "Graph Information:"; *output << "\n\tNodes: " << graph->Size(); if (nodeWeights.size() > 0) *output << " Graph is weighted"; else *output << " Graph is unweighted"; *output << "\n\tMin. Valence: " << minValence; *output << " Max. Valence: " << maxValence; *output << " Avg. Valence: " << ((float)graph->adjacency->size() / graph->Size()); if (nodeWeights.size() > 0) { *output << "\n\tTotal Weight: " << totalWeight; *output << " Avg. Weight: " << ((float)totalWeight / graph->Size()); *output << " Min. Weight: " << minWeight; *output << " Max. Weight: " << maxWeight; } *output << "\n"; if (makeHeader) *output << "-----------------------------------------------------\n\n"; } void MergeSplitConditionerCPU::InteractiveConsole(std::string message) { // Start off by printing overall status info and message PrintProgress(&std::cout, message, true, true, true); // Setting needed variables to defaults float minImprove = .1; int desiredSize = (minSize + maxSize) / 2; float tolerance = .1; int maxCycles = 10; bool cycling = true; bool respectUpper = true; // Starting the main prompt: char operation; printf("\nIC:"); std::cin >> operation; while (operation != 'd') { if (operation == 'o' || operation == 'f') { bool force = operation == 'f'; std::cin >> operation; if (operation == 'm') { // if (cycling) // CycleMerges(force); // else { // MarkMerges(force); // MakeMerges(false); // } MakeMergesDirect(force); std::string msg = force ? "After forced merges" : "After optimal merges"; PrintProgress(&std::cout, msg, false, true, true); } if (operation == 's') { // if (cycling) // CycleSplits(force); // else { // MarkSplits(force); // MakeSplits(); // } MakeSplitsDirect(force); std::string msg = force ? "After forced splits" : "After optimal splits"; PrintProgress(&std::cout, msg, false, true, true); } if (operation == 'g') { // if (cycling) // CycleMergeSplits(minImprove, desiredSize); // else // MakeMergeSplits(desiredSize); MakeMergeSplits(desiredSize); PrintProgress(&std::cout, "After merge-splits", false, true, true); } } else if (operation == 's') { // Printing the current values of the variables std::string cyclingFlag = cycling ? "True" : "False"; std::string respectUpperFlag = respectUpper ? 
"True" : "False"; std::cout << "\nCurrent values of variables:"; std::cout << "\n\tminSize: " << minSize; std::cout << " maxSize: " << maxSize; std::cout << " desiredSize: " << desiredSize; std::cout << " maxCycles: " << maxCycles; std::cout << "\n\tminImprove: " << minImprove; std::cout << " tolerance: " << tolerance; std::cout << " cycling: " << cyclingFlag; std::cout << " respectUpper: " << respectUpperFlag; std::cout << "\n\nEnter new values in same order\nIC:"; // Grabbing the new values std::cin >> minSize; std::cin >> maxSize; std::cin >> desiredSize; std::cin >> maxCycles; std::cin >> minImprove; std::cin >> tolerance; std::cin >> cycling; std::cin >> respectUpper; // Confirming the entry cyclingFlag = cycling ? "True" : "False"; respectUpperFlag = respectUpper ? "True" : "False"; std::cout << "\nNew values of variables:"; std::cout << "\n\tminSize: " << minSize; std::cout << " maxSize: " << maxSize; std::cout << " desiredSize: " << desiredSize; std::cout << " maxCycles: " << maxCycles; std::cout << "\n\tminImprove: " << minImprove; std::cout << " tolerance: " << tolerance; std::cout << " cycling: " << cyclingFlag; std::cout << " respectUpper: " << respectUpperFlag << "\n\n"; } else if (operation == 'c') { Condition(desiredSize, respectUpper, tolerance, minImprove, maxCycles); PrintProgress(&std::cout, "After conditioning", false, true, true); } else if (operation == 'v') { bool valid = Aggregation::IsValidAggregation(*graph, aggregation, false); if (valid) printf("Aggregation is valid\n"); else printf("Aggregation is not valid!\n"); } else if (operation == 'l') { bool v; std::cin >> v; SetVerbose(v); printf("Set verbose to %s\n", v ? "True" : "False"); } // Printing prompt for another go printf("IC:"); std::cin >> operation; } } bool MergeSplitConditionerCPU::MarkMerges(bool force) { bool marked = false; // Initializing mergesToMake array mergesToMake.assign(inducedGraph->Size(), -1); // Get the appropriate sizes AggMIS::Types::IntVector_h &sizes = nodeWeights.size() > 0 ? weightedSizes : partSizes; // Figure out how large aggregates should be int desiredSize = (minSize + maxSize) / 2; // For every aggregate see if it should merge for (int aggId = 0; aggId < inducedGraph->Size(); aggId++) { // Getting size of current aggregate int currentSize = sizes[aggId]; // Tracking the best seen merge int bestMerge = -1; int smallestDifference = INT_MAX; // If aggregate too small check for merges: if (currentSize < minSize && mergesToMake[aggId] == -1) { // Look at neighboring aggregates for (int* nIt = inducedGraph->nStart(aggId); nIt != inducedGraph->nEnd(aggId); nIt++) { int neighborAgg = *nIt; // Only handle neighbors not already merging if (mergesToMake[neighborAgg] == -1) { int neighborSize = sizes[neighborAgg]; int mergedSize = currentSize + neighborSize; int difference = mergedSize > desiredSize ? mergedSize - desiredSize : desiredSize - mergedSize; if (mergedSize <= maxSize || force) { if (difference < smallestDifference) { smallestDifference = difference; bestMerge = neighborAgg; } } } } } if (bestMerge != -1) { mergesToMake[aggId] = bestMerge; mergesToMake[bestMerge] = aggId; marked = true; } } return marked; } bool MergeSplitConditionerCPU::MarkSplits(bool force) { // Initialize // Get the appropriate sizes AggMIS::Types::IntVector_h &sizes = nodeWeights.size() > 0 ? 
weightedSizes : partSizes; return false; } void MergeSplitConditionerCPU::MarkMergeSplits(int desiredSize) { } void MergeSplitConditionerCPU::MakeSplits() { } void MergeSplitConditionerCPU::MakeMerges(bool markSplits) { // Figuring the offsets to use int offset = 0; mergeOffsets.resize(mergesToMake.size()); for (int i = 0; i < mergesToMake.size(); i++) mergeOffsets[i] = mergesToMake[i] != -1 && mergesToMake[i] < i ? ++offset : offset; // Making the merges for (int i = 0; i < aggregation.size(); i++) { int aggId = aggregation[i]; int mergeTo = mergesToMake[aggId]; if (mergeTo != -1 && mergeTo < aggId) aggregation[i] = mergeTo - mergeOffsets[mergeTo]; else aggregation[i] = aggId - mergeOffsets[aggId]; } // Refiguring stuff merges += mergeOffsets.back(); delete inducedGraph; inducedGraph = GraphHelpers::GetInducedGraph(*graph, aggregation); AggMIS::Types::IntVector_h *ps = Aggregation::GetPartSizes(aggregation); partSizes.swap(*ps); ps->clear(); delete ps; if (nodeWeights.size() > 0) { AggMIS::Types::IntVector_h *ws = Aggregation::GetPartSizes(aggregation, nodeWeights); weightedSizes.swap(*ws); ws->clear(); delete ws; } } void MergeSplitConditionerCPU::MakeMergesDirect(bool force) { // Get the appropriate sizes AggMIS::Types::IntVector_h &sizes = nodeWeights.size() > 0 ? weightedSizes : partSizes; // Figure out how large aggregates should be int desiredSize = (minSize + maxSize) / 2; // For every aggregate see if it should merge int aggId = 0; while (aggId < aggAdjacency.size()) { // Getting size of current aggregate int currentSize = sizes[aggId]; // Tracking the best seen merge int bestMerge = -1; int smallestDifference = INT_MAX; // If aggregate too small check for merges: while (currentSize < minSize) { // Look at neighboring aggregates for (int nIt = 0; nIt < aggAdjacency[aggId].size(); nIt++) { int neighborAgg = aggAdjacency[aggId][nIt]; int neighborSize = sizes[neighborAgg]; int mergedSize = currentSize + neighborSize; int difference = mergedSize > desiredSize ? 
mergedSize - desiredSize : desiredSize - mergedSize; if (mergedSize <= maxSize || force) { if (difference < smallestDifference) { smallestDifference = difference; bestMerge = neighborAgg; } } } if (bestMerge != -1) { if (verbose) { printf("Aggregate %d of size %d found neighbor %d of size %d to merge with.\n", aggId, currentSize, bestMerge, sizes[bestMerge]); } aggId = MergeAggregates(aggId, bestMerge); if (verbose) { printf("After merge Aggregate %d has size %d\n", aggId, sizes[aggId]); } merges++; // Resetting to look for other merges currentSize = sizes[aggId]; bestMerge = -1; smallestDifference = INT_MAX; } else { if (verbose) { printf("No merges found for aggregate %d of size %d\n", aggId, currentSize); } break; } } aggId++; } } int MergeSplitConditionerCPU::MergeAggregates(int aggA, int aggB) { return MergeAggregates(aggA, aggB, true); } int MergeSplitConditionerCPU::MergeAggregates(int aggA, int aggB, bool fillSpot) { // Make sure aggA has the lower index if (aggA > aggB) { int swapper = aggB; aggB = aggA; aggA = swapper; } // Mark nodes in aggB as in aggA for (int nIt = 0; nIt < aggMap[aggB].size(); nIt++) aggregation[aggMap[aggB][nIt]] = aggA; // Add nodes in aggB to aggA's node list aggMap[aggA].insert(aggMap[aggA].end(), aggMap[aggB].begin(), aggMap[aggB].end()); sort(aggMap[aggA].begin(), aggMap[aggA].end()); // Clearing out aggB's node list aggMap[aggB].clear(); // Removing edges to aggB and replastd::cing with edges to aggA for (int nIt = 0; nIt < aggAdjacency[aggB].size(); nIt++) { int neighborAgg = aggAdjacency[aggB][nIt]; // If the neighbor of aggB is also a neighbor of aggA // or is aggA just remove reference to aggB. if (binary_search(aggAdjacency[neighborAgg].begin(), aggAdjacency[neighborAgg].end(), aggA) || neighborAgg == aggA) { remove(aggAdjacency[neighborAgg].begin(), aggAdjacency[neighborAgg].end(), aggB); aggAdjacency[neighborAgg].pop_back(); } // Otherwise remove the reference to aggB and add one // to aggA else { remove(aggAdjacency[neighborAgg].begin(), aggAdjacency[neighborAgg].end(), aggB); aggAdjacency[neighborAgg].back() = aggA; sort(aggAdjacency[neighborAgg].begin(), aggAdjacency[neighborAgg].end()); } } // Setting new size of aggA partSizes[aggA] += partSizes[aggB]; if (nodeWeights.size() > 0) weightedSizes[aggA] += weightedSizes[aggB]; // Getting the union of adjacency for merged aggregate std::vector<int> temp(aggAdjacency[aggA].size() + aggAdjacency[aggB].size()); remove(aggAdjacency[aggB].begin(), aggAdjacency[aggB].end(), aggA); std::vector<int>::iterator newEnd; newEnd = set_union(aggAdjacency[aggA].begin(), aggAdjacency[aggA].end(), aggAdjacency[aggB].begin(), aggAdjacency[aggB].end() - 1, temp.begin()); temp.resize(newEnd - temp.begin()); aggAdjacency[aggA].swap(temp); temp.clear(); aggAdjacency[aggB].clear(); if (fillSpot) { // Finding an aggregate to shift into the empty spot if (aggB == aggMap.size() - 1) { aggMap.pop_back(); aggAdjacency.pop_back(); } else { // Move the last aggregate to fill int aggToMove = aggMap.size() - 1; // Swap out the node list aggMap[aggB].swap(aggMap[aggToMove]); aggMap.pop_back(); // Mark nodes in aggregation for (int nIt = 0; nIt < aggMap[aggB].size(); nIt++) aggregation[aggMap[aggB][nIt]] = aggB; // Swap out the adjacency list aggAdjacency[aggB].swap(aggAdjacency[aggToMove]); aggAdjacency.pop_back(); // Fix neighbor's adjacency lists for (int nIt = 0; nIt < aggAdjacency[aggB].size(); nIt++) { // The old Id has to be last in the list int neighborAgg = aggAdjacency[aggB][nIt]; aggAdjacency[neighborAgg].back() = 
aggB; sort(aggAdjacency[neighborAgg].begin(), aggAdjacency[neighborAgg].end()); } partSizes[aggB] = partSizes[aggToMove]; if (nodeWeights.size() > 0) weightedSizes[aggB] = weightedSizes[aggToMove]; } // Resize the sizes arrays partSizes.pop_back(); if (nodeWeights.size() > 0) weightedSizes.pop_back(); } ValidateArraySizes("At end of MergeAggregates"); return aggA; } void MergeSplitConditionerCPU::MakeSplitsDirect(bool force) { if (verbose) { printf("Beginning MakeSplitsDirect\n"); } // Get the appropriate sizes AggMIS::Types::IntVector_h &sizes = nodeWeights.size() > 0 ? weightedSizes : partSizes; // For every aggregate see if it should split int aggId = 0; while (aggId < aggAdjacency.size()) { // Getting size of current aggregate int currentSize = sizes[aggId]; if (verbose) { printf("Checking if aggregate %d of size %d should split\n", aggId, currentSize); } // If aggregate too big split if (currentSize > maxSize && (currentSize > minSize * 2 || force)) { if (verbose) { printf("Aggregate %d of size %d is being split.\n", aggId, sizes[aggId]); } // Creating empty entry for new aggregate partSizes.resize(partSizes.size() + 1, 0); if (nodeWeights.size() > 0) weightedSizes.resize(weightedSizes.size() + 1, 0); aggMap.resize(aggMap.size() + 1); aggAdjacency.resize(aggAdjacency.size() + 1); SplitAggregate(aggId, aggMap.size() - 1); splits++; if (verbose) { printf("Split into aggregate %d of size %d and %d of size %d\n", aggId, sizes[aggId], aggMap.size() - 1, sizes.back()); } } aggId++; } } void MergeSplitConditionerCPU::SplitAggregate(int agg, int newAgg) { if (verbose) { printf("SplitAggregate called to split aggregate %d into %d and %d\n", agg, agg, newAgg); std::stringstream s1; s1 << "Node list of aggregate " << agg; AggMIS::Types::Display::Print(aggMap[agg], s1.str()); } if (agg == newAgg) { printf("Problem! 
SplitAggregate called with agg=%d and newAgg=%d\n", agg, newAgg); int t; std::cin >> t; } UnlinkAggregate(agg); // Getting the graph of the aggregate std::vector<std::vector<int> > *am = Aggregation::GetAggregateGraph(*graph, aggMap[agg]); std::vector<std::vector<int> > &aggGraph = *am; // Getting the node weights if needed AggMIS::Types::IntVector_h weights; if (nodeWeights.size() > 0) { weights.resize(aggMap[agg].size()); for (int i = 0; i < weights.size(); i++) weights[i] = nodeWeights[aggMap[agg][i]]; } // Finding the root points: int rootA = Aggregation::FindFarthestNode(aggGraph, 0); int rootB = Aggregation::FindFarthestNode(aggGraph, rootA); // Keep track of the allocated nodes std::vector<int> allocated(aggGraph.size(), -1); // Storing the Id's of the aggregates std::vector<int> aggIds; aggIds.push_back(agg); aggIds.push_back(newAgg); // Queues of possible candidates std::vector<std::queue<int> > filler(2); filler[0].push(rootA); filler[1].push(rootB); // Nodelists for each aggregate std::vector<std::vector<int> > nodeLists(2); // Sizes of each aggregate std::vector<int> aggSizes(2, 0); // Count of allocated nodes int done = 0; // 0 or 1 for which aggregate is looking to allocate int activeAgg = 0; int inactiveAgg = 1; while (done < aggGraph.size()) { // Check if there is any possibilities if (!filler[activeAgg].empty()) { // Checking the next candidate on the queue int node = filler[activeAgg].front(); filler[activeAgg].pop(); // If node is not allocated take it if (allocated[node] == -1) { // Mark node as allocated allocated[node] = 1; // Add to activeAgg's nodelist nodeLists[activeAgg].push_back(aggMap[agg][node]); // Mark in aggregation aggregation[aggMap[agg][node]] = aggIds[activeAgg]; // Increment count of done nodes done++; if (verbose) { printf("Allocated local node %d global node %d to %d. Now %d nodes are done\n", node, aggMap[agg][node], aggIds[activeAgg], done); } // Increment size if (weights.size() > 0) aggSizes[activeAgg] += weights[node]; else aggSizes[activeAgg]++; // Add unallocated neighbors to queue for (int nIt = 0; nIt < aggGraph[node].size(); nIt++) { int neighbor = aggGraph[node][nIt]; if (allocated[neighbor] == -1) filler[activeAgg].push(neighbor); } // Check to see if activeAgg should change if (aggSizes[activeAgg] > aggSizes[inactiveAgg] && !filler[inactiveAgg].empty()) { activeAgg = (activeAgg + 1) % 2; inactiveAgg = (inactiveAgg + 1) % 2; } } } else { activeAgg = (activeAgg + 1) % 2; inactiveAgg = (inactiveAgg + 1) % 2; } } // Sort the generated nodelists sort(nodeLists[0].begin(), nodeLists[0].end()); sort(nodeLists[1].begin(), nodeLists[1].end()); // Swap the generated nodelists into the aggMap nodeLists[0].swap(aggMap[agg]); nodeLists[1].swap(aggMap[newAgg]); if (verbose) { std::stringstream s2; s2 << "AggMap for " << agg; AggMIS::Types::Display::Print(aggMap[agg], s2.str()); std::stringstream s3; s3 << "AggMap for " << newAgg; AggMIS::Types::Display::Print(aggMap[newAgg], s3.str()); } // Link in the split aggregates LinkAggregate(agg); LinkAggregate(newAgg); // Fix sizes FixSizesFromAggMap(agg); FixSizesFromAggMap(newAgg); // Clean up temp stuff delete am; ValidateArraySizes("At end of SplitAggregate"); } void MergeSplitConditionerCPU::MakeMergeSplits(int desiredSize) { // Get the appropriate sizes AggMIS::Types::IntVector_h &sizes = nodeWeights.size() > 0 ? 
weightedSizes : partSizes; // For every aggregate see if it should merge-split int aggId = 0; while (aggId < aggAdjacency.size()) { // Getting size of current aggregate int currentSize = sizes[aggId]; // Tracking the best seen merge int bestMerge = -1; int smallestDifference = INT_MAX; // If aggregate too small or too big check for merge splits: if (currentSize < minSize || currentSize > maxSize) { // Look at neighboring aggregates for (int nIt = 0; nIt < aggAdjacency[aggId].size(); nIt++) { int neighborAgg = aggAdjacency[aggId][nIt]; int neighborSize = sizes[neighborAgg]; int mergedSize = currentSize + neighborSize; int difference = mergedSize > (desiredSize * 2) ? mergedSize - (desiredSize * 2) : (desiredSize * 2) - mergedSize; if (mergedSize >= (minSize * 2) && mergedSize <= (maxSize * 2)) { if (difference < smallestDifference) { smallestDifference = difference; bestMerge = neighborAgg; } } } if (bestMerge != -1) { if (verbose) { printf("Aggregate %d of size %d found neighbor %d of size %d to merge-split with.\n", aggId, currentSize, bestMerge, sizes[bestMerge]); } // Merging the aggregates and then splitting back into // the same two ID's int lowId = MergeAggregates(aggId, bestMerge, false); if (lowId == aggId) SplitAggregate(aggId, bestMerge); else SplitAggregate(bestMerge, aggId); if (verbose) { printf("After merge-split aggregate %d has size %d and aggregate %d has size %d\n", aggId, sizes[aggId], bestMerge, sizes[bestMerge]); } ValidateArraySizes("After doing a merge-split"); mergeSplits++; } else { if (verbose) { printf("No merge-split found for aggregate %d of size %d\n", aggId, currentSize); } } } aggId++; } } void MergeSplitConditionerCPU::UnlinkAggregate(int aggId) { // Remove aggId from neighbors adjacency lists for (int i = 0; i < aggAdjacency[aggId].size(); i++) { int neighborAgg = aggAdjacency[aggId][i]; remove(aggAdjacency[neighborAgg].begin(), aggAdjacency[neighborAgg].end(), aggId); aggAdjacency[neighborAgg].pop_back(); } // Clear adjacency for aggId aggAdjacency[aggId].clear(); ValidateArraySizes("At end of UnlinkAggregate"); } void MergeSplitConditionerCPU::FixSizesFromAggMap(int aggId) { partSizes[aggId] = aggMap[aggId].size(); if (nodeWeights.size() > 0) { int weight = 0; for (int i = 0; i < aggMap[aggId].size(); i++) weight += nodeWeights[aggMap[aggId][i]]; weightedSizes[aggId] = weight; } ValidateArraySizes("At end of FixSizesFromAggMap"); } void MergeSplitConditionerCPU::LinkAggregate(int aggId) { // Check aggregate of all neighbors of nodes in aggId for (int i = 0; i < aggMap[aggId].size(); i++) { int node = aggMap[aggId][i]; for (int* nIt = graph->nStart(node); nIt != graph->nEnd(node); nIt++) { int neighborAgg = aggregation[*nIt]; if (neighborAgg != aggId) { aggAdjacency[aggId].push_back(neighborAgg); // Insert aggId into neighbor's adjacency if needed if (!binary_search(aggAdjacency[neighborAgg].begin(), aggAdjacency[neighborAgg].end(), aggId)) { aggAdjacency[neighborAgg].push_back(aggId); sort(aggAdjacency[neighborAgg].begin(), aggAdjacency[neighborAgg].end()); } } } } // Sort and remove duplicates sort(aggAdjacency[aggId].begin(), aggAdjacency[aggId].end()); std::vector<int>::iterator newEnd = unique(aggAdjacency[aggId].begin(), aggAdjacency[aggId].end()); aggAdjacency[aggId].resize(newEnd - aggAdjacency[aggId].begin()); ValidateArraySizes("At end of LinkAggregate"); } void MergeSplitConditionerCPU::FillAggAdjacency() { // Clearing anything there out first for (int i = 0; i < aggAdjacency.size(); i++) aggAdjacency[i].clear(); aggAdjacency.clear(); // Going 
through the aggregation vector to fill for (int node = 0; node < graph->Size(); node++) { int startAgg = aggregation[node]; for (int *nIt = graph->nStart(node); nIt != graph->nEnd(node); nIt++) { int endAgg = aggregation[*nIt]; // If this is an edge between two aggregates add to // the induced graph. if (startAgg != endAgg && startAgg < endAgg) { // Making sure that there are entries in temp if (endAgg >= aggAdjacency.size()) aggAdjacency.resize(endAgg + 1); // Adding edge entries if (aggAdjacency[startAgg].size() == 0 || !(std::binary_search(aggAdjacency[startAgg].begin(), aggAdjacency[startAgg].end(), endAgg))) { aggAdjacency[startAgg].push_back(endAgg); std::sort(aggAdjacency[startAgg].begin(), aggAdjacency[startAgg].end()); } if (aggAdjacency[endAgg].size() == 0 || !(std::binary_search(aggAdjacency[endAgg].begin(), aggAdjacency[endAgg].end(), startAgg))) { aggAdjacency[endAgg].push_back(startAgg); std::sort(aggAdjacency[endAgg].begin(), aggAdjacency[endAgg].end()); } } } } } void MergeSplitConditionerCPU::FillAggMap() { // Clearing anything there out first for (int i = 0; i < aggMap.size(); i++) aggMap[i].clear(); aggMap.clear(); // Going through the aggregation vector to fill for (int node = 0; node < graph->Size(); node++) { int aggId = aggregation[node]; if (aggMap.size() <= aggId) aggMap.resize(aggId + 1); aggMap[aggId].push_back(node); } // Make sure each map is sorted for (int i = 0; i < aggMap.size(); i++) sort(aggMap[i].begin(), aggMap[i].end()); } void MergeSplitConditionerCPU::ValidateAggAdjacency() { // Move current into new spot std::vector<std::vector<int> > temp(aggAdjacency.size()); for (int i = 0; i < temp.size(); i++) temp[i].swap(aggAdjacency[i]); // Regenerate member FillAggAdjacency(); // Compare if (AggMIS::Types::Compare::AreEqual(temp, aggAdjacency, true)) printf("AggAdjacency validated.\n"); else { printf("Failed to validate AggAdjacency.\n"); int t; std::cin >> t; } for (int i = 0; i < temp.size(); i++) temp[i].clear(); } void MergeSplitConditionerCPU::ValidateAggMap() { // Move current into new spot std::vector<std::vector<int> > temp(aggMap.size()); for (int i = 0; i < temp.size(); i++) temp[i].swap(aggMap[i]); // Regenerate member FillAggMap(); // Compare if (AggMIS::Types::Compare::AreEqual(temp, aggMap, true)) printf("AggMap validated.\n"); else { printf("Failed to validate AggMap.\n"); int t; std::cin >> t; } for (int i = 0; i < temp.size(); i++) temp[i].clear(); } void MergeSplitConditionerCPU::ValidatePartSizes() { bool failed = false; AggMIS::Types::IntVector_h *ps = Aggregation::GetPartSizes(aggregation); if (AggMIS::Types::Compare::AreEqual(*ps, partSizes, true)) printf("PartSizes validates.\n"); else { printf("PartSizes does not validate.\n"); failed = true; } if (nodeWeights.size() > 0) { AggMIS::Types::IntVector_h* ws = Aggregation::GetPartSizes(aggregation, nodeWeights); if (AggMIS::Types::Compare::AreEqual(*ws, weightedSizes, true)) printf("WeightedSizes validates.\n"); else { printf("WeightedSizes does not validate.\n"); failed = true; } } if (failed) { int d; std::cin >> d; } } void MergeSplitConditionerCPU::ValidateArraySizes(std::string message) { bool error = nodeWeights.size() > 0 ? aggAdjacency.size() != aggMap.size() || aggMap.size() != partSizes.size() || partSizes.size() != weightedSizes.size() : aggAdjacency.size() != aggMap.size() || aggMap.size() != partSizes.size(); if (error) { printf("Error with array sizes! 
%s\n", message.c_str()); printf("\taggAdjacency.size()=%d aggMap.size()=%d partSizes.size()=%d weightedSizes.size()=%d\n", aggAdjacency.size(), aggMap.size(), partSizes.size(), weightedSizes.size()); int t; std::cin >> t; } } } }
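// --------------------------------------------------------------------------------------------------------
// The kernels that follow implement a Longstaff-Schwartz least-squares Monte Carlo pricer for American
// options: generate_paths_kernel simulates geometric-Brownian-motion paths, prepare_svd_kernel builds the
// QR factorization / 3x3 SVD of the regression basis {1, S, S^2} over in-the-money paths,
// compute_partial_beta_kernel accumulates the regression coefficients beta, and update_cashflow_kernel
// exercises early whenever the immediate payoff beats the discounted regressed continuation value. The
// host-only sketch below mirrors that per-timestep decision rule with plain normal equations instead of
// the QR/SVD machinery; the names LsmcStepPut and FitQuadratic are illustrative and are not used by the
// kernels themselves.
#include <cstddef>
#include <vector>

// Solve the 3x3 normal equations (B^T B) beta = B^T y for the basis {1, S, S^2}.
static void FitQuadratic(const std::vector<double> &S, const std::vector<double> &y, double beta[3]) {
  double A[3][3] = {{0.0}}, b[3] = {0.0};
  for (size_t i = 0; i < S.size(); ++i) {
    const double x[3] = {1.0, S[i], S[i] * S[i]};
    for (int r = 0; r < 3; ++r) {
      b[r] += x[r] * y[i];
      for (int c = 0; c < 3; ++c) A[r][c] += x[r] * x[c];
    }
  }
  // Gaussian elimination without pivoting; adequate for this small, well-conditioned toy system.
  for (int k = 0; k < 3; ++k)
    for (int r = k + 1; r < 3; ++r) {
      const double f = A[r][k] / A[k][k];
      for (int c = k; c < 3; ++c) A[r][c] -= f * A[k][c];
      b[r] -= f * b[k];
    }
  for (int r = 2; r >= 0; --r) {
    double s = b[r];
    for (int c = r + 1; c < 3; ++c) s -= A[r][c] * beta[c];
    beta[r] = s / A[r][r];
  }
}

// One backward-induction step for an American put with strike K: regress next-step cashflows of the
// in-the-money paths on {1, S, S^2}, then exercise when the immediate payoff beats the discounted
// regressed continuation value, otherwise carry the discounted cashflow forward.
static void LsmcStepPut(double K, double exp_min_r_dt,
                        const std::vector<double> &S, std::vector<double> &cashflows) {
  std::vector<double> x, y;
  for (size_t i = 0; i < S.size(); ++i)
    if (S[i] < K) { x.push_back(S[i]); y.push_back(cashflows[i]); }
  if (x.size() < 4) {            // too few in-the-money paths: just discount (cf. min_in_the_money below)
    for (double &c : cashflows) c *= exp_min_r_dt;
    return;
  }
  double beta[3];
  FitQuadratic(x, y, beta);
  for (size_t i = 0; i < S.size(); ++i) {
    const double discounted   = exp_min_r_dt * cashflows[i];
    const double payoff       = S[i] < K ? K - S[i] : 0.0;
    const double continuation = exp_min_r_dt * (beta[0] + beta[1] * S[i] + beta[2] * S[i] * S[i]);
    cashflows[i] = (payoff > 1.0e-8 && payoff > continuation) ? payoff : discounted;
  }
}
// --------------------------------------------------------------------------------------------------------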
#ifdef WITH_FULL_W_MATRIX #define R_W_MATRICES_SMEM_SLOTS 15 #else #define R_W_MATRICES_SMEM_SLOTS 12 #endif /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #define CHECK_CUDA(call) do { \ cudaError_t status = call; \ if( status != cudaSuccess ) { \ fprintf(stderr, "CUDA Error at line %d in %s: %s\n", __LINE__, __FILE__, cudaGetErrorString(status)); \ exit((int) status); \ } \ } while(0) #define CHECK_CURAND(call) do { \ curandStatus_t status = call; \ if( status != CURAND_STATUS_SUCCESS ) { \ fprintf(stderr, "CURAND Error at line %d in %s: %d\n", __LINE__, __FILE__, status); \ exit((int) status); \ } \ } while(0) // ==================================================================================================================== #define HOST_DEVICE __host__ __device__ #define HOST_DEVICE_INLINE __host__ __device__ __forceinline__ /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// HOST_DEVICE_INLINE double3 operator+(const double3 &u, const double3 &v ) { return make_double3(u.x+v.x, u.y+v.y, u.z+v.z); } // ==================================================================================================================== HOST_DEVICE_INLINE double4 operator+(const double4 &u, const double4 &v ) { return make_double4(u.x+v.x, u.y+v.y, u.z+v.z, u.w+v.w); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// struct PayoffCall { double m_K; HOST_DEVICE_INLINE PayoffCall(double K) : m_K(K) {} HOST_DEVICE_INLINE double operator()(double S) const { return max(S - m_K, 0.0); } HOST_DEVICE_INLINE int is_in_the_money(double S) const { return S > m_K; } }; struct PayoffPut { double m_K; HOST_DEVICE_INLINE PayoffPut(double K) : m_K(K) {} HOST_DEVICE_INLINE double operator()(double S) const { return max(m_K - S, 0.0); } HOST_DEVICE_INLINE int is_in_the_money(double S) const { return S < m_K; } }; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #ifdef WITH_ATOMIC_BETA static __device__ __forceinline__ void atomic_add(double *address, double val) { unsigned long long *address_as_ull = (unsigned long long *) address; unsigned long long old = __double_as_longlong(address[0]), assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while(assumed != old); } #endif /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< int NUM_THREADS_PER_BLOCK, typename Payoff > __global__ __launch_bounds__(NUM_THREADS_PER_BLOCK) void generate_paths_kernel(int num_timesteps, int num_paths, Payoff payoff, double dt, double S0, double r, double sigma, const double *__restrict samples, double *__restrict paths) { // The path generated by this thread. int path = blockIdx.x*NUM_THREADS_PER_BLOCK + threadIdx.x; // Early exit. if( path >= num_paths ) return; // Compute (r - sigma^2 / 2). const double r_min_half_sigma_sq_dt = (r - 0.5*sigma*sigma)*dt; // Compute sigma*sqrt(dt). const double sigma_sqrt_dt = sigma*sqrt(dt); // Keep the previous price. double S = S0; // The offset. int offset = path; // Each thread generates several timesteps. 
for( int timestep = 0 ; timestep < num_timesteps-1 ; ++timestep, offset += num_paths ) { S = S * exp(r_min_half_sigma_sq_dt + sigma_sqrt_dt*samples[offset]); paths[offset] = S; } // The asset price. S = S * exp(r_min_half_sigma_sq_dt + sigma_sqrt_dt*samples[offset]); // Store the payoff at expiry. paths[offset] = payoff(S); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// static __device__ __forceinline__ void assemble_R(int m, double4 &sums, double *smem_svds) { // Assemble R. double x0 = smem_svds[0]; double x1 = smem_svds[1]; double x2 = smem_svds[2]; double x0_sq = x0 * x0; double sum1 = sums.x - x0; double sum2 = sums.y - x0_sq; double sum3 = sums.z - x0_sq*x0; double sum4 = sums.w - x0_sq*x0_sq; double m_as_dbl = (double) m; double sigma = m_as_dbl - 1.0; double mu = sqrt(m_as_dbl); double v0 = -sigma / (1.0 + mu); double v0_sq = v0*v0; double beta = 2.0 * v0_sq / (sigma + v0_sq); double inv_v0 = 1.0 / v0; double one_min_beta = 1.0 - beta; double beta_div_v0 = beta * inv_v0; smem_svds[0] = mu; smem_svds[1] = one_min_beta*x0 - beta_div_v0*sum1; smem_svds[2] = one_min_beta*x0_sq - beta_div_v0*sum2; // Rank update coefficients. double beta_div_v0_sq = beta_div_v0 * inv_v0; double c1 = beta_div_v0_sq*sum1 + beta_div_v0*x0; double c2 = beta_div_v0_sq*sum2 + beta_div_v0*x0_sq; // 2nd step of QR. double x1_sq = x1*x1; sum1 -= x1; sum2 -= x1_sq; sum3 -= x1_sq*x1; sum4 -= x1_sq*x1_sq; x0 = x1-c1; x0_sq = x0*x0; sigma = sum2 - 2.0*c1*sum1 + (m_as_dbl-2.0)*c1*c1; if( abs(sigma) < 1.0e-16 ) beta = 0.0; else { mu = sqrt(x0_sq + sigma); if( x0 <= 0.0 ) v0 = x0 - mu; else v0 = -sigma / (x0 + mu); v0_sq = v0*v0; beta = 2.0*v0_sq / (sigma + v0_sq); } inv_v0 = 1.0 / v0; beta_div_v0 = beta * inv_v0; // The coefficient to perform the rank update. double c3 = (sum3 - c1*sum2 - c2*sum1 + (m_as_dbl-2.0)*c1*c2)*beta_div_v0; double c4 = (x1_sq-c2)*beta_div_v0 + c3*inv_v0; double c5 = c1*c4 - c2; one_min_beta = 1.0 - beta; // Update R. smem_svds[3] = one_min_beta*x0 - beta_div_v0*sigma; smem_svds[4] = one_min_beta*(x1_sq-c2) - c3; // 3rd step of QR. double x2_sq = x2*x2; sum1 -= x2; sum2 -= x2_sq; sum3 -= x2_sq*x2; sum4 -= x2_sq*x2_sq; x0 = x2_sq-c4*x2+c5; sigma = sum4 - 2.0*c4*sum3 + (c4*c4 + 2.0*c5)*sum2 - 2.0*c4*c5*sum1 + (m_as_dbl-3.0)*c5*c5; if( abs(sigma) < 1.0e-12 ) beta = 0.0; else { mu = sqrt(x0*x0 + sigma); if( x0 <= 0.0 ) v0 = x0 - mu; else v0 = -sigma / (x0 + mu); v0_sq = v0*v0; beta = 2.0*v0_sq / (sigma + v0_sq); } // Update R. smem_svds[5] = (1.0-beta)*x0 - (beta/v0)*sigma; } // ==================================================================================================================== static __host__ __device__ double off_diag_norm(double A01, double A02, double A12) { return sqrt(2.0 * (A01*A01 + A02*A02 + A12*A12)); } // ==================================================================================================================== static __device__ __forceinline__ void swap(double &x, double &y) { double t = x; x = y; y = t; } // ==================================================================================================================== static __device__ __forceinline__ void svd_3x3(int m, double4 &sums, double *smem_svds) { // Assemble the R matrix. assemble_R(m, sums, smem_svds); // The matrix R. 
double R00 = smem_svds[0]; double R01 = smem_svds[1]; double R02 = smem_svds[2]; double R11 = smem_svds[3]; double R12 = smem_svds[4]; double R22 = smem_svds[5]; // We compute the eigenvalues/eigenvectors of A = R^T R. double A00 = R00*R00; double A01 = R00*R01; double A02 = R00*R02; double A11 = R01*R01 + R11*R11; double A12 = R01*R02 + R11*R12; double A22 = R02*R02 + R12*R12 + R22*R22; // We keep track of V since A = Sigma^2 V. Each thread stores a row of V. double V00 = 1.0, V01 = 0.0, V02 = 0.0; double V10 = 0.0, V11 = 1.0, V12 = 0.0; double V20 = 0.0, V21 = 0.0, V22 = 1.0; // The Jacobi algorithm is iterative. We fix the max number of iter and the minimum tolerance. const int max_iters = 16; const double tolerance = 1.0e-12; // Iterate until we reach the max number of iters or the tolerance. for( int iter = 0 ; off_diag_norm(A01, A02, A12) >= tolerance && iter < max_iters ; ++iter ) { double c, s, B00, B01, B02, B10, B11, B12, B20, B21, B22; // Compute the Jacobi matrix for p=0 and q=1. c = 1.0, s = 0.0; if( A01 != 0.0 ) { double tau = (A11 - A00) / (2.0 * A01); double sgn = tau < 0.0 ? -1.0 : 1.0; double t = sgn / (sgn*tau + sqrt(1.0 + tau*tau)); c = 1.0 / sqrt(1.0 + t*t); s = t*c; } // Update A = J^T A J and V = V J. B00 = c*A00 - s*A01; B01 = s*A00 + c*A01; B10 = c*A01 - s*A11; B11 = s*A01 + c*A11; B02 = A02; A00 = c*B00 - s*B10; A01 = c*B01 - s*B11; A11 = s*B01 + c*B11; A02 = c*B02 - s*A12; A12 = s*B02 + c*A12; B00 = c*V00 - s*V01; V01 = s*V00 + c*V01; V00 = B00; B10 = c*V10 - s*V11; V11 = s*V10 + c*V11; V10 = B10; B20 = c*V20 - s*V21; V21 = s*V20 + c*V21; V20 = B20; // Compute the Jacobi matrix for p=0 and q=2. c = 1.0, s = 0.0; if( A02 != 0.0 ) { double tau = (A22 - A00) / (2.0 * A02); double sgn = tau < 0.0 ? -1.0 : 1.0; double t = sgn / (sgn*tau + sqrt(1.0 + tau*tau)); c = 1.0 / sqrt(1.0 + t*t); s = t*c; } // Update A = J^T A J and V = V J. B00 = c*A00 - s*A02; B01 = c*A01 - s*A12; B02 = s*A00 + c*A02; B20 = c*A02 - s*A22; B22 = s*A02 + c*A22; A00 = c*B00 - s*B20; A12 = s*A01 + c*A12; A02 = c*B02 - s*B22; A22 = s*B02 + c*B22; A01 = B01; B00 = c*V00 - s*V02; V02 = s*V00 + c*V02; V00 = B00; B10 = c*V10 - s*V12; V12 = s*V10 + c*V12; V10 = B10; B20 = c*V20 - s*V22; V22 = s*V20 + c*V22; V20 = B20; // Compute the Jacobi matrix for p=1 and q=2. c = 1.0, s = 0.0; if( A12 != 0.0 ) { double tau = (A22 - A11) / (2.0 * A12); double sgn = tau < 0.0 ? -1.0 : 1.0; double t = sgn / (sgn*tau + sqrt(1.0 + tau*tau)); c = 1.0 / sqrt(1.0 + t*t); s = t*c; } // Update A = J^T A J and V = V J. B02 = s*A01 + c*A02; B11 = c*A11 - s*A12; B12 = s*A11 + c*A12; B21 = c*A12 - s*A22; B22 = s*A12 + c*A22; A01 = c*A01 - s*A02; A02 = B02; A11 = c*B11 - s*B21; A12 = c*B12 - s*B22; A22 = s*B12 + c*B22; B01 = c*V01 - s*V02; V02 = s*V01 + c*V02; V01 = B01; B11 = c*V11 - s*V12; V12 = s*V11 + c*V12; V11 = B11; B21 = c*V21 - s*V22; V22 = s*V21 + c*V22; V21 = B21; } // Swap the columns to have S[0] >= S[1] >= S[2]. if( A00 < A11 ) { swap(A00, A11); swap(V00, V01); swap(V10, V11); swap(V20, V21); } if( A00 < A22 ) { swap(A00, A22); swap(V00, V02); swap(V10, V12); swap(V20, V22); } if( A11 < A22 ) { swap(A11, A22); swap(V01, V02); swap(V11, V12); swap(V21, V22); } //printf("timestep=%3d, svd0=%.8lf svd1=%.8lf svd2=%.8lf\n", blockIdx.x, sqrt(A00), sqrt(A11), sqrt(A22)); // Invert the diagonal terms and compute V*S^-1. double inv_S0 = abs(A00) < 1.0e-12 ? 0.0 : 1.0 / A00; double inv_S1 = abs(A11) < 1.0e-12 ? 0.0 : 1.0 / A11; double inv_S2 = abs(A22) < 1.0e-12 ? 
0.0 : 1.0 / A22; // printf("SVD: timestep=%3d %12.8lf %12.8lf %12.8lf\n", blockIdx.x, sqrt(A00), sqrt(A11), sqrt(A22)); double U00 = V00 * inv_S0; double U01 = V01 * inv_S1; double U02 = V02 * inv_S2; double U10 = V10 * inv_S0; double U11 = V11 * inv_S1; double U12 = V12 * inv_S2; double U20 = V20 * inv_S0; double U21 = V21 * inv_S1; double U22 = V22 * inv_S2; // Compute V*S^-1*V^T*R^T. #ifdef WITH_FULL_W_MATRIX double B00 = U00*V00 + U01*V01 + U02*V02; double B01 = U00*V10 + U01*V11 + U02*V12; double B02 = U00*V20 + U01*V21 + U02*V22; double B10 = U10*V00 + U11*V01 + U12*V02; double B11 = U10*V10 + U11*V11 + U12*V12; double B12 = U10*V20 + U11*V21 + U12*V22; double B20 = U20*V00 + U21*V01 + U22*V02; double B21 = U20*V10 + U21*V11 + U22*V12; double B22 = U20*V20 + U21*V21 + U22*V22; smem_svds[ 6] = B00*R00 + B01*R01 + B02*R02; smem_svds[ 7] = B01*R11 + B02*R12; smem_svds[ 8] = B02*R22; smem_svds[ 9] = B10*R00 + B11*R01 + B12*R02; smem_svds[10] = B11*R11 + B12*R12; smem_svds[11] = B12*R22; smem_svds[12] = B20*R00 + B21*R01 + B22*R02; smem_svds[13] = B21*R11 + B22*R12; smem_svds[14] = B22*R22; #else double B00 = U00*V00 + U01*V01 + U02*V02; double B01 = U00*V10 + U01*V11 + U02*V12; double B02 = U00*V20 + U01*V21 + U02*V22; double B11 = U10*V10 + U11*V11 + U12*V12; double B12 = U10*V20 + U11*V21 + U12*V22; double B22 = U20*V20 + U21*V21 + U22*V22; smem_svds[ 6] = B00*R00 + B01*R01 + B02*R02; smem_svds[ 7] = B01*R11 + B02*R12; smem_svds[ 8] = B02*R22; smem_svds[ 9] = B11*R11 + B12*R12; smem_svds[10] = B12*R22; smem_svds[11] = B22*R22; #endif } // ==================================================================================================================== template< int NUM_THREADS_PER_BLOCK, typename Payoff > __global__ __launch_bounds__(NUM_THREADS_PER_BLOCK, 4) void prepare_svd_kernel(int num_paths, int min_in_the_money, Payoff payoff, const double */*__restrict*/ paths, int *__restrict all_out_of_the_money, double *__restrict svds) { // We need to perform a scan to find the first 3 stocks pay off. typedef cub::BlockScan<int, NUM_THREADS_PER_BLOCK> BlockScan; // We need to perform a reduction at the end of the kernel to compute the final sums. typedef cub::BlockReduce<int, NUM_THREADS_PER_BLOCK> BlockReduce1; typedef cub::BlockReduce<double4, NUM_THREADS_PER_BLOCK> BlockReduce4; // The union for the scan/reduce. union TempStorage { typename BlockScan ::TempStorage for_scan; typename BlockReduce1::TempStorage for_reduce1; typename BlockReduce4::TempStorage for_reduce4; }; // Shared memory. __shared__ TempStorage smem_storage; // Shared buffer for the ouput. __shared__ double smem_svds[R_W_MATRICES_SMEM_SLOTS]; // Each block works on a single timestep. const int timestep = blockIdx.x; // The timestep offset. const int offset = timestep * num_paths; // Sums. int m = 0; double4 sums = { 0.0, 0.0, 0.0, 0.0 }; // Initialize the shared memory. DBL_MAX is a marker to specify that the value is invalid. if( threadIdx.x < R_W_MATRICES_SMEM_SLOTS ) smem_svds[threadIdx.x] = 0.0; __syncthreads(); // Have we already found our 3 first paths which pay off. int found_paths = 0; // Iterate over the paths. for( int path = threadIdx.x ; path < num_paths ; path += NUM_THREADS_PER_BLOCK ) { // Load the asset price to determine if it pays off. double S = 0.0; if( path < num_paths ) S = paths[offset + path]; // Check if it pays off. const int in_the_money = payoff.is_in_the_money(S); // Try to check if we have found the 3 first stocks. 
if( found_paths < 3 ) { int partial_sum = 0, total_sum = 0; BlockScan(smem_storage.for_scan).ExclusiveSum(in_the_money, partial_sum, total_sum); if( in_the_money && found_paths + partial_sum < 3 ) smem_svds[found_paths + partial_sum] = S; __syncthreads(); found_paths += total_sum; } // Early continue if no item pays off. if( !__any(in_the_money) ) { continue; } // Update the number of payoff items. m += in_the_money; // The "normalized" value. double x = 0.0, x_sq = 0.0; if( in_the_money ) { x = S; x_sq = S*S; } // Compute the 4 sums. sums.x += x; sums.y += x_sq; sums.z += x_sq*x; sums.w += x_sq*x_sq; } // Make sure the scan is finished. __syncthreads(); // Compute the final reductions. m = BlockReduce1(smem_storage.for_reduce1).Sum(m); // Do we all exit? int not_enough_paths = __syncthreads_or(threadIdx.x == 0 && m < min_in_the_money); // Early exit if no path is in the money. if( not_enough_paths ) { if( threadIdx.x == 0 ) all_out_of_the_money[blockIdx.x] = 1; return; } // Compute the final reductions. sums = BlockReduce4(smem_storage.for_reduce4).Sum(sums); // The 1st thread has everything he needs to build R from the QR decomposition. if( threadIdx.x == 0 ) svd_3x3(m, sums, smem_svds); __syncthreads(); // Store the final results. if( threadIdx.x < R_W_MATRICES_SMEM_SLOTS ) svds[16*blockIdx.x + threadIdx.x] = smem_svds[threadIdx.x]; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< int NUM_THREADS_PER_BLOCK, typename Payoff > __global__ __launch_bounds__(NUM_THREADS_PER_BLOCK, 8) void compute_partial_beta_kernel(int num_paths, Payoff payoff, const double *__restrict svd, const double */*__restrict*/ paths, const double */*__restrict*/ cashflows, const int *__restrict all_out_of_the_money, double *__restrict partial_sums) { typedef cub::BlockReduce<double3, NUM_THREADS_PER_BLOCK> BlockReduce; // The shared memory storage. __shared__ typename BlockReduce::TempStorage smem_for_reduce; // The shared memory to store the SVD. __shared__ double shared_svd[R_W_MATRICES_SMEM_SLOTS]; // Early exit if needed. if( *all_out_of_the_money ) { return; } // The number of threads per grid. const int NUM_THREADS_PER_GRID = NUM_THREADS_PER_BLOCK * gridDim.x; // The 1st threads loads the matrices SVD and R. if( threadIdx.x < R_W_MATRICES_SMEM_SLOTS ) shared_svd[threadIdx.x] = svd[threadIdx.x]; __syncthreads(); // Load the terms of R. const double R00 = shared_svd[ 0]; const double R01 = shared_svd[ 1]; const double R02 = shared_svd[ 2]; const double R11 = shared_svd[ 3]; const double R12 = shared_svd[ 4]; const double R22 = shared_svd[ 5]; // Load the elements of W. #ifdef WITH_FULL_W_MATRIX const double W00 = shared_svd[ 6]; const double W01 = shared_svd[ 7]; const double W02 = shared_svd[ 8]; const double W10 = shared_svd[ 9]; const double W11 = shared_svd[10]; const double W12 = shared_svd[11]; const double W20 = shared_svd[12]; const double W21 = shared_svd[13]; const double W22 = shared_svd[14]; #else const double W00 = shared_svd[ 6]; const double W01 = shared_svd[ 7]; const double W02 = shared_svd[ 8]; const double W11 = shared_svd[ 9]; const double W12 = shared_svd[10]; const double W22 = shared_svd[11]; #endif // Invert the diagonal of R. const double inv_R00 = R00 != 0.0 ? __drcp_rn(R00) : 0.0; const double inv_R11 = R11 != 0.0 ? __drcp_rn(R11) : 0.0; const double inv_R22 = R22 != 0.0 ? __drcp_rn(R22) : 0.0; // Precompute the R terms. 
const double inv_R01 = inv_R00*inv_R11*R01; const double inv_R02 = inv_R00*inv_R22*R02; const double inv_R12 = inv_R22*R12; // Precompute W00/R00. #ifdef WITH_FULL_W_MATRIX const double inv_W00 = W00*inv_R00; const double inv_W10 = W10*inv_R00; const double inv_W20 = W20*inv_R00; #else const double inv_W00 = W00*inv_R00; #endif // Each thread has 3 numbers to sum. double beta0 = 0.0, beta1 = 0.0, beta2 = 0.0; // Iterate over the paths. for( int path = blockIdx.x*NUM_THREADS_PER_BLOCK + threadIdx.x ; path < num_paths ; path += NUM_THREADS_PER_GRID ) { // Threads load the asset price to rebuild Q from the QR decomposition. double S = paths[path]; // Is the path in the money? const int in_the_money = payoff.is_in_the_money(S); // Compute Qis. The elements of the Q matrix in the QR decomposition. double Q1i = inv_R11*S - inv_R01; double Q2i = inv_R22*S*S - inv_R02 - Q1i*inv_R12; // Compute the ith row of the pseudo-inverse of [1 X X^2]. #ifdef WITH_FULL_W_MATRIX const double WI0 = inv_W00 + W01 * Q1i + W02 * Q2i; const double WI1 = inv_W10 + W11 * Q1i + W12 * Q2i; const double WI2 = inv_W20 + W21 * Q1i + W22 * Q2i; #else const double WI0 = inv_W00 + W01 * Q1i + W02 * Q2i; const double WI1 = W11 * Q1i + W12 * Q2i; const double WI2 = W22 * Q2i; #endif // Each thread loads its element from the Y vector. double cashflow = in_the_money ? cashflows[path] : 0.0; // Update beta. beta0 += WI0*cashflow; beta1 += WI1*cashflow; beta2 += WI2*cashflow; } // Compute the sum of the elements in the block. We could do slightly better by removing the bank conflicts here. double3 sums = BlockReduce(smem_for_reduce).Sum(make_double3(beta0, beta1, beta2)); // The 1st thread stores the result to GMEM. #ifdef WITH_ATOMIC_BETA if( threadIdx.x == 0 ) { atomic_add(&partial_sums[0], sums.x); atomic_add(&partial_sums[1], sums.y); atomic_add(&partial_sums[2], sums.z); } #else if( threadIdx.x == 0 ) { partial_sums[0*NUM_THREADS_PER_BLOCK + blockIdx.x] = sums.x; partial_sums[1*NUM_THREADS_PER_BLOCK + blockIdx.x] = sums.y; partial_sums[2*NUM_THREADS_PER_BLOCK + blockIdx.x] = sums.z; } #endif } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< int NUM_THREADS_PER_BLOCK > __global__ __launch_bounds__(NUM_THREADS_PER_BLOCK) void compute_final_beta_kernel(const int *__restrict all_out_of_the_money, double *__restrict beta) { typedef cub::BlockReduce<double3, NUM_THREADS_PER_BLOCK> BlockReduce; // The shared memory for the reduction. __shared__ typename BlockReduce::TempStorage smem_for_reduce; // Early exit if needed. if( *all_out_of_the_money ) { if( threadIdx.x < 3 ) beta[threadIdx.x] = 0.0; return; } // The final sums. double3 sums; // We load the elements. sums.x = beta[0*NUM_THREADS_PER_BLOCK + threadIdx.x]; sums.y = beta[1*NUM_THREADS_PER_BLOCK + threadIdx.x]; sums.z = beta[2*NUM_THREADS_PER_BLOCK + threadIdx.x]; // Compute the sums. sums = BlockReduce(smem_for_reduce).Sum(sums); // Store beta. if( threadIdx.x == 0 ) { //printf("beta0=%.8lf beta1=%.8lf beta2=%.8lf\n", sums.x, sums.y, sums.z); beta[0] = sums.x; beta[1] = sums.y; beta[2] = sums.z; } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // If you enable WITH_FUSED_BETA, that kernel will assemble the beta coefficients from the partial sums computed // in compute_partial_beta_kernel. 
Otherwise, it assumes beta has been built either by compute_final_beta_kernel or // by atomic operations at the end of compute_partial_beta_kernel. template< int NUM_THREADS_PER_BLOCK, typename Payoff > __global__ __launch_bounds__(NUM_THREADS_PER_BLOCK) void update_cashflow_kernel(int num_paths, Payoff payoff_object, double exp_min_r_dt, const double *__restrict beta, const double */*__restrict*/ paths, const int *__restrict all_out_of_the_money, double */*__restrict*/ cashflows) { const int NUM_THREADS_PER_GRID = gridDim.x * NUM_THREADS_PER_BLOCK; // Are we going to skip the computations. const int skip_computations = *all_out_of_the_money; #ifdef WITH_FUSED_BETA typedef cub::BlockReduce<double3, NUM_THREADS_PER_BLOCK> BlockReduce; // The shared memory for the reduction. __shared__ typename BlockReduce::TempStorage smem_for_reduce; // The shared memory to exchange beta. __shared__ double smem_beta[3]; // The final sums. double3 sums; // We load the elements. Each block loads the same elements. sums.x = beta[0*NUM_THREADS_PER_BLOCK + threadIdx.x]; sums.y = beta[1*NUM_THREADS_PER_BLOCK + threadIdx.x]; sums.z = beta[2*NUM_THREADS_PER_BLOCK + threadIdx.x]; // Compute the sums. sums = BlockReduce(smem_for_reduce).Sum(sums); // Store beta. if( threadIdx.x == 0 ) { smem_beta[0] = sums.x; smem_beta[1] = sums.y; smem_beta[2] = sums.z; } __syncthreads(); // Load the beta coefficients from SMEM. const double beta0 = smem_beta[0]; const double beta1 = smem_beta[1]; const double beta2 = smem_beta[2]; #else // Load the beta coefficients for the linear regression. const double beta0 = beta[0]; const double beta1 = beta[1]; const double beta2 = beta[2]; #endif // Iterate over the paths. int path = blockIdx.x*NUM_THREADS_PER_BLOCK + threadIdx.x; for( ; path < num_paths ; path += NUM_THREADS_PER_GRID ) { // The cashflow. const double old_cashflow = exp_min_r_dt*cashflows[path]; if( skip_computations ) { cashflows[path] = old_cashflow; continue; } // Load the asset price. double S = paths[path]; double S2 = S*S; // The payoff. double payoff = payoff_object(S); // Compute the estimated payoff from continuing. double estimated_payoff = beta0 + beta1*S + beta2*S2; // Discount the payoff because we did not take it into account for beta. estimated_payoff *= exp_min_r_dt; // Update the payoff. if( payoff <= 1.0e-8 || payoff <= estimated_payoff ) payoff = old_cashflow; // Store the updated cashflow. 
cashflows[path] = payoff; } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #ifdef WITH_CDP template< int NUM_THREADS_PER_BLOCK, typename Payoff > __global__ __launch_bounds__(NUM_THREADS_PER_BLOCK, 8) void cdp_timestep_loop_kernel(int num_timesteps, int num_paths, int update_cashflow_grid, Payoff payoff, double exp_min_r_dt, const double *__restrict svds, const double */*__restrict*/ paths, double */*__restrict*/ cashflows, const int *__restrict all_out_of_the_money, double *__restrict temp_storage) { #if __CUDA_ARCH__ >= 350 for( int timestep = num_timesteps-2 ; timestep >= 0 ; --timestep ) { compute_partial_beta_kernel<NUM_THREADS_PER_BLOCK><<<NUM_THREADS_PER_BLOCK, NUM_THREADS_PER_BLOCK>>>( num_paths, payoff, svds + 16*timestep, paths + timestep*num_paths, cashflows, all_out_of_the_money + timestep, temp_storage); #if defined(WITH_FUSED_BETA) || defined(WITH_ATOMIC_BETA) #else compute_final_beta_kernel<NUM_THREADS_PER_BLOCK><<<1, NUM_THREADS_PER_BLOCK>>>( all_out_of_the_money + timestep, temp_storage); #endif update_cashflow_kernel<NUM_THREADS_PER_BLOCK><<<update_cashflow_grid, NUM_THREADS_PER_BLOCK>>>( num_paths, payoff, exp_min_r_dt, temp_storage, paths + timestep*num_paths, all_out_of_the_money + timestep, cashflows); } #endif } #endif // WITH_CDP /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< int NUM_THREADS_PER_BLOCK > __global__ __launch_bounds__(NUM_THREADS_PER_BLOCK) void compute_partial_sums_kernel(int num_paths, const double *__restrict cashflows, double *__restrict sums) { typedef cub::BlockReduce<double, NUM_THREADS_PER_BLOCK> BlockReduce; // Shared memory to compute the final sum. __shared__ typename BlockReduce::TempStorage smem_storage; // Each thread works on a single path. const int path = blockIdx.x * NUM_THREADS_PER_BLOCK + threadIdx.x; // Load the final sum. double sum = 0.0; if( path < num_paths ) sum = cashflows[path]; // Compute the sum over the block. sum = BlockReduce(smem_storage).Sum(sum); // The block leader writes the sum to GMEM. if( threadIdx.x == 0 ) sums[blockIdx.x] = sum; } // ==================================================================================================================== template< int NUM_THREADS_PER_BLOCK > __global__ __launch_bounds__(NUM_THREADS_PER_BLOCK) void compute_final_sum_kernel(int num_paths, int num_blocks, double exp_min_r_dt, double *__restrict sums) { typedef cub::BlockReduce<double, NUM_THREADS_PER_BLOCK> BlockReduce; // Shared memory to compute the final sum. __shared__ typename BlockReduce::TempStorage smem_storage; // The sum. double sum = 0.0; for( int item = threadIdx.x ; item < num_blocks ; item += NUM_THREADS_PER_BLOCK ) sum += sums[item]; // Compute the sum over the block. sum = BlockReduce(smem_storage).Sum(sum); // The block leader writes the sum to GMEM. if( threadIdx.x == 0 ) { sums[0] = exp_min_r_dt * sum / (double) num_paths; } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Payoff > static inline void do_run(cudaStream_t stream, curandGenerator_t rng, int num_timesteps, int num_paths, const Payoff &payoff, double dt, double S0, double r, double sigma, double *d_samples, double *d_paths, double *d_cashflows, double *d_svds, int *d_all_out_of_the_money, double *d_temp_storage, double *h_price) { // Generate random samples. 
CHECK_CURAND(curandGenerateNormalDouble(rng, d_samples, num_timesteps*num_paths, 0.0, 1.0)); // Generate asset prices. const int NUM_THREADS_PER_BLOCK0 = 256; int grid_dim = (num_paths + NUM_THREADS_PER_BLOCK0-1) / NUM_THREADS_PER_BLOCK0; generate_paths_kernel<NUM_THREADS_PER_BLOCK0><<<grid_dim, NUM_THREADS_PER_BLOCK0, 0, stream>>>( num_timesteps, num_paths, payoff, dt, S0, r, sigma, d_samples, d_paths); CHECK_CUDA(cudaGetLastError()); // Reset the all_out_of_the_money array. CHECK_CUDA(cudaMemsetAsync(d_all_out_of_the_money, 0, num_timesteps*sizeof(int), stream)); // Prepare the SVDs. const int NUM_THREADS_PER_BLOCK1 = 256; CHECK_CUDA(cudaFuncSetSharedMemConfig(prepare_svd_kernel<NUM_THREADS_PER_BLOCK1, Payoff>, cudaSharedMemBankSizeEightByte)); prepare_svd_kernel<NUM_THREADS_PER_BLOCK1><<<num_timesteps-1, NUM_THREADS_PER_BLOCK1, 0, stream>>>( num_paths, 4, //1024, payoff, d_paths, d_all_out_of_the_money, d_svds); CHECK_CUDA(cudaGetLastError()); // The constant to discount the payoffs. const double exp_min_r_dt = std::exp(-r*dt); // Estimate the number of blocks in a wave of update_cashflow. cudaDeviceProp properties; int device = 0; CHECK_CUDA(cudaGetDevice(&device)); CHECK_CUDA(cudaGetDeviceProperties(&properties, device)); // The number of SMs. const int num_sms = properties.multiProcessorCount; // Number of threads per wave at fully occupancy. const int num_threads_per_wave_full_occupancy = properties.maxThreadsPerMultiProcessor*num_sms; // Enable 8B mode for SMEM. const int NUM_THREADS_PER_BLOCK2 = 128; CHECK_CUDA(cudaFuncSetSharedMemConfig(compute_partial_beta_kernel<NUM_THREADS_PER_BLOCK2, Payoff>, cudaSharedMemBankSizeEightByte)); CHECK_CUDA(cudaFuncSetSharedMemConfig(compute_final_beta_kernel<NUM_THREADS_PER_BLOCK2>, cudaSharedMemBankSizeEightByte)); // Update the cashflows. grid_dim = (num_paths + NUM_THREADS_PER_BLOCK2-1) / NUM_THREADS_PER_BLOCK2; double num_waves = grid_dim*NUM_THREADS_PER_BLOCK2 / (double) num_threads_per_wave_full_occupancy; int update_cashflow_grid = grid_dim; if( num_waves < 10 && num_waves - (int) num_waves < 0.6 ) update_cashflow_grid = std::max(1, (int) num_waves) * num_threads_per_wave_full_occupancy / NUM_THREADS_PER_BLOCK2; // Run the main loop. #ifdef WITH_CDP CHECK_CUDA(cudaDeviceSetLimit(cudaLimitDevRuntimePendingLaunchCount, 512)); CHECK_CUDA(cudaDeviceSetLimit(cudaLimitDevRuntimeSyncDepth, 1)); cdp_timestep_loop_kernel<NUM_THREADS_PER_BLOCK2><<<1, 1, 0, stream>>>(num_timesteps, num_paths, update_cashflow_grid, payoff, exp_min_r_dt, d_svds, d_paths, d_cashflows, d_all_out_of_the_money, d_temp_storage); CHECK_CUDA(cudaGetLastError()); #else for( int timestep = num_timesteps-2 ; timestep >= 0 ; --timestep ) { #ifdef WITH_ATOMIC_BETA // Reset the buffer to store the results. CHECK_CUDA(cudaMemsetAsync(d_temp_storage, 0, 3*sizeof(double))); #endif // Compute beta (two kernels) for that timestep. 
compute_partial_beta_kernel<NUM_THREADS_PER_BLOCK2><<<NUM_THREADS_PER_BLOCK2, NUM_THREADS_PER_BLOCK2, 0, stream>>>( num_paths, payoff, d_svds + 16*timestep, d_paths + timestep*num_paths, d_cashflows, d_all_out_of_the_money + timestep, d_temp_storage); CHECK_CUDA(cudaGetLastError()); #if defined(WITH_FUSED_BETA) || defined(WITH_ATOMIC_BETA) #else compute_final_beta_kernel<NUM_THREADS_PER_BLOCK2><<<1, NUM_THREADS_PER_BLOCK2, 0, stream>>>( d_all_out_of_the_money + timestep, d_temp_storage); CHECK_CUDA(cudaGetLastError()); #endif update_cashflow_kernel<NUM_THREADS_PER_BLOCK2><<<update_cashflow_grid, NUM_THREADS_PER_BLOCK2, 0, stream>>>( num_paths, payoff, exp_min_r_dt, d_temp_storage, d_paths + timestep*num_paths, d_all_out_of_the_money + timestep, d_cashflows); CHECK_CUDA(cudaGetLastError()); } #endif // WITH_CDP // Compute the final sum. const int NUM_THREADS_PER_BLOCK4 = 128; grid_dim = (num_paths + NUM_THREADS_PER_BLOCK4-1) / NUM_THREADS_PER_BLOCK4; CHECK_CUDA(cudaFuncSetSharedMemConfig(compute_partial_sums_kernel<NUM_THREADS_PER_BLOCK4>, cudaSharedMemBankSizeEightByte)); compute_partial_sums_kernel<NUM_THREADS_PER_BLOCK4><<<grid_dim, NUM_THREADS_PER_BLOCK4, 0, stream>>>( num_paths, d_cashflows, d_temp_storage); CHECK_CUDA(cudaGetLastError()); CHECK_CUDA(cudaFuncSetSharedMemConfig(compute_final_sum_kernel<NUM_THREADS_PER_BLOCK4>, cudaSharedMemBankSizeEightByte)); compute_final_sum_kernel<NUM_THREADS_PER_BLOCK4><<<1, NUM_THREADS_PER_BLOCK4, 0, stream>>>( num_paths, grid_dim, exp_min_r_dt, d_temp_storage); CHECK_CUDA(cudaGetLastError()); // Copy the result to the host. CHECK_CUDA(cudaMemcpyAsync(h_price, d_temp_storage, sizeof(double), cudaMemcpyDeviceToHost, stream)); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Payoff > static double binomial_tree(int num_timesteps, const Payoff &payoff, double dt, double S0, double r, double sigma) { double *tree = new double[num_timesteps+1]; double u = std::exp( sigma * std::sqrt(dt)); double d = std::exp(-sigma * std::sqrt(dt)); double a = std::exp( r * dt); double p = (a - d) / (u - d); double k = std::pow(d, num_timesteps); for( int t = 0 ; t <= num_timesteps ; ++t ) { tree[t] = payoff(S0*k); k *= u*u; } for( int t = num_timesteps-1 ; t >= 0 ; --t ) { k = std::pow(d, t); for( int i = 0 ; i <= t ; ++i ) { double expected = std::exp(-r*dt) * (p*tree[i+1] + (1.0 - p)*tree[i]); double earlyex = payoff(S0*k); tree[i] = std::max(earlyex, expected); k *= u*u; } } double f = tree[0]; delete[] tree; return f; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// static double black_scholes_merton_put(double T, double K, double S0, double r, double sigma) { double d1 = (std::log(S0 / K) + (r + 0.5*sigma*sigma)*T) / (sigma*std::sqrt(T)); double d2 = d1 - sigma*std::sqrt(T); return K*std::exp(-r*T)*normcdf(-d2) - S0*normcdf(-d1); } static double black_scholes_merton_call(double T, double K, double S0, double r, double sigma) { double d1 = (std::log(S0 / K) + (r + 0.5*sigma*sigma)*T) / (sigma*std::sqrt(T)); double d2 = d1 - sigma*std::sqrt(T); return S0*normcdf(d1) - K*std::exp(-r*T)*normcdf(d2); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #ifdef WITH_CPU_REFERENCE extern "C" void dgesvd_(char*, // JOBU char*, // JOBV long*, // M long*, // N double*, // A long*, // LDA double*, // S double*, // U long*, // LDU double*, // VT 
long*, // LDVT double*, // WORK long*, // LWORK long*); // INFO // ==================================================================================================================== static void dump_to_file(const char *name, int timestep, const double *data, int count) { char buffer[256]; sprintf(buffer, "%s-%d.bin", name, timestep); FILE *file = fopen(buffer, "wb"); if( !file ) { fprintf(stderr, "Error cannot open file %s\n", buffer); exit(1); } printf("> Debug info : Writing %s to binary file %s\n", name, buffer); if( count != fwrite(data, sizeof(double), count, file) ) { fprintf(stderr, "Error when dumping the binary values to %s\n", buffer); exit(1); } fclose(file); } // ==================================================================================================================== template< typename Payoff > static double longstaff_schwartz_cpu(int num_timesteps, int num_paths, const Payoff &payoff, double dt, double S0, double r, double sigma, bool with_debug_info) { // The random samples. double *h_samples = new double[num_timesteps*num_paths]; curandGenerator_t rng; CHECK_CURAND(curandCreateGeneratorHost(&rng, CURAND_RNG_PSEUDO_MRG32K3A)); CHECK_CURAND(curandSetPseudoRandomGeneratorSeed(rng, 12354ull)); CHECK_CURAND(curandGenerateNormalDouble(rng, h_samples, num_timesteps*num_paths, 0.0, 1.0)); CHECK_CURAND(curandDestroyGenerator(rng)); // The paths. double *h_paths = new double[num_timesteps*num_paths]; const double r_min_half_sigma_sq_dt = (r - 0.5*sigma*sigma)*dt; const double sigma_sqrt_dt = sigma*sqrt(dt); // Generate the paths. for( int timestep = 0 ; timestep < num_timesteps ; ++timestep ) { for( int i = 0 ; i < num_paths ; ++i ) { double S = timestep == 0 ? S0 : h_paths[(timestep-1)*num_paths + i]; S = S * exp(r_min_half_sigma_sq_dt + sigma_sqrt_dt*h_samples[timestep*num_paths + i]); h_paths[timestep*num_paths + i] = timestep < num_timesteps-1 ? S : payoff(S); } } // The cashflows (last column of paths). double *h_cashflows = &h_paths[(num_timesteps-1)*num_paths]; // The constant to discount the payoffs. const double exp_min_r_dt = std::exp(-r*dt); // The matrix [1 x x^2]. double *h_matrix = new double[3*num_paths]; // The singular values. double *h_S = new double[3]; // The matrix U of the SVD. double *h_U = new double[3*num_paths]; // The matrix V^T of the SVD. double *h_V = new double[3*3]; // The workspace. double *h_work = new double[num_paths + 3*3]; // Run the main loop. for( int timestep = num_timesteps-2 ; timestep >= 0 ; --timestep ) { long m = 0; // Prepare the matrix [1 x x^2]. for( int i = 0 ; i < num_paths ; ++i ) { double S = h_paths[timestep*num_paths + i]; if( !payoff.is_in_the_money(S) ) continue; h_matrix[0*num_paths + m] = 1.0; h_matrix[1*num_paths + m] = S; h_matrix[2*num_paths + m] = S*S; m++; } if( with_debug_info ) dump_to_file("paths", timestep, &h_matrix[num_paths], m); // Compute the SVD of the matrix. char JOBU = 'S', JOBVT = 'S'; long ldm = num_paths; long N = 3; long LWORK = num_paths + 3*3; long info = 0; dgesvd_(&JOBU, &JOBVT, &m, &N, h_matrix, &ldm, h_S, h_U, &ldm, h_V, &N, h_work, &LWORK, &info); if( info ) { fprintf(stderr, "LAPACK error at line %d: %d\n", __LINE__, info); exit(1); } if( with_debug_info ) printf("> Debug info : Timestep=%3d, svd0=%.8lf svd1=%.8lf svd2=%.8lf\n", timestep, h_S[0], h_S[1], h_S[2]); // Build the pseudo-inverse: V*S^-1*U^T. double inv_S0 = abs(h_S[0]) < 1.0e-12 ? 0.0 : 1.0 / h_S[0]; double inv_S1 = abs(h_S[1]) < 1.0e-12 ? 0.0 : 1.0 / h_S[1]; double inv_S2 = abs(h_S[2]) < 1.0e-12 ? 
0.0 : 1.0 / h_S[2]; // V = V^T*S^-1. h_V[0] *= inv_S0; h_V[1] *= inv_S1; h_V[2] *= inv_S2; h_V[3] *= inv_S0; h_V[4] *= inv_S1; h_V[5] *= inv_S2; h_V[6] *= inv_S0; h_V[7] *= inv_S1; h_V[8] *= inv_S2; // U = V*U^T. for( int i = 0 ; i < m ; ++i ) { double a = h_U[0*num_paths + i]; double b = h_U[1*num_paths + i]; double c = h_U[2*num_paths + i]; h_U[0*num_paths + i] = a*h_V[0] + b*h_V[1] + c*h_V[2]; h_U[1*num_paths + i] = a*h_V[3] + b*h_V[4] + c*h_V[5]; h_U[2*num_paths + i] = a*h_V[6] + b*h_V[7] + c*h_V[8]; } // Compute beta. double beta0 = 0.0, beta1 = 0.0, beta2 = 0.0; for( int i = 0, k = 0 ; i < num_paths ; ++i ) { double S = h_paths[timestep*num_paths + i]; if( !payoff.is_in_the_money(S) ) continue; double cashflow = h_cashflows[i]; beta0 += h_U[0*num_paths + k]*cashflow; beta1 += h_U[1*num_paths + k]*cashflow; beta2 += h_U[2*num_paths + k]*cashflow; k++; } if( with_debug_info ) { double *h_tmp_cashflows = new double[m]; for( int i = 0, k = 0 ; i < num_paths ; ++i ) { double S = h_paths[timestep*num_paths + i]; if( !payoff.is_in_the_money(S) ) continue; h_tmp_cashflows[k++] = h_cashflows[i]; } dump_to_file("cashflows", timestep, h_tmp_cashflows, m); delete[] h_tmp_cashflows; } if( with_debug_info ) printf("> Debug info : Timestep=%3d, beta0=%.8lf beta1=%.8lf beta2=%.8lf\n", timestep, beta0, beta1, beta2); // Update the cashflow. for( int i = 0 ; i < num_paths ; ++i ) { double S = h_paths[timestep*num_paths + i]; double p = payoff(S); double estimated_payoff = exp_min_r_dt*(beta0 + beta1*S + beta2*S*S); if( p <= 1.0e-8 || p <= estimated_payoff ) p = exp_min_r_dt*h_cashflows[i]; h_cashflows[i] = p; } } // Compute the final sum. double sum = 0.0; for( int i = 0 ; i < num_paths ; ++i ) sum += h_cashflows[i]; delete[] h_V; delete[] h_U; delete[] h_S; delete[] h_matrix; delete[] h_paths; delete[] h_samples; return exp_min_r_dt*sum / (double) num_paths; } // ==================================================================================================================== static double longstaff_schwartz_cpu(int num_timesteps, int num_paths, bool price_put, double K, double dt, double S0, double r, double sigma, bool with_debug_info) { if( price_put ) return longstaff_schwartz_cpu(num_timesteps, num_paths, PayoffPut(K), dt, S0, r, sigma, with_debug_info); else return longstaff_schwartz_cpu(num_timesteps, num_paths, PayoffCall(K), dt, S0, r, sigma, with_debug_info); } #endif /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { const int MAX_GRID_SIZE = 2048; // Simulation parameters. int num_timesteps = 100; int num_paths = 32; int num_runs = 1; // Option parameters. double T = 1.00; double K = 4.00; double S0 = 3.60; double r = 0.06; double sigma = 0.20; // Bool do we price a put or a call. bool price_put = true; // Do we want debug info. #ifdef WITH_CPU_REFERENCE bool with_debug_info = false; #endif // Read command-line options. 
for( int i = 1 ; i < argc ; ++i ) { if( !strcmp(argv[i], "-timesteps") ) num_timesteps = strtol(argv[++i], NULL, 10); else if( !strcmp(argv[i], "-paths") ) num_paths = strtol(argv[++i], NULL, 10); else if( !strcmp(argv[i], "-runs") ) num_runs = strtol(argv[++i], NULL, 10); else if( !strcmp(argv[i], "-T") ) T = strtod(argv[++i], NULL); else if( !strcmp(argv[i], "-S0") ) S0 = strtod(argv[++i], NULL); else if( !strcmp(argv[i], "-K") ) K = strtod(argv[++i], NULL); else if( !strcmp(argv[i], "-r") ) r = strtod(argv[++i], NULL); else if( !strcmp(argv[i], "-sigma") ) sigma = strtod(argv[++i], NULL); else if( !strcmp(argv[i], "-call") ) price_put = false; #ifdef WITH_CPU_REFERENCE else if( !strcmp(argv[i], "-debug-info") ) with_debug_info = true; #endif else { fprintf(stderr, "Unknown option %s. Aborting!!!\n", argv[i]); exit(1); } } // Print the arguments. printf("==============\n"); printf("Num Timesteps : %d\n", num_timesteps); printf("Num Paths : %dK\n", num_paths); printf("Num Runs : %d\n", num_runs); printf("T : %lf\n", T); printf("S0 : %lf\n", S0); printf("K : %lf\n", K); printf("r : %lf\n", r); printf("sigma : %lf\n", sigma); printf("Option Type : American %s\n", price_put ? "Put" : "Call"); // We want x1024 paths. num_paths *= 1024; // A timestep. double dt = T / num_timesteps; // Create a stream to issue asynchronous results (and create the CUDA context). cudaStream_t stream; CHECK_CUDA(cudaStreamCreate(&stream)); // Memory on the GPU to store normally distributed random numbers. double *d_samples = NULL; CHECK_CUDA(cudaMalloc((void**) &d_samples, num_timesteps*num_paths*sizeof(double))); // Memory on the GPU to store the asset price along the paths. The last column contains the discounted payoffs. double *d_paths = NULL; CHECK_CUDA(cudaMalloc((void**) &d_paths, num_timesteps*num_paths*sizeof(double))); // The discounted payoffs are the last column. double *d_cashflows = d_paths + (num_timesteps-1)*num_paths; // Storage to keep intermediate SVD matrices. double *d_svds = NULL; CHECK_CUDA(cudaMalloc((void**) &d_svds, 16*num_timesteps*sizeof(double))); // Memory on the GPU to flag timesteps where no path is in the money. int *d_all_out_of_the_money = NULL; CHECK_CUDA(cudaMalloc((void**) &d_all_out_of_the_money, num_timesteps*sizeof(int))); // Memory on the GPU to compute the reductions (beta and the option price). int max_temp_storage = 4*MAX_GRID_SIZE; double *d_temp_storage = NULL; CHECK_CUDA(cudaMalloc((void**) &d_temp_storage, max_temp_storage*sizeof(double))); // The price on the host. double *h_price = NULL; CHECK_CUDA(cudaHostAlloc((void**) &h_price, sizeof(double), cudaHostAllocDefault)); // Create the random-number generator and set the seed. curandGenerator_t rng; CHECK_CURAND(curandCreateGenerator(&rng, CURAND_RNG_PSEUDO_MRG32K3A)); CHECK_CURAND(curandSetStream(rng, stream)); CHECK_CURAND(curandSetPseudoRandomGeneratorSeed(rng, 12354ull)); // Create CUDA events to time the runs. 
cudaEvent_t start, stop;
    CHECK_CUDA(cudaEventCreate(&start));
    CHECK_CUDA(cudaEventCreate(&stop));

    CHECK_CUDA(cudaEventRecord(start, stream));
    for( int run = 0 ; run < num_runs ; ++run )
    {
        if( run > 0 )
            CHECK_CURAND(curandSetGeneratorOffset(rng, 0));
        if( price_put )
            do_run(stream, rng, num_timesteps, num_paths, PayoffPut(K), dt, S0, r, sigma,
                   d_samples, d_paths, d_cashflows, d_svds, d_all_out_of_the_money, d_temp_storage, h_price);
        else
            do_run(stream, rng, num_timesteps, num_paths, PayoffCall(K), dt, S0, r, sigma,
                   d_samples, d_paths, d_cashflows, d_svds, d_all_out_of_the_money, d_temp_storage, h_price);
    }
    CHECK_CUDA(cudaEventRecord(stop, stream));
    CHECK_CUDA(cudaEventSynchronize(stop));

    printf("==============\n");
    printf("GPU Longstaff-Schwartz: %.8lf\n", *h_price);

    double price = 0.0;
#ifdef WITH_CPU_REFERENCE
    price = longstaff_schwartz_cpu(num_timesteps, num_paths, price_put, K, dt, S0, r, sigma, with_debug_info);
    printf("CPU Longstaff-Schwartz: %.8lf\n", price);
#endif

    if( price_put )
        price = binomial_tree(num_timesteps, PayoffPut(K), dt, S0, r, sigma);
    else
        price = binomial_tree(num_timesteps, PayoffCall(K), dt, S0, r, sigma);
    printf("Binomial : %.8lf\n", price);

    if( price_put )
        price = black_scholes_merton_put(T, K, S0, r, sigma);
    else
        price = black_scholes_merton_call(T, K, S0, r, sigma);
    printf("European Price : %.8lf\n", price);
    printf("==============\n");

    float elapsed_time = 0.0f;
    CHECK_CUDA(cudaEventElapsedTime(&elapsed_time, start, stop));
    printf("Elapsed time : %.3fms\n", elapsed_time / num_runs);
    printf("==============\n");

    CHECK_CUDA(cudaEventDestroy(stop));
    CHECK_CUDA(cudaEventDestroy(start));

    // Release the GPU memory.
    CHECK_CUDA(cudaFreeHost(h_price));
    CHECK_CURAND(curandDestroyGenerator(rng));
    CHECK_CUDA(cudaFree(d_temp_storage));
    CHECK_CUDA(cudaFree(d_all_out_of_the_money));
    CHECK_CUDA(cudaFree(d_svds));
    CHECK_CUDA(cudaFree(d_paths));
    CHECK_CUDA(cudaFree(d_samples));
    CHECK_CUDA(cudaStreamDestroy(stream));

    // Reset the GPU (it's a good practice).
    CHECK_CUDA(cudaDeviceReset());

    return 0;
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
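// ----------------------------------------------------------------------------------------------------------------
// Hedged illustration (not part of the original source): the beta kernels above fit the continuation value
// E[cashflow | S] by least squares on the basis {1, S, S^2}, restricted to in-the-money paths, going through an
// SVD pseudo-inverse. The host-only sketch below solves the same 3x3 problem with plain normal equations, only to
// make the regression target explicit. The names solve3x3 and fit_continuation_beta are hypothetical and exist
// only in this sketch; Payoff::is_in_the_money mirrors the interface of PayoffPut/PayoffCall used above.
// ----------------------------------------------------------------------------------------------------------------
#include <algorithm>
#include <cmath>
#include <vector>

// Solve a 3x3 system A*beta = b by Gaussian elimination with partial pivoting (A and b are modified in place).
static void solve3x3(double A[3][3], double b[3], double beta[3])
{
    int idx[3] = {0, 1, 2};
    for( int k = 0 ; k < 3 ; ++k )
    {
        int p = k;
        for( int i = k+1 ; i < 3 ; ++i )
            if( std::fabs(A[idx[i]][k]) > std::fabs(A[idx[p]][k]) ) p = i;
        std::swap(idx[k], idx[p]);
        for( int i = k+1 ; i < 3 ; ++i )
        {
            const double f = A[idx[i]][k] / A[idx[k]][k];
            for( int j = k ; j < 3 ; ++j ) A[idx[i]][j] -= f*A[idx[k]][j];
            b[idx[i]] -= f*b[idx[k]];
        }
    }
    for( int k = 2 ; k >= 0 ; --k )
    {
        double s = b[idx[k]];
        for( int j = k+1 ; j < 3 ; ++j ) s -= A[idx[k]][j]*beta[j];
        beta[k] = s / A[idx[k]][k];
    }
}

// Fit cashflow ~ beta0 + beta1*S + beta2*S^2 over the in-the-money paths only (normal equations X^T X beta = X^T y).
template< typename Payoff >
static void fit_continuation_beta(const std::vector<double> &S, const std::vector<double> &cashflow,
                                  const Payoff &payoff, double beta[3])
{
    double A[3][3] = {{0.0}}, b[3] = {0.0};
    for( std::size_t i = 0 ; i < S.size() ; ++i )
    {
        if( !payoff.is_in_the_money(S[i]) ) continue;
        const double x[3] = { 1.0, S[i], S[i]*S[i] };
        for( int r = 0 ; r < 3 ; ++r )
        {
            for( int c = 0 ; c < 3 ; ++c ) A[r][c] += x[r]*x[c];   // accumulate X^T X
            b[r] += x[r]*cashflow[i];                              // accumulate X^T y
        }
    }
    solve3x3(A, b, beta);
}
// The kernels presumably prefer the SVD pseudo-inverse because X^T X can become ill-conditioned when the
// in-the-money prices cluster; this normal-equation form trades that robustness for brevity.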
#ifdef __cplusplus extern "C" { #endif #include <float.h> #include <stdio.h> #include "highway_lstm_kernel.h" #define BLOCK 256 // Define some error checking macros. #define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); } void cudaErrCheck_(cudaError_t stat, const char *file, int line) { if (stat != cudaSuccess) { fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line); } } #define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); } void cublasErrCheck_(cublasStatus_t stat, const char *file, int line) { if (stat != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line); } } // Device functions __forceinline__ __device__ float sigmoidf(float in) { return 1.f / (1.f + expf(-in)); } __forceinline__ __device__ float dsigmoidf(float in) { float s = sigmoidf(in); return s * (1.f - s); } __forceinline__ __device__ float tanh2f(float in) { float t = tanhf(in); return t*t; } __global__ void elementWise_bp(int hiddenSize, int miniBatch, int numCovered, // Inputs float *out_grad, float *h_out_grad, float *c_out_grad, float *c_in, float *c_out, float *h_out, float *gates_out, float *dropout_in, // Outputs float *c_in_grad, float *i_gates_grad, float *h_gates_grad, int training) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numCovered * hiddenSize) return; int batch = index / hiddenSize; int h_gateIndex = (index % hiddenSize) + 5 * batch * hiddenSize; int i_gateIndex = (index % hiddenSize) + 6 * batch * hiddenSize; float d_h = out_grad[index] + h_out_grad[index]; d_h = d_h * dropout_in[index]; float in_gate = gates_out[i_gateIndex]; float forget_gate = gates_out[i_gateIndex + 1 * hiddenSize]; float act_gate = gates_out[i_gateIndex + 2 * hiddenSize]; float out_gate = gates_out[i_gateIndex + 3 * hiddenSize]; float r_gate = gates_out[i_gateIndex + 4 * hiddenSize]; float lin_gate = gates_out[i_gateIndex + 5 * hiddenSize]; float d_out = d_h * r_gate; float d_c = d_out * out_gate * (1.f - tanh2f(c_out[index])) + c_out_grad[index]; float h_prime = out_gate * tanhf(c_out[index]); float d_in_gate = d_c * act_gate * in_gate * (1.f - in_gate); float d_forget_gate = d_c * c_in[index] * forget_gate * (1.f - forget_gate); float d_act_gate = d_c * in_gate * (1.f - act_gate * act_gate); float d_out_gate = d_out * tanhf(c_out[index]) * out_gate * (1.f - out_gate); float d_r_gate = d_h * (h_prime - lin_gate) * r_gate * (1.f - r_gate); float d_lin_gate = d_h * (1 - r_gate); i_gates_grad[i_gateIndex] = d_in_gate; i_gates_grad[i_gateIndex + 1 * hiddenSize] = d_forget_gate; i_gates_grad[i_gateIndex + 2 * hiddenSize] = d_act_gate; i_gates_grad[i_gateIndex + 3 * hiddenSize] = d_out_gate; i_gates_grad[i_gateIndex + 4 * hiddenSize] = d_r_gate; i_gates_grad[i_gateIndex + 5 * hiddenSize] = d_lin_gate; h_gates_grad[h_gateIndex] = d_in_gate; h_gates_grad[h_gateIndex + 1 * hiddenSize] = d_forget_gate; h_gates_grad[h_gateIndex + 2 * hiddenSize] = d_act_gate; h_gates_grad[h_gateIndex + 3 * hiddenSize] = d_out_gate; h_gates_grad[h_gateIndex + 4 * hiddenSize] = d_r_gate; c_in_grad[index] = forget_gate * d_c; } // Fused forward kernel __global__ void elementWise_fp(int hiddenSize, int miniBatch, int numCovered, float *tmp_h, float *tmp_i, float *bias, float *linearGates, float *h_out, float *dropout_in, float *c_in, float *c_out, int training) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= numCovered * hiddenSize) return; int batch = index / hiddenSize; int h_gateIndex = (index % hiddenSize) + 5 * batch * 
hiddenSize; int i_gateIndex = (index % hiddenSize) + 6 * batch * hiddenSize; float g[6]; for (int i = 0; i < 5; i++) { g[i] = tmp_i[i * hiddenSize + i_gateIndex] + tmp_h[i * hiddenSize + h_gateIndex]; g[i] += bias[i * hiddenSize + index % hiddenSize]; } // extra for highway g[5] = tmp_i[5 * hiddenSize + i_gateIndex]; float in_gate = sigmoidf(g[0]); float forget_gate = sigmoidf(g[1]); float act_gate = tanhf(g[2]); float out_gate = sigmoidf(g[3]); float r_gate = sigmoidf(g[4]); float lin_gate = g[5]; if (training == 1) { linearGates[i_gateIndex] = in_gate; linearGates[i_gateIndex + 1 * hiddenSize] = forget_gate; linearGates[i_gateIndex + 2 * hiddenSize] = act_gate; linearGates[i_gateIndex + 3 * hiddenSize] = out_gate; linearGates[i_gateIndex + 4 * hiddenSize] = r_gate; linearGates[i_gateIndex + 5 * hiddenSize] = lin_gate; } float val = (forget_gate * c_in[index]) + (in_gate * act_gate); c_out[index] = val; val = out_gate * tanhf(val); val = val * r_gate + (1. - r_gate) * lin_gate; val = val * dropout_in[index]; h_out[index] = val; } void highway_lstm_backward_ongpu(int inputSize, int hiddenSize, int miniBatch, int numLayers, int seqLength, float *out_grad, int *lengths, float *h_data_grad, float * c_data_grad, float *x, float *h_data, float *c_data, float *T, float *gates_out, float *dropout_in, float *h_gates_grad, float *i_gates_grad, float *h_out_grad, float *x_grad, float *T_grad, float *bias_grad, int isTraining, int do_weight_grad, cudaStream_t stream, cublasHandle_t handle) { const int numElements = hiddenSize * miniBatch; cudaStream_t stream_i; cudaStream_t stream_h; cudaStream_t stream_wi; cudaStream_t stream_wh; cudaStream_t stream_wb; cudaErrCheck(cudaStreamCreate(&stream_i)); cudaErrCheck(cudaStreamCreate(&stream_h)); cudaErrCheck(cudaStreamCreate(&stream_wi)); cudaErrCheck(cudaStreamCreate(&stream_wh)); cudaErrCheck(cudaStreamCreate(&stream_wb)); float one = 1.f; float zero = 0.f; float *ones_host = new float[miniBatch]; for (int i=0; i < miniBatch; i++) { ones_host[i] = 1.f; } float *ones; cudaErrCheck(cudaMalloc((void**)&ones, miniBatch * sizeof(float))); cudaErrCheck(cudaMemcpy(ones, ones_host, miniBatch * sizeof(float), cudaMemcpyHostToDevice)); for (int layer = numLayers-1; layer >= 0; layer--) { int direction; int startInd; int currNumCovered; if (layer % 2 == 0) { // forward direction direction = -1; startInd = seqLength-1; currNumCovered = 0; } else { // backward direction direction = 1; startInd = 0; currNumCovered = miniBatch; } for (int t = startInd; t < seqLength && t >= 0; t = t + direction) { int prevIndex; int prevGradIndex; if (direction == 1) { while (lengths[currNumCovered-1] <= t) { currNumCovered--; } prevGradIndex = t; prevIndex = (t+2)%(seqLength+1); } else { while ((currNumCovered < miniBatch) && (lengths[currNumCovered] > t)) { currNumCovered++; } prevGradIndex = (t+2)%(seqLength+1); prevIndex = t; } float * gradPtr; if (layer == numLayers-1) { gradPtr = out_grad + t * numElements; } else { gradPtr = h_out_grad + t * numElements + layer * seqLength * numElements; } cublasErrCheck(cublasSetStream(handle, stream_i)); dim3 blockDim; dim3 gridDim; blockDim.x = BLOCK; gridDim.x = ((currNumCovered * hiddenSize) + blockDim.x - 1) / blockDim.x; elementWise_bp <<< gridDim, blockDim , 0, stream>>> (hiddenSize, miniBatch, currNumCovered, gradPtr, h_data_grad + prevGradIndex * numElements + layer * (seqLength + 1) * numElements, c_data_grad + prevGradIndex * numElements + layer * (seqLength + 1) * numElements, c_data + prevIndex * numElements + layer * (seqLength + 
1) * numElements, c_data + (t+1) * numElements + layer * (seqLength + 1) * numElements, h_data + (t+1) * numElements + layer * (seqLength + 1) * numElements, gates_out + t * 6 * numElements + layer * seqLength * 6 * numElements, dropout_in + layer * numElements, c_data_grad + (t+1) * numElements + layer * (seqLength + 1) * numElements, i_gates_grad, h_gates_grad, isTraining); cudaErrCheck(cudaGetLastError()); // END cudaErrCheck(cudaDeviceSynchronize()); float *out_grad_ptr; int weightStart; int inSize; if (layer == 0) { inSize = inputSize; out_grad_ptr = x_grad + t * inputSize * miniBatch; weightStart = 0; } else { inSize = hiddenSize; out_grad_ptr = h_out_grad + t * numElements + (layer-1) * seqLength * numElements; weightStart = 6 * hiddenSize * inputSize + 5 * hiddenSize * hiddenSize + (layer - 1) * 11 * hiddenSize * hiddenSize; } cublasErrCheck(cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, inSize, currNumCovered, 6*hiddenSize, &one, &T[weightStart], 6 * hiddenSize, i_gates_grad, 6 * hiddenSize, &zero, out_grad_ptr, inSize)); cublasErrCheck(cublasSetStream(handle, stream_h)); cublasErrCheck(cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, hiddenSize, currNumCovered, 5*hiddenSize, &one, &T[weightStart + 6*hiddenSize*inSize], 5 * hiddenSize, h_gates_grad, 5 * hiddenSize, &zero, h_data_grad + (t+1) * numElements + layer * (seqLength+1) * numElements, hiddenSize)); if (do_weight_grad == 1) { float *inputPtr; if (layer == 0) { inputPtr = x + t * inputSize * miniBatch; } else { inputPtr = h_data + (t+1) * numElements + (layer - 1) * (seqLength+1) * numElements; } cublasErrCheck(cublasSetStream(handle, stream_wi)); // Update i_weights cublasErrCheck(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, 6 * hiddenSize, inSize, currNumCovered, &one, i_gates_grad, 6 * hiddenSize, inputPtr, inSize, &one, &T_grad[weightStart], 6 * hiddenSize)); cublasErrCheck(cublasSetStream(handle, stream_wh)); // Update h_weights cublasErrCheck(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, 5 * hiddenSize, hiddenSize, currNumCovered, &one, h_gates_grad, 5 * hiddenSize, h_data + prevIndex * numElements + layer * (seqLength+1) * numElements, hiddenSize, &one, &T_grad[weightStart + 6 *hiddenSize*inSize], 5 * hiddenSize)); cublasErrCheck(cublasSetStream(handle, stream_wb)); // Update bias_weights cublasErrCheck(cublasSgemv(handle, CUBLAS_OP_N, 5 * hiddenSize, currNumCovered, &one, h_gates_grad, 5 * hiddenSize, ones, 1, &one, &bias_grad[layer * 5 * hiddenSize], 1)); } cudaErrCheck(cudaDeviceSynchronize()); } } cublasErrCheck(cublasSetStream(handle, stream)); cudaErrCheck(cudaStreamDestroy(stream_i)); cudaErrCheck(cudaStreamDestroy(stream_h)); cudaErrCheck(cudaStreamDestroy(stream_wi)); cudaErrCheck(cudaStreamDestroy(stream_wh)); cudaErrCheck(cudaStreamDestroy(stream_wb)); cudaErrCheck(cudaFree(ones)); delete [] ones_host; cudaErrCheck(cudaDeviceSynchronize()); } void highway_lstm_forward_ongpu(int inputSize, int hiddenSize, int miniBatch, int numLayers, int seqLength, float *x, int *lengths, float *h_data, float *c_data, float *tmp_i, float *tmp_h, float *T, float *bias, float *dropout, float *gates, int is_training, cudaStream_t stream, cublasHandle_t handle) { const int numElements = hiddenSize * miniBatch; float zero = 0.f; float one = 1.f; cudaStream_t stream_i; cudaStream_t stream_h; cudaErrCheck(cudaStreamCreate(&stream_i)); cudaErrCheck(cudaStreamCreate(&stream_h)); for (int layer = 0; layer < numLayers; layer++) { int direction; int startInd; int currNumCovered; if (layer % 2 == 0) { // forward direction direction = 1; 
startInd = 0; currNumCovered = miniBatch; } else { // backward direction direction = -1; startInd = seqLength-1; currNumCovered = 0; } cublasErrCheck(cublasSetStream(handle, stream)); for (int t = startInd; t < seqLength && t >= 0; t = t + direction) { int prevIndex; if (direction == 1) { while (lengths[currNumCovered-1] <= t) { currNumCovered--; } prevIndex = t; } else { while ((currNumCovered < miniBatch) && (lengths[currNumCovered] > t)) { currNumCovered++; } prevIndex = (t+2)%(seqLength+1); } int inSize; int weightStart; float *inputPtr; if (layer == 0) { inSize = inputSize; weightStart = 0; inputPtr = x + t * inputSize * miniBatch; prevIndex = t; } else { inSize = hiddenSize; weightStart = 6 * hiddenSize * inputSize + 5 * hiddenSize * hiddenSize + (layer - 1) * 11 * hiddenSize * hiddenSize; inputPtr = h_data + (t+1) * numElements + (layer - 1) * (seqLength+1) * numElements; } cublasErrCheck(cublasSetStream(handle, stream_i)); cublasErrCheck(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, 6*hiddenSize, currNumCovered, inSize, &one, &T[weightStart], 6 * hiddenSize, inputPtr, inSize, &zero, tmp_i, 6 * hiddenSize)); cublasErrCheck(cublasSetStream(handle, stream_h)); cublasErrCheck(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, 5*hiddenSize, currNumCovered, hiddenSize, &one, &T[6 * hiddenSize * inSize + weightStart], 5 * hiddenSize, h_data + prevIndex * numElements + layer * (seqLength + 1) * numElements, hiddenSize, &zero, tmp_h, 5 * hiddenSize)); cudaErrCheck(cudaDeviceSynchronize()); dim3 blockDim; dim3 gridDim; blockDim.x = BLOCK; gridDim.x = ((currNumCovered * hiddenSize) + blockDim.x - 1) / blockDim.x; elementWise_fp <<< gridDim, blockDim , 0, stream>>> (hiddenSize, miniBatch, currNumCovered, tmp_h, tmp_i, bias + 5 * layer * hiddenSize, is_training ? gates + 6 * (t * numElements + layer * seqLength * numElements) : NULL, h_data + (t + 1) * numElements + layer * (seqLength + 1) * numElements, dropout + layer * numElements, c_data + prevIndex * numElements + layer * (seqLength + 1) * numElements, c_data + (t + 1) * numElements + layer * (seqLength + 1) * numElements, is_training); cudaErrCheck(cudaGetLastError()); cudaErrCheck(cudaDeviceSynchronize()); } } cublasErrCheck(cublasSetStream(handle, stream)); cudaErrCheck(cudaStreamDestroy(stream_i)); cudaErrCheck(cudaStreamDestroy(stream_h)); cudaErrCheck(cudaDeviceSynchronize()); } #ifdef __cplusplus } #endif
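/* ------------------------------------------------------------------------------------------------------------------
 * Hedged illustration (not part of the original source): elementWise_fp above packs six input projections
 * (in, forget, act, out, highway/r, linear) and five recurrent projections per hidden unit. The plain C reference
 * below replays that per-unit math for a single hidden unit, keeping the same gate ordering; the function name
 * highway_lstm_cell_reference is hypothetical and exists only in this sketch.
 * ------------------------------------------------------------------------------------------------------------------ */
#include <math.h>

static float highway_lstm_cell_reference(const float tmp_i[6],  /* input projections, gate-major                 */
                                         const float tmp_h[5],  /* recurrent projections (no recurrent highway)  */
                                         const float bias[5],
                                         float c_in, float dropout, float *c_out)
{
    float g[6];
    for (int k = 0; k < 5; k++) g[k] = tmp_i[k] + tmp_h[k] + bias[k];
    g[5] = tmp_i[5];                                /* highway input: linear, no bias, no recurrence */

    float in_gate     = 1.f / (1.f + expf(-g[0]));
    float forget_gate = 1.f / (1.f + expf(-g[1]));
    float act_gate    = tanhf(g[2]);
    float out_gate    = 1.f / (1.f + expf(-g[3]));
    float r_gate      = 1.f / (1.f + expf(-g[4]));  /* highway mixing gate */

    *c_out = forget_gate * c_in + in_gate * act_gate;
    float h = out_gate * tanhf(*c_out);
    h = r_gate * h + (1.f - r_gate) * g[5];         /* highway connection around the LSTM output */
    return h * dropout;                             /* dropout_in holds the per-unit dropout mask */
}
/* In the fused kernel the six tmp_i values for a unit live at tmp_i[k*hiddenSize + i_gateIndex] and the five
 * tmp_h values at tmp_h[k*hiddenSize + h_gateIndex]; this reference takes them already gathered. */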
inline __device__ void operator+=(float4 &a, float4 b) { a.x += b.x; a.y += b.y; a.z += b.z; a.w += b.w; } inline __device__ float4 operator+(float4 a, float4 b) { return make_float4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w); } inline __device__ int4 operator+(int4 a, int4 b) { return make_int4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w); } inline __device__ float4 operator*(float4 a, float4 b) { return make_float4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); } inline __device__ float4 operator-(float4 a, float4 b) { return make_float4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w); } inline __device__ float4 operator*(float4 a, float b) { return make_float4(a.x * b, a.y * b, a.z * b, a.w * b); } inline __device__ float4 operator*(float b, float4 a) { return make_float4(b * a.x, b * a.y, b * a.z, b * a.w); } inline __device__ void operator*=(float4 &a, const float b) { a.x *= b; a.y *= b; a.z *= b; a.w *= b; } //////////////////////////////////////////////////////////////////////////////// // Euler integration //////////////////////////////////////////////////////////////////////////////// __global__ void integrateSystemK( float4* d_Pos, //input/output float4* d_Vel, //input/output const simParams_t params, const float deltaTime, const unsigned int numParticles) { const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; if(index >= numParticles) return; float4 pos = d_Pos[index]; float4 vel = d_Vel[index]; pos.w = 1.0f; vel.w = 0.0f; //Gravity float4 g = {params.gravity.x, params.gravity.y, params.gravity.z, 0}; vel += g * deltaTime; vel *= params.globalDamping; //Advance pos pos += vel * deltaTime; //printf("before %d %3.f %3.f %3.f\n", index, pos.x, pos.y, pos.z); //Collide with cube if(pos.x < -1.0f + params.particleRadius){ pos.x = -1.0f + params.particleRadius; vel.x *= params.boundaryDamping; } if(pos.x > 1.0f - params.particleRadius){ pos.x = 1.0f - params.particleRadius; vel.x *= params.boundaryDamping; } if(pos.y < -1.0f + params.particleRadius){ pos.y = -1.0f + params.particleRadius; vel.y *= params.boundaryDamping; } if(pos.y > 1.0f - params.particleRadius){ pos.y = 1.0f - params.particleRadius; vel.y *= params.boundaryDamping; } if(pos.z < -1.0f + params.particleRadius){ pos.z = -1.0f + params.particleRadius; vel.z *= params.boundaryDamping; } if(pos.z > 1.0f - params.particleRadius){ pos.z = 1.0f - params.particleRadius; vel.z *= params.boundaryDamping; } //Store new position and velocity d_Pos[index] = pos; d_Vel[index] = vel; //printf("after %d %3.f %3.f %3.f\n", index, pos.x, pos.y, pos.z); } //////////////////////////////////////////////////////////////////////////////// // Save particle grid cell hashes and indices //////////////////////////////////////////////////////////////////////////////// __device__ int4 getGridPos(const float4 p, const simParams_t &params) { int4 gridPos; gridPos.x = (int)floor((p.x - params.worldOrigin.x) / params.cellSize.x); gridPos.y = (int)floor((p.y - params.worldOrigin.y) / params.cellSize.y); gridPos.z = (int)floor((p.z - params.worldOrigin.z) / params.cellSize.z); gridPos.w = 0; return gridPos; } //Calculate address in grid from position (clamping to edges) __device__ unsigned int getGridHash(int4 gridPos, const simParams_t &params) { //Wrap addressing, assume power-of-two grid dimensions gridPos.x = gridPos.x & (params.gridSize.x - 1); gridPos.y = gridPos.y & (params.gridSize.y - 1); gridPos.z = gridPos.z & (params.gridSize.z - 1); return UMAD( UMAD(gridPos.z, params.gridSize.y, gridPos.y), params.gridSize.x, gridPos.x ); } 
//Calculate grid hash value for each particle __global__ void calcHashK( unsigned int* d_Hash, //output unsigned int* d_Index, //output const float4* d_Pos, //input: positions const simParams_t params, unsigned int numParticles) { const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; if(index >= numParticles) return; float4 p = d_Pos[index]; //Get address in grid int4 gridPos = getGridPos(p, params); unsigned int gridHash = getGridHash(gridPos, params); //Store grid hash and particle index d_Hash[index] = gridHash; d_Index[index] = index; } //////////////////////////////////////////////////////////////////////////////// // Find cell bounds and reorder positions+velocities by sorted indices //////////////////////////////////////////////////////////////////////////////// __global__ void memSetK( unsigned int* d_Data, const unsigned int val, const unsigned int N) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < N) d_Data[i] = val; } __global__ void findCellBoundsAndReorderK( unsigned int* d_CellStart, //output: cell start index unsigned int* d_CellEnd, //output: cell end index float4* d_ReorderedPos, //output: reordered by cell hash positions float4* d_ReorderedVel, //output: reordered by cell hash velocities const unsigned int* d_Hash, //input: sorted grid hashes const unsigned int* d_Index, //input: particle indices sorted by hash const float4* d_Pos, //input: positions array sorted by hash const float4* d_Vel, //input: velocity array sorted by hash const unsigned int numParticles) { unsigned int hash; const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int lid = threadIdx.x; extern __shared__ unsigned int localHash[]; //Handle case when no. of particles not multiple of block size if(index < numParticles){ hash = d_Hash[index]; //Load hash data into local memory so that we can look //at neighboring particle's hash value without loading //two hash values per thread localHash[lid + 1] = hash; //First thread in block must load neighbor particle hash if(index > 0 && lid == 0) localHash[0] = d_Hash[index - 1]; } __syncthreads(); if(index < numParticles){ //Border case if(index == 0) d_CellStart[hash] = 0; //Main case else{ if(hash != localHash[lid]) d_CellEnd[localHash[lid]] = d_CellStart[hash] = index; }; //Another border case if(index == numParticles - 1) d_CellEnd[hash] = numParticles; //Now use the sorted index to reorder the pos and vel arrays unsigned int sortedIndex = d_Index[index]; float4 pos = d_Pos[sortedIndex]; float4 vel = d_Vel[sortedIndex]; d_ReorderedPos[index] = pos; d_ReorderedVel[index] = vel; } } //////////////////////////////////////////////////////////////////////////////// // Process collisions (calculate accelerations) //////////////////////////////////////////////////////////////////////////////// __device__ float4 collideSpheres( float4 posA, float4 posB, float4 velA, float4 velB, float radiusA, float radiusB, float spring, float damping, float shear, float attraction) { //Calculate relative position float4 relPos = {posB.x - posA.x, posB.y - posA.y, posB.z - posA.z, 0}; float dist = sqrt(relPos.x * relPos.x + relPos.y * relPos.y + relPos.z * relPos.z); float collideDist = radiusA + radiusB; float4 force = {0, 0, 0, 0}; if(dist < collideDist){ float4 norm = {relPos.x / dist, relPos.y / dist, relPos.z / dist, 0}; //Relative velocity float4 relVel = {velB.x - velA.x, velB.y - velA.y, velB.z - velA.z, 0}; //Relative tangential velocity float relVelDotNorm = relVel.x * norm.x + relVel.y * norm.y + relVel.z * norm.z; 
float4 tanVel = {relVel.x - relVelDotNorm * norm.x, relVel.y - relVelDotNorm * norm.y, relVel.z - relVelDotNorm * norm.z, 0}; //Spring force (potential) float springFactor = -spring * (collideDist - dist); force = { springFactor * norm.x + damping * relVel.x + shear * tanVel.x + attraction * relPos.x, springFactor * norm.y + damping * relVel.y + shear * tanVel.y + attraction * relPos.y, springFactor * norm.z + damping * relVel.z + shear * tanVel.z + attraction * relPos.z, 0 }; } return force; } __global__ void collideK( float4* d_Vel, //output: new velocity const float4* d_ReorderedPos, //input: reordered positions const float4* d_ReorderedVel, //input: reordered velocities const unsigned int* d_Index, //input: reordered particle indices const unsigned int* d_CellStart, //input: cell boundaries const unsigned int* d_CellEnd, const simParams_t params, const unsigned int numParticles) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; if(index >= numParticles) return; float4 pos = d_ReorderedPos[index]; float4 vel = d_ReorderedVel[index]; float4 force = {0, 0, 0, 0}; //Get address in grid int4 gridPos = getGridPos(pos, params); //Accumulate surrounding cells for(int z = -1; z <= 1; z++) for(int y = -1; y <= 1; y++) for(int x = -1; x <= 1; x++){ //Get start particle index for this cell int4 t = {x, y, z, 0}; unsigned int hash = getGridHash(gridPos + t, params); unsigned int startI = d_CellStart[hash]; //Skip empty cell if(startI == 0xFFFFFFFFU) continue; //Iterate over particles in this cell unsigned int endI = d_CellEnd[hash]; for(unsigned int j = startI; j < endI; j++){ if(j == index) continue; float4 pos2 = d_ReorderedPos[j]; float4 vel2 = d_ReorderedVel[j]; //Collide two spheres force += collideSpheres( pos, pos2, vel, vel2, params.particleRadius, params.particleRadius, params.spring, params.damping, params.shear, params.attraction ); } } //Collide with cursor sphere force += collideSpheres( pos, {params.colliderPos.x, params.colliderPos.y, params.colliderPos.z, 0}, vel, {0, 0, 0, 0}, params.particleRadius, params.colliderRadius, params.spring, params.damping, params.shear, params.attraction ); //Write new velocity back to original unsorted location d_Vel[d_Index[index]] = vel + force; }
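// ----------------------------------------------------------------------------------------------------------------
// Hedged illustration (not part of the original source): the kernels above bin particles into a uniform grid and
// rely on power-of-two grid dimensions so that "gridPos & (gridSize - 1)" wraps out-of-range cells. The host-side
// sketch below mirrors getGridPos/getGridHash for one particle; grid_hash_reference and the hard-coded parameters
// in the usage comment are hypothetical and only show the cell/hash layout.
// ----------------------------------------------------------------------------------------------------------------
#include <cmath>

static unsigned int grid_hash_reference(float px, float py, float pz,
                                        float originX, float originY, float originZ,
                                        float cellX, float cellY, float cellZ,
                                        unsigned int gx, unsigned int gy, unsigned int gz) // gx,gy,gz: powers of two
{
    // Cell coordinates, as in getGridPos.
    int ix = (int)std::floor((px - originX) / cellX);
    int iy = (int)std::floor((py - originY) / cellY);
    int iz = (int)std::floor((pz - originZ) / cellZ);

    // Wrap addressing, as in getGridHash: valid only because the grid sizes are powers of two.
    unsigned int wx = (unsigned int)ix & (gx - 1);
    unsigned int wy = (unsigned int)iy & (gy - 1);
    unsigned int wz = (unsigned int)iz & (gz - 1);

    // Linearized cell index (z*gy + y)*gx + x, which is what the nested UMAD calls compute on the device.
    return (wz * gy + wy) * gx + wx;
}

// Example: a 64^3 grid spanning [-1,1]^3 (cell size 2/64) puts a particle at the origin into cell (32,32,32):
//     grid_hash_reference(0.f,0.f,0.f, -1.f,-1.f,-1.f, 2.f/64,2.f/64,2.f/64, 64,64,64);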
#include <amgx_types/util.h> #include <amgx_types/io.h> #include <amgx_types/math.h> namespace amgx { template <class T_Config> void MatrixAnalysis<T_Config>::valueDistribution(double *minAbs, double *maxAbs) { const int bx = A->get_block_dimx(); const int by = A->get_block_dimy(); const int nnz = A->get_num_nz(); const int bs = bx * by; const int bs_alloc = bs + 1; const double magicDb = 1.23456789e300; std::vector<double> mind(bs_alloc, magicDb), maxd(bs_alloc, 0); std::vector<int> minc(bs_alloc, 0), maxc(bs_alloc, 0), nullc(bs_alloc, 0); //constPODVector<mat_value_type, index_type> Av= A->values.const_pod(); if (TConfig::memSpace == AMGX_device) { FatalError("Device version not implemented", AMGX_ERR_NOT_IMPLEMENTED); } else if (TConfig::memSpace == AMGX_host) { fprintf(fout, "Value distribution for matrix: A %8dx%8d, nnz %8d, block %2dx%2d\n", A->get_num_rows(), A->get_num_cols(), nnz, by, bx); for (int b = 0; b < nnz; b++) { for (int c = 0; c < bs; c++) { ValueTypeA temp = A->values[b * bs + c]; PODTypeA atemp = types::util<ValueTypeA>::abs(temp); if (atemp == 0.0) { nullc[c]++; } else { if (atemp < mind[c]) { mind[c] = atemp; } if (atemp > maxd[c]) { maxd[c] = atemp; } if (atemp < minAbs[c]) { minc[c]++; /*fprintf(fout, "b %8d, c %2d: %10.3E\n", b, c, temp);*/ } if (atemp > maxAbs[c]) { maxc[c]++; /*fprintf(fout, "b %8d, c %2d: %10.3E\n", b, c, temp);*/ } } } } for (int c = 0; c < bs; c++) { fprintf(fout, "c %2d: min %10.3E (#%8d < %10.3E), max %10.3E (#%8d > %10.3E), (#%8d == 0)\n", c, mind[c], minc[c], minAbs[c], maxd[c], maxc[c], maxAbs[c], nullc[c]); if (mind[c] < mind[bs]) { mind[bs] = mind[c]; } if (maxd[c] > maxd[bs]) { maxd[bs] = maxd[c]; } minc[bs] += minc[c]; maxc[bs] += maxc[c]; nullc[bs] += nullc[c]; } fprintf(fout, "all : min %10.3E (#%8d < %10s), max %10.3E (#%8d > %10s), (#%8d == 0)\n", mind[bs], minc[bs], "threshold", maxd[bs], maxc[bs], "threshold", nullc[bs]); fprintf(fout, "\n"); fprintf(fout, "Ranges for block coefficients:\n"); for (int y = 0; y < by; y++) { for (int x = 0; x < bx; x++) { int c = y * bx + x; if (mind[c] == magicDb && maxd[c] == 0.0) { fprintf(fout, "[%22.0f] ", 0.0); } else { fprintf(fout, "[%10.3E, %10.3E] ", mind[c], maxd[c]); } } fprintf(fout, "\n"); } fprintf(fout, "\n"); } } template <class T_Config> void MatrixAnalysis<T_Config>::checkSymmetry(bool &structuralSymmetric, bool &symmetric, bool &verbose) { // initially assume symmetric structuralSymmetric = true; symmetric = true; if (TConfig::memSpace == AMGX_device) { FatalError("Device version not implemented", AMGX_ERR_NOT_IMPLEMENTED); } else if (TConfig::memSpace == AMGX_host) { // choose epsilon double eps; if (types::PODTypes<ValueTypeA>::vec_prec == AMGX_vecDouble) { eps = 1e-12; } else if (types::PODTypes<ValueTypeA>::vec_prec == AMGX_vecFloat) { eps = 1e-7; } else { eps = 1e-12; } const int bx = A->get_block_dimx(); const int by = A->get_block_dimy(); const int nnz = A->get_num_nz(); if (verbose) { fprintf(fout, "Checking Symmetry of Matrix: A %8dx%8d, nnz %8d, block %2dx%2d\n", A->get_num_rows(), A->get_num_cols(), nnz, bx, by); } for (int i = 0; i < A->get_num_rows(); i++) { for (int jj = A->row_offsets[i]; jj < A->row_offsets[i + 1]; jj++) { // check structure exists int j = A->col_indices[jj]; // ignore diagonal if (i == j) { continue; } // loop over row j, search for column i bool found_on_row = false; for (int kk = A->row_offsets[j]; kk < A->row_offsets[j + 1]; kk++) { int k = A->col_indices[kk]; if (k == i) { found_on_row = true; // check values // check all elements const 
int blocksize = bx * by; for (int m = 0; m < bx * by; m++) { if (types::util<ValueTypeA>::abs(A->values[jj * blocksize + m] - A->values[kk * blocksize + m]) > eps) { symmetric = false; } } break; } } // if we didn't find the element, non-symmetric if (!found_on_row) { structuralSymmetric = false; symmetric = false; } } // if non structurally symmetric, cannot be symmetric if (!structuralSymmetric) { if (verbose) { fprintf(fout, "A: non-symmetric, non-structurally symmetric\n"); } return; } } } // end host path // structurally symmetric, at this point - print symmetric or not if (symmetric) { if (verbose) { fprintf(fout, "A: non-symmetric, structurally symmetric\n"); } } else { if (verbose) { fprintf(fout, "A: symmetric, structurally symmetric\n"); } } return; } template <class T_Config> void MatrixAnalysis<T_Config>::checkDiagDominate() { const int num_rows = A->get_num_rows(); const int bx = A->get_block_dimx(); const int by = A->get_block_dimy(); const int nnz = A->get_num_nz(); const int bs = bx * by; //const int bs_alloc= bs+1; int k = 0; //typename T_Config::MatPrec *sum = new typename T_Config::MatPrec[bx]; std::vector<PODTypeA> sum(bx); //std::vector<int> rowp(nnz, 0); double eps; if (types::PODTypes<ValueTypeA>::vec_prec == AMGX_vecDouble) { eps = 1e-12; } else if (types::PODTypes<ValueTypeA>::vec_prec == AMGX_vecFloat) { eps = 1e-7; } else { eps = 1e-12; } if (TConfig::memSpace == AMGX_device) { FatalError("Device version not implemented", AMGX_ERR_NOT_IMPLEMENTED); } else if (TConfig::memSpace == AMGX_host) { fprintf(fout, "Check whether the sparse matrix is diagonal dominate on every row for matrix: A %8dx%8d, nnz %8d, block %2dx%2d\n", A->get_num_rows(), A->get_num_cols(), nnz, bx, by); //std::cout<<"Check whether the sparse matrix is diagonal on every row for matrix: A: "<<num_rows<<"x"<<num_rows<<", nnz: "<<nnz<<", block: "<<bx<<"x"<<by<<std::endl; //for (int i=0;i < num_rows;i++) // for (int j=A->row_offsets[i];j<A->row_offsets[i+1];j++) // for (int m=0; m<by; m++)rowp[bs*j+m] = i; for (int i = 0; i < num_rows; i++) { for (int m = 0; m < bx; m++) { sum[m] = 0.; } for (int j = A->row_offsets[i]; j < A->row_offsets[i + 1]; j++) { for (int m = 0; m < bx; m++) for (int n = 0; n < by; n++) if ((A->col_indices[j] == i) && (m == n)) {sum[m] += types::util<ValueTypeA>::abs(A->values[bs * j + m * by + n]);} else {sum[m] -= types::util<ValueTypeA>::abs(A->values[bs * j + m * by + n]);} } for (int m = 0; m < bx; m++) if (sum[m] < -eps) { ++k; } } fprintf(fout, "Percentage of the diagonal-dominant rows is %8f %% \n", 100.0 * (num_rows * bx - k) / num_rows); std::cout << "Percentage of the diagonal-dominant rows is " << 100.0 * (num_rows * bx - k) / num_rows << "%" << std::endl; } } template <class AMatrix, bool complex> struct Z_matrix_check; template <class AMatrix> struct Z_matrix_check<AMatrix, true> { static bool check(const AMatrix *A, FILE *fout) { FatalError("Z matrix is not defined for complex field", AMGX_ERR_NOT_IMPLEMENTED);} }; template <class AMatrix> struct Z_matrix_check<AMatrix, false> { static bool check(const AMatrix *A, FILE *fout) { bool isZmatrix = true; const int num_rows = A->get_num_rows(); const int bx = A->get_block_dimx(); const int by = A->get_block_dimy(); const int nnz = A->get_num_nz(); const int bs = bx * by; int *positive_diag = new int[bx]; int *zero_diag = new int[bx]; int *pos_off_diag = new int[bx]; int pos_sum = 0; for (int m = 0; m < bx; m++) { positive_diag[m] = 0; zero_diag[m] = 0; pos_off_diag[m] = 0; } const typename AMatrix::index_type 
*A_row_offsets_ptr = A->row_offsets.raw(); const typename AMatrix::index_type *A_column_indices_ptr = A->col_indices.raw(); const typename AMatrix::value_type *A_values_ptr = A->values.raw(); const typename AMatrix::index_type *A_dia_ptr = A->diag.raw(); double eps; if (types::PODTypes<typename AMatrix::value_type>::vec_prec == AMGX_vecDouble) { eps = 1e-12; } else if (types::PODTypes<typename AMatrix::value_type>::vec_prec == AMGX_vecFloat) { eps = 1e-7; } else { eps = 1e-12; } fprintf(fout, "Check whether the sparse matrix is Z-matrix for matrix: A %8dx%8d, nnz %8d, block %2dx%2d\n", A->get_num_rows(), A->get_num_cols(), nnz, bx, by); //std::cout<<"Check whether off-diagonal pelement of the sparse matrix is negtive for matrix: A: "<<num_rows<<"x"<<num_rows<<", nnz: "<<nnz<<", block: "<<bx<<"x"<<by<<std::endl; for (int i = 0; i < num_rows; i++) { for (int j = A->row_offsets[i]; j < A->row_offsets[i + 1]; j++) { int k = A->col_indices[j]; if (j == i) { for (int m = 0; m < bx; m++) { if (A->values[j * bs + m * by + m] > eps) {positive_diag[m] ++;} else if (A->values[j * bs + m * by + m] > -eps) {zero_diag[m]++;} for (int n = 0; n < by; n ++) if ((m != n) && (A->values[j * bs + m * by + n] > eps)) { pos_off_diag[m] ++; } } } else { for (int m = 0; m < bx; m++) { for (int n = 0; n < by; n ++) if (A->values[j * bs + m * by + n] > eps) { pos_off_diag[m] ++; } } } } } std::cout << std::endl << "Percentage of the positive diagonal element is "; for (int m = 0; m < bx; m++) { std::cout << "Block " << m << " is " << 100 * positive_diag[m] / num_rows << "% " << "\t"; } std::cout << std::endl << "Number of the zero diagonal element is "; for (int m = 0; m < bx; m++) { std::cout << "Block " << m << " is " << 100 * zero_diag[m] << "\t"; } for (int m = 0; m < bx; m++) { pos_sum += pos_off_diag[m]; } std::cout << std::endl << "Percentage of the positive off-diagonal diagonal element is " << 100.0 * pos_sum / (nnz - num_rows * bx) << "%" << std::endl; for (int m = 0; m < bx; m++) { if (positive_diag[m] < num_rows) { isZmatrix = false; } } if (pos_sum < (nnz - num_rows * bx)) { isZmatrix = false; } delete[] positive_diag; delete[] zero_diag; delete[] pos_off_diag; return isZmatrix; } }; template <class T_Config> bool MatrixAnalysis<T_Config>::check_Z_matrix() { if (TConfig::memSpace == AMGX_device) { FatalError("Device version not implemented", AMGX_ERR_NOT_IMPLEMENTED); } else { if (A == NULL) { FatalError("MatrixAnalisys: A is not initialized", AMGX_ERR_BAD_PARAMETERS); } else { return Z_matrix_check<Matrix<TConfig>, types::util<ValueTypeA>::is_complex>::check(this->A, fout); } } } template <class T_Config> void MatrixAnalysis<T_Config>::draw_matrix_connection() { //int idx_i, idx_j; const int N = A->get_num_rows(); const int bx = A->get_block_dimx(); const int by = A->get_block_dimy(); const int bs = bx * by; double eps; int dim = 3; if (types::PODTypes<ValueTypeA>::vec_prec == AMGX_vecDouble) { eps = 1e-12; } else if (types::PODTypes<ValueTypeA>::vec_prec == AMGX_vecFloat) { eps = 1e-7; } else { eps = 1e-12; } //if ((idx_i > bx)||(idx_j > by)) FatalError("idx_i/idx_j is larger than the block size.", AMGX_ERR_BAD_PARAMETERS); if (geo_x == NULL) { FatalError("NO geometry input Should call load_geometry(Vector<TConfig>* geox, Vector<TConfig>* geoy, Vector<TConfig>* geoz ).", AMGX_ERR_BAD_PARAMETERS); } if (geo_z == NULL) { dim = 2; } int num_line = 0; std::vector< std::vector<int> > bd_idx(bx); for (int i = 0; i < bx; i++) { bd_idx[i].resize(N); } bool *is_diag = new bool[bx]; for (int i = 0; i < N; i++) 
{ for (int m = 0; m < bx; m++) { is_diag[m] = true; } for (int j = A->row_offsets[i]; j < A->row_offsets[i + 1]; j++) { int k = A->col_indices[j]; if (i == k) { for (int m = 0; m < bx; m++) { if ((types::util<ValueTypeA>::abs(A->values[j * bs + m * by + m]) > 1 + eps) || (types::util<ValueTypeA>::abs(A->values[j * bs + m * by + m]) < 1 - eps)) { is_diag[m] = false; } for (int n = 0; n < by; n++) if ((m != n) && (types::util<ValueTypeA>::abs(A->values[j * bs + m * by + n]) > eps)) { is_diag[m] = false; } } } else { for (int m = 0; m < bx; m++) for (int n = 0; n < by; n++) if ((types::util<ValueTypeA>::abs(A->values[j * bs + m * by + n]) > eps) ) { is_diag[m] = false; } } } for (int m = 0; m < bx; m++) if (is_diag[m]) { bd_idx[m][i] = 1;} else {bd_idx[m][i] = 0;} } std::cout << "number of point: " << (int) geo_x->size() << " row: " << b->size() << "size of diag: " << A->diag.size() << std::endl; for (int i = 0; i < N; i++) { for (int j = A->row_offsets[i]; j < A->row_offsets[i + 1]; j++) { int k = A->col_indices[j]; if (i == k) { for (int m = 0; m < bx; m++) if (bd_idx[m][k] == 1) { for (int n = 0; n < bx; n++) { if (m != n) { (*b)[k * bx + n] = (*b)[k * bx + n] - A->values[j * bs + n * by + m] * (*b)[i * bx + m]; //A->values[j*bs+n*by+m] = 0.0; } } } } else { for (int m = 0; m < bx; m++) if (bd_idx[m][k] == 1) { for (int n = 0; n < bx; n++) { (*b)[k * bx + n] = (*b)[k * bx + n] - A->values[j * bs + n * by + m] * (*b)[i * bx + m]; //A->values[j*bs+n*by+m] = 0.0; } } } } } for (int i = 0; i < A->get_num_rows(); i++) { for (int j = A->row_offsets[i]; j < A->row_offsets[i + 1]; j++) { int k = A->col_indices[j]; if ((bd_idx[0][k] == 0) && (types::util<ValueTypeA>::abs(A->values[j * bs]) > eps)) { num_line++; } } } //Part 1: Header fprintf(fout, "# vtk DataFile Version 3.0\n"); //Part 2: Title fprintf(fout, "show matrix connection by edges\n"); //Part 3: Data Type ASCII/BINARY fprintf(fout, "ASCII\n\n"); //Part 4: Geometry/Topology: STRUCTURED_POINTS/STRUCTURED_GRID/UNSTRUCURED_GRID/POLYDATA/RECTILINEAR_GRID/FIELD fprintf(fout, "DATASET POLYDATA\n"); fprintf(fout, "POINTS %8d double\n", (int) geo_x->size()); for (int i = 0; i < geo_x->size(); i++) { fprintf(fout, "%8g %8g ", (double)(*geo_x)[i], (double)(*geo_y)[i]); if (dim == 3) {fprintf(fout, " %8g \n", (double)(*geo_z)[i]);} else {fprintf(fout, " %8f \n", 0.0);} } std::cout << "number of points: " << (int) geo_x->size() << " row: " << A->get_num_rows() << std::endl; fprintf(fout, "LINES %8d %8d\n", num_line, 3 * num_line); for (int i = 0; i < A->get_num_rows(); i++) { for (int j = A->row_offsets[i]; j < A->row_offsets[i + 1]; j++) { int k = A->col_indices[j]; //if ((A->values[j*bs+bs-1] > eps) || (A->values[j*bs+bs-1] < -eps)) fprintf(fout,"%8d %8d %8d\n", 2, i, k); if ((bd_idx[0][k] == 0) && (types::util<ValueTypeA>::abs(A->values[j * bs]) > eps)) { fprintf(fout, "%8d %8d %8d\n", 2, i, k); } } } //Part 5: Dataset attributes.The number of data items n of each type must match the number of points or cells in the dataset. (If type is FIELD, point and cell data should be omitted.) 
fprintf(fout, "\n POINT_DATA %8d\n", N); for (int m = 0; m < bx; m++) { fprintf(fout, "SCALARS boundary_%d double\n LOOKUP_TABLE default\n", m); for (int i = 0; i < N; i++) { fprintf(fout, " %d \n", bd_idx[m][i]); } } delete[] is_diag; } template<class T_Config> float MatrixAnalysis<T_Config>::aggregatesQuality(typename Matrix<T_Config>::IVector &aggregates, DevVectorFloat &edge_weights) { typedef TemplateConfig<AMGX_host, T_Config::vecPrec, T_Config::matPrec, T_Config::indPrec> TConfig_h; Matrix<TConfig_h> Ah = *A; typename Matrix<TConfig_h>::IVector aggregates_h; aggregates_h.copy(aggregates); float score = 0.; for ( int i = 0; i < Ah.get_num_rows(); i++ ) { for ( int j = Ah.row_offsets[i]; j < Ah.row_offsets[i + 1]; j++ ) { int k = Ah.col_indices[j]; if ( k != i && aggregates_h[k] == aggregates_h[i]) { score += edge_weights[j]; } } } return score; } template<class T_Config> void MatrixAnalysis<T_Config>::aggregatesQuality2(const typename Matrix<T_Config>::IVector &aggregates, int num_aggregates, const Matrix<T_Config> &Aorig) { typedef TemplateConfig<AMGX_host, T_Config::vecPrec, T_Config::matPrec, T_Config::indPrec> TConfig_h; typename Matrix<TConfig_h>::IVector aggs = aggregates; int lvl = A->template getParameter <int> ("level"); Matrix<TConfig_h> Ah = *A; Matrix<TConfig_h> Ahorig = Aorig; std::vector<int> agg_cnt(aggs.size(), 0); // counters int max_nnz_per_row = -1, singletons_number = 0, empty_rows = 0, max_edges_in_aggregate = 0; double avg_nnz_per_row, max_nnz_variance; ValueTypeA avg_nnz_variance = types::util<ValueTypeA>::get_zero(), avg_nnz_sum = types::util<ValueTypeA>::get_zero(); unsigned long long int nnz_num2; avg_nnz_per_row = (double)( Ah.get_num_nz() - (Ah.hasProps(DIAG) ? 0 : Ah.get_num_rows()) ) / Ah.get_num_rows(); //avg_nnz_per_row = (double)( Ah.values.size()/Ah.get_block_size() - Ah.get_num_rows() ) / Ah.get_num_rows(); for (int row = 0; row < Ah.get_num_rows(); row++) { int start_co = Ah.row_offsets[row]; int end_co = Ah.row_offsets[row + 1]; if (start_co == end_co) { empty_rows++; } max_nnz_per_row = max(end_co - start_co - (Ah.hasProps(DIAG) ? 0 : 1), max_nnz_per_row); ValueTypeA avg_nnz = types::util<ValueTypeA>::get_zero(), avg_nnz2 = types::util<ValueTypeA>::get_zero(), nnz_var = types::util<ValueTypeA>::get_zero(); int nnz_cnt = 0; for (int co = start_co; co < end_co; co++) { int col = Ah.col_indices[co]; if (col != row) { avg_nnz = avg_nnz + Ah.values[ co * Ah.get_block_size() ]; avg_nnz2 = avg_nnz2 + (Ah.values[ co * Ah.get_block_size() ]) * (Ah.values[ co * Ah.get_block_size() ]); nnz_cnt ++; } } avg_nnz = nnz_cnt > 0 ? (avg_nnz / (double)nnz_cnt) : types::util<ValueTypeA>::get_zero(); avg_nnz2 = nnz_cnt > 0 ? (avg_nnz2 / (double)nnz_cnt) : types::util<ValueTypeA>::get_zero(); avg_nnz_sum = avg_nnz_sum + avg_nnz; // avg_nnz normalized by the num of nnz nnz_var = avg_nnz2 - avg_nnz * avg_nnz; avg_nnz_variance = avg_nnz_variance + nnz_var; max_nnz_variance = max_nnz_variance < types::util<ValueTypeA>::abs(nnz_var) ? 
types::util<ValueTypeA>::abs(nnz_var) : max_nnz_variance; nnz_num2 += nnz_cnt * nnz_cnt; } avg_nnz_sum = avg_nnz_sum / (double)Ah.get_num_rows(); avg_nnz_variance = avg_nnz_variance / (double)Ah.get_num_rows(); double nnz_per_row_var = (double)nnz_num2 / Ah.get_num_rows() - avg_nnz_per_row * avg_nnz_per_row; for (unsigned int i = 0; i < aggs.size(); i++) { agg_cnt[aggs[i]]++; } singletons_number = (int)(std::count(agg_cnt.begin(), agg_cnt.end(), 1)); max_edges_in_aggregate = *(std::max_element(agg_cnt.begin(), agg_cnt.end())); std::stringstream ss; agg_cnt.resize(aggs.size()); std::fill(agg_cnt.begin(), agg_cnt.end(), 0); std::vector< std::vector<int> > agg_list(aggs.size()); for (unsigned int agg = 0; agg < aggs.size(); agg++) { agg_list[aggs[agg]].push_back(agg); } for (unsigned int agg = 0; agg < aggs.size(); agg++) if (agg_list[agg].size() > 0) { std::vector<int> clusters(agg_list[agg].size()); thrust::sequence(clusters.begin(), clusters.end()); for (int iter = 0; iter < clusters.size(); iter++) { //if (Ah.get_num_rows() < 20) printf("Processing aggregate ") for (int edge_id = (int)(agg_list[agg].size()) - 1; edge_id >= 0 ; edge_id--) { int edge = agg_list[agg][edge_id]; int max_id = clusters[edge_id]; for (int ro = Ahorig.row_offsets[edge]; ro < Ahorig.row_offsets[edge + 1]; ro++) { std::vector<int>::iterator finding = std::find(agg_list[agg].begin(), agg_list[agg].end(), Ahorig.col_indices[ro]); if (finding != agg_list[agg].end()) { max_id = max(max_id, clusters[finding - agg_list[agg].begin()]); } } clusters[edge_id] = max_id; } } std::set<int> clusters_counter (clusters.begin(), clusters.end()); agg_cnt[agg] = clusters_counter.size(); } cudaCheckError(); int max_uncon = *(std::max_element(agg_cnt.begin(), agg_cnt.end())) - 1; int num_uncon = num_aggregates - (int)(std::count(agg_cnt.begin(), agg_cnt.end(), 1)); if (lvl == 1) { ss << "\nMatrix aggregation and galerkin product information:\n"; ss << std::setw(4) << "LVL" << std::setw(10) << "from rows" << std::setw(10) << "to rows" << std::setw(12) << "empty rows" << std::setw(20) << "max nodes in aggr" << std::setw(15) << "singletons#" \ << std::setw(18) << "max nnz# per row" << std::setw(18) << "avg nnz# per row" << std::setw(18) << "nnz# per row var" << std::setw(24) << "agg# with uncon nodes" << std::setw(18) << "max# uncon nodes" << std::endl; ss << " ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n"; } ss << std::setw(4) << lvl << std::setw(10) << Ahorig.get_num_rows() << std::setw(10) << Ah.get_num_rows() << std::setw(12) << empty_rows << std::setw(20) << max_edges_in_aggregate << std::setw(15) << singletons_number \ << std::setw(18) << max_nnz_per_row << std::setw(18) << avg_nnz_per_row << std::setw(18) << nnz_per_row_var << std::setw(24) << num_uncon << std::setw(18) << max_uncon << std::endl; amgx_printf("%s", ss.str().c_str()); /*printf("Matrix with %d number of rows, level %d\nNumber of empty rows is %d.\nMaximum edges in the aggregate is %d.\nNumber of singletons is %d.\n", Ah.get_num_rows(), empty_rows, max_edges_in_aggregate, singletons_number); printf("Max nnz per row: %d\nAverage nnz per row: %f\nMax nnz variance: %f\nAverage nnz variance: %f\nAverage nnz sum: %f\n", max_nnz_per_row, avg_nnz_per_row, max_nnz_variance, avg_nnz_variance, avg_nnz_sum); */ fflush(stdout); return; } template<class T_Config> void MatrixAnalysis<T_Config>::visualizeAggregates(typename 
Matrix<TConfig>::IVector &aggregates) { typedef TemplateConfig<AMGX_host, TConfig::vecPrec, TConfig::matPrec, TConfig::indPrec> TConfig_h; typedef Vector< TConfig_h > Vector_h; Matrix<TConfig_h> Ah = *A; typename Matrix<TConfig_h>::IVector aggregates_h; aggregates_h.copy(aggregates); int dim = 0; if ( Ah.hasParameter("geo.x") && Ah.hasParameter("geo.y")) { if (Ah.hasParameter("geo.z")) { dim = 3; } else { dim = 2; } } else { std::cout << "Cannot visualize data, no geometry information attached" << std::endl; return; } PODVecHost *geo_x = Ah.template getParameterPtr< PODVecHost >("geo.x"); PODVecHost *geo_y = Ah.template getParameterPtr< PODVecHost >("geo.y"); PODVecHost *geo_z = Ah.template getParameterPtr< PODVecHost >("geo.z"); // Count the number of lines to create int num_line = 0; for (int i = 0; i < Ah.get_num_rows(); i++) { for (int j = Ah.row_offsets[i]; j < Ah.row_offsets[i + 1]; j++) { int k = Ah.col_indices[j]; if (k != i && aggregates_h[k] == aggregates_h[i] ) { num_line++; } } } std::stringstream file_name; file_name << "aggregates_" << Ah.get_num_rows() << ".vtk"; FILE *fout; fout = fopen(file_name.str().c_str(), "w"); //Part 1: Header fprintf(fout, "# vtk DataFile Version 3.0\n"); //Part 2: Title fprintf(fout, "show matrix connection by edges\n"); //Part 3: Data Type ASCII/BINARY fprintf(fout, "ASCII\n\n"); //Part 4: Geometry/Topology: STRUCTURED_POINTS/STRUCTURED_GRID/UNSTRUCURED_GRID/POLYDATA/RECTILINEAR_GRID/FIELD fprintf(fout, "DATASET POLYDATA\n"); fprintf(fout, "POINTS %8d double\n", (int) geo_x->size()); for (int i = 0; i < geo_x->size(); i++) { fprintf(fout, "%8g %8g ", (double)(*geo_x)[i], (double)(*geo_y)[i]); if (dim == 3) {fprintf(fout, " %8g \n", (double)(*geo_z)[i]);} else {fprintf(fout, " %8f \n", 0.0);} } std::cout << "number of points: " << (int) geo_x->size() << " row: " << A->get_num_rows() << std::endl; fprintf(fout, "LINES %8d %8d\n", num_line, 3 * num_line); for (int i = 0; i < Ah.get_num_rows(); i++) { for (int j = Ah.row_offsets[i]; j < Ah.row_offsets[i + 1]; j++) { int k = Ah.col_indices[j]; if (k != i && aggregates_h[k] == aggregates_h[i]) { fprintf(fout, "%8d %8d %8d\n", 2, i, k); } } } std::cout << "done writing file" << std::endl; fclose(fout); std::cout << "done closing file" << std::endl; } /**************************************** * Explict instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class MatrixAnalysis<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE }//end namespace amgx
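//
// ---------------------------------------------------------------------------
// Editor's sketch (not part of AMGX): the visualization routines above emit
// legacy VTK POLYDATA files whose LINES header is "num_line, 3*num_line"
// because every edge record is written as "2 i k". The standalone function
// below reproduces that layout for an arbitrary CSR graph; the file path and
// coordinate/CSR arrays are hypothetical inputs, with one (x, y) point per
// matrix row and z fixed to 0 for 2D problems.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <vector>

inline void write_csr_edges_vtk(const char *path,
                                const std::vector<double> &x,
                                const std::vector<double> &y,
                                const std::vector<int>    &row_offsets,
                                const std::vector<int>    &col_indices)
{
    FILE *fout = std::fopen(path, "w");
    if (!fout) { return; }

    // Parts 1-4: header, title, data type, geometry/topology type
    std::fprintf(fout, "# vtk DataFile Version 3.0\n");
    std::fprintf(fout, "show matrix connection by edges\n");
    std::fprintf(fout, "ASCII\n\n");
    std::fprintf(fout, "DATASET POLYDATA\n");

    // Geometry: one 3D point per matrix row
    std::fprintf(fout, "POINTS %8d double\n", (int) x.size());
    for (size_t i = 0; i < x.size(); i++)
        std::fprintf(fout, "%8g %8g %8f\n", x[i], y[i], 0.0);

    // Count off-diagonal entries first: the LINES header needs both totals up front
    int num_line = 0;
    for (size_t i = 0; i + 1 < row_offsets.size(); i++)
        for (int j = row_offsets[i]; j < row_offsets[i + 1]; j++)
            if (col_indices[j] != (int) i) { num_line++; }

    // Topology: each record is "2 i k", i.e. a two-point polyline per edge
    std::fprintf(fout, "LINES %8d %8d\n", num_line, 3 * num_line);
    for (size_t i = 0; i + 1 < row_offsets.size(); i++)
        for (int j = row_offsets[i]; j < row_offsets[i + 1]; j++)
            if (col_indices[j] != (int) i)
                std::fprintf(fout, "%8d %8d %8d\n", 2, (int) i, col_indices[j]);

    std::fclose(fout);
}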
* Kernel utilities for loading tiles of data through global memory * with cache modifiers, marking discontinuities between consecutive elements ******************************************************************************/ #pragma once #include <b40c/util/operators.cuh> #include <b40c/util/vector_types.cuh> #include <b40c/util/io/modified_load.cuh> namespace b40c { namespace util { namespace io { /** * Load a tile of items and initialize discontinuity flags */ template < int LOG_LOADS_PER_TILE, // Number of vector loads (log) int LOG_LOAD_VEC_SIZE, // Number of items per vector load (log) int ACTIVE_THREADS, // Active threads that will be loading ld::CacheModifier CACHE_MODIFIER, // Cache modifier (e.g., CA/CG/CS/NONE/etc.) bool CHECK_ALIGNMENT, // Whether or not to check alignment to see if vector loads can be used bool CONSECUTIVE_SMEM_ASSIST, // Whether nor not to use supplied smem to assist in discontinuity detection bool FIRST_TILE, // Whether or not this is the first tile loaded by the CTA bool FLAG_FIRST_OOB> // Whether or not the first element that is out-of-bounds should also be flagged struct LoadTileDiscontinuity { enum { LOADS_PER_TILE = 1 << LOG_LOADS_PER_TILE, LOAD_VEC_SIZE = 1 << LOG_LOAD_VEC_SIZE, LOG_ELEMENTS_PER_THREAD = LOG_LOADS_PER_TILE + LOG_LOAD_VEC_SIZE, ELEMENTS_PER_THREAD = 1 << LOG_ELEMENTS_PER_THREAD, TILE_SIZE = ACTIVE_THREADS * ELEMENTS_PER_THREAD, }; //--------------------------------------------------------------------- // Iteration Structures //--------------------------------------------------------------------- template <int LOAD, int VEC, int dummy = 0> struct Iterate; /** * First vec element of a vector-load */ template <int LOAD, int dummy> struct Iterate<LOAD, 0, dummy> { // Vector with discontinuity flags (unguarded) template < typename T, typename Flag, typename VectorType, typename EqualityOp> static __device__ __forceinline__ void VectorLoadValid( T smem[ACTIVE_THREADS + 1], T data[][LOAD_VEC_SIZE], Flag flags[][LOAD_VEC_SIZE], VectorType vectors[], VectorType *d_in_vectors, EqualityOp equality_op) { // Load the vector ModifiedLoad<CACHE_MODIFIER>::Ld(vectors[LOAD], d_in_vectors); if (CONSECUTIVE_SMEM_ASSIST) { // Place last vec element into shared buffer smem[threadIdx.x + 1] = data[LOAD][LOAD_VEC_SIZE - 1]; __syncthreads(); // Process first vec element if (FIRST_TILE && (LOAD == 0) && (threadIdx.x == 0)) { // First thread's first load of first tile if (blockIdx.x == 0) { // First CTA: start a new discontinuity flags[LOAD][0] = 1; } else { // Get the previous vector element from global T *d_ptr = (T*) d_in_vectors; T previous; ModifiedLoad<CACHE_MODIFIER>::Ld(previous, d_ptr - 1); flags[LOAD][0] = !equality_op(previous, data[LOAD][0]); } } else { T previous = smem[threadIdx.x]; flags[LOAD][0] = !equality_op(previous, data[LOAD][0]); } __syncthreads(); // Save last vector item for first of next load if (threadIdx.x == ACTIVE_THREADS - 1) { smem[0] = data[LOAD][LOAD_VEC_SIZE - 1]; } } else { // Process first vec element if (FIRST_TILE && (LOAD == 0) && (blockIdx.x == 0) && (threadIdx.x == 0)) { // First thread's first load of first tile of first CTA: start a new discontinuity flags[LOAD][0] = 1; } else { // Get the previous vector element from global T *d_ptr = (T*) d_in_vectors; T previous; ModifiedLoad<CACHE_MODIFIER>::Ld(previous, d_ptr - 1); flags[LOAD][0] = !equality_op(previous, data[LOAD][0]); } } Iterate<LOAD, 1>::VectorLoadValid( smem, data, flags, vectors, d_in_vectors, equality_op); } // With discontinuity flags (unguarded) template < 
typename T, typename Flag, typename EqualityOp> static __device__ __forceinline__ void LoadValid( T data[][LOAD_VEC_SIZE], Flag flags[][LOAD_VEC_SIZE], T *d_in, EqualityOp equality_op) { int thread_offset = (threadIdx.x << LOG_LOAD_VEC_SIZE) + (LOAD * ACTIVE_THREADS * LOAD_VEC_SIZE) + 0; ModifiedLoad<CACHE_MODIFIER>::Ld(data[LOAD][0], d_in + thread_offset); if (FIRST_TILE && (LOAD == 0) && (blockIdx.x == 0) && (threadIdx.x == 0)) { // First load of first tile of first CTA: discontinuity flags[LOAD][0] = 1; } else { // Get the previous vector element (which is in range b/c this one is in range) T previous; ModifiedLoad<CACHE_MODIFIER>::Ld(previous, d_in + thread_offset - 1); flags[LOAD][0] = !equality_op(previous, data[LOAD][0]); } Iterate<LOAD, 1>::LoadValid( data, flags, d_in, equality_op); } // With discontinuity flags (guarded) template < typename T, typename Flag, typename SizeT, typename EqualityOp> static __device__ __forceinline__ void LoadValid( T data[][LOAD_VEC_SIZE], Flag flags[][LOAD_VEC_SIZE], T *d_in, const SizeT &guarded_elements, EqualityOp equality_op) { SizeT thread_offset = (threadIdx.x << LOG_LOAD_VEC_SIZE) + (LOAD * ACTIVE_THREADS * LOAD_VEC_SIZE) + 0; if (thread_offset < guarded_elements) { ModifiedLoad<CACHE_MODIFIER>::Ld(data[LOAD][0], d_in + thread_offset); if (FIRST_TILE && (LOAD == 0) && (blockIdx.x == 0) && (threadIdx.x == 0)) { // First load of first tile of first CTA: discontinuity flags[LOAD][0] = 1; } else { // Get the previous vector element (which is in range b/c this one is in range) T previous; ModifiedLoad<CACHE_MODIFIER>::Ld(previous, d_in + thread_offset - 1); flags[LOAD][0] = !equality_op(previous, data[LOAD][0]); } } else { flags[LOAD][0] = ((FLAG_FIRST_OOB) && (thread_offset == guarded_elements)); } Iterate<LOAD, 1>::LoadValid( data, flags, d_in, guarded_elements, equality_op); } }; /** * Next vec element of a vector-load */ template <int LOAD, int VEC, int dummy> struct Iterate { // Vector with discontinuity flags template < typename T, typename Flag, typename VectorType, typename EqualityOp> static __device__ __forceinline__ void VectorLoadValid( T smem[ACTIVE_THREADS + 1], T data[][LOAD_VEC_SIZE], Flag flags[][LOAD_VEC_SIZE], VectorType vectors[], VectorType *d_in_vectors, EqualityOp equality_op) { T current = data[LOAD][VEC]; T previous = data[LOAD][VEC - 1]; flags[LOAD][VEC] = !equality_op(previous, current); Iterate<LOAD, VEC + 1>::VectorLoadValid( smem, data, flags, vectors, d_in_vectors, equality_op); } // With discontinuity flags (unguarded) template < typename T, typename Flag, typename EqualityOp> static __device__ __forceinline__ void LoadValid( T data[][LOAD_VEC_SIZE], Flag flags[][LOAD_VEC_SIZE], T *d_in, EqualityOp equality_op) { int thread_offset = (threadIdx.x << LOG_LOAD_VEC_SIZE) + (LOAD * ACTIVE_THREADS * LOAD_VEC_SIZE) + VEC; ModifiedLoad<CACHE_MODIFIER>::Ld(data[LOAD][VEC], d_in + thread_offset); T previous = data[LOAD][VEC - 1]; T current = data[LOAD][VEC]; flags[LOAD][VEC] = !equality_op(previous, current); Iterate<LOAD, VEC + 1>::LoadValid( data, flags, d_in, equality_op); } // With discontinuity flags (guarded) template < typename T, typename Flag, typename SizeT, typename EqualityOp> static __device__ __forceinline__ void LoadValid( T data[][LOAD_VEC_SIZE], Flag flags[][LOAD_VEC_SIZE], T *d_in, const SizeT &guarded_elements, EqualityOp equality_op) { SizeT thread_offset = (threadIdx.x << LOG_LOAD_VEC_SIZE) + (LOAD * ACTIVE_THREADS * LOAD_VEC_SIZE) + VEC; if (thread_offset < guarded_elements) { 
ModifiedLoad<CACHE_MODIFIER>::Ld(data[LOAD][VEC], d_in + thread_offset); T previous = data[LOAD][VEC - 1]; T current = data[LOAD][VEC]; flags[LOAD][VEC] = !equality_op(previous, current); } else { flags[LOAD][VEC] = ((FLAG_FIRST_OOB) && (thread_offset == guarded_elements)); } Iterate<LOAD, VEC + 1>::LoadValid( data, flags, d_in, guarded_elements, equality_op); } }; /** * Next load */ template <int LOAD, int dummy> struct Iterate<LOAD, LOAD_VEC_SIZE, dummy> { // Vector with discontinuity flags (unguarded) template < typename T, typename Flag, typename VectorType, typename EqualityOp> static __device__ __forceinline__ void VectorLoadValid( T smem[ACTIVE_THREADS + 1], T data[][LOAD_VEC_SIZE], Flag flags[][LOAD_VEC_SIZE], VectorType vectors[], VectorType *d_in_vectors, EqualityOp equality_op) { Iterate<LOAD + 1, 0>::VectorLoadValid( smem, data, flags, vectors, d_in_vectors + ACTIVE_THREADS, equality_op); } // With discontinuity flags (unguarded) template < typename T, typename Flag, typename EqualityOp> static __device__ __forceinline__ void LoadValid( T data[][LOAD_VEC_SIZE], Flag flags[][LOAD_VEC_SIZE], T *d_in, EqualityOp equality_op) { Iterate<LOAD + 1, 0>::LoadValid( data, flags, d_in, equality_op); } // With discontinuity flags (guarded) template < typename T, typename Flag, typename SizeT, typename EqualityOp> static __device__ __forceinline__ void LoadValid( T data[][LOAD_VEC_SIZE], Flag flags[][LOAD_VEC_SIZE], T *d_in, const SizeT &guarded_elements, EqualityOp equality_op) { Iterate<LOAD + 1, 0>::LoadValid( data, flags, d_in, guarded_elements, equality_op); } }; /** * Terminate */ template <int dummy> struct Iterate<LOADS_PER_TILE, 0, dummy> { // Vector with discontinuity flags (unguarded) template < typename T, typename Flag, typename VectorType, typename EqualityOp> static __device__ __forceinline__ void VectorLoadValid( T smem[ACTIVE_THREADS + 1], T data[][LOAD_VEC_SIZE], Flag flags[][LOAD_VEC_SIZE], VectorType vectors[], VectorType *d_in_vectors, EqualityOp equality_op) {} // With discontinuity flags (unguarded) template < typename T, typename Flag, typename EqualityOp> static __device__ __forceinline__ void LoadValid( T data[][LOAD_VEC_SIZE], Flag flags[][LOAD_VEC_SIZE], T *d_in, EqualityOp equality_op) {} // With discontinuity flags (guarded) template < typename T, typename Flag, typename SizeT, typename EqualityOp> static __device__ __forceinline__ void LoadValid( T data[][LOAD_VEC_SIZE], Flag flags[][LOAD_VEC_SIZE], T *d_in, const SizeT &guarded_elements, EqualityOp equality_op) {} }; //--------------------------------------------------------------------- // Interface //--------------------------------------------------------------------- /** * Load a full tile and initialize discontinuity flags when values change * between consecutive elements */ template < typename T, // Tile type typename Flag, // Discontinuity flag type typename SizeT, typename EqualityOp> static __device__ __forceinline__ void LoadValid( T smem[ACTIVE_THREADS + 1], T data[][LOAD_VEC_SIZE], Flag flags[][LOAD_VEC_SIZE], T *d_in, SizeT cta_offset, EqualityOp equality_op) { const size_t MASK = ((sizeof(T) * 8 * LOAD_VEC_SIZE) - 1); if ((CHECK_ALIGNMENT) && (LOAD_VEC_SIZE > 1) && (((size_t) d_in) & MASK)) { Iterate<0, 0>::LoadValid( data, flags, d_in + cta_offset, equality_op); } else { // Use an aliased pointer to keys array to perform built-in vector loads typedef typename VecType<T, LOAD_VEC_SIZE>::Type VectorType; VectorType *vectors = (VectorType *) data; VectorType *d_in_vectors = (VectorType *) (d_in + 
cta_offset + (threadIdx.x << LOG_LOAD_VEC_SIZE)); Iterate<0, 0>::VectorLoadValid( smem, data, flags, vectors, d_in_vectors, equality_op); } } /** * Load guarded_elements of a tile and initialize discontinuity flags when * values change between consecutive elements */ template < typename T, // Tile type typename Flag, // Discontinuity flag type typename SizeT, // Integer type for indexing into global arrays typename EqualityOp> static __device__ __forceinline__ void LoadValid( T smem[ACTIVE_THREADS + 1], T data[][LOAD_VEC_SIZE], Flag flags[][LOAD_VEC_SIZE], T *d_in, SizeT cta_offset, const SizeT &guarded_elements, EqualityOp equality_op) { if (guarded_elements >= TILE_SIZE) { LoadValid(smem, data, flags, d_in, cta_offset, equality_op); } else { Iterate<0, 0>::LoadValid( data, flags, d_in + cta_offset, guarded_elements, equality_op); } } }; } // namespace io } // namespace util } // namespace b40c
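//
// ---------------------------------------------------------------------------
// Editor's sketch (independent of the b40c template machinery above): the core
// idea of LoadTileDiscontinuity is that an element is flagged when it differs
// from its predecessor, and global element 0 always opens the first segment.
// The kernel below shows that rule in its simplest form; b40c additionally
// vectorizes the loads, applies cache modifiers, and uses shared memory so the
// predecessor of a vector's first element does not require an extra global
// load. Kernel and parameter names here are hypothetical.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>

template <typename T>
__global__ void FlagDiscontinuities(const T *d_in, int *d_flags, int num_elements)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= num_elements) return;

    if (i == 0)
    {
        // First element of the whole input: start of the first segment
        d_flags[0] = 1;
    }
    else
    {
        // Re-read the predecessor from global memory (it is in range because i is)
        d_flags[i] = (d_in[i - 1] != d_in[i]) ? 1 : 0;
    }
}

// Example host-side launch:
//   int threads = 256;
//   int blocks  = (num_elements + threads - 1) / threads;
//   FlagDiscontinuities<<<blocks, threads>>>(d_keys, d_flags, num_elements);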
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> // #include <THC/THC.h> #include <THC/THCAtomics.cuh> // #include <THC/THCDeviceUtils.cuh> #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) const int CUDA_NUM_THREADS = 1024; inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } template <typename scalar_t> __device__ scalar_t lau_micro_bilinear(const scalar_t *bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { scalar_t upper_bound_x = height - 1.; scalar_t upper_bound_y = width - 1.; if (h > upper_bound_x) h = upper_bound_x; if (h < 0.) h = 0.; if (w > upper_bound_y) w = upper_bound_y; if (w < 0.) w = 0.; int h_low = floor(h); int w_low = floor(w); int h_high = ceil(h); int w_high = ceil(w); scalar_t lt_rb_h_ne = 1.; scalar_t lt_rb_w_ne = 1.; if (h_low == h_high) lt_rb_h_ne = 0.; if (w_low == w_high) lt_rb_w_ne = 0.; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1. - lh, hw = 1. - lw; scalar_t v1 = bottom_data[h_low * data_width + w_low]; // left top scalar_t v2 = bottom_data[h_low * data_width + w_high]*lt_rb_w_ne; // right top scalar_t v3 = bottom_data[h_high * data_width + w_low]*lt_rb_h_ne; // left bottom scalar_t v4 = bottom_data[h_high * data_width + w_high]*lt_rb_h_ne*lt_rb_w_ne; // right bottom scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __device__ void ldu_micro_bilinear_multi_output(const scalar_t *bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w, scalar_t *output_ptr, scalar_t *output_lt_ptr, scalar_t *output_lb_ptr, scalar_t *output_rt_ptr, scalar_t *output_rb_ptr) { scalar_t upper_bound_x = height - 1.; scalar_t upper_bound_y = width - 1.; if (h > upper_bound_x) h = upper_bound_x; if (h < 0.) h = 0.; if (w > upper_bound_y) w = upper_bound_y; if (w < 0.) w = 0.; int h_low = floor(h); int w_low = floor(w); int h_high = ceil(h); int w_high = ceil(w); scalar_t lt_rb_h_ne = 1.; scalar_t lt_rb_w_ne = 1.; if (h_low == h_high) lt_rb_h_ne = 0.; if (w_low == w_high) lt_rb_w_ne = 0.; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1. - lh, hw = 1. - lw; scalar_t v1 = bottom_data[h_low * data_width + w_low]; // left top scalar_t v2 = bottom_data[h_low * data_width + w_high]; // right top scalar_t v3 = bottom_data[h_high * data_width + w_low]; // left bottom scalar_t v4 = bottom_data[h_high * data_width + w_high]; // right bottom scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 * lt_rb_w_ne + w3 * v3 * lt_rb_h_ne + w4 * v4 * lt_rb_h_ne *lt_rb_w_ne); *output_ptr = val; *output_lt_ptr = v1; *output_lb_ptr = v3; *output_rt_ptr = v2; *output_rb_ptr = v4; } template <typename scalar_t> __device__ void lau_micro_coord_bilinear(const scalar_t grad_out, const scalar_t *bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w, scalar_t *grad_x_ptr, scalar_t *grad_y_ptr, scalar_t *grad_input_ptr) { scalar_t upper_bound_x = height - 1; scalar_t upper_bound_y = width - 1; const scalar_t H = h; const scalar_t W = w; if (h > upper_bound_x) h = upper_bound_x; if (h < 0.) h = 0.; if (w > upper_bound_y) w = upper_bound_y; if (w < 0.) 
w = 0.; int h_low = floor(h); int w_low = floor(w); int h_high = ceil(h); int w_high = ceil(w); scalar_t lt_rb_h_ne = 1.; scalar_t lt_rb_w_ne = 1.; if (h_low == h_high) lt_rb_h_ne = 0.; if (w_low == w_high) lt_rb_w_ne = 0.; scalar_t lh = h - h_low; // x - lower_x scalar_t lw = w - w_low; // y - lower_y scalar_t hh = 1. - lh; // upper_x - x scalar_t hw = 1. - lw; // upper_y - y const int lt_coords = h_low * data_width + w_low; const int rt_coords = h_low * data_width + w_high; const int lb_coords = h_high * data_width + w_low; const int rb_coords = h_high * data_width + w_high; scalar_t v1 = bottom_data[lt_coords]; // left top scalar_t v2 = bottom_data[rt_coords]*lt_rb_w_ne; // right top scalar_t v3 = bottom_data[lb_coords]*lt_rb_h_ne; // left bottom scalar_t v4 = bottom_data[rb_coords]*lt_rb_h_ne*lt_rb_w_ne; // right bottom // input gradients atomicAdd(grad_input_ptr + lt_coords, grad_out * hh * hw); if (lt_rb_w_ne == 1.) atomicAdd(grad_input_ptr + rt_coords, grad_out * hh * lw); // coord gradients *grad_x_ptr = 0.; *grad_y_ptr = 0.; if (H <= upper_bound_x && H >= 0.) *grad_x_ptr += (-1. * hw * v1 - 1. * lw * v2 + hw * v3 + lw * v4) * grad_out; if (W <= upper_bound_y && W >= 0.) *grad_y_ptr += (-1. * hh * v1 + hh * v2 - 1. * lh * v3 + lh * v4) * grad_out; // input gradients if (lt_rb_h_ne == 1.) atomicAdd(grad_input_ptr + lb_coords, grad_out * lh * hw); if (lt_rb_h_ne == 1. && lt_rb_w_ne == 1.) atomicAdd(grad_input_ptr + rb_coords, grad_out * lh * lw); } template <typename scalar_t> __global__ void lau_bilinear_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset_x, const scalar_t *data_offset_y, const int height, const int width, const int batch_size, const int num_channels, const int height_out, const int width_out, scalar_t *data_out) { // launch channels * batch_size * height_col * width_col cores CUDA_KERNEL_LOOP(index, n) { // NOTE(CharlesShang): different from Dai Jifeng's MXNet implementation, col_buffer is of shape (c*kw*kh, N, oh, ow) // here columns is of shape (N, c*kw*kh, oh * ow), need to adapt axis // NOTE(Jiarui XU): different from CharlesShang's implementation, col_buffer is of shape (N, c*kw*kh, oh * ow) // here columns is of shape (c*kw*kh, N, oh, ow), need to adapt axis // index index of output matrix const int size_col = width_out * height_out; const int c_col = index / size_col; const int h_col = (index - c_col*size_col) / width_out; const int w_col = index % width_out; const int c_im = c_col; const int offset_ = (c_col * height_out + h_col) * width_out + w_col; scalar_t *data_col_ptr = data_out + offset_; const scalar_t *data_im_ptr = data_im + c_im * height * width; const scalar_t offset_h = data_offset_x[offset_]; const scalar_t offset_w = data_offset_y[offset_]; scalar_t val = static_cast<scalar_t>(0); const scalar_t k_h = height_out / height; const scalar_t k_w = width_out / width; const scalar_t h_im = h_col / k_h + offset_h; const scalar_t w_im = w_col / k_w + offset_w; val = lau_micro_bilinear(data_im_ptr, width, height, width, h_im, w_im); *data_col_ptr = val; } } template <typename scalar_t> __global__ void ldu_bilinear_gpu_kernel(const int n, const scalar_t *data_im, const int height, const int width, const int batch_size, const int num_channels, const int height_out, const int width_out, scalar_t *data_out) { // launch channels * batch_size * height_col * width_col cores CUDA_KERNEL_LOOP(index, n) { // NOTE(CharlesShang): different from Dai Jifeng's MXNet implementation, col_buffer is of shape (c*kw*kh, N, oh, ow) // here columns is 
of shape (N, c*kw*kh, oh * ow), need to adapt axis // NOTE(Jiarui XU): different from CharlesShang's implementation, col_buffer is of shape (N, c*kw*kh, oh * ow) // here columns is of shape (c*kw*kh, N, oh, ow), need to adapt axis // index index of output matrix const int size_col = width_out * height_out; const int c_col = index / size_col; const int h_col = (index - c_col*size_col) / width_out; const int w_col = index % width_out; const int c_im = c_col; const int offset_ = (c_col * height_out + h_col) * width_out + w_col; scalar_t *data_col_ptr = data_out + offset_; const scalar_t *data_im_ptr = data_im + c_im * height * width; scalar_t val = static_cast<scalar_t>(0); const scalar_t k_h = height_out / height; const scalar_t k_w = width_out / width; const scalar_t h_im = h_col / k_h; const scalar_t w_im = w_col / k_w; val = lau_micro_bilinear(data_im_ptr, width, height, width, h_im, w_im); *data_col_ptr = val; } } template <typename scalar_t> __global__ void ldu_bilinear_multi_output_gpu_kernel(const int n, const scalar_t *data_im, const int height, const int width, const int batch_size, const int num_channels, const int height_out, const int width_out, scalar_t *data_out, scalar_t *data_out_lt, scalar_t *data_out_lb, scalar_t *data_out_rt, scalar_t *data_out_rb) { // launch channels * batch_size * height_col * width_col cores CUDA_KERNEL_LOOP(index, n) { // NOTE(CharlesShang): different from Dai Jifeng's MXNet implementation, col_buffer is of shape (c*kw*kh, N, oh, ow) // here columns is of shape (N, c*kw*kh, oh * ow), need to adapt axis // NOTE(Jiarui XU): different from CharlesShang's implementation, col_buffer is of shape (N, c*kw*kh, oh * ow) // here columns is of shape (c*kw*kh, N, oh, ow), need to adapt axis // index index of output matrix const int size_col = width_out * height_out; const int c_col = index / size_col; const int h_col = (index - c_col*size_col) / width_out; const int w_col = index % width_out; const int c_im = c_col; const int offset_ = (c_col * height_out + h_col) * width_out + w_col; scalar_t *data_col_ptr = data_out + offset_; scalar_t *data_col_lt_ptr = data_out_lt + offset_; scalar_t *data_col_lb_ptr = data_out_lb + offset_; scalar_t *data_col_rt_ptr = data_out_rt + offset_; scalar_t *data_col_rb_ptr = data_out_rb + offset_; const scalar_t *data_im_ptr = data_im + c_im * height * width; const scalar_t k_h = height_out / height; const scalar_t k_w = width_out / width; const scalar_t h_im = h_col / k_h; const scalar_t w_im = w_col / k_w; ldu_micro_bilinear_multi_output(data_im_ptr, width, height, width, h_im, w_im, data_col_ptr, data_col_lt_ptr, data_col_lb_ptr, data_col_rt_ptr, data_col_rb_ptr); } } template <typename scalar_t> __global__ void lau_bilinear_coord_gpu_kernel(const int n, const scalar_t *grad_col, const scalar_t *data_im, const scalar_t *data_offset_x, const scalar_t *data_offset_y, const int channels, const int height, const int width, const int batch_size, const int height_out, const int width_out, scalar_t *grad_offset_x, scalar_t *grad_offset_y, scalar_t *grad_input) { CUDA_KERNEL_LOOP(index, n) { const int size_col = width_out * height_out; const int c_col = index / size_col; const int h_col = (index - c_col*size_col) / width_out; const int w_col = index % width_out; const int c_im = c_col; const int offset_ = (c_col * height_out + h_col) * width_out + w_col; scalar_t *grad_offset_x_ptr = grad_offset_x + offset_; scalar_t *grad_offset_y_ptr = grad_offset_y + offset_; const scalar_t *grad_out_ptr = grad_col + offset_; const scalar_t 
*data_im_ptr = data_im + c_im * height * width; scalar_t *grad_im_ptr = grad_input + c_im * height * width; const scalar_t offset_h = data_offset_x[offset_]; const scalar_t offset_w = data_offset_y[offset_]; const scalar_t k_h = height_out / height; const scalar_t k_w = width_out / width; const scalar_t h_im = h_col / k_h + offset_h; const scalar_t w_im = w_col / k_w + offset_w; const scalar_t grad_out = *grad_out_ptr; lau_micro_coord_bilinear(grad_out, data_im_ptr, width, height, width, h_im, w_im, grad_offset_x_ptr, grad_offset_y_ptr, grad_im_ptr); } } template <typename scalar_t> void lau_bilinear_cuda(cudaStream_t stream, const scalar_t* data_im, const scalar_t* data_offset_x, const scalar_t* data_offset_y, const int batch_size, const int channels, const int height_im, const int width_im, const int height_out, const int width_out, scalar_t* data_out) { // num_axes should be smaller than block size const int num_kernels = channels * batch_size * height_out * width_out; lau_bilinear_gpu_kernel<scalar_t> <<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>(num_kernels, data_im, data_offset_x, data_offset_y, height_im, width_im, batch_size, channels, height_out, width_out, data_out); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); } } template <typename scalar_t> void ldu_bilinear_cuda(cudaStream_t stream, const scalar_t* data_im, const int batch_size, const int channels, const int height_im, const int width_im, const int height_out, const int width_out, scalar_t* data_out) { // num_axes should be smaller than block size const int num_kernels = channels * batch_size * height_out * width_out; ldu_bilinear_gpu_kernel<scalar_t> <<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>(num_kernels, data_im, height_im, width_im, batch_size, channels, height_out, width_out, data_out); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); } } template <typename scalar_t> void ldu_bilinear_multi_output_cuda(cudaStream_t stream, const scalar_t* data_im, const int batch_size, const int channels, const int height_im, const int width_im, const int height_out, const int width_out, scalar_t* data_out, scalar_t* data_out_lt, scalar_t* data_out_lb, scalar_t* data_out_rt, scalar_t* data_out_rb) { // num_axes should be smaller than block size const int num_kernels = channels * batch_size * height_out * width_out; ldu_bilinear_multi_output_gpu_kernel<scalar_t> <<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>(num_kernels, data_im, height_im, width_im, batch_size, channels, height_out, width_out, data_out, data_out_lt, data_out_lb, data_out_rt, data_out_rb); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); } } template <typename scalar_t> void lau_bilinear_cuda_backward(cudaStream_t stream, const scalar_t* grad_col, const scalar_t* data_im, const scalar_t* data_offset_x, const scalar_t* data_offset_y, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, scalar_t* grad_offset_x, scalar_t* grad_offset_y, scalar_t* grad_input) { const int num_kernels = batch_size * height_col * width_col * channels; lau_bilinear_coord_gpu_kernel<scalar_t> <<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, grad_col, data_im, data_offset_x, data_offset_y, 
channels, height_im, width_im, batch_size, height_col, width_col, grad_offset_x, grad_offset_y, grad_input); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in location_aware_upsampling_coord_cuda: %s\n", cudaGetErrorString(err)); } }
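//
// ---------------------------------------------------------------------------
// Editor's sketch (illustration only, not part of the extension): the sampling
// rule inside lau_micro_bilinear above, written as a plain host function. The
// coordinate is clamped to the image, the four neighbours are gathered, and
// lt_rb_*_ne-style masks zero the "high" neighbours when floor == ceil after
// clamping (the corresponding weights lh/lw are already zero in that case, so
// the masks are defensive rather than changing the result).
// ---------------------------------------------------------------------------
#include <cmath>

inline float bilinear_sample(const float *img, int height, int width, float h, float w)
{
    // Clamp the sample point into [0, height-1] x [0, width-1]
    if (h < 0.f) h = 0.f;
    if (h > height - 1.f) h = height - 1.f;
    if (w < 0.f) w = 0.f;
    if (w > width - 1.f) w = width - 1.f;

    int h_low = (int) std::floor(h), h_high = (int) std::ceil(h);
    int w_low = (int) std::floor(w), w_high = (int) std::ceil(w);

    float mask_h = (h_low == h_high) ? 0.f : 1.f;
    float mask_w = (w_low == w_high) ? 0.f : 1.f;

    float lh = h - h_low, lw = w - w_low;
    float hh = 1.f - lh,  hw = 1.f - lw;

    float v1 = img[h_low  * width + w_low ];                    // left top
    float v2 = img[h_low  * width + w_high] * mask_w;           // right top
    float v3 = img[h_high * width + w_low ] * mask_h;           // left bottom
    float v4 = img[h_high * width + w_high] * mask_h * mask_w;  // right bottom

    return hh * hw * v1 + hh * lw * v2 + lh * hw * v3 + lh * lw * v4;
}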
#define PSROIALIGNAVEROTATEDPOOLING_CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \ } while (0) #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) namespace mshadow { namespace cuda { template <typename T> __device__ T bilinear_interpolate( const T* bottom_data, const int height, const int width, T y, T x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty return 0; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } int y_low = static_cast<int>(y); int x_low = static_cast<int>(x); int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // do bilinear interpolation T v1 = bottom_data[y_low * width + x_low]; T v2 = bottom_data[y_low * width + x_high]; T v3 = bottom_data[y_high * width + x_low]; T v4 = bottom_data[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename DType> __global__ void PSROIALIGNAVERotatedPoolForwardKernel( const int count, const DType* bottom_data, const DType spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const DType* bottom_rois, const int output_dim, const int group_size, DType* top_data) { CUDA_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const DType* offset_bottom_rois = bottom_rois + n * 6; int roi_batch_ind = offset_bottom_rois[0]; // DType roi_start_w = (offset_bottom_rois[1]) * spatial_scale; // DType roi_start_h = (offset_bottom_rois[2]) * spatial_scale; // DType roi_end_w = (offset_bottom_rois[3]) * spatial_scale; // DType roi_end_h = (offset_bottom_rois[4]) * spatial_scale; // Do not using rounding; this implementation detail is critical DType roi_center_w = offset_bottom_rois[1] * spatial_scale; DType roi_center_h = offset_bottom_rois[2] * spatial_scale; DType roi_width = offset_bottom_rois[3] * spatial_scale; DType roi_height = offset_bottom_rois[4] * spatial_scale; // T theta = offset_bottom_rois[5] * M_PI / 180.0; DType theta = offset_bottom_rois[5]; // // Force too small ROIs to be 1x1 // DType roi_width = max(roi_end_w - roi_start_w, (DType)1.); // avoid 0 // DType roi_height = max(roi_end_h - roi_start_h, (DType)1.); // Force malformed ROIs to be 1x1 roi_width = max(roi_width, (DType)1.); roi_height = max(roi_height, (DType)1.); // Compute w and h at bottom DType bin_size_h = static_cast<DType>(roi_height) / static_cast<DType>(pooled_height); DType bin_size_w = static_cast<DType>(roi_width) / static_cast<DType>(pooled_width); int gw = floor(static_cast<DType>(pw)* group_size / pooled_width); int gh = floor(static_cast<DType>(ph)* group_size / pooled_height); gw = min(max(gw, 0), group_size - 1); gh = 
min(max(gh, 0), group_size - 1); int c = (ctop*group_size + gh)*group_size + gw; const DType* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). // Appropriate translation needs to be applied after. DType roi_start_h = -roi_height / 2.0; DType roi_start_w = -roi_width / 2.0; DType cosTheta = cos(theta); DType sinTheta = sin(theta); const DType sample_count = roi_bin_grid_h * roi_bin_grid_w; // e.g., iy = 0, 1 DType output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1 const DType yy = roi_start_h + ph * bin_size_h + static_cast<DType>(iy + .5f) * bin_size_h / static_cast<DType>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const DType xx = roi_start_w + pw * bin_size_w + static_cast<DType>(ix + .5f) * bin_size_w / static_cast<DType>(roi_bin_grid_w); // Rotate by theta around the center and translate // T x = xx * cosTheta + yy * sinTheta + roi_center_w; // T y = yy * cosTheta - xx * sinTheta + roi_center_h; DType x = xx * cosTheta - yy * sinTheta + roi_center_w; DType y = xx * sinTheta + yy * cosTheta + roi_center_h; DType val = bilinear_interpolate( offset_bottom_data, height, width, y, x, index); output_val += val; } } output_val /= sample_count; top_data[index] = output_val; // DType out_sum = 0; // for (int h = hstart; h < hend; ++h) { // for (int w = wstart; w < wend; ++w) { // int bottom_index = h*width + w; // out_sum += offset_bottom_data[bottom_index]; // } // } // DType bin_area = (hend - hstart)*(wend - wstart); // top_data[index] = is_empty? (DType)0. 
: out_sum/bin_area; } } template<typename DType> inline void PSROIALIGNAVERotatedPoolForward(const Tensor<gpu, 4, DType> &out, const Tensor<gpu, 4, DType> &data, const Tensor<gpu, 2, DType> &bbox, const float spatial_scale, const int sampling_ratio, const int output_dim_, const int group_size_) { const DType *bottom_data = data.dptr_; const DType *bottom_rois = bbox.dptr_; DType *top_data = out.dptr_; const int count = out.shape_.Size(); const int channels = data.size(1); const int height = data.size(2); const int width = data.size(3); const int pooled_height = out.size(2); const int pooled_width = out.size(3); cudaStream_t stream = Stream<gpu>::GetStream(out.stream_); PSROIALIGNAVERotatedPoolForwardKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count), kBaseThreadNum, 0, stream >> >( count, bottom_data, spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, bottom_rois, output_dim_, group_size_, top_data); PSROIALIGNAVEROTATEDPOOLING_CUDA_CHECK(cudaPeekAtLastError()); } template <typename T> __device__ void bilinear_interpolate_gradient( const int height, const int width, T y, T x, T* w1, T* w2, T* w3, T* w4, int* x_low, int* x_high, int* y_low, int* y_high, const int /*index*/ /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty *w1 = *w2 = *w3 = *w4 = 0.; *x_low = *x_high = *y_low = *y_high = -1; return; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } *y_low = static_cast<int>(y); *x_low = static_cast<int>(x); if (*y_low >= height - 1) { *y_high = *y_low = height - 1; y = (T)*y_low; } else { *y_high = *y_low + 1; } if (*x_low >= width - 1) { *x_high = *x_low = width - 1; x = (T)*x_low; } else { *x_high = *x_low + 1; } T ly = y - *y_low; T lx = x - *x_low; T hy = 1. - ly, hx = 1. 
- lx; *w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx; return; } template <typename DType> __global__ void PSROIALIGNAVERotatedPoolBackwardAccKernel( const int count, const DType* top_diff, const int num_rois, const DType spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const int group_size, const int output_dim, DType* bottom_diff, const DType* bottom_rois) { CUDA_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const DType* offset_bottom_rois = bottom_rois + n * 6; int roi_batch_ind = offset_bottom_rois[0]; // Do not round DType roi_center_w = offset_bottom_rois[1] * spatial_scale; DType roi_center_h = offset_bottom_rois[2] * spatial_scale; DType roi_width = offset_bottom_rois[3] * spatial_scale; DType roi_height = offset_bottom_rois[4] * spatial_scale; // T theta = offset_bottom_rois[5] * M_PI / 180.0; DType theta = offset_bottom_rois[5]; // DType roi_start_w = offset_bottom_rois[1] * spatial_scale; // DType roi_start_h = offset_bottom_rois[2] * spatial_scale; // DType roi_end_w = offset_bottom_rois[3] * spatial_scale; // DType roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force too small ROIs to be 1x1 // DType roi_width = max(roi_end_w - roi_start_w, (DType)1.); // avoid 0 // DType roi_height = max(roi_end_h - roi_start_h, (DType)1.); roi_width = max(roi_width, (DType)1.); roi_height = max(roi_height, (DType)1.); // Compute w and h at bottom DType bin_size_h = static_cast<DType>(roi_height) / static_cast<DType>(pooled_height); DType bin_size_w = static_cast<DType>(roi_width) / static_cast<DType>(pooled_width); // int hstart = floor(static_cast<DType>(ph)* bin_size_h // + roi_start_h); // int wstart = floor(static_cast<DType>(pw)* bin_size_w // + roi_start_w); // int hend = ceil(static_cast<DType>(ph + 1) * bin_size_h // + roi_start_h); // int wend = ceil(static_cast<DType>(pw + 1) * bin_size_w // + roi_start_w); // // Add roi offsets and clip to input boundaries // hstart = min(max(hstart, 0), height); // hend = min(max(hend, 0), height); // wstart = min(max(wstart, 0), width); // wend = min(max(wend, 0), width); // bool is_empty = (hend <= hstart) || (wend <= wstart); // Compute c at bottom int gw = floor(static_cast<DType>(pw)* group_size / pooled_width); int gh = floor(static_cast<DType>(ph)* group_size / pooled_height); gw = min(max(gw, 0), group_size - 1); gh = min(max(gh, 0), group_size - 1); int c = (ctop*group_size + gh)*group_size + gw; DType* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; // DType bin_area = (hend - hstart)*(wend - wstart); // DType diff_val = is_empty ? (DType)0. : top_diff[index] / bin_area; // for (int h = hstart; h < hend; ++h) { // for (int w = wstart; w < wend; ++w) { // int bottom_index = h*width + w; // atomicAdd(offset_bottom_diff + bottom_index, diff_val); // } // } // int top_offset = (n * channels + ctop) * pooled_height * pooled_width; // const DType* offset_top_diff = top_diff + top_offset; // const DType top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; const DType top_diff_this_bin = top_diff[index]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). // Appropriate translation needs to be applied after. DType roi_start_h = -roi_height / 2.0; DType roi_start_w = -roi_width / 2.0; DType cosTheta = cos(theta); DType sinTheta = sin(theta); // We do average (integral) pooling inside a bin const DType sample_count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1 const DType yy = roi_start_h + ph * bin_size_h + static_cast<DType>(iy + .5f) * bin_size_h / static_cast<DType>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const DType xx = roi_start_w + pw * bin_size_w + static_cast<DType>(ix + .5f) * bin_size_w / static_cast<DType>(roi_bin_grid_w); // Rotate by theta around the center and translate // T x = xx * cosTheta + yy * sinTheta + roi_center_w; // T y = yy * cosTheta - xx * sinTheta + roi_center_h; DType x = xx * cosTheta - yy * sinTheta + roi_center_w; DType y = xx * sinTheta + yy * cosTheta + roi_center_h; DType w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient( height, width, y, x, &w1, &w2, &w3, &w4, &x_low, &x_high, &y_low, &y_high, index); // TODO: choose the index DType g1 = top_diff_this_bin * w1 / sample_count; DType g2 = top_diff_this_bin * w2 / sample_count; DType g3 = top_diff_this_bin * w3 / sample_count; DType g4 = top_diff_this_bin * w4 / sample_count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd( offset_bottom_diff + y_low * width + x_low, static_cast<DType>(g1)); atomicAdd( offset_bottom_diff + y_low * width + x_high, static_cast<DType>(g2)); atomicAdd( offset_bottom_diff + y_high * width + x_low, static_cast<DType>(g3)); atomicAdd( offset_bottom_diff + y_high * width + x_high, static_cast<DType>(g4)); } // if } // ix } // iy } } template<typename DType> inline void PSROIALIGNAVERotatedPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad, const Tensor<gpu, 4, DType> &out_grad, const Tensor<gpu, 2, DType> &bbox, const float spatial_scale, const int sampling_ratio, const int output_dim_, const int group_size_) { // LOG(INFO) << "PSROIALIGNAVERotatedPoolBackward"; const DType *top_diff = out_grad.dptr_; const DType *bottom_rois = bbox.dptr_; DType *bottom_diff = in_grad.dptr_; const int count = out_grad.shape_.Size(); const int num_rois = bbox.size(0); const int channels = in_grad.size(1); const int height = in_grad.size(2); const int width = in_grad.size(3); const int pooled_height = out_grad.size(2); const int pooled_width = out_grad.size(3); cudaStream_t stream = Stream<gpu>::GetStream(in_grad.stream_); PSROIALIGNAVERotatedPoolBackwardAccKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count), kBaseThreadNum, 0, stream >> >( count, top_diff, num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, group_size_, output_dim_, bottom_diff, bottom_rois); PSROIALIGNAVEROTATEDPOOLING_CUDA_CHECK(cudaPeekAtLastError()); } } // namespace cuda template<typename DType> inline void PSROIALIGNAVERotatedPoolForward(const Tensor<gpu, 4, DType> &out, const Tensor<gpu, 4, DType> &data, const Tensor<gpu, 2, DType> &bbox, const float spatial_scale, const int sampling_ratio, const int output_dim_, const int group_size_) { cuda::PSROIALIGNAVERotatedPoolForward(out, data, bbox, spatial_scale, 
sampling_ratio,output_dim_, group_size_); } template<typename DType> inline void PSROIALIGNAVERotatedPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad, const Tensor<gpu, 4, DType> &out_grad, const Tensor<gpu, 2, DType> &bbox, const float spatial_scale, const int sampling_ratio, const int output_dim_, const int group_size_) { cuda::PSROIALIGNAVERotatedPoolBackwardAcc(in_grad, out_grad, bbox, spatial_scale, sampling_ratio, output_dim_, group_size_); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(PSROIALIGNAVERotatedPoolingParam param, int dtype) { Operator* op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new PSROIALIGNAVERotatedPoolingOp<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
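//
// ---------------------------------------------------------------------------
// Editor's sketch (hypothetical helper, not part of the operator above) of the
// coordinate mapping used by both the forward and backward kernels: a sample
// point inside bin (ph, pw) is expressed relative to the RoI centre, rotated
// by theta, then translated back to image coordinates. Spatial scaling of the
// RoI and the 1x1 size clamp are assumed to have been applied by the caller.
// ---------------------------------------------------------------------------
#include <cmath>

struct RotatedRoI { float cx, cy, w, h, theta; };  // centre, size, angle in radians

// (ph, pw): output bin index; (iy, ix): sub-sample index inside the bin;
// grid_h/grid_w: samples per bin; pooled_h/pooled_w: pooled output resolution.
inline void rotated_bin_sample(const RotatedRoI &roi,
                               int ph, int pw, int iy, int ix,
                               int pooled_h, int pooled_w,
                               int grid_h, int grid_w,
                               float *x_img, float *y_img)
{
    const float bin_h = roi.h / pooled_h;
    const float bin_w = roi.w / pooled_w;

    // Sample position relative to the RoI centre, in the un-rotated frame
    const float yy = -roi.h / 2.0f + ph * bin_h + (iy + 0.5f) * bin_h / grid_h;
    const float xx = -roi.w / 2.0f + pw * bin_w + (ix + 0.5f) * bin_w / grid_w;

    // Rotate by theta around the centre and translate back into the image
    const float c = std::cos(roi.theta), s = std::sin(roi.theta);
    *x_img = xx * c - yy * s + roi.cx;
    *y_img = xx * s + yy * c + roi.cy;
}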
#include <cuda.h> #include <cuda_runtime.h> const auto THREADS_PER_BLOCK_1D = 16u; //const auto THREADS_PER_BLOCK = 512u; // Max/min functions template<class T> inline __device__ T fastMaxCuda(const T a, const T b) { return (a > b ? a : b); } template<class T> inline __device__ T fastMinCuda(const T a, const T b) { return (a < b ? a : b); } template<class T> inline __device__ T fastTruncateCuda(const T value, const T min = 0, const T max = 1) { return fastMinCuda(max, fastMaxCuda(min, value)); } // Cubic interpolation template <typename T> inline __device__ void cubicSequentialData( int* xIntArray, int* yIntArray, T& dx, T& dy, const T xSource, const T ySource, const int widthSource, const int heightSource) { xIntArray[1] = fastTruncateCuda(int(floor(xSource)), 0, widthSource - 1); xIntArray[0] = fastMaxCuda(0, xIntArray[1] - 1); xIntArray[2] = fastMinCuda(widthSource - 1, xIntArray[1] + 1); xIntArray[3] = fastMinCuda(widthSource - 1, xIntArray[2] + 1); dx = xSource - xIntArray[1]; yIntArray[1] = fastTruncateCuda(int(floor(ySource)), 0, heightSource - 1); yIntArray[0] = fastMaxCuda(0, yIntArray[1] - 1); yIntArray[2] = fastMinCuda(heightSource - 1, yIntArray[1] + 1); yIntArray[3] = fastMinCuda(heightSource - 1, yIntArray[2] + 1); dy = ySource - yIntArray[1]; } template <typename T> inline __device__ T cubicInterpolate(const T v0, const T v1, const T v2, const T v3, const T dx) { // http://www.paulinternet.nl/?page=bicubic // const auto a = (-0.5f * v0 + 1.5f * v1 - 1.5f * v2 + 0.5f * v3); // const auto b = (v0 - 2.5f * v1 + 2.0 * v2 - 0.5 * v3); // const auto c = (-0.5f * v0 + 0.5f * v2); // out = ((a * dx + b) * dx + c) * dx + v1; return (-0.5f * v0 + 1.5f * v1 - 1.5f * v2 + 0.5f * v3) * dx * dx * dx + (v0 - 2.5f * v1 + 2.f * v2 - 0.5f * v3) * dx * dx - 0.5f * (v0 - v2) * dx // + (-0.5f * v0 + 0.5f * v2) * dx + v1; // return v1 + 0.5f * dx * (v2 - v0 + dx * (2.f * v0 - 5.f * v1 + 4.f * v2 - v3 + dx * (3.f * (v1 - v2) + v3 - v0))); } template <typename T> inline __device__ T bicubicInterpolate( const T* const sourcePtr, const T xSource, const T ySource, const int widthSource, const int heightSource, const int widthSourcePtr) { int xIntArray[4]; int yIntArray[4]; T dx; T dy; cubicSequentialData(xIntArray, yIntArray, dx, dy, xSource, ySource, widthSource, heightSource); T temp[4]; for (unsigned char i = 0; i < 4; i++) { const auto offset = yIntArray[i] * widthSourcePtr; temp[i] = cubicInterpolate( sourcePtr[offset + xIntArray[0]], sourcePtr[offset + xIntArray[1]], sourcePtr[offset + xIntArray[2]], sourcePtr[offset + xIntArray[3]], dx); } return cubicInterpolate(temp[0], temp[1], temp[2], temp[3], dy); } template <typename T> inline __device__ T bicubicInterpolate( const unsigned char* const sourcePtr, const T xSource, const T ySource, const int widthSource, const int heightSource, const int widthSourcePtr) { int xIntArray[4]; int yIntArray[4]; T dx; T dy; cubicSequentialData(xIntArray, yIntArray, dx, dy, xSource, ySource, widthSource, heightSource); T temp[4]; for (unsigned char i = 0; i < 4; i++) { const auto offset = yIntArray[i] * widthSourcePtr; temp[i] = cubicInterpolate( T(sourcePtr[offset + xIntArray[0]]), T(sourcePtr[offset + xIntArray[1]]), T(sourcePtr[offset + xIntArray[2]]), T(sourcePtr[offset + xIntArray[3]]), dx); } return cubicInterpolate(temp[0], temp[1], temp[2], temp[3], dy); } template <typename T> inline __device__ T bicubicInterpolate8Times( const T* const sourcePtr, const T xSource, const T ySource, const int widthSource, const int heightSource, const int threadIdxX, 
const int threadIdxY) { // Now we only need dx and dy const T dx = xSource - fastTruncateCuda(int(floor(xSource)), 0, widthSource - 1); const T dy = ySource - fastTruncateCuda(int(floor(ySource)), 0, heightSource - 1); T temp[4]; for (unsigned char i = 0; i < 4; i++) { const auto offset = 5 * (i + (threadIdxY > 3 ? 1 : 0)) + (threadIdxX > 3 ? 1 : 0); temp[i] = cubicInterpolate( sourcePtr[offset], sourcePtr[offset + 1], sourcePtr[offset + 2], sourcePtr[offset + 3], dx); } return cubicInterpolate(temp[0], temp[1], temp[2], temp[3], dy); } template <typename T> __global__ void resize8TimesKernel( T* targetPtr, const T* const sourcePtr, const int widthSource, const int heightSource, const int widthTarget, const int heightTarget, const unsigned int rescaleFactor) { const auto x = (blockIdx.x * blockDim.x) + threadIdx.x; const auto y = (blockIdx.y * blockDim.y) + threadIdx.y; const auto channel = (blockIdx.z * blockDim.z) + threadIdx.z; if (x < widthTarget && y < heightTarget) { // Normal resize // Note: The first blockIdx of each dimension behaves differently, so applying old version in those if (blockIdx.x < 1 || blockIdx.y < 1) // Actually it is only required for the first 4, but then I would have not loaded the shared memory // if ((blockIdx.x < 1 || blockIdx.y < 1) && (threadIdx.x < 4 || threadIdx.y < 4)) { const auto sourceArea = widthSource * heightSource; const auto targetArea = widthTarget * heightTarget; const T xSource = (x + T(0.5f)) / T(rescaleFactor) - T(0.5f); const T ySource = (y + T(0.5f)) / T(rescaleFactor) - T(0.5f); const T* const sourcePtrChannel = sourcePtr + channel * sourceArea; targetPtr[channel * targetArea + y * widthTarget + x] = bicubicInterpolate( sourcePtrChannel, xSource, ySource, widthSource, heightSource, widthSource); return; } // Load shared memory // If resize >= 5, then #threads per block >= # elements of shared memory const auto sharedSize = 25; // (4+1)^2 __shared__ T sourcePtrShared[sharedSize]; const auto sharedLoadId = threadIdx.x + rescaleFactor * threadIdx.y; if (sharedLoadId < sharedSize) { // Idea: Find minimum possible x and y const auto minTargetX = blockIdx.x * rescaleFactor; const auto minSourceXFloat = (minTargetX + T(0.5f)) / T(rescaleFactor) - T(0.5f); const auto minSourceXInt = int(floor(minSourceXFloat)) - 1; const auto minTargetY = blockIdx.y * rescaleFactor; const auto minSourceYFloat = (minTargetY + T(0.5f)) / T(rescaleFactor) - T(0.5f); const auto minSourceYInt = int(floor(minSourceYFloat)) - 1; // Get current x and y const auto xClean = fastTruncateCuda(minSourceXInt + int(sharedLoadId % 5), 0, widthSource - 1); const auto yClean = fastTruncateCuda(minSourceYInt + int(sharedLoadId / 5), 0, heightSource - 1); // Load into shared memory const auto sourceIndex = (channel * heightSource + yClean) * widthSource + xClean; sourcePtrShared[sharedLoadId] = sourcePtr[sourceIndex]; } __syncthreads(); // Apply resize const auto targetArea = widthTarget * heightTarget; const T xSource = (x + T(0.5f)) / T(rescaleFactor) - T(0.5f); const T ySource = (y + T(0.5f)) / T(rescaleFactor) - T(0.5f); targetPtr[channel * targetArea + y * widthTarget + x] = bicubicInterpolate8Times( sourcePtrShared, xSource, ySource, widthSource, heightSource, threadIdx.x, threadIdx.y); } } template <typename T> __global__ void uCharImageCastKernel( unsigned char* targetPtr, const T* const srcPtr, const int volume) { const auto x = (blockIdx.x * blockDim.x) + threadIdx.x; if (x < volume) targetPtr[x] = (unsigned char)(fastTruncateCuda(srcPtr[x], T(0), T(255))); } template 
<typename T> void uCharImageCast(unsigned char* targetPtr, const T* const srcPtr, const int volume) { try { const dim3 threadsPerBlock{ 32, 1, 1 }; const dim3 numBlocks{ getNumberCudaBlocks(volume, threadsPerBlock.x) }; uCharImageCastKernel << <numBlocks, threadsPerBlock >> > (targetPtr, srcPtr, volume); } catch (const std::exception& e) { //error(e.what(), __LINE__, __FUNCTION__, __FILE__); } } /* template void uCharImageCast( unsigned char* targetPtr, const float* const srcPtr, const int volume); template void uCharImageCast( unsigned char* targetPtr, const double* const srcPtr, const int volume); */ template <typename T> __global__ void reorderAndNormalizeKernel( T* dstPtr, const unsigned char* const srcPtr, const int width, const int height, const int channels) { const auto x = (blockIdx.x * blockDim.x) + threadIdx.x; const auto y = (blockIdx.y * blockDim.y) + threadIdx.y; const auto c = (blockIdx.z * blockDim.z) + threadIdx.z; if (x < width && y < height) { const auto dstIdx = c * width * height + (y * width + x); const auto srcIdx = (y * width + x) * channels + c; dstPtr[dstIdx] = T(srcPtr[srcIdx]) * T(1 / 256.f) - T(0.5f); } } template <typename T> void reorderAndNormalize( T* targetPtr, const unsigned char* const srcPtr, int width, int height, int channels) { try { const dim3 threadsPerBlock{ 32, 1, 1 }; const dim3 numBlocks{ getNumberCudaBlocks(width, threadsPerBlock.x), getNumberCudaBlocks(height, threadsPerBlock.y), getNumberCudaBlocks(channels, threadsPerBlock.z) }; reorderAndNormalizeKernel<<<numBlocks, threadsPerBlock>>>(targetPtr, srcPtr, width, height, channels); } catch (const std::exception& e) { //error(e.what(), __LINE__, __FUNCTION__, __FILE__); } } template void reorderAndNormalize( float* targetPtr, const unsigned char* const srcPtr, const int width, const int height, const int channels); template void reorderAndNormalize( double* targetPtr, const unsigned char* const srcPtr, const int width, const int height, const int channels); template <typename T> __global__ void resizeAndPadKernel( T* targetPtr, const T* const sourcePtr, const int widthSource, const int heightSource, const int widthTarget, const int heightTarget, const T rescaleFactor) { const auto x = (blockIdx.x * blockDim.x) + threadIdx.x; const auto y = (blockIdx.y * blockDim.y) + threadIdx.y; const auto channel = (blockIdx.z * blockDim.z) + threadIdx.z; if (x < widthTarget && y < heightTarget) { const auto targetArea = widthTarget * heightTarget; if (x < widthSource * rescaleFactor && y < heightSource * rescaleFactor) { const auto sourceArea = widthSource * heightSource; const T xSource = (x + T(0.5f)) / T(rescaleFactor) - T(0.5f); const T ySource = (y + T(0.5f)) / T(rescaleFactor) - T(0.5f); const T* const sourcePtrChannel = sourcePtr + channel * sourceArea; targetPtr[channel * targetArea + y * widthTarget + x] = bicubicInterpolate( sourcePtrChannel, xSource, ySource, widthSource, heightSource, widthSource); } else targetPtr[channel * targetArea + y * widthTarget + x] = 0; } } template <typename T> __global__ void resizeAndPadKernel( T* targetPtr, const unsigned char* const sourcePtr, const int widthSource, const int heightSource, const int widthTarget, const int heightTarget, const T rescaleFactor) { const auto x = (blockIdx.x * blockDim.x) + threadIdx.x; const auto y = (blockIdx.y * blockDim.y) + threadIdx.y; const auto channel = (blockIdx.z * blockDim.z) + threadIdx.z; if (x < widthTarget && y < heightTarget) { const auto targetArea = widthTarget * heightTarget; if (x < widthSource * rescaleFactor && y 
< heightSource * rescaleFactor) { const auto sourceArea = widthSource * heightSource; const T xSource = (x + T(0.5f)) / T(rescaleFactor) - T(0.5f); // xSource = x; const T ySource = (y + T(0.5f)) / T(rescaleFactor) - T(0.5f); // ySource = y; const unsigned char* sourcePtrChannel = sourcePtr + channel * sourceArea; targetPtr[channel * targetArea + y * widthTarget + x] = bicubicInterpolate( sourcePtrChannel, xSource, ySource, widthSource, heightSource, widthSource); } else targetPtr[channel * targetArea + y * widthTarget + x] = 0; } } template <typename T> void resizeAndPadRbgGpu( T* targetPtr, const T* const srcPtr, const int widthSource, const int heightSource, const int widthTarget, const int heightTarget, const T scaleFactor) { try { const auto channels = 3; const dim3 threadsPerBlock{ THREADS_PER_BLOCK_1D, THREADS_PER_BLOCK_1D, 1 }; const dim3 numBlocks{ getNumberCudaBlocks(widthTarget, threadsPerBlock.x), getNumberCudaBlocks(heightTarget, threadsPerBlock.y), getNumberCudaBlocks(channels, threadsPerBlock.z) }; resizeAndPadKernel<<<numBlocks, threadsPerBlock>>>( targetPtr, srcPtr, widthSource, heightSource, widthTarget, heightTarget, scaleFactor); } catch (const std::exception& e) { //error(e.what(), __LINE__, __FUNCTION__, __FILE__); } } template <typename T> void resizeAndPadRbgGpu( T* targetPtr, const unsigned char* const srcPtr, const int widthSource, const int heightSource, const int widthTarget, const int heightTarget, const T scaleFactor) { try { const auto channels = 3; const dim3 threadsPerBlock{ THREADS_PER_BLOCK_1D, THREADS_PER_BLOCK_1D, 1 }; const dim3 numBlocks{ getNumberCudaBlocks(widthTarget, threadsPerBlock.x), getNumberCudaBlocks(heightTarget, threadsPerBlock.y), getNumberCudaBlocks(channels, threadsPerBlock.z) }; resizeAndPadKernel<<<numBlocks, threadsPerBlock>>>( targetPtr, srcPtr, widthSource, heightSource, widthTarget, heightTarget, scaleFactor); } catch (const std::exception& e) { //error(e.what(), __LINE__, __FUNCTION__, __FILE__); } } template void resizeAndPadRbgGpu( float* targetPtr, const float* const srcPtr, const int widthSource, const int heightSource, const int widthTarget, const int heightTarget, const float scaleFactor); template void resizeAndPadRbgGpu( double* targetPtr, const double* const srcPtr, const int widthSource, const int heightSource, const int widthTarget, const int heightTarget, const double scaleFactor); template void resizeAndPadRbgGpu( float* targetPtr, const unsigned char* const srcPtr, const int widthSource, const int heightSource, const int widthTarget, const int heightTarget, const float scaleFactor); template void resizeAndPadRbgGpu( double* targetPtr, const unsigned char* const srcPtr, const int widthSource, const int heightSource, const int widthTarget, const int heightTarget, const double scaleFactor); template <typename T> void resizeAndMergeGpu( T* targetPtr, const T* sourcePtr, const std::array<int, 4>& targetSize, const std::array<int, 4>& sourceSize, const T& scaleInputToNetInputs) { // Parameters const auto channels = targetSize[1]; // here channels == sourceSize[1] == targetSize[1] must be (18+1)*3 const auto heightTarget = targetSize[2]; const auto widthTarget = targetSize[3]; const auto heightSource = sourceSize[2]; const auto widthSource = sourceSize[3]; const auto num = sourceSize[0]; // No multi-scale merging or no merging required const auto rescaleFactor = (unsigned int)std::ceil(heightTarget / (float)(heightSource)); // == 8 const dim3 threadsPerBlock{ rescaleFactor, rescaleFactor, 1 }; const dim3 numBlocks{ 
getNumberCudaBlocks(widthTarget, threadsPerBlock.x), getNumberCudaBlocks(heightTarget, threadsPerBlock.y), getNumberCudaBlocks(num * channels, threadsPerBlock.z) }; resize8TimesKernel<<<numBlocks, threadsPerBlock>>>( targetPtr, sourcePtr, widthSource, heightSource, widthTarget, heightTarget, rescaleFactor); } template void resizeAndMergeGpu( float* targetPtr, const float* sourcePtr, const std::array<int, 4>& targetSize, const std::array<int, 4>& sourceSize, const float& scaleInputToNetInputs); template void resizeAndMergeGpu( double* targetPtr, const double* sourcePtr, const std::array<int, 4>& targetSize, const std::array<int, 4>& sourceSize, const double& scaleInputToNetInputs);
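A minimal host-side sketch of how the kernels above are typically chained: an 8-bit HWC frame is first reordered and normalized into planar CHW floats, then resized and zero-padded into the network input buffer. It assumes the declarations of reorderAndNormalize and resizeAndPadRbgGpu above are visible; the buffer names, image dimensions, and the aspect-preserving scale formula are illustrative assumptions, not values taken from the original source.

#include <algorithm>
#include <cuda_runtime.h>

void preprocessFrameSketch(const unsigned char* dFrameHwc, // device ptr, width*height*3 bytes
                           float* dFrameChw,               // device ptr, 3*width*height floats
                           float* dNetInput,               // device ptr, 3*netWidth*netHeight floats
                           int width, int height,
                           int netWidth, int netHeight)
{
    const int channels = 3;

    // HWC uint8 -> CHW float in roughly [-0.5, 0.5), as done by reorderAndNormalizeKernel.
    reorderAndNormalize(dFrameChw, dFrameHwc, width, height, channels);

    // Assumed scale: fit the source inside the target; the remainder is padded with zeros.
    const float scale = std::min(netWidth  / (float)width,
                                 netHeight / (float)height);
    resizeAndPadRbgGpu(dNetInput, dFrameChw,
                       width, height, netWidth, netHeight, scale);

    cudaDeviceSynchronize();   // the launches above are asynchronous
}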
#include "../../util_math.cuh" #include "../../util_device.cuh" #include "../../util_namespace.cuh" #include "../../agent/agent_merge_sort.cuh" #include <thrust/system/cuda/detail/core/triple_chevron_launch.h> #include <thrust/detail/integer_math.h> CUB_NAMESPACE_BEGIN template <bool UseVShmem, typename ActivePolicyT, typename KeyInputIteratorT, typename ValueInputIteratorT, typename KeyIteratorT, typename ValueIteratorT, typename OffsetT, typename CompareOpT, typename KeyT, typename ValueT> void __global__ __launch_bounds__(ActivePolicyT::BLOCK_THREADS) DeviceMergeSortBlockSortKernel(bool ping, KeyInputIteratorT keys_in, ValueInputIteratorT items_in, KeyIteratorT keys_out, ValueIteratorT items_out, OffsetT keys_count, KeyT *tmp_keys_out, ValueT *tmp_items_out, CompareOpT compare_op, char *vshmem) { extern __shared__ char shmem[]; using AgentBlockSortT = AgentBlockSort<ActivePolicyT, KeyInputIteratorT, ValueInputIteratorT, KeyIteratorT, ValueIteratorT, OffsetT, CompareOpT, KeyT, ValueT>; const OffsetT vshmem_offset = blockIdx.x * AgentBlockSortT::SHARED_MEMORY_SIZE; typename AgentBlockSortT::TempStorage &storage = *reinterpret_cast<typename AgentBlockSortT::TempStorage *>( UseVShmem ? vshmem + vshmem_offset : shmem); AgentBlockSortT agent(ping, storage, THRUST_NS_QUALIFIER::cuda_cub::core::make_load_iterator(ActivePolicyT(), keys_in), THRUST_NS_QUALIFIER::cuda_cub::core::make_load_iterator(ActivePolicyT(), items_in), keys_count, keys_out, items_out, tmp_keys_out, tmp_items_out, compare_op); agent.Process(); } template <typename KeyIteratorT, typename OffsetT, typename CompareOpT, typename KeyT> __global__ void DeviceMergeSortPartitionKernel(bool ping, KeyIteratorT keys_ping, KeyT *keys_pong, OffsetT keys_count, OffsetT num_partitions, OffsetT *merge_partitions, CompareOpT compare_op, OffsetT target_merged_tiles_number, int items_per_tile) { OffsetT partition_idx = blockDim.x * blockIdx.x + threadIdx.x; if (partition_idx < num_partitions) { AgentPartition<KeyIteratorT, OffsetT, CompareOpT, KeyT> agent( ping, keys_ping, keys_pong, keys_count, partition_idx, merge_partitions, compare_op, target_merged_tiles_number, items_per_tile); agent.Process(); } } template < bool UseVShmem, typename ActivePolicyT, typename KeyIteratorT, typename ValueIteratorT, typename OffsetT, typename CompareOpT, typename KeyT, typename ValueT> void __global__ __launch_bounds__(ActivePolicyT::BLOCK_THREADS) DeviceMergeSortMergeKernel(bool ping, KeyIteratorT keys_ping, ValueIteratorT items_ping, OffsetT keys_count, KeyT *keys_pong, ValueT *items_pong, CompareOpT compare_op, OffsetT *merge_partitions, OffsetT target_merged_tiles_number, char *vshmem ) { extern __shared__ char shmem[]; using AgentMergeT = AgentMerge<ActivePolicyT, KeyIteratorT, ValueIteratorT, OffsetT, CompareOpT, KeyT, ValueT>; const OffsetT vshmem_offset = blockIdx.x * AgentMergeT::SHARED_MEMORY_SIZE; typename AgentMergeT::TempStorage &storage = *reinterpret_cast<typename AgentMergeT::TempStorage *>( UseVShmem ? 
vshmem + vshmem_offset : shmem); AgentMergeT agent( ping, storage, THRUST_NS_QUALIFIER::cuda_cub::core::make_load_iterator(ActivePolicyT(), keys_ping), THRUST_NS_QUALIFIER::cuda_cub::core::make_load_iterator(ActivePolicyT(), items_ping), THRUST_NS_QUALIFIER::cuda_cub::core::make_load_iterator(ActivePolicyT(), keys_pong), THRUST_NS_QUALIFIER::cuda_cub::core::make_load_iterator(ActivePolicyT(), items_pong), keys_count, keys_pong, items_pong, keys_ping, items_ping, compare_op, merge_partitions, target_merged_tiles_number); agent.Process(); } /****************************************************************************** * Policy ******************************************************************************/ template <typename KeyIteratorT> struct DeviceMergeSortPolicy { using KeyT = typename std::iterator_traits<KeyIteratorT>::value_type; //------------------------------------------------------------------------------ // Architecture-specific tuning policies //------------------------------------------------------------------------------ struct Policy350 : ChainedPolicy<350, Policy350, Policy350> { using MergeSortPolicy = AgentMergeSortPolicy<256, Nominal4BItemsToItems<KeyT>(11), cub::BLOCK_LOAD_WARP_TRANSPOSE, cub::LOAD_LDG, cub::BLOCK_STORE_WARP_TRANSPOSE>; }; // NVBug 3384810 #if defined(__NVCOMPILER_CUDA__) using Policy520 = Policy350; #else struct Policy520 : ChainedPolicy<520, Policy520, Policy350> { using MergeSortPolicy = AgentMergeSortPolicy<512, Nominal4BItemsToItems<KeyT>(15), cub::BLOCK_LOAD_WARP_TRANSPOSE, cub::LOAD_LDG, cub::BLOCK_STORE_WARP_TRANSPOSE>; }; #endif struct Policy600 : ChainedPolicy<600, Policy600, Policy520> { using MergeSortPolicy = AgentMergeSortPolicy<256, Nominal4BItemsToItems<KeyT>(17), cub::BLOCK_LOAD_WARP_TRANSPOSE, cub::LOAD_DEFAULT, cub::BLOCK_STORE_WARP_TRANSPOSE>; }; /// MaxPolicy using MaxPolicy = Policy600; }; template <typename KeyInputIteratorT, typename ValueInputIteratorT, typename KeyIteratorT, typename ValueIteratorT, typename OffsetT, typename MergePolicyT, typename CompareOpT, typename KeyT, typename ValueT> struct BlockSortLauncher { int num_tiles; std::size_t block_sort_shmem_size; bool ping; KeyInputIteratorT d_input_keys; ValueInputIteratorT d_input_items; KeyIteratorT d_output_keys; ValueIteratorT d_output_items; OffsetT num_items; CompareOpT compare_op; cudaStream_t stream; KeyT *keys_buffer; ValueT *items_buffer; char* vshmem_ptr; CUB_RUNTIME_FUNCTION __forceinline__ BlockSortLauncher(int num_tiles, std::size_t block_sort_shmem_size, bool ping, KeyInputIteratorT d_input_keys, ValueInputIteratorT d_input_items, KeyIteratorT d_output_keys, ValueIteratorT d_output_items, OffsetT num_items, CompareOpT compare_op, cudaStream_t stream, KeyT *keys_buffer, ValueT *items_buffer, char *vshmem_ptr) : num_tiles(num_tiles) , block_sort_shmem_size(block_sort_shmem_size) , ping(ping) , d_input_keys(d_input_keys) , d_input_items(d_input_items) , d_output_keys(d_output_keys) , d_output_items(d_output_items) , num_items(num_items) , compare_op(compare_op) , stream(stream) , keys_buffer(keys_buffer) , items_buffer(items_buffer) , vshmem_ptr(vshmem_ptr) {} CUB_RUNTIME_FUNCTION __forceinline__ void launch() const { if (vshmem_ptr) { launch_impl<true>(); } else { launch_impl<false>(); } } template <bool UseVShmem> CUB_RUNTIME_FUNCTION __forceinline__ void launch_impl() const { THRUST_NS_QUALIFIER::cuda_cub::launcher::triple_chevron( num_tiles, MergePolicyT::BLOCK_THREADS, block_sort_shmem_size, stream) .doit(DeviceMergeSortBlockSortKernel<UseVShmem, 
MergePolicyT, KeyInputIteratorT, ValueInputIteratorT, KeyIteratorT, ValueIteratorT, OffsetT, CompareOpT, KeyT, ValueT>, ping, d_input_keys, d_input_items, d_output_keys, d_output_items, num_items, keys_buffer, items_buffer, compare_op, vshmem_ptr); } }; template < typename KeyIteratorT, typename ValueIteratorT, typename OffsetT, typename MergePolicyT, typename CompareOpT, typename KeyT, typename ValueT> struct MergeLauncher { int num_tiles; std::size_t merge_shmem_size; KeyIteratorT d_keys; ValueIteratorT d_items; OffsetT num_items; CompareOpT compare_op; OffsetT *merge_partitions; cudaStream_t stream; KeyT *keys_buffer; ValueT *items_buffer; char *vshmem_ptr; CUB_RUNTIME_FUNCTION __forceinline__ MergeLauncher(int num_tiles, std::size_t merge_shmem_size, KeyIteratorT d_keys, ValueIteratorT d_items, OffsetT num_items, CompareOpT compare_op, OffsetT *merge_partitions, cudaStream_t stream, KeyT *keys_buffer, ValueT *items_buffer, char *vshmem_ptr) : num_tiles(num_tiles) , merge_shmem_size(merge_shmem_size) , d_keys(d_keys) , d_items(d_items) , num_items(num_items) , compare_op(compare_op) , merge_partitions(merge_partitions) , stream(stream) , keys_buffer(keys_buffer) , items_buffer(items_buffer) , vshmem_ptr(vshmem_ptr) {} CUB_RUNTIME_FUNCTION __forceinline__ void launch(bool ping, OffsetT target_merged_tiles_number) const { if (vshmem_ptr) { launch_impl<true>(ping, target_merged_tiles_number); } else { launch_impl<false>(ping, target_merged_tiles_number); } } template <bool UseVShmem> CUB_RUNTIME_FUNCTION __forceinline__ void launch_impl(bool ping, OffsetT target_merged_tiles_number) const { THRUST_NS_QUALIFIER::cuda_cub::launcher::triple_chevron( num_tiles, MergePolicyT::BLOCK_THREADS, merge_shmem_size, stream) .doit(DeviceMergeSortMergeKernel<UseVShmem, MergePolicyT, KeyIteratorT, ValueIteratorT, OffsetT, CompareOpT, KeyT, ValueT>, ping, d_keys, d_items, num_items, keys_buffer, items_buffer, compare_op, merge_partitions, target_merged_tiles_number, vshmem_ptr); } }; template <typename KeyInputIteratorT, typename ValueInputIteratorT, typename KeyIteratorT, typename ValueIteratorT, typename OffsetT, typename CompareOpT, typename SelectedPolicy = DeviceMergeSortPolicy<KeyIteratorT>> struct DispatchMergeSort : SelectedPolicy { using KeyT = typename std::iterator_traits<KeyIteratorT>::value_type; using ValueT = typename std::iterator_traits<ValueIteratorT>::value_type; // Whether or not there are values to be trucked along with keys static constexpr bool KEYS_ONLY = Equals<ValueT, NullType>::VALUE; //------------------------------------------------------------------------------ // Problem state //------------------------------------------------------------------------------ void *d_temp_storage; ///< [in] Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
std::size_t &temp_storage_bytes; ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation KeyInputIteratorT d_input_keys; ///< [in] Pointer to the input sequence of unsorted input keys ValueInputIteratorT d_input_items;///< [in] Pointer to the input sequence of unsorted input values KeyIteratorT d_output_keys; ///< [out] Pointer to the output sequence of sorted input keys ValueIteratorT d_output_items; ///< [out] Pointer to the output sequence of sorted input values OffsetT num_items; ///< [in] Number of items to sort CompareOpT compare_op; ///< [in] Comparison function object which returns true if the first argument is ordered before the second cudaStream_t stream; ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous; ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. int ptx_version; //------------------------------------------------------------------------------ // Constructor //------------------------------------------------------------------------------ CUB_RUNTIME_FUNCTION __forceinline__ std::size_t vshmem_size(std::size_t max_shmem, std::size_t shmem_per_block, std::size_t num_blocks) { if (shmem_per_block > max_shmem) { return shmem_per_block * num_blocks; } else { return 0; } } /// Constructor CUB_RUNTIME_FUNCTION __forceinline__ DispatchMergeSort(void *d_temp_storage, std::size_t &temp_storage_bytes, KeyInputIteratorT d_input_keys, ValueInputIteratorT d_input_items, KeyIteratorT d_output_keys, ValueIteratorT d_output_items, OffsetT num_items, CompareOpT compare_op, cudaStream_t stream, bool debug_synchronous, int ptx_version) : d_temp_storage(d_temp_storage) , temp_storage_bytes(temp_storage_bytes) , d_input_keys(d_input_keys) , d_input_items(d_input_items) , d_output_keys(d_output_keys) , d_output_items(d_output_items) , num_items(num_items) , compare_op(compare_op) , stream(stream) , debug_synchronous(debug_synchronous) , ptx_version(ptx_version) {} /// Invocation template <typename ActivePolicyT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t Invoke() { using MergePolicyT = typename ActivePolicyT::MergeSortPolicy; using BlockSortAgentT = AgentBlockSort<MergePolicyT, KeyInputIteratorT, ValueInputIteratorT, KeyIteratorT, ValueIteratorT, OffsetT, CompareOpT, KeyT, ValueT>; using MergeAgentT = AgentMerge<MergePolicyT, KeyIteratorT, ValueIteratorT, OffsetT, CompareOpT, KeyT, ValueT>; cudaError error = cudaSuccess; if (num_items == 0) return error; do { // Get device ordinal int device_ordinal = 0; if (CubDebug(error = cudaGetDevice(&device_ordinal))) { break; } // Get shared memory size int max_shmem = 0; if (CubDebug(error = cudaDeviceGetAttribute(&max_shmem, cudaDevAttrMaxSharedMemoryPerBlock, device_ordinal))) { break; } const auto tile_size = MergePolicyT::ITEMS_PER_TILE; const auto num_tiles = cub::DivideAndRoundUp(num_items, tile_size); const auto block_sort_shmem_size = static_cast<std::size_t>(BlockSortAgentT::SHARED_MEMORY_SIZE); const auto merge_shmem_size = static_cast<std::size_t>(MergeAgentT::SHARED_MEMORY_SIZE); const auto merge_partitions_size = static_cast<std::size_t>(1 + num_tiles) * sizeof(OffsetT); const auto temporary_keys_storage_size = static_cast<std::size_t>(num_items * sizeof(KeyT)); const auto temporary_values_storage_size = static_cast<std::size_t>(num_items * sizeof(ValueT)) * !KEYS_ONLY; const auto virtual_shared_memory_size = 
vshmem_size(static_cast<std::size_t>(max_shmem), (cub::max)(block_sort_shmem_size, merge_shmem_size), static_cast<std::size_t>(num_tiles)); void *allocations[4] = {nullptr, nullptr, nullptr, nullptr}; std::size_t allocation_sizes[4] = {merge_partitions_size, temporary_keys_storage_size, temporary_values_storage_size, virtual_shared_memory_size}; if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) { break; } if (d_temp_storage == nullptr) { // Return if the caller is simply requesting the size of the storage // allocation break; } const int num_passes = static_cast<int>(THRUST_NS_QUALIFIER::detail::log2_ri(num_tiles)); /* * The algorithm consists of stages. At each stage, there are input and * output arrays. There are two pairs of arrays allocated (keys and items). * One pair is from function arguments and another from temporary storage. * Ping is a helper variable that controls which of these two pairs of * arrays is an input and which is an output for a current stage. If the * ping is true - the current stage stores its result in the temporary * storage. The temporary storage acts as input data otherwise. * * Block sort is executed before the main loop. It stores its result in * the pair of arrays that will be an input of the next stage. The initial * value of the ping variable is selected so that the result of the final * stage is stored in the input arrays. */ bool ping = num_passes % 2 == 0; auto merge_partitions = reinterpret_cast<OffsetT *>(allocations[0]); auto keys_buffer = reinterpret_cast<KeyT *>(allocations[1]); auto items_buffer = reinterpret_cast<ValueT *>(allocations[2]); char *vshmem_ptr = virtual_shared_memory_size > 0 ? reinterpret_cast<char *>(allocations[3]) : nullptr; // Invoke DeviceReduceKernel BlockSortLauncher<KeyInputIteratorT, ValueInputIteratorT, KeyIteratorT, ValueIteratorT, OffsetT, MergePolicyT, CompareOpT, KeyT, ValueT> block_sort_launcher(static_cast<int>(num_tiles), virtual_shared_memory_size > 0 ? 0 : block_sort_shmem_size, ping, d_input_keys, d_input_items, d_output_keys, d_output_items, num_items, compare_op, stream, keys_buffer, items_buffer, vshmem_ptr); block_sort_launcher.launch(); if (debug_synchronous) { if (CubDebug(error = SyncStream(stream))) { break; } } // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) { break; } const OffsetT num_partitions = num_tiles + 1; const int threads_per_partition_block = 256; const int partition_grid_size = static_cast<int>( cub::DivideAndRoundUp(num_partitions, threads_per_partition_block)); MergeLauncher<KeyIteratorT, ValueIteratorT, OffsetT, MergePolicyT, CompareOpT, KeyT, ValueT> merge_launcher(static_cast<int>(num_tiles), virtual_shared_memory_size > 0 ? 
0 : merge_shmem_size, d_output_keys, d_output_items, num_items, compare_op, merge_partitions, stream, keys_buffer, items_buffer, vshmem_ptr); for (int pass = 0; pass < num_passes; ++pass, ping = !ping) { OffsetT target_merged_tiles_number = OffsetT(2) << pass; // Partition THRUST_NS_QUALIFIER::cuda_cub::launcher::triple_chevron( partition_grid_size, threads_per_partition_block, 0, stream) .doit(DeviceMergeSortPartitionKernel<KeyIteratorT, OffsetT, CompareOpT, KeyT>, ping, d_output_keys, keys_buffer, num_items, num_partitions, merge_partitions, compare_op, target_merged_tiles_number, tile_size); if (debug_synchronous) { if (CubDebug(error = SyncStream(stream))) { break; } } // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) { break; } // Merge merge_launcher.launch(ping, target_merged_tiles_number); if (debug_synchronous) { if (CubDebug(error = SyncStream(stream))) { break; } } // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) { break; } } } while (0); return error; } CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t Dispatch(void *d_temp_storage, std::size_t &temp_storage_bytes, KeyInputIteratorT d_input_keys, ValueInputIteratorT d_input_items, KeyIteratorT d_output_keys, ValueIteratorT d_output_items, OffsetT num_items, CompareOpT compare_op, cudaStream_t stream, bool debug_synchronous) { using MaxPolicyT = typename DispatchMergeSort::MaxPolicy; cudaError error = cudaSuccess; do { // Get PTX version int ptx_version = 0; if (CubDebug(error = PtxVersion(ptx_version))) { break; } // Create dispatch functor DispatchMergeSort dispatch(d_temp_storage, temp_storage_bytes, d_input_keys, d_input_items, d_output_keys, d_output_items, num_items, compare_op, stream, debug_synchronous, ptx_version); // Dispatch to chained policy if (CubDebug(error = MaxPolicyT::Invoke(ptx_version, dispatch))) { break; } } while (0); return error; } }; CUB_NAMESPACE_END
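A minimal sketch of the usual two-phase CUB dispatch pattern applied to DispatchMergeSort above: the first call with d_temp_storage == nullptr only writes the required size into temp_storage_bytes, the second call performs the sort in place. The CustomLess functor, the raw-pointer iterators, and the wrapper name are illustrative assumptions; it assumes this dispatch header and the CUDA runtime header are already included.

struct CustomLess
{
  __device__ bool operator()(const int &lhs, const int &rhs) const
  {
    return lhs < rhs;
  }
};

cudaError_t SortPairsSketch(int *d_keys, float *d_values, int num_items,
                            cudaStream_t stream = 0)
{
  using DispatchT = cub::DispatchMergeSort<int*, float*, int*, float*, int, CustomLess>;

  void *d_temp_storage = nullptr;
  std::size_t temp_storage_bytes = 0;

  // Phase 1: query the temporary storage size (no sorting work is done).
  cudaError_t error = DispatchT::Dispatch(d_temp_storage, temp_storage_bytes,
                                          d_keys, d_values, d_keys, d_values,
                                          num_items, CustomLess{}, stream,
                                          /* debug_synchronous */ false);
  if (error != cudaSuccess) return error;

  cudaMalloc(&d_temp_storage, temp_storage_bytes);

  // Phase 2: run the block sort + partition/merge passes; input and output buffers alias.
  error = DispatchT::Dispatch(d_temp_storage, temp_storage_bytes,
                              d_keys, d_values, d_keys, d_values,
                              num_items, CustomLess{}, stream, false);

  cudaFree(d_temp_storage);
  return error;
}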
#include <cuda_helper.h> #include <cuda_vectors.h> #include <miner.h> __constant__ static uint32_t _ALIGN(16) c_midstate112[8]; __constant__ static uint32_t _ALIGN(16) c_midbuffer112[8]; __constant__ static uint32_t _ALIGN(16) c_dataEnd112[12]; __constant__ const uint32_t c_H256[8] = { 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19 }; __constant__ static uint32_t c_K[64] = { 0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5, 0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174, 0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA, 0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967, 0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85, 0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070, 0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3, 0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2 }; #ifdef __INTELLISENSE__ #define atomicExch(p,y) y #endif // ------------------------------------------------------------------------------------------------ static const uint32_t cpu_H256[8] = { 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19 }; static const uint32_t cpu_K[64] = { 0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5, 0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174, 0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA, 0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967, 0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85, 0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070, 0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3, 0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2 }; __host__ static void sha256_step1_host(uint32_t a, uint32_t b, uint32_t c, uint32_t &d, uint32_t e, uint32_t f, uint32_t g, uint32_t &h, uint32_t in, const uint32_t Kshared) { uint32_t vxandx = (((f) ^ (g)) & (e)) ^ (g); // xandx(e, f, g); uint32_t bsg21 = ROTR32(e, 6) ^ ROTR32(e, 11) ^ ROTR32(e, 25); // bsg2_1(e); uint32_t bsg20 = ROTR32(a, 2) ^ ROTR32(a, 13) ^ ROTR32(a, 22); //bsg2_0(a); uint32_t andorv = ((b) & (c)) | (((b) | (c)) & (a)); //andor32(a,b,c); uint32_t t1 = h + bsg21 + vxandx + Kshared + in; uint32_t t2 = bsg20 + andorv; d = d + t1; h = t1 + t2; } __host__ static void sha256_step2_host(uint32_t a, uint32_t b, uint32_t c, uint32_t &d, uint32_t e, uint32_t f, uint32_t g, uint32_t &h, uint32_t* in, uint32_t pc, const uint32_t Kshared) { int pcidx1 = (pc-2) & 0xF; int pcidx2 = (pc-7) & 0xF; int pcidx3 = (pc-15) & 0xF; uint32_t inx0 = in[pc]; uint32_t inx1 = in[pcidx1]; uint32_t inx2 = in[pcidx2]; uint32_t inx3 = in[pcidx3]; uint32_t ssg21 = ROTR32(inx1, 17) ^ ROTR32(inx1, 19) ^ SPH_T32((inx1) >> 10); //ssg2_1(inx1); uint32_t ssg20 = ROTR32(inx3, 7) ^ ROTR32(inx3, 18) ^ SPH_T32((inx3) >> 3); //ssg2_0(inx3); uint32_t vxandx = (((f) ^ (g)) & (e)) ^ (g); // xandx(e, f, g); uint32_t bsg21 = ROTR32(e, 6) ^ ROTR32(e, 11) ^ ROTR32(e, 
25); // bsg2_1(e); uint32_t bsg20 = ROTR32(a, 2) ^ ROTR32(a, 13) ^ ROTR32(a, 22); //bsg2_0(a); uint32_t andorv = ((b) & (c)) | (((b) | (c)) & (a)); //andor32(a,b,c); uint32_t t1,t2; in[pc] = ssg21 + inx2 + ssg20 + inx0; t1 = h + bsg21 + vxandx + Kshared + in[pc]; t2 = bsg20 + andorv; d = d + t1; h = t1 + t2; } __host__ static void sha256_round_body_host(uint32_t* in, uint32_t* state, const uint32_t* Kshared) { uint32_t a = state[0]; uint32_t b = state[1]; uint32_t c = state[2]; uint32_t d = state[3]; uint32_t e = state[4]; uint32_t f = state[5]; uint32_t g = state[6]; uint32_t h = state[7]; sha256_step1_host(a,b,c,d,e,f,g,h,in[0], Kshared[0]); sha256_step1_host(h,a,b,c,d,e,f,g,in[1], Kshared[1]); sha256_step1_host(g,h,a,b,c,d,e,f,in[2], Kshared[2]); sha256_step1_host(f,g,h,a,b,c,d,e,in[3], Kshared[3]); sha256_step1_host(e,f,g,h,a,b,c,d,in[4], Kshared[4]); sha256_step1_host(d,e,f,g,h,a,b,c,in[5], Kshared[5]); sha256_step1_host(c,d,e,f,g,h,a,b,in[6], Kshared[6]); sha256_step1_host(b,c,d,e,f,g,h,a,in[7], Kshared[7]); sha256_step1_host(a,b,c,d,e,f,g,h,in[8], Kshared[8]); sha256_step1_host(h,a,b,c,d,e,f,g,in[9], Kshared[9]); sha256_step1_host(g,h,a,b,c,d,e,f,in[10],Kshared[10]); sha256_step1_host(f,g,h,a,b,c,d,e,in[11],Kshared[11]); sha256_step1_host(e,f,g,h,a,b,c,d,in[12],Kshared[12]); sha256_step1_host(d,e,f,g,h,a,b,c,in[13],Kshared[13]); sha256_step1_host(c,d,e,f,g,h,a,b,in[14],Kshared[14]); sha256_step1_host(b,c,d,e,f,g,h,a,in[15],Kshared[15]); for (int i=0; i<3; i++) { sha256_step2_host(a,b,c,d,e,f,g,h,in,0, Kshared[16+16*i]); sha256_step2_host(h,a,b,c,d,e,f,g,in,1, Kshared[17+16*i]); sha256_step2_host(g,h,a,b,c,d,e,f,in,2, Kshared[18+16*i]); sha256_step2_host(f,g,h,a,b,c,d,e,in,3, Kshared[19+16*i]); sha256_step2_host(e,f,g,h,a,b,c,d,in,4, Kshared[20+16*i]); sha256_step2_host(d,e,f,g,h,a,b,c,in,5, Kshared[21+16*i]); sha256_step2_host(c,d,e,f,g,h,a,b,in,6, Kshared[22+16*i]); sha256_step2_host(b,c,d,e,f,g,h,a,in,7, Kshared[23+16*i]); sha256_step2_host(a,b,c,d,e,f,g,h,in,8, Kshared[24+16*i]); sha256_step2_host(h,a,b,c,d,e,f,g,in,9, Kshared[25+16*i]); sha256_step2_host(g,h,a,b,c,d,e,f,in,10,Kshared[26+16*i]); sha256_step2_host(f,g,h,a,b,c,d,e,in,11,Kshared[27+16*i]); sha256_step2_host(e,f,g,h,a,b,c,d,in,12,Kshared[28+16*i]); sha256_step2_host(d,e,f,g,h,a,b,c,in,13,Kshared[29+16*i]); sha256_step2_host(c,d,e,f,g,h,a,b,in,14,Kshared[30+16*i]); sha256_step2_host(b,c,d,e,f,g,h,a,in,15,Kshared[31+16*i]); } state[0] += a; state[1] += b; state[2] += c; state[3] += d; state[4] += e; state[5] += f; state[6] += g; state[7] += h; } #define xor3b(a,b,c) (a ^ b ^ c) __device__ __forceinline__ uint32_t bsg2_0(const uint32_t x) { return xor3b(ROTR32(x,2),ROTR32(x,13),ROTR32(x,22)); } __device__ __forceinline__ uint32_t bsg2_1(const uint32_t x) { return xor3b(ROTR32(x,6),ROTR32(x,11),ROTR32(x,25)); } __device__ __forceinline__ uint32_t ssg2_0(const uint32_t x) { return xor3b(ROTR32(x,7),ROTR32(x,18),(x>>3)); } __device__ __forceinline__ uint32_t ssg2_1(const uint32_t x) { return xor3b(ROTR32(x,17),ROTR32(x,19),(x>>10)); } __device__ __forceinline__ uint32_t ssg2_11(const uint32_t x) { return xor3b(ROTR32(x,17),ROTR32(x,19),shr_u32(x,10)); } __device__ __forceinline__ uint2 vectorizeswap(const uint64_t v) { uint2 result; asm volatile ("mov.b64 {%0,%1},%2;" : "=r"(result.y), "=r"(result.x) : "l"(v)); return result; } #define Maj(x, y, z) ((x & (y | z)) | (y & z)) #define Ch(a, b, c) (((b^c) & a) ^ c) __device__ __forceinline__ static void sha2_step(const uint32_t a,const uint32_t b,const uint32_t c, uint32_t 
&d,const uint32_t e,const uint32_t f,const uint32_t g, uint32_t &h,const uint32_t in, const uint32_t Kshared) { const uint32_t t1 = h + bsg2_1(e) + Ch(e, f, g) + Kshared + in; h = t1 + bsg2_0(a) + Maj(a, b, c); d+= t1; } __device__ __forceinline__ static void sha256_round_first(uint32_t *in,uint32_t* buf,const uint32_t *const __restrict__ state) { uint32_t a = buf[0] + in[11]; uint32_t b = buf[1]; uint32_t c = buf[2]; uint32_t d = buf[3]; uint32_t e = buf[4] + in[11]; uint32_t f = buf[5]; uint32_t g = buf[6]; uint32_t h = buf[7]; // 10 first steps made on host //sha2_step(f,g,h,a,b,c,d,e,in[11],c_K[11]); sha2_step(e,f,g,h,a,b,c,d,in[12],c_K[12]); sha2_step(d,e,f,g,h,a,b,c,in[13],c_K[13]); sha2_step(c,d,e,f,g,h,a,b,in[14],c_K[14]); sha2_step(b,c,d,e,f,g,h,a,in[15],c_K[15]); //in is partially precomputed on host in[2]+= in[11]; in[4]+= ssg2_1(in[2]); in[6]+= ssg2_1(in[4]); in[8]+= ssg2_1(in[6]); in[9]+= in[ 2]; sha2_step(a,b,c,d,e,f,g,h,in[0], c_K[16]); sha2_step(h,a,b,c,d,e,f,g,in[1], c_K[17]); sha2_step(g,h,a,b,c,d,e,f,in[2], c_K[18]); sha2_step(f,g,h,a,b,c,d,e,in[3], c_K[19]); sha2_step(e,f,g,h,a,b,c,d,in[4], c_K[20]); sha2_step(d,e,f,g,h,a,b,c,in[5], c_K[21]); sha2_step(c,d,e,f,g,h,a,b,in[6], c_K[22]); sha2_step(b,c,d,e,f,g,h,a,in[7], c_K[23]); sha2_step(a,b,c,d,e,f,g,h,in[8], c_K[24]); sha2_step(h,a,b,c,d,e,f,g,in[9], c_K[25]); #pragma unroll 6 for (uint32_t j = 10; j < 16; j++){ in[j] = in[j] + in[(j + 9) & 15] + ssg2_0(in[(j + 1) & 15]) + ssg2_1(in[(j + 14) & 15]); } sha2_step(g,h,a,b,c,d,e,f,in[10],c_K[26]); sha2_step(f,g,h,a,b,c,d,e,in[11],c_K[27]); sha2_step(e,f,g,h,a,b,c,d,in[12],c_K[28]); sha2_step(d,e,f,g,h,a,b,c,in[13],c_K[29]); sha2_step(c,d,e,f,g,h,a,b,in[14],c_K[30]); sha2_step(b,c,d,e,f,g,h,a,in[15],c_K[31]); #pragma unroll 16 for (uint32_t j = 0; j < 16; j++){ in[j] = in[j] + in[(j + 9) & 15] + ssg2_0(in[(j + 1) & 15]) + ssg2_1(in[(j + 14) & 15]); } sha2_step(a,b,c,d,e,f,g,h,in[0], c_K[16+16]); sha2_step(h,a,b,c,d,e,f,g,in[1], c_K[17+16]); sha2_step(g,h,a,b,c,d,e,f,in[2], c_K[18+16]); sha2_step(f,g,h,a,b,c,d,e,in[3], c_K[19+16]); sha2_step(e,f,g,h,a,b,c,d,in[4], c_K[20+16]); sha2_step(d,e,f,g,h,a,b,c,in[5], c_K[21+16]); sha2_step(c,d,e,f,g,h,a,b,in[6], c_K[22+16]); sha2_step(b,c,d,e,f,g,h,a,in[7], c_K[23+16]); sha2_step(a,b,c,d,e,f,g,h,in[8], c_K[24+16]); sha2_step(h,a,b,c,d,e,f,g,in[9], c_K[25+16]); sha2_step(g,h,a,b,c,d,e,f,in[10],c_K[26+16]); sha2_step(f,g,h,a,b,c,d,e,in[11],c_K[27+16]); sha2_step(e,f,g,h,a,b,c,d,in[12],c_K[28+16]); sha2_step(d,e,f,g,h,a,b,c,in[13],c_K[29+16]); sha2_step(c,d,e,f,g,h,a,b,in[14],c_K[30+16]); sha2_step(b,c,d,e,f,g,h,a,in[15],c_K[31+16]); #pragma unroll 16 for (uint32_t j = 0; j < 16; j++){ in[j] = in[j] + in[(j + 9) & 15] + ssg2_0(in[(j + 1) & 15]) + ssg2_1(in[(j + 14) & 15]); } sha2_step(a,b,c,d,e,f,g,h,in[0], c_K[16+16*2]); sha2_step(h,a,b,c,d,e,f,g,in[1], c_K[17+16*2]); sha2_step(g,h,a,b,c,d,e,f,in[2], c_K[18+16*2]); sha2_step(f,g,h,a,b,c,d,e,in[3], c_K[19+16*2]); sha2_step(e,f,g,h,a,b,c,d,in[4], c_K[20+16*2]); sha2_step(d,e,f,g,h,a,b,c,in[5], c_K[21+16*2]); sha2_step(c,d,e,f,g,h,a,b,in[6], c_K[22+16*2]); sha2_step(b,c,d,e,f,g,h,a,in[7], c_K[23+16*2]); sha2_step(a,b,c,d,e,f,g,h,in[8], c_K[24+16*2]); sha2_step(h,a,b,c,d,e,f,g,in[9], c_K[25+16*2]); sha2_step(g,h,a,b,c,d,e,f,in[10],c_K[26+16*2]); sha2_step(f,g,h,a,b,c,d,e,in[11],c_K[27+16*2]); sha2_step(e,f,g,h,a,b,c,d,in[12],c_K[28+16*2]); sha2_step(d,e,f,g,h,a,b,c,in[13],c_K[29+16*2]); sha2_step(c,d,e,f,g,h,a,b,in[14],c_K[30+16*2]); sha2_step(b,c,d,e,f,g,h,a,in[15],c_K[31+16*2]); buf[ 0] 
= state[0] + a; buf[ 1] = state[1] + b; buf[ 2] = state[2] + c; buf[ 3] = state[3] + d; buf[ 4] = state[4] + e; buf[ 5] = state[5] + f; buf[ 6] = state[6] + g; buf[ 7] = state[7] + h; } __device__ __forceinline__ static void sha256_round_body(uint32_t* in, uint32_t* state) { uint32_t a = state[0]; uint32_t b = state[1]; uint32_t c = state[2]; uint32_t d = state[3]; uint32_t e = state[4]; uint32_t f = state[5]; uint32_t g = state[6]; uint32_t h = state[7]; sha2_step(a,b,c,d,e,f,g,h,in[0], c_K[0]); sha2_step(h,a,b,c,d,e,f,g,in[1], c_K[1]); sha2_step(g,h,a,b,c,d,e,f,in[2], c_K[2]); sha2_step(f,g,h,a,b,c,d,e,in[3], c_K[3]); sha2_step(e,f,g,h,a,b,c,d,in[4], c_K[4]); sha2_step(d,e,f,g,h,a,b,c,in[5], c_K[5]); sha2_step(c,d,e,f,g,h,a,b,in[6], c_K[6]); sha2_step(b,c,d,e,f,g,h,a,in[7], c_K[7]); sha2_step(a,b,c,d,e,f,g,h,in[8], c_K[8]); sha2_step(h,a,b,c,d,e,f,g,in[9], c_K[9]); sha2_step(g,h,a,b,c,d,e,f,in[10],c_K[10]); sha2_step(f,g,h,a,b,c,d,e,in[11],c_K[11]); sha2_step(e,f,g,h,a,b,c,d,in[12],c_K[12]); sha2_step(d,e,f,g,h,a,b,c,in[13],c_K[13]); sha2_step(c,d,e,f,g,h,a,b,in[14],c_K[14]); sha2_step(b,c,d,e,f,g,h,a,in[15],c_K[15]); #pragma unroll 3 for (uint32_t i=0; i<3; i++) { #pragma unroll 16 for (uint32_t j = 0; j < 16; j++){ in[j]+= ssg2_11(in[(j + 14) & 15]) + in[(j + 9) & 15] + ssg2_0(in[(j + 1) & 15]); } sha2_step(a, b, c, d, e, f, g, h, in[0], c_K[16 + 16 * i]); sha2_step(h, a, b, c, d, e, f, g, in[1], c_K[17 + 16 * i]); sha2_step(g, h, a, b, c, d, e, f, in[2], c_K[18 + 16 * i]); sha2_step(f, g, h, a, b, c, d, e, in[3], c_K[19 + 16 * i]); sha2_step(e, f, g, h, a, b, c, d, in[4], c_K[20 + 16 * i]); sha2_step(d, e, f, g, h, a, b, c, in[5], c_K[21 + 16 * i]); sha2_step(c, d, e, f, g, h, a, b, in[6], c_K[22 + 16 * i]); sha2_step(b, c, d, e, f, g, h, a, in[7], c_K[23 + 16 * i]); sha2_step(a, b, c, d, e, f, g, h, in[8], c_K[24 + 16 * i]); sha2_step(h, a, b, c, d, e, f, g, in[9], c_K[25 + 16 * i]); sha2_step(g, h, a, b, c, d, e, f, in[10], c_K[26 + 16 * i]); sha2_step(f, g, h, a, b, c, d, e, in[11], c_K[27 + 16 * i]); sha2_step(e, f, g, h, a, b, c, d, in[12], c_K[28 + 16 * i]); sha2_step(d, e, f, g, h, a, b, c, in[13], c_K[29 + 16 * i]); sha2_step(c, d, e, f, g, h, a, b, in[14], c_K[30 + 16 * i]); sha2_step(b, c, d, e, f, g, h, a, in[15], c_K[31 + 16 * i]); } state[0] += a; state[1] += b; state[2] += c; state[3] += d; state[4] += e; state[5] += f; state[6] += g; state[7] += h; } __device__ __forceinline__ static void sha256_round_body_final(uint32_t* in, uint32_t* state) { uint32_t a = state[0]; uint32_t b = state[1]; uint32_t c = state[2]; uint32_t d = state[3]; uint32_t e = state[4]; uint32_t f = state[5]; uint32_t g = state[6]; uint32_t h = state[7]; sha2_step(a,b,c,d,e,f,g,h,in[0], c_K[0]); sha2_step(h,a,b,c,d,e,f,g,in[1], c_K[1]); sha2_step(g,h,a,b,c,d,e,f,in[2], c_K[2]); sha2_step(f,g,h,a,b,c,d,e,in[3], c_K[3]); sha2_step(e,f,g,h,a,b,c,d,in[4], c_K[4]); sha2_step(d,e,f,g,h,a,b,c,in[5], c_K[5]); sha2_step(c,d,e,f,g,h,a,b,in[6], c_K[6]); sha2_step(b,c,d,e,f,g,h,a,in[7], c_K[7]); sha2_step(a,b,c,d,e,f,g,h,in[8], c_K[8]); sha2_step(h,a,b,c,d,e,f,g,in[9], c_K[9]); sha2_step(g,h,a,b,c,d,e,f,in[10],c_K[10]); sha2_step(f,g,h,a,b,c,d,e,in[11],c_K[11]); sha2_step(e,f,g,h,a,b,c,d,in[12],c_K[12]); sha2_step(d,e,f,g,h,a,b,c,in[13],c_K[13]); sha2_step(c,d,e,f,g,h,a,b,in[14],c_K[14]); sha2_step(b,c,d,e,f,g,h,a,in[15],c_K[15]); #pragma unroll 2 for (uint32_t i=0; i<2; i++) { #pragma unroll 16 for (uint32_t j = 0; j < 16; j++){ in[j] = in[j] + in[(j + 9) & 15] + ssg2_0(in[(j + 1) & 15]) + ssg2_1(in[(j + 14) & 
15]); } sha2_step(a, b, c, d, e, f, g, h, in[0], c_K[16 + 16 * i]); sha2_step(h, a, b, c, d, e, f, g, in[1], c_K[17 + 16 * i]); sha2_step(g, h, a, b, c, d, e, f, in[2], c_K[18 + 16 * i]); sha2_step(f, g, h, a, b, c, d, e, in[3], c_K[19 + 16 * i]); sha2_step(e, f, g, h, a, b, c, d, in[4], c_K[20 + 16 * i]); sha2_step(d, e, f, g, h, a, b, c, in[5], c_K[21 + 16 * i]); sha2_step(c, d, e, f, g, h, a, b, in[6], c_K[22 + 16 * i]); sha2_step(b, c, d, e, f, g, h, a, in[7], c_K[23 + 16 * i]); sha2_step(a, b, c, d, e, f, g, h, in[8], c_K[24 + 16 * i]); sha2_step(h, a, b, c, d, e, f, g, in[9], c_K[25 + 16 * i]); sha2_step(g, h, a, b, c, d, e, f, in[10], c_K[26 + 16 * i]); sha2_step(f, g, h, a, b, c, d, e, in[11], c_K[27 + 16 * i]); sha2_step(e, f, g, h, a, b, c, d, in[12], c_K[28 + 16 * i]); sha2_step(d, e, f, g, h, a, b, c, in[13], c_K[29 + 16 * i]); sha2_step(c, d, e, f, g, h, a, b, in[14], c_K[30 + 16 * i]); sha2_step(b, c, d, e, f, g, h, a, in[15], c_K[31 + 16 * i]); } #pragma unroll 14 for (uint32_t j = 0; j < 14; j++){ in[j] = in[j] + in[(j + 9) & 15] + ssg2_0(in[(j + 1) & 15]) + ssg2_1(in[(j + 14) & 15]); } sha2_step(a, b, c, d, e, f, g, h, in[0], c_K[16 + 16 * 2]); sha2_step(h, a, b, c, d, e, f, g, in[1], c_K[17 + 16 * 2]); sha2_step(g, h, a, b, c, d, e, f, in[2], c_K[18 + 16 * 2]); sha2_step(f, g, h, a, b, c, d, e, in[3], c_K[19 + 16 * 2]); sha2_step(e, f, g, h, a, b, c, d, in[4], c_K[20 + 16 * 2]); sha2_step(d, e, f, g, h, a, b, c, in[5], c_K[21 + 16 * 2]); sha2_step(c, d, e, f, g, h, a, b, in[6], c_K[22 + 16 * 2]); sha2_step(b, c, d, e, f, g, h, a, in[7], c_K[23 + 16 * 2]); sha2_step(a, b, c, d, e, f, g, h, in[8], c_K[24 + 16 * 2]); sha2_step(h, a, b, c, d, e, f, g, in[9], c_K[25 + 16 * 2]); sha2_step(g, h, a, b, c, d, e, f, in[10], c_K[26 + 16 * 2]); sha2_step(f, g, h, a, b, c, d, e, in[11], c_K[27 + 16 * 2]); sha2_step(e, f, g, h, a, b, c, d, in[12], c_K[28 + 16 * 2]); sha2_step(d, e, f, g, h, a, b, c, in[13], c_K[29 + 16 * 2]); state[6] = cuda_swab32(state[6] + g); state[7] = cuda_swab32(state[7] + h); } __global__ __launch_bounds__(768,2) /* to force 32 regs */ void lbry_sha256d_gpu_hash_112(const uint32_t threads, const uint32_t startNonce, uint64_t *outputHash) { const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); uint32_t buf[8], state[8]; if (thread < threads) { uint32_t dat[16]; #pragma unroll 11 for (uint32_t i=0; i<11; i++) dat[i] = c_dataEnd112[i]; dat[11] = startNonce + thread; dat[12] = 0x80000000; dat[13] = 0; dat[14] = 0; dat[15] = 0x380; *(uint2x4*)&state[0] = *(uint2x4*)&c_midstate112[0]; *(uint2x4*)&buf[0] = *(uint2x4*)&c_midbuffer112[0]; sha256_round_first(dat, buf, state); // no shared mem here // second sha256 *(uint2x4*)&dat[0] = *(uint2x4*)&buf[0]; dat[8] = 0x80000000; #pragma unroll 6 for (uint32_t i=9; i<15; i++) dat[i] = 0; dat[15] = 0x100; *(uint2x4*)&buf[0] = *(uint2x4*)&c_H256[0]; sha256_round_body(dat, buf); //no shared mem at all // output *(uint2*)&buf[0] = vectorizeswap(((uint64_t*)buf)[0]); *(uint2*)&buf[2] = vectorizeswap(((uint64_t*)buf)[1]); *(uint2*)&buf[4] = vectorizeswap(((uint64_t*)buf)[2]); *(uint2*)&buf[6] = vectorizeswap(((uint64_t*)buf)[3]); *(uint2x4*)&outputHash[thread<<3] = *(uint2x4*)&buf[0]; } } __host__ void lbry_sha256d_hash_112(int thr_id, uint32_t threads, uint32_t startNonce, uint32_t *d_outputHash){ const int threadsperblock = 768; dim3 grid((threads + threadsperblock - 1) / threadsperblock); dim3 block(threadsperblock); lbry_sha256d_gpu_hash_112 <<<grid, block>>> (threads, startNonce, (uint64_t*) d_outputHash); } 
__host__ void lbry_sha256_init(int thr_id) { cudaMemcpyToSymbol(c_K, cpu_K, sizeof(cpu_K), 0, cudaMemcpyHostToDevice); } __host__ void lbry_sha256_setBlock_112(uint32_t *pdata){ uint32_t in[16], buf[8], end[16]; for (int i=0;i<16;i++) in[i] = cuda_swab32(pdata[i]); for (int i=0; i<8;i++) buf[i] = cpu_H256[i]; for (int i=0;i<11;i++) end[i] = cuda_swab32(pdata[16+i]); sha256_round_body_host(in, buf, cpu_K); cudaMemcpyToSymbol(c_midstate112, buf, 32, 0, cudaMemcpyHostToDevice); uint32_t a = buf[0]; uint32_t b = buf[1]; uint32_t c = buf[2]; uint32_t d = buf[3]; uint32_t e = buf[4]; uint32_t f = buf[5]; uint32_t g = buf[6]; uint32_t h = buf[7]; sha256_step1_host(a,b,c,d,e,f,g,h,end[0], cpu_K[0]); sha256_step1_host(h,a,b,c,d,e,f,g,end[1], cpu_K[1]); sha256_step1_host(g,h,a,b,c,d,e,f,end[2], cpu_K[2]); sha256_step1_host(f,g,h,a,b,c,d,e,end[3], cpu_K[3]); sha256_step1_host(e,f,g,h,a,b,c,d,end[4], cpu_K[4]); sha256_step1_host(d,e,f,g,h,a,b,c,end[5], cpu_K[5]); sha256_step1_host(c,d,e,f,g,h,a,b,end[6], cpu_K[6]); sha256_step1_host(b,c,d,e,f,g,h,a,end[7], cpu_K[7]); sha256_step1_host(a,b,c,d,e,f,g,h,end[8], cpu_K[8]); sha256_step1_host(h,a,b,c,d,e,f,g,end[9], cpu_K[9]); sha256_step1_host(g,h,a,b,c,d,e,f,end[10],cpu_K[10]); sha256_step1_host(f, g, h, a, b, c, d, e, 0, cpu_K[11]); buf[0] = a; buf[1] = b; buf[2] = c; buf[3] = d; buf[4] = e; buf[5] = f; buf[6] = g; buf[7] = h; cudaMemcpyToSymbol(c_midbuffer112, buf, 32, 0, cudaMemcpyHostToDevice); end[12] = 0x80000000; end[13] = 0; end[14] = 0; end[15] = 0x380; uint32_t x2_0,x2_1; x2_0 = ROTR32(end[1], 7) ^ ROTR32(end[1], 18) ^ SPH_T32(end[1] >> 3); //ssg2_0(end[1]); // x2_1 = ROTR32(end[14], 17) ^ ROTR32(end[14], 19) ^ SPH_T32(end[14] >> 10) + x2_0; //ssg2_1(end[14]) + x2_0; end[0] = end[0] + end[9] + x2_0; x2_0 = ROTR32(end[2], 7) ^ ROTR32(end[2], 18) ^ SPH_T32(end[2] >> 3); x2_1 = (ROTR32(end[15], 17) ^ ROTR32(end[15], 19) ^ SPH_T32(end[15] >> 10)) + x2_0; end[1] = end[1] + end[10] + x2_1; x2_0 = ROTR32(end[3], 7) ^ ROTR32(end[3], 18) ^ SPH_T32(end[3] >> 3);//ssg2_0(end[3]); x2_1 = (ROTR32(end[0], 17) ^ ROTR32(end[0], 19) ^ SPH_T32(end[0] >> 10)) + x2_0; end[2]+= x2_1; x2_0 = ROTR32(end[4], 7) ^ ROTR32(end[4], 18) ^ SPH_T32(end[4] >> 3);//ssg2_0(end[4]); x2_1 = (ROTR32(end[1], 17) ^ ROTR32(end[1], 19) ^ SPH_T32(end[1] >> 10)) + x2_0; end[3] = end[3] + end[12] + x2_1; x2_0 = ROTR32(end[5], 7) ^ ROTR32(end[5], 18) ^ SPH_T32(end[5] >> 3);//ssg2_0(end[4]); end[4] = end[4] + end[13] + x2_0; x2_0 = ROTR32(end[6], 7) ^ ROTR32(end[6], 18) ^ SPH_T32(end[6] >> 3);//ssg2_0(end[6]); x2_1 = (ROTR32(end[3], 17) ^ ROTR32(end[3], 19) ^ SPH_T32(end[3] >> 10)) + x2_0; end[5] = end[5] + end[14] + x2_1; x2_0 = ROTR32(end[7], 7) ^ ROTR32(end[7], 18) ^ SPH_T32(end[7] >> 3);//ssg2_0(end[7]); end[6] = end[6] + end[15] + x2_0; x2_0 = ROTR32(end[8], 7) ^ ROTR32(end[8], 18) ^ SPH_T32(end[8] >> 3);//ssg2_0(end[8]); x2_1 = (ROTR32(end[5], 17) ^ ROTR32(end[5], 19) ^ SPH_T32(end[5] >> 10)) + x2_0; end[7] = end[7] + end[0] + x2_1; x2_0 = ROTR32(end[9], 7) ^ ROTR32(end[9], 18) ^ SPH_T32(end[9] >> 3);//ssg2_0(end[9]); end[8] = end[8] + end[1] + x2_0; x2_0 = ROTR32(end[10], 7) ^ ROTR32(end[10], 18) ^ SPH_T32(end[10] >> 3);//ssg2_0(end[10]); x2_1 = (ROTR32(end[7], 17) ^ ROTR32(end[7], 19) ^ SPH_T32(end[7] >> 10)) + x2_0; end[9] = end[9] + x2_1; cudaMemcpyToSymbol(c_dataEnd112, end, 12*sizeof(uint32_t), 0, cudaMemcpyHostToDevice); } // RIPEMD MACROS----------------------------------------------------------------------------- /* * Round constants for RIPEMD-160. 
*/ static __constant__ const uint32_t c_IV[5] = {0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0}; static __constant__ const uint32_t KL[5] = {0x00000000, 0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC, 0xA953FD4E}; static __constant__ const uint32_t KR[5] = {0x50A28BE6, 0x5C4DD124, 0x6D703EF3, 0x7A6D76E9, 0x00000000}; /* Left line */ static __constant__ const uint32_t RL[5][16] = { { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, /* Round 1: id */ { 7, 4, 13, 1, 10, 6, 15, 3, 12, 0, 9, 5, 2, 14, 11, 8 }, /* Round 2: rho */ { 3, 10, 14, 4, 9, 15, 8, 1, 2, 7, 0, 6, 13, 11, 5, 12 }, /* Round 3: rho^2 */ { 1, 9, 11, 10, 0, 8, 12, 4, 13, 3, 7, 15, 14, 5, 6, 2 }, /* Round 4: rho^3 */ { 4, 0, 5, 9, 7, 12, 2, 10, 14, 1, 3, 8, 11, 6, 15, 13 } /* Round 5: rho^4 */ }; /* Right line */ static __constant__ const uint32_t RR[5][16] = { { 5, 14, 7, 0, 9, 2, 11, 4, 13, 6, 15, 8, 1, 10, 3, 12 }, /* Round 1: pi */ { 6, 11, 3, 7, 0, 13, 5, 10, 14, 15, 8, 12, 4, 9, 1, 2 }, /* Round 2: rho pi */ { 15, 5, 1, 3, 7, 14, 6, 9, 11, 8, 12, 2, 10, 0, 4, 13 }, /* Round 3: rho^2 pi */ { 8, 6, 4, 1, 3, 11, 15, 0, 5, 12, 2, 13, 9, 7, 10, 14 }, /* Round 4: rho^3 pi */ { 12, 15, 10, 4, 1, 5, 8, 7, 6, 2, 13, 14, 0, 3, 9, 11 } /* Round 5: rho^4 pi */ }; /* Shifts, left line */ static __constant__ const uint32_t SL[5][16] = { { 11, 14, 15, 12, 5, 8, 7, 9, 11, 13, 14, 15, 6, 7, 9, 8 }, /* Round 1 */ { 7, 6, 8, 13, 11, 9, 7, 15, 7, 12, 15, 9, 11, 7, 13, 12 }, /* Round 2 */ { 11, 13, 6, 7, 14, 9, 13, 15, 14, 8, 13, 6, 5, 12, 7, 5 }, /* Round 3 */ { 11, 12, 14, 15, 14, 15, 9, 8, 9, 14, 5, 6, 8, 6, 5, 12 }, /* Round 4 */ { 9, 15, 5, 11, 6, 8, 13, 12, 5, 12, 13, 14, 11, 8, 5, 6 } /* Round 5 */ }; /* Shifts, right line */ static __constant__ const uint32_t SR[5][16] = { { 8, 9, 9, 11, 13, 15, 15, 5, 7, 7, 8, 11, 14, 14, 12, 6 }, /* Round 1 */ { 9, 13, 15, 7, 12, 8, 9, 11, 7, 7, 12, 7, 6, 15, 13, 11 }, /* Round 2 */ { 9, 7, 15, 11, 8, 6, 6, 14, 12, 13, 5, 14, 13, 13, 7, 5 }, /* Round 3 */ { 15, 5, 8, 11, 14, 14, 6, 14, 6, 9, 12, 9, 12, 5, 15, 8 }, /* Round 4 */ { 8, 5, 12, 9, 12, 5, 14, 6, 8, 13, 6, 5, 15, 13, 11, 11 } /* Round 5 */ }; __device__ __forceinline__ static uint32_t ROTATE(const uint32_t x,const uint32_t r){ if(r==8) return __byte_perm(x, 0, 0x2103); else return ROTL32(x,r); } /* * Round functions for RIPEMD-160. 
*/ //#define F1(x, y, z) xor3x(x, y, z) __device__ __forceinline__ uint32_t F1(const uint32_t a,const uint32_t b,const uint32_t c){ uint32_t result; #if __CUDA_ARCH__ >= 500 && CUDA_VERSION >= 7050 asm ("lop3.b32 %0, %1, %2, %3, 0x96;" : "=r"(result) : "r"(a), "r"(b),"r"(c)); #else result = a^b^c; #endif return result; } //#define F2(x, y, z) ((x & (y ^ z)) ^ z) __device__ __forceinline__ uint32_t F2(const uint32_t a,const uint32_t b,const uint32_t c){ uint32_t result; #if __CUDA_ARCH__ >= 500 && CUDA_VERSION >= 7050 asm ("lop3.b32 %0, %1, %2, %3, 0xCA;" : "=r"(result) : "r"(a), "r"(b),"r"(c)); //0xCA=((F0∧(CC⊻AA))⊻AA) #else result = ((a & (b ^ c)) ^ c); #endif return result; } //#define F3(x, y, z) ((x | ~y) ^ z) __device__ __forceinline__ uint32_t F3(const uint32_t x,const uint32_t y,const uint32_t z){ uint32_t result; #if __CUDA_ARCH__ >= 500 && CUDA_VERSION >= 7050 asm ("lop3.b32 %0, %1, %2, %3, 0x59;" : "=r"(result) : "r"(x), "r"(y),"r"(z)); //0x59=((F0∨(¬CC))⊻AA) #else result = ((x | ~y) ^ z); #endif return result; } //#define F4(x, y, z) (y ^ ((x ^ y) & z)) __device__ __forceinline__ uint32_t F4(const uint32_t x,const uint32_t y,const uint32_t z){ uint32_t result; #if __CUDA_ARCH__ >= 500 && CUDA_VERSION >= 7050 asm ("lop3.b32 %0, %1, %2, %3, 0xE4;" : "=r"(result) : "r"(x), "r"(y),"r"(z)); //0xE4=(CC⊻((F0⊻CC)∧AA)) #else result = (y ^ ((x ^ y) & z)); #endif return result; } //#define F5(x, y, z) (x ^ (y | ~z)) __device__ __forceinline__ uint32_t F5(const uint32_t x,const uint32_t y,const uint32_t z){ uint32_t result; #if __CUDA_ARCH__ >= 500 && CUDA_VERSION >= 7050 asm ("lop3.b32 %0, %1, %2, %3, 0x2D;" : "=r"(result) : "r"(x), "r"(y),"r"(z)); //0x2D=(F0⊻(CC∨(¬AA))) #else result = (x ^ (y | ~z)); #endif return result; } __device__ __forceinline__ static void RIPEMD160_ROUND_BODY(const uint32_t *in, uint32_t *h){ uint32_t T; uint32_t AL, BL, CL, DL, EL; /* left line */ uint32_t AR, BR, CR, DR, ER; /* right line */ AL = AR = h[0]; BL = BR = h[1]; CL = CR = h[2]; DL = DR = h[3]; EL = ER = h[4]; /* Round 1 */ #pragma unroll 16 for (uint32_t w = 0; w < 16; w++) { T = ROTATE(AL + F1(BL, CL, DL) + in[RL[0][w]] + KL[0], SL[0][w]) + EL; AL = EL; EL = DL; DL = ROTL32(CL,10); CL = BL; BL = T; T = ROTATE(AR + F5(BR, CR, DR) + in[RR[0][w]] + KR[0], SR[0][w]) + ER; AR = ER; ER = DR; DR = ROTL32(CR,10); CR = BR; BR = T; } /* Round 2 */ #pragma unroll 16 for (uint32_t w = 0; w < 16; w++) { T = ROTATE(AL + F2(BL, CL, DL) + in[RL[1][w]] + KL[1], SL[1][w]) + EL; AL = EL; EL = DL; DL = ROTL32(CL,10); CL = BL; BL = T; T = ROTATE(AR + F4(BR, CR, DR) + in[RR[1][w]] + KR[1], SR[1][w]) + ER; AR = ER; ER = DR; DR = ROTL32(CR,10); CR = BR; BR = T; } /* Round 3 */ #pragma unroll 16 for (uint32_t w = 0; w < 16; w++) { T = ROTATE(AL + F3(BL, CL, DL) + in[RL[2][w]] + KL[2], SL[2][w]) + EL; AL = EL; EL = DL; DL = ROTL32(CL,10); CL = BL; BL = T; T = ROTATE(AR + F3(BR, CR, DR) + in[RR[2][w]] + KR[2], SR[2][w]) + ER; AR = ER; ER = DR; DR = ROTL32(CR,10); CR = BR; BR = T; } /* Round 4 */ #pragma unroll 16 for (uint32_t w = 0; w < 16; w++) { T = ROTATE(AL + F4(BL, CL, DL) + in[RL[3][w]] + KL[3], SL[3][w]) + EL; AL = EL; EL = DL; DL = ROTL32(CL,10); CL = BL; BL = T; T = ROTATE(AR + F2(BR, CR, DR) + in[RR[3][w]] + KR[3], SR[3][w]) + ER; AR = ER; ER = DR; DR = ROTL32(CR,10); CR = BR; BR = T; } /* Round 5 */ #pragma unroll 16 for (uint32_t w = 0; w < 16; w++) { T = ROTATE(AL + F5(BL, CL, DL) + in[RL[4][w]] + KL[4], SL[4][w]) + EL; AL = EL; EL = DL; DL = ROTL32(CL,10); CL = BL; BL = T; T = ROTATE(AR + F1(BR, CR, DR) + 
in[RR[4][w]] + KR[4], SR[4][w]) + ER; AR = ER; ER = DR; DR = ROTL32(CR,10); CR = BR; BR = T; } T = h[1] + CL + DR; h[1] = h[2] + DL + ER; h[2] = h[3] + EL + AR; h[3] = h[4] + AL + BR; h[4] = h[0] + BL + CR; h[0] = T; } // END OF RIPEMD MACROS---------------------------------------------------------------------- __global__ __launch_bounds__(1024,2) /* to force 32 regs */ void lbry_ripemd(const uint32_t threads, uint32_t *const __restrict__ Hash512){ const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); uint32_t dat[16]; uint32_t h[5]; uint32_t buf[8]; // align for vectorize if(thread<threads){ const uint32_t* input = &Hash512[thread<<4]; *(uint2x4*)&dat[0] = __ldg4((uint2x4*)&input[0]); dat[8] = 0x80; #pragma unroll 7 for (int i=9;i<16;i++) dat[i] = 0; dat[14] = 0x100; // size in bits #pragma unroll 5 for (int i=0; i<5; i++) h[i] = c_IV[i]; RIPEMD160_ROUND_BODY(dat, h); #pragma unroll 5 for (int i=0; i<5; i++) buf[i] = h[i]; // second 32 bytes block hash *(uint2x4*)&dat[0] = __ldg4((uint2x4*)&input[8]); dat[8] = 0x80; #pragma unroll 7 for (int i=9;i<16;i++) dat[i] = 0; dat[14] = 0x100; // size in bits #pragma unroll 5 for (int i=0; i<5; i++) h[i] = c_IV[i]; RIPEMD160_ROUND_BODY(dat, h); #pragma unroll 5 for (int i=0;i<5;i++) dat[i] = cuda_swab32(buf[i]); #pragma unroll 5 for (int i=0;i<5;i++) dat[i+5] = cuda_swab32(h[i]); *(uint2x4*)&Hash512[(thread<<4)] = *(uint2x4*)&dat[ 0]; *(uint2*)&Hash512[(thread<<4)+8] = *(uint2*)&dat[ 8]; } } __global__ __launch_bounds__(768,2) /* to force 32 regs */ void lbry_sha256d_gpu_hash_final(const uint32_t threads,const uint32_t* __restrict__ Hash512, uint32_t *resNonces,const uint64_t target64) { const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); uint32_t dat[16]; uint32_t buf[8]; // align for vectorize if (thread < threads) { // first final sha256 const uint32_t* input = &Hash512[thread<<4]; *(uint2x4*)&dat[0] = __ldg4((uint2x4*)&input[0]); *(uint2*)&dat[8] = __ldg((uint2*)&input[8]); dat[10] = 0x80000000; #pragma unroll 4 for (int i=11; i<15; i++) dat[i] = 0; dat[15] = 0x140; *(uint2x4*)&buf[0] = *(uint2x4*)&c_H256[0]; sha256_round_body(dat, buf); // second sha256 *(uint2x4*)&dat[0] = *(uint2x4*)&buf[0]; *(uint2x4*)&buf[0] = *(uint2x4*)&c_H256[0]; dat[8] = 0x80000000; #pragma unroll 6 for (int i=9; i<15; i++) dat[i] = 0; dat[15] = 0x100; sha256_round_body_final(dat, buf); // valid nonces if (*(uint64_t*)&buf[ 6] <= target64){ uint32_t tmp = atomicExch(&resNonces[0], thread); if (tmp != UINT32_MAX) resNonces[1] = tmp; } } } __host__ void lbry_sha256d_hash_final(int thr_id, uint32_t threads, uint32_t *d_inputHash, uint32_t *d_resNonce, const uint64_t target64) { int threadsperblock; threadsperblock = 1024; dim3 grid1((threads + threadsperblock - 1) / threadsperblock); dim3 block1(threadsperblock); threadsperblock = 768; dim3 grid2((threads + threadsperblock - 1) / threadsperblock); dim3 block2(threadsperblock); lbry_ripemd <<<grid1, block1>>> (threads,d_inputHash); lbry_sha256d_gpu_hash_final <<<grid2, block2>>> (threads, d_inputHash, d_resNonce, target64); }
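A minimal host-side sketch of how the entry points above are typically driven for one scan iteration. The wrapper name, buffer names, and throughput value are assumptions; in the full LBRY hash chain an intermediate SHA-512 stage, which is not part of this file, fills d_hash between the two kernel launches below.

void lbry_scan_sketch(int thr_id, uint32_t *pdata112, uint32_t first_nonce,
                      uint32_t throughput, uint64_t target64,
                      uint32_t *d_hash,      // 16 uint32_t (64 bytes) per thread
                      uint32_t *d_resNonce)  // 2 uint32_t result slots
{
    lbry_sha256_init(thr_id);              // upload the SHA-256 round constants once
    lbry_sha256_setBlock_112(pdata112);    // precompute midstate for the 112-byte header

    cudaMemset(d_resNonce, 0xff, 2 * sizeof(uint32_t));   // kernel compares against UINT32_MAX

    // First stage: double SHA-256 over the 112-byte header for `throughput` nonces.
    lbry_sha256d_hash_112(thr_id, throughput, first_nonce, d_hash);

    // ... the SHA-512 stage over d_hash would run here in the full algorithm ...

    // Final stage: RIPEMD-160 of both 32-byte halves, then double SHA-256 and target test.
    lbry_sha256d_hash_final(thr_id, throughput, d_hash, d_resNonce, target64);

    uint32_t res[2];
    cudaMemcpy(res, d_resNonce, sizeof(res), cudaMemcpyDeviceToHost);
    // Entries differing from UINT32_MAX are candidate thread indices;
    // the corresponding nonce is first_nonce + res[i].
}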
#include "cuda-align.cuh" #include "../../../include/librealsense2/rsutil.h" #include "../../cuda/rscuda_utils.cuh" // CUDA headers #include <cuda_runtime.h> #ifdef _MSC_VER // Add library dependencies if using VS #pragma comment(lib, "cudart_static") #endif #define RS2_CUDA_THREADS_PER_BLOCK 32 using namespace librealsense; using namespace rscuda; template<int N> struct bytes { unsigned char b[N]; }; int calc_block_size(int pixel_count, int thread_count) { return ((pixel_count % thread_count) == 0) ? (pixel_count / thread_count) : (pixel_count / thread_count + 1); } __device__ void kernel_transfer_pixels(int2* mapped_pixels, const rs2_intrinsics* depth_intrin, const rs2_intrinsics* other_intrin, const rs2_extrinsics* depth_to_other, float depth_val, int depth_x, int depth_y, int block_index) { float shift = block_index ? 0.5 : -0.5; auto depth_size = depth_intrin->width * depth_intrin->height; auto mapped_index = block_index * depth_size + (depth_y * depth_intrin->width + depth_x); if (mapped_index >= depth_size * 2) return; // Skip over depth pixels with the value of zero, we have no depth data so we will not write anything into our aligned images if (depth_val == 0) { mapped_pixels[mapped_index] = { -1, -1 }; return; } //// Map the top-left corner of the depth pixel onto the other image float depth_pixel[2] = { depth_x + shift, depth_y + shift }, depth_point[3], other_point[3], other_pixel[2]; rscuda::rs2_deproject_pixel_to_point(depth_point, depth_intrin, depth_pixel, depth_val); rscuda::rs2_transform_point_to_point(other_point, depth_to_other, depth_point); rscuda::rs2_project_point_to_pixel(other_pixel, other_intrin, other_point); mapped_pixels[mapped_index].x = static_cast<int>(other_pixel[0] + 0.5f); mapped_pixels[mapped_index].y = static_cast<int>(other_pixel[1] + 0.5f); } __global__ void kernel_map_depth_to_other(int2* mapped_pixels, const uint16_t* depth_in, const rs2_intrinsics* depth_intrin, const rs2_intrinsics* other_intrin, const rs2_extrinsics* depth_to_other, float depth_scale) { int depth_x = blockIdx.x * blockDim.x + threadIdx.x; int depth_y = blockIdx.y * blockDim.y + threadIdx.y; int depth_pixel_index = depth_y * depth_intrin->width + depth_x; if (depth_pixel_index >= depth_intrin->width * depth_intrin->height) return; float depth_val = depth_in[depth_pixel_index] * depth_scale; kernel_transfer_pixels(mapped_pixels, depth_intrin, other_intrin, depth_to_other, depth_val, depth_x, depth_y, blockIdx.z); } template<int BPP> __global__ void kernel_other_to_depth(unsigned char* aligned, const unsigned char* other, const int2* mapped_pixels, const rs2_intrinsics* depth_intrin, const rs2_intrinsics* other_intrin) { int depth_x = blockIdx.x * blockDim.x + threadIdx.x; int depth_y = blockIdx.y * blockDim.y + threadIdx.y; auto depth_size = depth_intrin->width * depth_intrin->height; int depth_pixel_index = depth_y * depth_intrin->width + depth_x; if (depth_pixel_index >= depth_intrin->width * depth_intrin->height) return; int2 p0 = mapped_pixels[depth_pixel_index]; int2 p1 = mapped_pixels[depth_size + depth_pixel_index]; if (p0.x < 0 || p0.y < 0 || p1.x >= other_intrin->width || p1.y >= other_intrin->height) return; // Transfer between the depth pixels and the pixels inside the rectangle on the other image auto in_other = (const bytes<BPP> *)(other); auto out_other = (bytes<BPP> *)(aligned); for (int y = p0.y; y <= p1.y; ++y) { for (int x = p0.x; x <= p1.x; ++x) { auto other_pixel_index = y * other_intrin->width + x; out_other[depth_pixel_index] = in_other[other_pixel_index]; } 
} } __global__ void kernel_depth_to_other(uint16_t* aligned_out, const uint16_t* depth_in, const int2* mapped_pixels, const rs2_intrinsics* depth_intrin, const rs2_intrinsics* other_intrin) { int depth_x = blockIdx.x * blockDim.x + threadIdx.x; int depth_y = blockIdx.y * blockDim.y + threadIdx.y; auto depth_size = depth_intrin->width * depth_intrin->height; int depth_pixel_index = depth_y * depth_intrin->width + depth_x; if (depth_pixel_index >= depth_intrin->width * depth_intrin->height) return; int2 p0 = mapped_pixels[depth_pixel_index]; int2 p1 = mapped_pixels[depth_size + depth_pixel_index]; if (p0.x < 0 || p0.y < 0 || p1.x >= other_intrin->width || p1.y >= other_intrin->height) return; // Transfer between the depth pixels and the pixels inside the rectangle on the other image unsigned int new_val = depth_in[depth_pixel_index]; unsigned int* arr = (unsigned int*)aligned_out; for (int y = p0.y; y <= p1.y; ++y) { for (int x = p0.x; x <= p1.x; ++x) { auto other_pixel_index = y * other_intrin->width + x; new_val = new_val << 16 | new_val; atomicMin(&arr[other_pixel_index / 2], new_val); } } } __global__ void kernel_replace_to_zero(uint16_t* aligned_out, const rs2_intrinsics* other_intrin) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; auto other_pixel_index = y * other_intrin->width + x; if (aligned_out[other_pixel_index] == 0xffff) aligned_out[other_pixel_index] = 0; } void align_cuda_helper::align_other_to_depth(unsigned char* h_aligned_out, const uint16_t* h_depth_in, float depth_scale, const rs2_intrinsics& h_depth_intrin, const rs2_extrinsics& h_depth_to_other, const rs2_intrinsics& h_other_intrin, const unsigned char* h_other_in, rs2_format other_format, int other_bytes_per_pixel) { int depth_pixel_count = h_depth_intrin.width * h_depth_intrin.height; int other_pixel_count = h_other_intrin.width * h_other_intrin.height; int depth_size = depth_pixel_count * 2; int other_size = other_pixel_count * other_bytes_per_pixel; int aligned_pixel_count = depth_pixel_count; int aligned_size = aligned_pixel_count * other_bytes_per_pixel; // allocate and copy objects to cuda device memory if (!_d_depth_intrinsics) _d_depth_intrinsics = make_device_copy(h_depth_intrin); if (!_d_other_intrinsics) _d_other_intrinsics = make_device_copy(h_other_intrin); if (!_d_depth_other_extrinsics) _d_depth_other_extrinsics = make_device_copy(h_depth_to_other); if (!_d_depth_in) _d_depth_in = alloc_dev<uint16_t>(aligned_pixel_count); cudaMemcpy(_d_depth_in.get(), h_depth_in, depth_size, cudaMemcpyHostToDevice); if (!_d_other_in) _d_other_in = alloc_dev<unsigned char>(other_size); cudaMemcpy(_d_other_in.get(), h_other_in, other_size, cudaMemcpyHostToDevice); if (!_d_aligned_out) _d_aligned_out = alloc_dev<unsigned char>(aligned_size); cudaMemset(_d_aligned_out.get(), 0, aligned_size); if (!_d_pixel_map) _d_pixel_map = alloc_dev<int2>(depth_pixel_count * 2); // config threads dim3 threads(RS2_CUDA_THREADS_PER_BLOCK, RS2_CUDA_THREADS_PER_BLOCK); dim3 depth_blocks(calc_block_size(h_depth_intrin.width, threads.x), calc_block_size(h_depth_intrin.height, threads.y)); dim3 mapping_blocks(depth_blocks.x, depth_blocks.y, 2); kernel_map_depth_to_other <<<mapping_blocks,threads>>> (_d_pixel_map.get(), _d_depth_in.get(), _d_depth_intrinsics.get(), _d_other_intrinsics.get(), _d_depth_other_extrinsics.get(), depth_scale); switch (other_bytes_per_pixel) { case 1: kernel_other_to_depth<1> <<<depth_blocks,threads>>> (_d_aligned_out.get(), _d_other_in.get(), _d_pixel_map.get(), 
_d_depth_intrinsics.get(), _d_other_intrinsics.get()); break; case 2: kernel_other_to_depth<2> <<<depth_blocks,threads>>> (_d_aligned_out.get(), _d_other_in.get(), _d_pixel_map.get(), _d_depth_intrinsics.get(), _d_other_intrinsics.get()); break; case 3: kernel_other_to_depth<3> <<<depth_blocks,threads>>> (_d_aligned_out.get(), _d_other_in.get(), _d_pixel_map.get(), _d_depth_intrinsics.get(), _d_other_intrinsics.get()); break; case 4: kernel_other_to_depth<4> <<<depth_blocks,threads>>> (_d_aligned_out.get(), _d_other_in.get(), _d_pixel_map.get(), _d_depth_intrinsics.get(), _d_other_intrinsics.get()); break; } cudaDeviceSynchronize(); cudaMemcpy(h_aligned_out, _d_aligned_out.get(), aligned_size, cudaMemcpyDeviceToHost); } void align_cuda_helper::align_depth_to_other(unsigned char* h_aligned_out, const uint16_t* h_depth_in, float depth_scale, const rs2_intrinsics& h_depth_intrin, const rs2_extrinsics& h_depth_to_other, const rs2_intrinsics& h_other_intrin) { int depth_pixel_count = h_depth_intrin.width * h_depth_intrin.height; int other_pixel_count = h_other_intrin.width * h_other_intrin.height; int aligned_pixel_count = other_pixel_count; int depth_byte_size = depth_pixel_count * 2; int aligned_byte_size = aligned_pixel_count * 2; // allocate and copy objects to cuda device memory if (!_d_depth_intrinsics) _d_depth_intrinsics = make_device_copy(h_depth_intrin); if (!_d_other_intrinsics) _d_other_intrinsics = make_device_copy(h_other_intrin); if (!_d_depth_other_extrinsics) _d_depth_other_extrinsics = make_device_copy(h_depth_to_other); if (!_d_depth_in) _d_depth_in = alloc_dev<uint16_t>(depth_pixel_count); cudaMemcpy(_d_depth_in.get(), h_depth_in, depth_byte_size, cudaMemcpyHostToDevice); if (!_d_aligned_out) _d_aligned_out = alloc_dev<unsigned char>(aligned_byte_size); cudaMemset(_d_aligned_out.get(), 0xff, aligned_byte_size); if (!_d_pixel_map) _d_pixel_map = alloc_dev<int2>(depth_pixel_count * 2); // config threads dim3 threads(RS2_CUDA_THREADS_PER_BLOCK, RS2_CUDA_THREADS_PER_BLOCK); dim3 depth_blocks(calc_block_size(h_depth_intrin.width, threads.x), calc_block_size(h_depth_intrin.height, threads.y)); dim3 other_blocks(calc_block_size(h_other_intrin.width, threads.x), calc_block_size(h_other_intrin.height, threads.y)); dim3 mapping_blocks(depth_blocks.x, depth_blocks.y, 2); kernel_map_depth_to_other <<<mapping_blocks,threads>>> (_d_pixel_map.get(), _d_depth_in.get(), _d_depth_intrinsics.get(), _d_other_intrinsics.get(), _d_depth_other_extrinsics.get(), depth_scale); kernel_depth_to_other <<<depth_blocks,threads>>> ((uint16_t*)_d_aligned_out.get(), _d_depth_in.get(), _d_pixel_map.get(), _d_depth_intrinsics.get(), _d_other_intrinsics.get()); kernel_replace_to_zero <<<other_blocks, threads>>> ((uint16_t*)_d_aligned_out.get(), _d_other_intrinsics.get()); cudaDeviceSynchronize(); cudaMemcpy(h_aligned_out, _d_aligned_out.get(), aligned_pixel_count * 2, cudaMemcpyDeviceToHost); } #endif //RS2_USE_CUDA
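// ---------------------------------------------------------------------------
// Illustrative sketch: kernel_depth_to_other above resolves occlusions by
// filling the aligned depth image with the sentinel 0xffff, scattering
// candidate depths with atomicMin(), and finally turning untouched sentinels
// back into 0 ("no data").  The minimal standalone program below shows that
// same "sentinel fill + atomicMin scatter + cleanup" pattern on a plain 32-bit
// buffer; demo_scatter_min, demo_clear_sentinel and SENTINEL are hypothetical
// names, and the 16-bit packing detail of the real kernel is left out.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>
#include <cstdio>

constexpr unsigned int SENTINEL = 0xffffffffu;

__global__ void demo_scatter_min(unsigned int* out, const unsigned int* vals,
                                 const int* targets, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    // Several inputs may map to the same output pixel; keep the smallest depth.
    atomicMin(&out[targets[i]], vals[i]);
}

__global__ void demo_clear_sentinel(unsigned int* out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n && out[i] == SENTINEL) out[i] = 0;   // no sample landed here
}

int main()
{
    const int n_in = 4, n_out = 2;
    unsigned int h_vals[n_in] = { 300, 120, 500, 250 };
    int          h_tgt[n_in]  = {   0,   0,   1,   1 };
    unsigned int *d_out, *d_vals;
    int* d_tgt;
    cudaMalloc(&d_out,  n_out * sizeof(unsigned int));
    cudaMalloc(&d_vals, n_in  * sizeof(unsigned int));
    cudaMalloc(&d_tgt,  n_in  * sizeof(int));
    cudaMemset(d_out, 0xff, n_out * sizeof(unsigned int));      // sentinel fill
    cudaMemcpy(d_vals, h_vals, sizeof(h_vals), cudaMemcpyHostToDevice);
    cudaMemcpy(d_tgt,  h_tgt,  sizeof(h_tgt),  cudaMemcpyHostToDevice);
    demo_scatter_min<<<1, 32>>>(d_out, d_vals, d_tgt, n_in);
    demo_clear_sentinel<<<1, 32>>>(d_out, n_out);
    unsigned int h_out[n_out];
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    printf("%u %u\n", h_out[0], h_out[1]);                       // expect: 120 250
    cudaFree(d_out); cudaFree(d_vals); cudaFree(d_tgt);
    return 0;
}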
/*
* Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file group_norm.cu * \author Yuntao Chen */ #include <vector> #include <algorithm> #include "../mxnet_op.h" #include "./group_norm-inl.h" #include "./group_norm_helper.h" namespace mshadow { namespace cuda { template <typename DType> __device__ inline DType Cube(const DType x) { return x * x * x; } template <typename DType> __global__ void GroupNormForwardCUDAKernel( const int size, const int G, const int D, const int HxW, const DType* X, const DType* mu, const DType* rsig, const DType* gamma, const DType* beta, DType* Y) { const int C = G * D; CUDA_1D_KERNEL_LOOP(i, size) { const int i_mu = i / (D * HxW); const int i_gamma = (i / HxW) % C; Y[i] = __ldg(gamma + i_gamma) * (__ldg(X + i) - __ldg(mu + i_mu)) * __ldg(rsig + i_mu) + __ldg(beta + i_gamma); } } template <typename DType> __global__ void ComputeInternalGradientsCUDAKernel( const int N, const int G, const int D, const int HxW, const DType* dY, const DType* X, const DType* gamma, DType* ds, DType* db) { const int outer_size = N * G; const int inner_size = D * HxW; __shared__ typename BlockReduce<DType>::TempStorage ds_storage; __shared__ typename BlockReduce<DType>::TempStorage db_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { DType ds_val = 0; DType db_val = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int i_gamma = i % G * D + j / HxW; const int index = i * inner_size + j; ds_val += __ldg(gamma + i_gamma) * __ldg(dY + index) * __ldg(X + index); db_val += __ldg(gamma + i_gamma) * __ldg(dY + index); } ds_val = BlockReduce<DType>(ds_storage).Reduce(ds_val, cub::Sum()); db_val = BlockReduce<DType>(db_storage).Reduce(db_val, cub::Sum()); if (threadIdx.x == 0) { ds[i] = ds_val; db[i] = db_val; } __syncthreads(); } } // Math: // Y = gamma * (X - mu) * rsig + beta // let s = gamma * rsig // let b = beta - mu * rsig // Y = s * X + b // let n = D * HxW // dL/dX = dL/dY * dY/dX = dL/dY * (d(s * X)/dX + db/dX) // d(s * X)/dX = s + X * ds/dX = s + gamma * X * drsig/dX // db/dX = -u * drsig/dX - rsig * dmu/dX // drsig/dX = -rsig^3 * (X - mu) / n // dmu/dX = 1 / n template <typename DType> __global__ void GroupNormBackwardCUDAKernel( const int size, const int G, const int D, const int HxW, const DType* dY, const DType* X, const DType* mu, const DType* rsig, const DType* gamma, const DType* ds, const DType* db, DType* dX) { const int C = G * D; const DType denom = DType(1) / static_cast<DType>(D * HxW); CUDA_1D_KERNEL_LOOP(i, size) { const int i_mu = i / (D * HxW); const int i_gamma = (i / HxW) % C; const DType u = (__ldg(db + i_mu) * __ldg(mu + i_mu) - __ldg(ds + i_mu)) * (__ldg(X + i) - __ldg(mu + i_mu)) * Cube<DType>(__ldg(rsig + i_mu)); const DType v = __ldg(db + i_mu) * __ldg(rsig + i_mu); dX[i] = __ldg(gamma + i_gamma) * 
__ldg(dY + i) * __ldg(rsig + i_mu) + (u - v) * denom; } } template <typename DType> __global__ void GammaBetaBackwardCUDAKernel( const int N, const int G, const int D, const int HxW, const DType* dY, const DType* X, const DType* mu, const DType* rsig, DType* dgamma, DType* dbeta) { const int outer_size = G * D; const int inner_size = N * HxW; __shared__ typename BlockReduce<DType>::TempStorage dg_storage; __shared__ typename BlockReduce<DType>::TempStorage db_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { DType dg_val = 0; DType db_val = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int n = j / HxW; const int index = (n * outer_size + i) * HxW + j % HxW; const int i_mu = n * G + i / D; dg_val += __ldg(dY + index) * (__ldg(X + index) - __ldg(mu + i_mu)) * __ldg(rsig + i_mu); db_val += __ldg(dY + index); } dg_val = BlockReduce<DType>(dg_storage).Reduce(dg_val, cub::Sum()); db_val = BlockReduce<DType>(db_storage).Reduce(db_val, cub::Sum()); if (threadIdx.x == 0) { dgamma[i] = dg_val; dbeta[i] = db_val; } __syncthreads(); } } template<typename T> inline void GroupNormForward(cudaStream_t stream, T eps, const int N, const int G, const int D, const int HxW, const T* X_data, const T* gamma_data, const T* beta_data, T* Y_data, T* mu_data, T* rsig_data){ const int size = N * G * D * HxW; const std::array<int, 2> dims = {N * G, D * HxW}; const int axis = 1; Moments<T>(2, dims.data(), 1, &axis, X_data, mu_data, rsig_data, stream); InvStd<T>(N * G, eps, rsig_data, rsig_data, stream); GroupNormForwardCUDAKernel<T> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, stream>>>( size, G, D, HxW, X_data, mu_data, rsig_data, gamma_data, beta_data, Y_data); } // Math: // let: s = gamma * rsig // let: b = beta - mu * gamma * rsig // then: Y = s * X + b template<typename T> inline void GroupNormBackward(cudaStream_t stream, const int N, const int G, const int D, const int HxW, const T* dY_data, const T* X_data, const T* mu_data, const T* rsig_data, const T* gamma_data, T* ds_data, T* db_data, T* dX_data, T* dgamma_data, T* dbeta_data) { const int size = N * G * D * HxW; const int C = G * D; ComputeInternalGradientsCUDAKernel<T> <<<std::min(N * G, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, stream>>>( N, G, D, HxW, dY_data, X_data, gamma_data, ds_data, db_data); // Computes dL/dX. GroupNormBackwardCUDAKernel<T> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, stream>>>( size, G, D, HxW, dY_data, X_data, mu_data, rsig_data, gamma_data, ds_data, db_data, dX_data); // Computes dL/dgamma and dL/dbeta. 
GammaBetaBackwardCUDAKernel<T> <<<std::min(C, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, stream>>>( N, G, D, HxW, dY_data, X_data, mu_data, rsig_data, dgamma_data, dbeta_data); } } // namespace cuda inline void GroupNormForward(float eps, const int N, const int G, const int D, const int HxW, const Tensor<gpu, 1> &X, const Tensor<gpu, 1> &gamma, const Tensor<gpu, 1> &beta, Tensor<gpu, 1> &Y, Tensor<gpu, 1> &mu, Tensor<gpu, 1> &rsig) { cudaStream_t stream = Stream<gpu>::GetStream(Y.stream_); const float *X_data = X.dptr_; const float *gamma_data = gamma.dptr_; const float *beta_data = beta.dptr_; float *Y_data = Y.dptr_; float *mu_data = mu.dptr_; float *rsig_data = rsig.dptr_; cuda::GroupNormForward<float>(stream, eps, N, G, D, HxW, X_data, gamma_data, beta_data, Y_data, mu_data, rsig_data); } inline void GroupNormBackward(const int N, const int G, const int D, const int HxW, const Tensor<gpu, 1> &dY, const Tensor<gpu, 1> &X, const Tensor<gpu, 1> &mu, const Tensor<gpu, 1> &rsig, const Tensor<gpu, 1> &gamma, Tensor<gpu, 1> &ds, Tensor<gpu, 1> &db, Tensor<gpu, 1> &dX, Tensor<gpu, 1> &dgamma, Tensor<gpu, 1> &dbeta) { cudaStream_t stream = Stream<gpu>::GetStream(dX.stream_); const float *dY_data = dY.dptr_; const float *X_data = X.dptr_; const float *mu_data = mu.dptr_; const float *rsig_data = rsig.dptr_; const float *gamma_data = gamma.dptr_; float *ds_data = ds.dptr_; float *db_data = db.dptr_; float *dX_data = dX.dptr_; float *dgamma_data = dgamma.dptr_; float *dbeta_data = dbeta.dptr_; cuda::GroupNormBackward<float>(stream, N, G, D, HxW, dY_data, X_data, mu_data, rsig_data, gamma_data, ds_data, db_data, dX_data, dgamma_data, dbeta_data); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator *CreateOp<gpu>(GroupNormParam param, int dtype) { Operator *op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new GroupNormOp<gpu>(param); }); return op; } } // namespace op } // namespace mxnet
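// ---------------------------------------------------------------------------
// Illustrative sketch: GroupNormForwardCUDAKernel above works on the flattened
// (N, G, D, HxW) tensor and recovers the per-(batch, group) statistics index
// as i / (D*HxW) and the per-channel scale/shift index as (i / HxW) % (G*D).
// The host-side reference below applies the same Y = gamma*(X - mu)*rsig + beta
// normalization with identical index arithmetic; group_norm_forward_ref is a
// hypothetical name and mu/rsig are assumed precomputed as in the kernel.
// ---------------------------------------------------------------------------
#include <vector>

static void group_norm_forward_ref(int N, int G, int D, int HxW,
                                   const std::vector<float>& X,     // N*G*D*HxW
                                   const std::vector<float>& mu,    // N*G
                                   const std::vector<float>& rsig,  // N*G
                                   const std::vector<float>& gamma, // G*D
                                   const std::vector<float>& beta,  // G*D
                                   std::vector<float>& Y)
{
    const int C = G * D;
    const int size = N * C * HxW;
    Y.resize(size);
    for (int i = 0; i < size; ++i) {
        const int i_mu    = i / (D * HxW);   // which (batch, group) pair
        const int i_gamma = (i / HxW) % C;   // which channel
        Y[i] = gamma[i_gamma] * (X[i] - mu[i_mu]) * rsig[i_mu] + beta[i_gamma];
    }
}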
// host-no-diagnostics #include "Inputs/cuda.h" int func(); struct A { int x; static int host_var; }; int A::host_var; // dev-note {{host variable declared here}} namespace X { int host_var; // dev-note {{host variable declared here}} } // struct with non-empty ctor. struct B1 { int x; B1() { x = 1; } }; // struct with non-empty dtor. struct B2 { int x; B2() {} ~B2() { x = 0; } }; static int static_host_var; // dev-note {{host variable declared here}} __device__ int global_dev_var; __constant__ int global_constant_var; __shared__ int global_shared_var; int global_host_var; // dev-note 8{{host variable declared here}} const int global_const_var = 1; constexpr int global_constexpr_var = 1; int global_host_array[2] = {1, 2}; // dev-note {{host variable declared here}} const int global_const_array[2] = {1, 2}; constexpr int global_constexpr_array[2] = {1, 2}; A global_host_struct_var{1}; // dev-note 2{{host variable declared here}} const A global_const_struct_var{1}; constexpr A global_constexpr_struct_var{1}; // Check const host var initialized with non-empty ctor is not allowed in // device function. const B1 b1; // dev-note {{const variable cannot be emitted on device side due to dynamic initialization}} // Check const host var having non-empty dtor is not allowed in device function. const B2 b2; // dev-note {{const variable cannot be emitted on device side due to dynamic initialization}} // Check const host var initialized by non-constant initializer is not allowed // in device function. const int b3 = func(); // dev-note {{const variable cannot be emitted on device side due to dynamic initialization}} template<typename F> __global__ void kernel(F f) { f(); } // dev-note2 {{called by 'kernel<(lambda}} __device__ void dev_fun(int *out) { // Check access device variables are allowed. int &ref_dev_var = global_dev_var; int &ref_constant_var = global_constant_var; int &ref_shared_var = global_shared_var; *out = ref_dev_var; *out = ref_constant_var; *out = ref_shared_var; *out = global_dev_var; *out = global_constant_var; *out = global_shared_var; // Check access of non-const host variables are not allowed. *out = global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __device__ function}} *out = global_const_var; *out = global_constexpr_var; *out = b1.x; // dev-error {{reference to __host__ variable 'b1' in __device__ function}} *out = b2.x; // dev-error {{reference to __host__ variable 'b2' in __device__ function}} *out = b3; // dev-error {{reference to __host__ variable 'b3' in __device__ function}} global_host_var = 1; // dev-error {{reference to __host__ variable 'global_host_var' in __device__ function}} // Check reference of non-constexpr host variables are not allowed. int &ref_host_var = global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __device__ function}} const int &ref_const_var = global_const_var; const int &ref_constexpr_var = global_constexpr_var; *out = ref_host_var; *out = ref_constexpr_var; *out = ref_const_var; // Check access member of non-constexpr struct type host variable is not allowed. *out = global_host_struct_var.x; // dev-error {{reference to __host__ variable 'global_host_struct_var' in __device__ function}} *out = global_const_struct_var.x; *out = global_constexpr_struct_var.x; global_host_struct_var.x = 1; // dev-error {{reference to __host__ variable 'global_host_struct_var' in __device__ function}} // Check address taking of non-constexpr host variables is not allowed. 
int *p = &global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __device__ function}} const int *cp = &global_const_var; const int *cp2 = &global_constexpr_var; // Check access elements of non-constexpr host array is not allowed. *out = global_host_array[1]; // dev-error {{reference to __host__ variable 'global_host_array' in __device__ function}} *out = global_const_array[1]; *out = global_constexpr_array[1]; // Check ODR-use of host variables in namespace is not allowed. *out = X::host_var; // dev-error {{reference to __host__ variable 'host_var' in __device__ function}} // Check ODR-use of static host varables in class or file scope is not allowed. *out = A::host_var; // dev-error {{reference to __host__ variable 'host_var' in __device__ function}} *out = static_host_var; // dev-error {{reference to __host__ variable 'static_host_var' in __device__ function}} // Check function-scope static variable is allowed. static int static_var; *out = static_var; // Check non-ODR use of host varirables are allowed. *out = sizeof(global_host_var); *out = sizeof(global_host_struct_var.x); decltype(global_host_var) var1; decltype(global_host_struct_var.x) var2; } __global__ void global_fun(int *out) { int &ref_host_var = global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __global__ function}} int &ref_dev_var = global_dev_var; int &ref_constant_var = global_constant_var; int &ref_shared_var = global_shared_var; const int &ref_constexpr_var = global_constexpr_var; const int &ref_const_var = global_const_var; *out = global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __global__ function}} *out = global_dev_var; *out = global_constant_var; *out = global_shared_var; *out = global_constexpr_var; *out = global_const_var; *out = ref_host_var; *out = ref_dev_var; *out = ref_constant_var; *out = ref_shared_var; *out = ref_constexpr_var; *out = ref_const_var; } __host__ __device__ void host_dev_fun(int *out) { int &ref_host_var = global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __host__ __device__ function}} int &ref_dev_var = global_dev_var; int &ref_constant_var = global_constant_var; int &ref_shared_var = global_shared_var; const int &ref_constexpr_var = global_constexpr_var; const int &ref_const_var = global_const_var; *out = global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __host__ __device__ function}} *out = global_dev_var; *out = global_constant_var; *out = global_shared_var; *out = global_constexpr_var; *out = global_const_var; *out = ref_host_var; *out = ref_dev_var; *out = ref_constant_var; *out = ref_shared_var; *out = ref_constexpr_var; *out = ref_const_var; } inline __host__ __device__ void inline_host_dev_fun(int *out) { int &ref_host_var = global_host_var; int &ref_dev_var = global_dev_var; int &ref_constant_var = global_constant_var; int &ref_shared_var = global_shared_var; const int &ref_constexpr_var = global_constexpr_var; const int &ref_const_var = global_const_var; *out = global_host_var; *out = global_dev_var; *out = global_constant_var; *out = global_shared_var; *out = global_constexpr_var; *out = global_const_var; *out = ref_host_var; *out = ref_dev_var; *out = ref_constant_var; *out = ref_shared_var; *out = ref_constexpr_var; *out = ref_const_var; } void dev_lambda_capture_by_ref(int *out) { int &ref_host_var = global_host_var; kernel<<<1,1>>>([&]() { int &ref_dev_var = global_dev_var; int &ref_constant_var = 
global_constant_var; int &ref_shared_var = global_shared_var; const int &ref_constexpr_var = global_constexpr_var; const int &ref_const_var = global_const_var; *out = global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __host__ __device__ function}} // dev-error@-1 {{capture host variable 'out' by reference in device or host device lambda function}} *out = global_dev_var; *out = global_constant_var; *out = global_shared_var; *out = global_constexpr_var; *out = global_const_var; *out = ref_host_var; // dev-error {{capture host variable 'ref_host_var' by reference in device or host device lambda function}} *out = ref_dev_var; *out = ref_constant_var; *out = ref_shared_var; *out = ref_constexpr_var; *out = ref_const_var; }); } void dev_lambda_capture_by_copy(int *out) { int &ref_host_var = global_host_var; kernel<<<1,1>>>([=]() { int &ref_dev_var = global_dev_var; int &ref_constant_var = global_constant_var; int &ref_shared_var = global_shared_var; const int &ref_constexpr_var = global_constexpr_var; const int &ref_const_var = global_const_var; *out = global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __host__ __device__ function}} *out = global_dev_var; *out = global_constant_var; *out = global_shared_var; *out = global_constexpr_var; *out = global_const_var; *out = ref_host_var; *out = ref_dev_var; *out = ref_constant_var; *out = ref_shared_var; *out = ref_constexpr_var; *out = ref_const_var; }); } // Texture references are special. As far as C++ is concerned they are host // variables that are referenced from device code. However, they are handled // very differently by the compiler under the hood and such references are // allowed. Compiler should produce no warning here, but it should diagnose the // same case without the device_builtin_texture_type attribute. template <class, int = 1, int = 1> struct __attribute__((device_builtin_texture_type)) texture { static texture<int> ref; __device__ void c() { auto &x = ref; } }; template <class, int = 1, int = 1> struct not_a_texture { static not_a_texture<int> ref; __device__ void c() { auto &x = ref; // dev-error {{reference to __host__ variable 'ref' in __device__ function}} } }; template<> not_a_texture<int> not_a_texture<int>::ref; // dev-note {{host variable declared here}} __device__ void test_not_a_texture() { not_a_texture<int> inst; inst.c(); // dev-note {{in instantiation of member function 'not_a_texture<int, 1, 1>::c' requested here}} } // Test static variable in host function used by device function. void test_static_var_host() { for (int i = 0; i < 10; i++) { static int x; // dev-note {{host variable declared here}} struct A { __device__ int f() { return x; // dev-error{{reference to __host__ variable 'x' in __device__ function}} } }; } } // Test static variable in device function used by device function. __device__ void test_static_var_device() { for (int i = 0; i < 10; i++) { static int x; int y = x; struct A { __device__ int f() { return x; } }; } }
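// ---------------------------------------------------------------------------
// Illustrative sketch: the diagnostics above reject ODR-uses of plain host
// globals from device code.  A common fix in real CUDA sources is to mirror
// the host value into a __device__ (or __constant__) variable and copy it over
// with cudaMemcpyToSymbol() before launching the kernel, as sketched below;
// host_value, mirrored_value and use_mirrored are hypothetical names.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>
#include <cstdio>

int host_value = 42;            // host-only global; not visible to device code
__device__ int mirrored_value;  // device-side mirror of host_value

__global__ void use_mirrored(int* out)
{
    *out = mirrored_value;      // OK: reference to a __device__ variable
}

int main()
{
    int* d_out = nullptr;
    cudaMalloc(&d_out, sizeof(int));
    // Copy the current host value into the device symbol, then launch.
    cudaMemcpyToSymbol(mirrored_value, &host_value, sizeof(int));
    use_mirrored<<<1, 1>>>(d_out);
    int h_out = 0;
    cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
    printf("%d\n", h_out);      // expect: 42
    cudaFree(d_out);
    return 0;
}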
namespace anakin { namespace saber { template <typename Dtype> __global__ void cal_lstm_kernel_batch_with_peephole_anyactivate( const Dtype* w_x, const Dtype* b_i, const Dtype* b_f, const Dtype* b_c, const Dtype* b_o, const Dtype* w_ci, const Dtype* w_cf, const Dtype* w_co, Dtype* cell,const int hidden_size, const int aligned_hidden_size,const int batch_size,const int word_start_id, const ActiveType gate_activity, const ActiveType cell_activity,const ActiveType candidate_activity,Dtype* output ) { const int thread_id = blockIdx.x*blockDim.x+threadIdx.x; const int batch_id = thread_id/aligned_hidden_size; const int tid=thread_id%aligned_hidden_size; if (tid < hidden_size && batch_id<batch_size) { Dtype(*gat_act)(const Dtype)=Activate_inner<Dtype>(gate_activity); Dtype(*cell_act)(const Dtype)=Activate_inner<Dtype>(cell_activity); Dtype(*candi_act)(const Dtype)=Activate_inner<Dtype>(candidate_activity); const int emit_wx_offset = (word_start_id + batch_id) * hidden_size * 4; const Dtype* w_x_i = w_x + emit_wx_offset; const Dtype* w_x_f = w_x_i + hidden_size ; const Dtype* w_x_c = w_x_f + hidden_size; const Dtype* w_x_o = w_x_c + hidden_size; Dtype* gate_h_p = output + batch_id * hidden_size; Dtype* gate_c_p = cell + batch_id * hidden_size; const Dtype c_1 = gate_c_p[tid]; const Dtype gate_i = gat_act(w_x_i[tid] + b_i[tid] + w_ci[tid] * c_1); const Dtype gate_f = gat_act(w_x_f[tid] + b_f[tid] + w_cf[tid] * c_1); const Dtype gate_c_s = cell_act(w_x_c[tid] + b_c[tid]); const Dtype gate_c = gate_f * c_1 + gate_i * gate_c_s; const Dtype gate_o = gat_act(w_x_o[tid] + b_o[tid] + gate_c * w_co[tid]); gate_c_p[tid] = gate_c; gate_h_p[tid] = gate_o * candi_act(gate_c); } } template <typename Dtype> __global__ void cal_lstm_kernel_batch_without_peephole_anyactivate( const Dtype* w_x,const Dtype* b_i, const Dtype* b_f, const Dtype* b_c, const Dtype* b_o, Dtype* cell, const int hidden_size, const int aligned_hidden_size,const int batch_size,const int word_start_id, const ActiveType gate_activity,const ActiveType cell_activity,const ActiveType candidate_activity, Dtype* output) { const int thread_id = blockIdx.x*blockDim.x+threadIdx.x; const int batch_id = thread_id/aligned_hidden_size; const int tid=thread_id%aligned_hidden_size; if (tid < hidden_size && batch_id<batch_size) { Dtype(*gat_act)(const Dtype)=Activate_inner<Dtype>(gate_activity); Dtype(*cell_act)(const Dtype)=Activate_inner<Dtype>(cell_activity); Dtype(*candi_act)(const Dtype)=Activate_inner<Dtype>(candidate_activity); const int emit_wx_offset = (word_start_id + batch_id) * hidden_size * 4; const Dtype* w_x_i = w_x + emit_wx_offset; const Dtype* w_x_f = w_x_i + hidden_size ; const Dtype* w_x_c = w_x_f + hidden_size; const Dtype* w_x_o = w_x_c + hidden_size; Dtype* gate_h_p = output + batch_id * hidden_size; Dtype* gate_c_p = cell + batch_id * hidden_size; const Dtype c_1 = gate_c_p[tid]; const Dtype gate_i = gat_act(w_x_i[tid] + b_i[tid]); const Dtype gate_f = gat_act(w_x_f[tid] + b_f[tid]); const Dtype gate_c_s = cell_act(w_x_c[tid] + b_c[tid]); const Dtype gate_c = gate_f * c_1 + gate_i * gate_c_s; const Dtype gate_o = gat_act(w_x_o[tid] + b_o[tid]); gate_c_p[tid] = gate_c; gate_h_p[tid] = gate_o * candi_act(gate_c); // printf("tid = %d, f = %f, i = %f, o = %f, hout = %f, w_x_i = %f, c_i = %f,c_out = %f, batch_id = %d\n",tid,gate_f,gate_i,gate_o,gate_h_p[tid],w_x_i[tid],c_1,gate_c,batch_id); } } template <typename Dtype> __global__ void cal_lstm_kernel_batch_with_peephole( const Dtype* w_x, const Dtype* b_i, const Dtype* b_f, const Dtype* 
b_c, const Dtype* b_o, const Dtype* w_ci, const Dtype* w_cf, const Dtype* w_co, Dtype* cell,const int hidden_size, const int aligned_hidden_size,const int batch_size, const int word_start_id, Dtype* output) { const int thread_id = blockIdx.x*blockDim.x+threadIdx.x; const int batch_id = thread_id/aligned_hidden_size; const int tid=thread_id%aligned_hidden_size; if (tid < hidden_size && batch_id<batch_size) { const int emit_wx_offset = (word_start_id + batch_id) * hidden_size * 4; const Dtype* w_x_i = w_x + emit_wx_offset; const Dtype* w_x_f = w_x_i + hidden_size ; const Dtype* w_x_c = w_x_f + hidden_size; const Dtype* w_x_o = w_x_c + hidden_size; Dtype* gate_h_p = output + batch_id * hidden_size; Dtype* gate_c_p = cell + batch_id * hidden_size; const Dtype c_1 = gate_c_p[tid]; const Dtype gate_i = Sigmoid(w_x_i[tid] + b_i[tid] + w_ci[tid] * c_1); const Dtype gate_f = Sigmoid(w_x_f[tid] + b_f[tid] + w_cf[tid] * c_1); const Dtype gate_c_s = Tanh(w_x_c[tid] + b_c[tid]); const Dtype gate_c = gate_f * c_1 + gate_i * gate_c_s; const Dtype gate_o = Sigmoid(w_x_o[tid] + b_o[tid] + gate_c * w_co[tid]); gate_c_p[tid] = gate_c; gate_h_p[tid] = gate_o * Tanh(gate_c); // printf("tid = %d, f = %f, i = %f, o = %f, hout = %f, w_x_i = %f, c_i = %f,c_out = %f, batch_id = %d\n",tid,gate_f,gate_i,gate_o,gate_h_p[tid],w_x_i[tid],c_1,gate_c,batch_id); } } template <typename Dtype> __global__ void cal_lstm_kernel_batch_without_peephole( const Dtype* w_x,const Dtype* b_i, const Dtype* b_f, const Dtype* b_c, const Dtype* b_o, Dtype* cell, const int hidden_size, const int aligned_hidden_size,const int batch_size,const int word_start_id, Dtype* output) { const int thread_id = blockIdx.x*blockDim.x+threadIdx.x; const int batch_id = thread_id/aligned_hidden_size; const int tid=thread_id%aligned_hidden_size; if (tid < hidden_size && batch_id<batch_size) { const int emit_wx_offset = (word_start_id + batch_id) * hidden_size * 4; const Dtype* w_x_i = w_x + emit_wx_offset; const Dtype* w_x_f = w_x_i + hidden_size ; const Dtype* w_x_c = w_x_f + hidden_size; const Dtype* w_x_o = w_x_c + hidden_size; Dtype* gate_h_p = output + batch_id * hidden_size; Dtype* gate_c_p = cell + batch_id * hidden_size; const Dtype c_1 = gate_c_p[tid]; const Dtype gate_i = Sigmoid_fluid(w_x_i[tid] + b_i[tid]); const Dtype gate_f = Sigmoid_fluid(w_x_f[tid] + b_f[tid]); const Dtype gate_c_s = Tanh_fluid(w_x_c[tid] + b_c[tid]); const Dtype gate_c = gate_f * c_1 + gate_i * gate_c_s; const Dtype gate_o = Sigmoid_fluid(w_x_o[tid] + b_o[tid]); gate_c_p[tid] = gate_c; gate_h_p[tid] = gate_o * Tanh_fluid(gate_c); } } template<> SaberStatus SaberLstm<NV, AK_FLOAT>::dispatch_batch( const std::vector < Tensor<NV>* >& inputs, std::vector < Tensor<NV>* >& outputs, LstmParam < NV >& param) { Tensor<NV>* x = inputs[0]; std::vector<int> offset_vec = x->get_seq_offset()[x->get_seq_offset().size()-1]; int seq_sum = x->num(); int batch_size = offset_vec.size() - 1; const OpDataType* x_data = (const OpDataType*)x->data(); const OpDataType *weight_h = (const OpDataType *)(param.weight()->data())+4*_hidden_size*_word_size; const OpDataType *weight_w = (const OpDataType *)param.weight()->data(); const OpDataType *bias = (const OpDataType *)param.bias()->data(); const OpDataType *weight_peephole = (const OpDataType *)(param.bias()->data())+4*_hidden_size; const OpDataType* h_init = nullptr; const OpDataType* inner_x = (const OpDataType *)inputs[0]->data(); OpDataType* inner_h_out = (OpDataType *)outputs[0]->mutable_data(); OpDataType* inner_cell = nullptr; _gemm_wx = 
saber_find_fast_sass_gemm(false, false, seq_sum, 4 * _hidden_size,_word_size); _gemm_wh = saber_find_fast_sass_gemm(false, false, batch_size, 4 * _hidden_size, _hidden_size); utils::try_expand_tensor(_temp_map_dev,seq_sum); bool transform = _seq_util.get_sorted_map(offset_vec, this->_ctx->get_compute_stream()); std::vector<int> emit_offset_vec=_seq_util.get_emit_offset_vec(); int emit_length = emit_offset_vec.size()-1; if (inputs.size() > 1) { h_init = (const OpDataType *)inputs[1]->data(); utils::try_expand_tensor(_init_hidden,batch_size * _hidden_size); h_init = (const OpDataType *)_init_hidden.data(); } else if (param.init_hidden() != nullptr) { h_init = (const OpDataType *)param.init_hidden()->data(); //FIXME:is it correct? } else { if (_temp_zero.valid_size() < batch_size * _hidden_size) { utils::try_expand_tensor(_temp_zero,batch_size * _hidden_size); CUDA_CHECK(cudaMemsetAsync(_temp_zero.mutable_data(), 0, sizeof(OpDataType)*batch_size * _hidden_size, _ctx->get_compute_stream())); } h_init = (const OpDataType *)_temp_zero.data(); } utils::try_expand_tensor(_temp_wx,seq_sum * 4 * _hidden_size); utils::try_expand_tensor(_temp_wh,batch_size * 4 * _hidden_size); utils::try_expand_tensor(_temp_out,seq_sum * _hidden_size * param.num_direction); utils::try_expand_tensor(_temp_cell,batch_size * _hidden_size); if (transform) { utils::try_expand_tensor(_temp_x,seq_sum * _word_size); _seq_util.seq_2_sorted_seq(x_data, (OpDataType *)_temp_x.mutable_data(), _word_size, _ctx->get_compute_stream()); inner_h_out = (OpDataType *)_temp_out.mutable_data(); inner_x = (OpDataType *)_temp_x.mutable_data(); if (inputs.size() > 1 || param.init_hidden() != nullptr) { CHECK(false) << "not support inner_h_init != nullptr"; } } inner_cell = (OpDataType *)_temp_cell.mutable_data(); CUDA_CHECK(cudaMemsetAsync(inner_cell, 0, sizeof(OpDataType)*batch_size * _hidden_size, _ctx->get_compute_stream())); OpDataType* temp_wh = (OpDataType *)_temp_wh.mutable_data(); OpDataType* temp_wx = (OpDataType *)_temp_wx.mutable_data(); _gemm_wx(seq_sum, 4 * _hidden_size, _word_size, 1.0, inner_x, 0.0, weight_w, temp_wx, _ctx->get_compute_stream()); const int i_offset = 0; const int f_offset = 1; const int c_offset = 2; const int o_offset = 3; const OpDataType* b_i = bias + i_offset * _hidden_size; const OpDataType* b_f = bias + f_offset * _hidden_size; const OpDataType* b_c = bias + c_offset * _hidden_size; const OpDataType* b_o = bias + o_offset * _hidden_size; const OpDataType* w_ci = nullptr; const OpDataType* w_cf =nullptr; const OpDataType* w_co =nullptr; if(param.with_peephole){ w_ci = weight_peephole + 0 * _hidden_size; w_cf = weight_peephole + 1 * _hidden_size; w_co = weight_peephole + 2 * _hidden_size; } for (int word_id = 0; word_id < emit_length; word_id++) { int real_word_id = word_id; int last_word_id = word_id - 1; if (param.is_reverse && batch_size == 1) { real_word_id = emit_length - word_id - 1; last_word_id = real_word_id + 1; } int emit_word_id_start = emit_offset_vec[real_word_id]; int emit_word_id_end = emit_offset_vec[real_word_id + 1]; int emit_word_length = emit_word_id_end - emit_word_id_start; const OpDataType* hin; if (word_id == 0) { hin = h_init; } else { hin = inner_h_out + emit_offset_vec[last_word_id] * _hidden_size; } // DLOG(INFO) << "word_id = " << word_id << ",emit_start = " << emit_word_id_start << ",emit_end=" <<emit_word_id_end; OpDataType* hout = nullptr; hout = emit_offset_vec[real_word_id] * _hidden_size + inner_h_out; //wh _gemm_wh(emit_word_length, 4 * _hidden_size, _hidden_size, 1.0, 
hin, 1.f, weight_h, temp_wx+emit_word_id_start*4*_hidden_size, _ctx->get_compute_stream()); const int block_dim=512; const int grid_dim=utils::div_up(emit_word_length*_aligned_hidden_size,block_dim); if (param.gate_activity == Active_sigmoid && param.cell_activity == Active_tanh && param.candidate_activity == Active_tanh) { if (param.with_peephole) { cal_lstm_kernel_batch_with_peephole << <grid_dim, block_dim , 0 , _ctx->get_compute_stream() >> > (temp_wx, b_i,b_f,b_c,b_o, w_ci,w_cf,w_co, inner_cell, _hidden_size,_aligned_hidden_size,emit_word_length, emit_word_id_start, hout); } else { cal_lstm_kernel_batch_without_peephole << < grid_dim, block_dim , 0 , _ctx->get_compute_stream() >> > (temp_wx, b_i,b_f,b_c,b_o, inner_cell, _hidden_size, _aligned_hidden_size,emit_word_length,emit_word_id_start, hout); } } else { if (param.with_peephole) { cal_lstm_kernel_batch_with_peephole_anyactivate << < grid_dim, block_dim , 0 , _ctx->get_compute_stream() >> > (temp_wx, b_i, b_f, b_c, b_o, w_ci, w_cf, w_co, inner_cell, _hidden_size, _aligned_hidden_size,emit_word_length,emit_word_id_start, param.gate_activity, param.cell_activity, param.candidate_activity, hout); } else{ cal_lstm_kernel_batch_without_peephole_anyactivate << < grid_dim, block_dim , 0 , _ctx->get_compute_stream() >> > (temp_wx, b_i, b_f, b_c, b_o, inner_cell, _hidden_size,_aligned_hidden_size,emit_word_length, emit_word_id_start, param.gate_activity, param.cell_activity, param.candidate_activity, hout); } } } if (transform) { _seq_util.sorted_seq_2_seq((const OpDataType *)_temp_out.data(), (OpDataType *)outputs[0]->mutable_data(), _hidden_size, _ctx->get_compute_stream()); } outputs[0]->set_seq_offset(inputs[0]->get_seq_offset()); return SaberSuccess; }; //TODO:complate dispatch_once template<> SaberStatus SaberLstm<NV, AK_FLOAT>::dispatch_once( const std::vector < Tensor<NV>* >& inputs, std::vector < Tensor<NV>* >& outputs, LstmParam < NV >& param) { return SaberSuccess; }; template<> SaberStatus SaberLstm<NV, AK_FLOAT>::dispatch( const std::vector < Tensor<NV>* >& inputs, std::vector < Tensor<NV>* >& outputs, LstmParam < NV >& param) { CHECK_EQ(inputs.size(),1)<<"only support input size = 1"; CHECK_EQ(outputs.size(),1)<<"only support outputs size = 1"; CHECK_EQ(param.init_hidden()==nullptr, true )<<"only support param.init_hidden() == nullptr"; CHECK_EQ(param.num_layers,1)<<"only support param.num_layers==1"; return dispatch_batch(inputs, outputs, param); } DEFINE_OP_TEMPLATE(SaberLstm, LstmParam, NV, AK_HALF); DEFINE_OP_TEMPLATE(SaberLstm, LstmParam, NV, AK_INT8); } }
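// ---------------------------------------------------------------------------
// Illustrative sketch: the four cal_lstm_kernel_* kernels above implement the
// same per-unit cell update and differ only in the peephole terms and in
// whether the activations are fixed (sigmoid/tanh) or chosen at run time.
// The scalar host reference below spells out the update for one hidden unit
// with peepholes, mirroring cal_lstm_kernel_batch_with_peephole;
// lstm_unit_ref is a hypothetical name.
// ---------------------------------------------------------------------------
#include <cmath>

static float lstm_unit_ref(float wx_i, float wx_f, float wx_c, float wx_o,
                           float b_i, float b_f, float b_c, float b_o,
                           float w_ci, float w_cf, float w_co,
                           float& c /* in: c_{t-1}, out: c_t */)
{
    auto sigmoid = [](float x) { return 1.0f / (1.0f + std::exp(-x)); };
    const float c_prev = c;
    const float gate_i = sigmoid(wx_i + b_i + w_ci * c_prev);  // input gate
    const float gate_f = sigmoid(wx_f + b_f + w_cf * c_prev);  // forget gate
    const float cand   = std::tanh(wx_c + b_c);                // candidate cell
    const float c_new  = gate_f * c_prev + gate_i * cand;
    const float gate_o = sigmoid(wx_o + b_o + w_co * c_new);   // peephole on c_t
    c = c_new;
    return gate_o * std::tanh(c_new);                          // h_t
}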
#pragma once #include "cuda/Complex.cuh" #include "cuda/ComputeCapabilities.cuh" #include "cuda/CudaUtils.cuh" #include "cuda/DeviceTensor.cuh" #include "cuda/fbfft/FBFFTCommon.cuh" #include "cuda/fbfft/FBFFTParameters.h" #include "cuda/fbfft/FFT2D32.cuh" #include "cuda/util/CachedDeviceProperties.h" #include <cuda_runtime.h> #include <glog/logging.h> using namespace facebook::cuda; namespace facebook { namespace cuda { namespace fbfft { namespace detail { template <int FFTSize> __device__ __forceinline__ void load2D( const DeviceTensor<float, 3>& real, FFT1DCoeffs<FFTSize>& coeffs, const int batch, const int indexX, const int indexY, const int padL, const int padU) { int LogFFTSize = getMSB(FFTSize); // adjustedThreadIdxX<FFTSize>() crams multiple < WARP_SIZE FFTs in a warp int x = adjustedThreadIdxX<FFTSize>() + indexX * blockDim.x; // adjustedThreadIdxX<FFTSize>() crams multiple < WARP_SIZE FFTs in a warp int y = adjustedThreadIdxY<FFTSize>() + indexY * blockDim.y; // Zero padding without a need to copy the input data to a larger array. coeffs[indexX] = Complex(inBounds(y, x, padU, padL, real) ? real[batch][y - padU][x - padL].ldg() : 0.0f, 0.0f); } template <int FFTSize, bool EvenDivideBatches> __device__ __forceinline__ void load2DR2C( const DeviceTensor<float, 3>& real, FFT1DCoeffs<FFTSize>& coeffs, const int batch, const int indexX, const int indexY, const int padL, const int padU) { int LogFFTSize = getMSB(FFTSize); // adjustedThreadIdxX<FFTSize>() crams multiple < WARP_SIZE FFTs in a warp int x = adjustedThreadIdxX<FFTSize>() + indexX * blockDim.x; // adjustedThreadIdxX<FFTSize>() crams multiple < WARP_SIZE FFTs in a warp int y = adjustedThreadIdxY<FFTSize>() + indexY * blockDim.y; // Zero padding without a need to copy the input data to a larger array. coeffs[indexX] = (inBounds(y, x, padU, padL, real)) ? Complex(real[batch][y - padU][x - padL].ldg(), (EvenDivideBatches || batch + 1 < real.getSize(0)) ? real[batch + 1][y - padU][x - padL].ldg() : 0.0f) : Complex(0.0f); } template <int FFTSize> __device__ __forceinline__ void store2D( DeviceTensor<float, 4>& complexAsFloat, const FFT1DCoeffs<FFTSize>& coeffs, const int batch, const int indexX, const int indexY) { // adjustedThreadIdxX<FFTSize>() crams multiple < WARP_SIZE FFTs in a warp int x = adjustedThreadIdxX<FFTSize>() + indexX * blockDim.x; // adjustedThreadIdxX<FFTSize>() crams multiple < WARP_SIZE FFTs in a warp int y = adjustedThreadIdxY<FFTSize>() + indexY * blockDim.y; if (y < complexAsFloat.getSize(1) && x < complexAsFloat.getSize(2)) { complexAsFloat[batch][y][x][0].as<Complex>() = coeffs[indexX]; } } // Performs cross warp transpose of the data in registers, synchronously for // each register at a time and takes advantage of Hermitian symmetry. // // Supports multiple FFTs per warp. 
// // Invariants are: // - not synchronized on entry of the loop // - synchronized at each step of the loop // - synchronized on exit template <int FFTSize, int SMemRows, int RowsPerWarp, int FFTPerWarp> __device__ __forceinline__ void transpose2DHermitianMultiple( FFT1DCoeffs<FFTSize> (&coeffsArray)[RowsPerWarp], Complex(*buffer)[SMemRows + 1]) { const int LogFFTSize = getMSB(FFTSize); const int thx0 = (threadIdx.x >> LogFFTSize) << LogFFTSize; #pragma unroll for (int row = 0; row < RowsPerWarp; ++row) { int y = adjustedThreadIdxY<FFTSize>() + row * blockDim.y; FFT1DCoeffs<FFTSize>& coeffs = coeffsArray[row]; buffer[y][threadIdx.x] = coeffs.coeff[0]; } __syncthreads(); #pragma unroll for (int row = 0; row < RowsPerWarp; ++row) { int y = adjustedThreadIdxY<FFTSize>() + row * blockDim.y; FFT1DCoeffs<FFTSize>& coeffs = coeffsArray[row]; coeffs.coeff[0] = buffer [adjustedThreadIdxX<FFTSize>()] [thx0 + y]; } __syncthreads(); } template <int FFTSize, int FFTPerWarp, int RowsPerWarp, bool BitReverse> __global__ void decimateInFrequencyHermitian2DWarpKernel( DeviceTensor<float, 3> real, DeviceTensor<float, 4> complexAsFloat, const int padL, const int padU) { cuda_static_assert(!(FFTPerWarp & (FFTPerWarp - 1))); cuda_static_assert(FFTPerWarp * FFTSize <= WARP_SIZE); // Only let FFTs <= 8 have multiple per warp, 16 and 32 perform better with // 1 per warp. cuda_static_assert(FFTSize <= WARP_SIZE); assert(FFTPerWarp * FFTSize == blockDim.x); assert(real.getSize(0) % FFTPerWarp == 0); int LogFFTSize = getMSB(FFTSize); // Enforce that the number of FFTs we perform is divisible by the number of // FFTs per warp, otherwise weird divergence will occur and possibly bugs. const int batch = adjustedBatchR2C<FFTSize, FFTPerWarp, true>(); if (batch >= real.getSize(0)) { return; } // Can support multiple rows of FFT per warp if needed, atm use 1 FFT1DCoeffs<FFTSize> coeffsArray[RowsPerWarp]; FFT1DCoeffs<FFTSize> coeffsArray2[2][RowsPerWarp]; #pragma unroll for (int i = 0; i < RowsPerWarp; ++i) { load2DR2C<FFTSize, false>(real, coeffsArray[i], batch, 0, i, padL, padU); } // Twiddles is the same as for 1D but fully data parallel across threadIdx.y FFT1DRoots<FFTSize> roots; roots.template twiddles<true>(); #pragma unroll for (int i = 0; i < RowsPerWarp; ++i) { decimateInFrequency1DWarp<FFTSize>(coeffsArray[i][0], roots[0]); } FFT1DBitReversal<FFTSize> bits; if (BitReverse) { bits.computeBitReversal(0); #pragma unroll for (int i = 0; i < RowsPerWarp; ++i) { bitReverse1DWarp<FFTSize, FFTPerWarp>(coeffsArray[i], bits, 0); } } #pragma unroll for (int i = 0; i < RowsPerWarp; ++i) { Complex other = shfl(coeffsArray[i][0], FFTSize - adjustedThreadIdxX<FFTSize>(), FFTSize); coeffsArray2[0][i].coeff[0] = Complex(0.5f * (coeffsArray[i][0].re() + other.re()), 0.5f * (coeffsArray[i][0].im() - other.im())); coeffsArray2[1][i].coeff[0] = Complex(0.5f * ( coeffsArray[i][0].im() + other.im()), 0.5f * (-coeffsArray[i][0].re() + other.re())); } __shared__ Complex buffer[FFTSize][WARP_SIZE + 1]; transpose2DHermitianMultiple<FFTSize, WARP_SIZE, RowsPerWarp, FFTPerWarp>( coeffsArray2[0], (Complex(*)[WARP_SIZE + 1])buffer); transpose2DHermitianMultiple<FFTSize, WARP_SIZE, RowsPerWarp, FFTPerWarp>( coeffsArray2[1], (Complex(*)[WARP_SIZE + 1])buffer); #pragma unroll for (int i = 0; i < RowsPerWarp; ++i) { int y = adjustedThreadIdxY<FFTSize>() + i * blockDim.y; if (y < FFTSize / 2 + 1) { decimateInFrequency1DWarp<FFTSize>(coeffsArray2[0][i][0], roots[0]); decimateInFrequency1DWarp<FFTSize>(coeffsArray2[1][i][0], roots[0]); } } if 
(BitReverse) { #pragma unroll for (int i = 0; i < RowsPerWarp; ++i) { int y = adjustedThreadIdxY<FFTSize>() + i * blockDim.y; if (y < FFTSize / 2 + 1) { // Bit reversal is the same as for 1D but fully data parallel across // threadIdx.y bitReverse1DWarp<FFTSize, FFTPerWarp>(coeffsArray2[0][i], bits, 0); bitReverse1DWarp<FFTSize, FFTPerWarp>(coeffsArray2[1][i], bits, 0); } } } #pragma unroll for (int i = 0; i < RowsPerWarp; ++i) { store2D<FFTSize>(complexAsFloat, coeffsArray2[0][i], batch, 0, i); if (batch + 1 < real.getSize(0)) { store2D<FFTSize>(complexAsFloat, coeffsArray2[1][i], batch + 1, 0, i); } } } // First half of the 2-D transform for >= 64. // // This is a good 2D kernel, with 64, 1, 4, 4 sizing and the configuration // below it is 10% faster than the equivalent batched 1-D version, even if it // has only 1/2 the occupancy. template <int FFTSize, int RowsPerKernel, int BlockDimY, bool BitReverse> __launch_bounds__(32 * 8, 4) // 64 X 64 and 128 x 128 __global__ void decimateInFrequency2DKernel( DeviceTensor<float, 3> real, DeviceTensor<float, 4> complexAsFloat, const int padL, const int padU) { assert(blockDim.x == WARP_SIZE); assert(blockDim.y == BlockDimY); assert(real.getSize(0) == complexAsFloat.getSize(0)); // This version does not deal with a whole N x N FFT within a single block. // It *cannot* update in place transposed -> ensure we have the same // dimensions to update one row at a time. int LogFFTSize = getMSB(FFTSize); // Enforce that the number of FFTs we perform is divisible by the number of // FFTs per warp, otherwise weird divergence will occur and possibly bugs const int batch = adjustedBatch<FFTSize, 1>(); if (batch >= real.getSize(0)) { return; } for (int yiter = 0; yiter < FFTSize; yiter += RowsPerKernel * BlockDimY) { FFT1DCoeffs<FFTSize> coeffsArray[RowsPerKernel]; const int ColumnsPerWarp = coeffsArray[0].ColumnsPerWarp; __shared__ Complex buffer[BlockDimY][FFTSize]; #pragma unroll for (int row = 0; row < RowsPerKernel; ++row) { int y = yiter + threadIdx.y + row * blockDim.y; #pragma unroll for (int reg = 0; reg < ColumnsPerWarp; ++reg) { int x = threadIdx.x + reg * blockDim.x; coeffsArray[row][reg] = Complex(inBounds(y, x, padU, padL, real) ? real[batch][y - padU][x - padL].ldg() : 0.0f, 0.0f); } } { // Twiddles is the same as for 1D but fully data parallel on threadIdx.y FFT1DRoots<FFTSize> roots; roots.template twiddles<true>(); decimateInFrequency1D<FFTSize, 1, RowsPerKernel, 0, RowsPerKernel>( coeffsArray, roots, batch); } if (BitReverse) { FFT1DBitReversal<FFTSize> bits; #pragma unroll for (int reg = 0; reg < ColumnsPerWarp; ++reg) { bits.computeBitReversal(reg); } Complex (*buffer2) [FFTSize] = (Complex(*)[FFTSize])buffer; // bitReverse all #pragma unroll for (int row = 0; row < RowsPerKernel; ++row) { #pragma unroll for (int reg = 0; reg < ColumnsPerWarp; ++reg) { int x = getLaneId() + reg * WARP_SIZE; buffer2[threadIdx.y][x] = coeffsArray[row][reg]; } #pragma unroll for (int reg = 0; reg < ColumnsPerWarp; ++reg) { coeffsArray[row][reg] = buffer2[threadIdx.y][bits[reg]]; } } // No need to sync up here, no following kernel } #pragma unroll for (int row = 0; row < RowsPerKernel; ++row) { int y = yiter + threadIdx.y + row * blockDim.y; #pragma unroll for (int reg = 0; reg < ColumnsPerWarp; ++reg) { int x = threadIdx.x + reg * blockDim.x; if (y < complexAsFloat.getSize(1) && x < complexAsFloat.getSize(2)) { *(complexAsFloat[batch][y][x].dataAs<Complex>()) = coeffsArray[row][reg]; } } } } } // Second half of the 2-D transform for >= 64. 
// template <int FFTSize, int RowsPerKernel, int BlockDimY, bool BitReverse> __device__ __forceinline__ void decimateInFrequency2DKernel( const DeviceTensor<Complex, 3> src, DeviceTensor<Complex, 3> dst) { assert(blockDim.x == WARP_SIZE); assert(blockDim.y == BlockDimY); assert(src.getSize(0) == dst.getSize(0)); // This version does not deal with a whole N x N FFT within a single block. // It *cannot* update in place transposed -> ensure we are writing to 2 // different storage areas assert(src.data() != dst.data()); int LogFFTSize = getMSB(FFTSize); // Enforce that the number of FFTs we perform is divisible by the number of // FFTs per warp, otherwise weird divergence will occur and possibly bugs const int batch = adjustedBatch<FFTSize, 1>(); if (batch >= src.getSize(0)) { return; } const int UpperBound = FFTSize / 2 + 1; for (int yiter = 0; yiter < UpperBound; yiter += RowsPerKernel * BlockDimY) { // Split into lower and upper half, upper half will be cut by symmetry FFT1DCoeffs<FFTSize> coeffsArray[RowsPerKernel]; const int ColumnsPerWarp = coeffsArray[0].ColumnsPerWarp; __shared__ Complex buffer[BlockDimY][FFTSize]; #pragma unroll for (int row = 0; row < RowsPerKernel; ++row) { int y = yiter + threadIdx.y + row * blockDim.y; #pragma unroll for (int reg = 0; reg < ColumnsPerWarp; ++reg) { int x = threadIdx.x + reg * blockDim.x; // This is the key: uncoalesced, transposed reads using ldg work // really well and remove the need for doing an actual transpose. // TODO: Awkward ldg use but does the job coeffsArray[row][reg] = (x < src.getSize(1) && y < src.getSize(2)) ? ldg(src[batch][x][y].data()) : Complex(0.0f); } } { // Twiddles is the same as for 1D but fully data parallel wrt threadIdx.y FFT1DRoots<FFTSize> roots; roots.template twiddles<true>(); decimateInFrequency1D<FFTSize, 1, RowsPerKernel, 0, RowsPerKernel>(coeffsArray, roots, batch); } if (BitReverse) { { FFT1DBitReversal<FFTSize> bits; #pragma unroll for (int reg = 0; reg < ColumnsPerWarp; ++reg) { bits.computeBitReversal(reg); } Complex (*buffer2) [FFTSize] = (Complex(*)[FFTSize])buffer; // bitReverse all #pragma unroll for (int row = 0; row < RowsPerKernel; ++row) { #pragma unroll for (int reg = 0; reg < ColumnsPerWarp; ++reg) { int x = getLaneId() + reg * WARP_SIZE; buffer2[threadIdx.y][x] = coeffsArray[row][reg]; } #pragma unroll for (int reg = 0; reg < ColumnsPerWarp; ++reg) { coeffsArray[row][reg] = buffer2[threadIdx.y][bits[reg]]; } } // No need to sync up here, no following smem access } } // If needed, could reintroduce the "untranspose" feature but this is // expensive for sizes > 32 #pragma unroll for (int row = 0; row < RowsPerKernel; ++row) { int y = yiter + threadIdx.y + row * blockDim.y; #pragma unroll for (int reg = 0; reg < ColumnsPerWarp; ++reg) { int x = threadIdx.x + reg * blockDim.x; if (y < dst.getSize(1) && x < dst.getSize(2)) { dst[batch][y][x] = coeffsArray[row][reg]; } } } } } template <int FFTSize, int RowsPerKernel, int BlockDimY, bool BitReverse, bool ForwardFFT> __launch_bounds__(32 * 32, 1) __global__ void decimateInFrequency2DKernel128( const DeviceTensor<Complex, 3> src, DeviceTensor<Complex, 3> dst) { decimateInFrequency2DKernel<FFTSize, RowsPerKernel, BlockDimY, BitReverse>(src, dst); } template <int FFTSize, int RowsPerKernel, int BlockDimY, bool BitReverse, bool ForwardFFT> __launch_bounds__(32 * 32, 2) __global__ void decimateInFrequency2DKernel64( const DeviceTensor<Complex, 3> src, DeviceTensor<Complex, 3> dst) { decimateInFrequency2DKernel<FFTSize, RowsPerKernel, BlockDimY, 
BitReverse>(src, dst); } } // namespace // First half of the forward 2-D transform // Only transform to be called for <= 32 template <int BatchDims> FBFFTParameters::ErrorCode fbfft2D( DeviceTensor<float, BatchDims + 2>& real, DeviceTensor<float, BatchDims + 3>& complexAsFloat, const int padL, const int padU, cudaStream_t s) { initTwiddles(); // Whatever the real input size, we can make assumptions on the // complexAsFloat size related to the fft size (because interpolation). // If buffer, it must be sized N x (N / 2 +1) assert(complexAsFloat.getSize(BatchDims + 1) <= 32 || (complexAsFloat.getSize(BatchDims + 1) == numHermitian(complexAsFloat.getSize(BatchDims)))); // If buffer, it must be sized (N / 2 + 1) x N assert(complexAsFloat.getSize(BatchDims) > 32 || (complexAsFloat.getSize(BatchDims) == numHermitian(complexAsFloat.getSize(BatchDims + 1)))); if (complexAsFloat.getSize(BatchDims) > 128) { // FBFFT only optimizes for sizes 8, 16 and 32 which are useful in FFT // based convolutions. Larger sizes are not optimized and are often slower // than cufft. return FBFFTParameters::UnsupportedSize; } { constexpr int FFTSize = 32; constexpr int BatchesPerBlock = 2; if (complexAsFloat.getSize(BatchDims + 1) == FFTSize) { CHECK_EQ(1, BatchDims); int maxBlocks = facebook::cuda::getCurrentDeviceProperties().maxGridSize[0]; int blx = 1; int bly = 1; if (real.getSize(0) / (BatchesPerBlock) > maxBlocks) { blx = maxBlocks; bly = ceil(real.getSize(0), maxBlocks * BatchesPerBlock); } else { blx = ceil(real.getSize(0), BatchesPerBlock); } CHECK_LE(1, blx); CHECK_LE(1, bly); CHECK_LE(blx, maxBlocks); CHECK_LE(bly, maxBlocks); CHECK_LE(real.getSize(0), blx * bly * BatchesPerBlock); dim3 blocks(blx, bly); dim3 threads(FFTSize, BatchesPerBlock); detail::fbfft2DVertical_32<BatchDims, BatchesPerBlock> <<<blocks, threads, 0, s>>>(real, complexAsFloat, padL, padU); if (cudaSuccess != cudaPeekAtLastError()) { return FBFFTParameters::CudaError; } return FBFFTParameters::Success; } } { constexpr int FFTSize = 16; constexpr int BatchesPerBlock = 4; if (complexAsFloat.getSize(BatchDims + 1) == FFTSize) { CHECK_EQ(1, BatchDims); int maxBlocks = facebook::cuda::getCurrentDeviceProperties().maxGridSize[0]; int blx = 1; int bly = 1; if (real.getSize(0) / (BatchesPerBlock) > maxBlocks) { blx = maxBlocks; bly = ceil(real.getSize(0), maxBlocks * BatchesPerBlock); } else { blx = ceil(real.getSize(0), BatchesPerBlock); } CHECK_LE(1, blx); CHECK_LE(1, bly); CHECK_LE(blx, maxBlocks); CHECK_LE(bly, maxBlocks); CHECK_LE(real.getSize(0), blx * bly * BatchesPerBlock); dim3 blocks(blx, bly); dim3 threads(FFTSize, BatchesPerBlock); detail::fbfft2DVertical_16<BatchDims, BatchesPerBlock> <<<blocks, threads, 0, s>>>(real, complexAsFloat, padL, padU); if (cudaSuccess != cudaPeekAtLastError()) { return FBFFTParameters::CudaError; } return FBFFTParameters::Success; } } { constexpr int FFTSize = 8; constexpr int BatchesPerBlock = 16; if (complexAsFloat.getSize(BatchDims + 1) == FFTSize) { CHECK_EQ(1, BatchDims); int maxBlocks = facebook::cuda::getCurrentDeviceProperties().maxGridSize[0]; int blx = 1; int bly = 1; if (real.getSize(0) / (BatchesPerBlock) > maxBlocks) { blx = maxBlocks; bly = ceil(real.getSize(0), maxBlocks * BatchesPerBlock); } else { blx = ceil(real.getSize(0), BatchesPerBlock); } CHECK_LE(1, blx); CHECK_LE(1, bly); CHECK_LE(blx, maxBlocks); CHECK_LE(bly, maxBlocks); CHECK_LE(real.getSize(0), blx * bly * BatchesPerBlock); dim3 blocks(blx, bly); dim3 threads(FFTSize, BatchesPerBlock); detail::fbfft2DVertical_8<BatchDims, 
BatchesPerBlock> <<<blocks, threads, 0, s>>>(real, complexAsFloat, padL, padU); if (cudaSuccess != cudaPeekAtLastError()) { return FBFFTParameters::CudaError; } return FBFFTParameters::Success; } } #define SELECT_FBFFT_2D_DIF_WARP_SINGLE( \ FFTSize, FFTS_PER_WARP, BIT_REVERSE, ROWS_PER_WARP) \ cuda_static_assert(FFTSize <= WARP_SIZE); \ if (complexAsFloat.getSize(BatchDims + 1) == FFTSize) { \ if (real.getSize(0) % (2 * FFTS_PER_WARP) == 0) { \ dim3 blocks(ceil(real.getSize(0), 2 * FFTS_PER_WARP)); \ dim3 threads(FFTSize * FFTS_PER_WARP, \ ceil(FFTSize, ROWS_PER_WARP)); \ detail::decimateInFrequencyHermitian2DWarpKernel< \ FFTSize, FFTS_PER_WARP, ROWS_PER_WARP, BIT_REVERSE> \ <<<blocks, threads, 0, s>>>(real, complexAsFloat, padL, padU); \ } else { \ dim3 blocks(ceil(complexAsFloat.getSize(0), 2)); \ dim3 threads(FFTSize, FFTSize); \ detail::decimateInFrequencyHermitian2DWarpKernel< \ FFTSize, 1, 1, BIT_REVERSE> \ <<<blocks, threads, 0, s>>>(real, complexAsFloat, padL, padU); \ } \ if (cudaSuccess != cudaPeekAtLastError()) { \ return FBFFTParameters::CudaError; \ } \ return FBFFTParameters::Success; \ } // Above warp level, buffer is needed, output must be N x (N / 2 + 1) #define SELECT_FBFFT_2D_DIF_SINGLE( \ FFTSize, ROWS_PER_KERNEL, BLOCKDIMY, BIT_REVERSE) \ if (complexAsFloat.getSize(BatchDims) == FFTSize) { \ dim3 blocks(complexAsFloat.getSize(0)); \ dim3 threads(WARP_SIZE, BLOCKDIMY); \ detail::decimateInFrequency2DKernel< \ FFTSize, ROWS_PER_KERNEL, BLOCKDIMY, BIT_REVERSE> \ <<<blocks, threads, 0, s>>>(real, complexAsFloat, padL, padU); \ if (cudaSuccess != cudaPeekAtLastError()) { \ return FBFFTParameters::CudaError; \ } \ return FBFFTParameters::Success; \ } SELECT_FBFFT_2D_DIF_WARP_SINGLE( 2, 16, true, 1); SELECT_FBFFT_2D_DIF_WARP_SINGLE( 4, 8, true, 1); SELECT_FBFFT_2D_DIF_WARP_SINGLE( 8, 4, true, 2); SELECT_FBFFT_2D_DIF_WARP_SINGLE(16, 2, true, 4); SELECT_FBFFT_2D_DIF_WARP_SINGLE(32, 1, true, 4); SELECT_FBFFT_2D_DIF_SINGLE( 64, 4, 4, true); SELECT_FBFFT_2D_DIF_SINGLE(128, 4, 4, true); #undef SELECT_FBFFT_2D_DIF_WARP_SINGLE #undef SELECT_FBFFT_2D_DIF_SINGLE return FBFFTParameters::UnsupportedSize; } // Second half of the 2-D transform for >= 64 template <int BatchDims> FBFFTParameters::ErrorCode fbfft2D( DeviceTensor<Complex, BatchDims + 2>& complexSrc, DeviceTensor<Complex, BatchDims + 2>& complexDst, cudaStream_t s) { initTwiddles(); // Input is the temporary buffer and must be sized as N x (N / 2 + 1) assert((complexSrc.getSize(BatchDims + 1) == numHermitian(complexSrc.getSize(BatchDims)))); // If we are here we must be >= 64 assert(complexSrc.getSize(BatchDims) >= 64); // Output is the real output and must be sized as the input, must enforce // this upstream if (complexSrc.getSize(BatchDims + 1) > 128) { // FBFFT only optimizes for sizes 8, 16 and 32 which are useful in FFT // based convolutions. Larger sizes are not optimized and are often slower // than cufft. 
return FBFFTParameters::UnsupportedSize; } #define SELECT_FBFFT_2D_DIF_SINGLE( \ FFTSize, ROWS_PER_KERNEL, BLOCKDIMY, BIT_REVERSE) \ if (complexSrc.getSize(BatchDims) == FFTSize) { \ dim3 blocks(complexSrc.getSize(0)); \ dim3 threads(32, BLOCKDIMY); \ detail::decimateInFrequency2DKernel##FFTSize< \ FFTSize, ROWS_PER_KERNEL, BLOCKDIMY, BIT_REVERSE, true> \ <<<blocks, threads, 0, s>>>(complexSrc, complexDst); \ if (cudaSuccess != cudaPeekAtLastError()) { \ return FBFFTParameters::CudaError; \ } \ return FBFFTParameters::Success; \ } SELECT_FBFFT_2D_DIF_SINGLE(64, 2, 17, true); SELECT_FBFFT_2D_DIF_SINGLE(128, 1, 17, true); #undef SELECT_FBFFT_2D_DIF_SINGLE return FBFFTParameters::UnsupportedSize; } } } } // namespace
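// ---------------------------------------------------------------------------
// Illustrative sketch: decimateInFrequencyHermitian2DWarpKernel above packs
// two real rows into one complex signal (batch b in the real part, batch b+1
// in the imaginary part) and then separates the two spectra via conjugate
// symmetry,
//     A[k] = (Z[k] + conj(Z[N-k])) / 2,   B[k] = (Z[k] - conj(Z[N-k])) / (2i),
// which is what the 0.5f * (re/im +/- other.re/im) lines compute.  The host
// sketch below demonstrates that split with a naive DFT; naive_dft and
// split_two_real_spectra are hypothetical helper names.
// ---------------------------------------------------------------------------
#include <complex>
#include <vector>

using cplx = std::complex<float>;

static std::vector<cplx> naive_dft(const std::vector<cplx>& x)
{
    const int n = (int)x.size();
    std::vector<cplx> X(n);
    for (int k = 0; k < n; ++k)
        for (int t = 0; t < n; ++t)
            X[k] += x[t] * std::polar(1.0f, -2.0f * 3.14159265f * k * t / n);
    return X;
}

// Given Z = DFT(a + i*b) with a, b real, recover A = DFT(a) and B = DFT(b).
static void split_two_real_spectra(const std::vector<cplx>& Z,
                                   std::vector<cplx>& A, std::vector<cplx>& B)
{
    const int n = (int)Z.size();
    A.resize(n);
    B.resize(n);
    for (int k = 0; k < n; ++k) {
        const cplx zk  = Z[k];
        const cplx zmk = std::conj(Z[(n - k) % n]);
        A[k] = 0.5f * (zk + zmk);
        B[k] = cplx(0.0f, -0.5f) * (zk - zmk);   // divide by 2i
    }
}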
#ifdef _MSC_VER #define UINT2(x,y) { x, y } #else #define UINT2(x,y) (uint2) { x, y } #endif __constant__ static __align__(16) uint32_t c_E8_bslice32[42][8] = { // Round 0 (Function0) { 0xa2ded572, 0x90d6ab81, 0x67f815df, 0xf6875a4d, 0x0a15847b, 0xc54f9f4e, 0x571523b7, 0x402bd1c3 }, { 0xe03a98ea, 0xb4960266, 0x9cfa455c, 0x8a53bbf2, 0x99d2c503, 0x1a1456b5, 0x9a99b266, 0x31a2db88 }, // 1 { 0x5c5aa303, 0x8019051c, 0xdb0e199a, 0x1d959e84, 0x0ab23f40, 0xadeb336f, 0x1044c187, 0xdccde75e }, // 2 { 0x9213ba10, 0x39812c0a, 0x416bbf02, 0x5078aa37, 0x156578dc, 0xd2bf1a3f, 0xd027bbf7, 0xd3910041 }, // 3 { 0x0d5a2d42, 0x0ba75c18, 0x907eccf6, 0xac442bc7, 0x9c9f62dd, 0xd665dfd1, 0xce97c092, 0x23fcc663 }, // 4 { 0x036c6e97, 0xbb03f1ee, 0x1ab8e09e, 0xfa618e5d, 0x7e450521, 0xb29796fd, 0xa8ec6c44, 0x97818394 }, // 5 { 0x37858e4a, 0x8173fe8a, 0x2f3003db, 0x6c69b8f8, 0x2d8d672a, 0x4672c78a, 0x956a9ffb, 0x14427fc0 }, // 6 // Round 7 (Function0) { 0x8f15f4c5, 0xb775de52, 0xc45ec7bd, 0xbc88e4ae, 0xa76f4475, 0x1e00b882, 0x80bb118f, 0xf4a3a698 }, { 0x338ff48e, 0x20edf1b6, 0x1563a3a9, 0xfde05a7c, 0x24565faa, 0x5ae9ca36, 0x89f9b7d5, 0x362c4206 }, { 0x433529ce, 0x591ff5d0, 0x3d98fe4e, 0x86814e6f, 0x74f93a53, 0x81ad9d0e, 0xa74b9a73, 0x9f5ad8af }, { 0x670605a7, 0x26077447, 0x6a6234ee, 0x3f1080c6, 0xbe280b8b, 0x6f7ea0e0, 0x2717b96e, 0x7b487ec6 }, { 0xa50a550d, 0x81727686, 0xc0a4f84a, 0xd48d6050, 0x9fe7e391, 0x415a9e7e, 0x9ef18e97, 0x62b0e5f3 }, { 0xec1f9ffc, 0xf594d74f, 0x7a205440, 0xd895fa9d, 0x001ae4e3, 0x117e2e55, 0x84c9f4ce, 0xa554c324 }, { 0x2872df5b, 0xef7c8905, 0x286efebd, 0x2ed349ee, 0xe27ff578, 0x85937e44, 0xb2c4a50f, 0x7f5928eb }, // Round 14 (Function0) { 0x37695f70, 0x04771bc7, 0x4a3124b3, 0xe720b951, 0xf128865e, 0xe843fe74, 0x65e4d61d, 0x8a87d423 }, { 0xa3e8297d, 0xfb301b1d, 0xf2947692, 0xe01bdc5b, 0x097acbdd, 0x4f4924da, 0xc1d9309b, 0xbf829cf2 }, { 0x31bae7a4, 0x32fcae3b, 0xffbf70b4, 0x39d3bb53, 0x0544320d, 0xc1c39f45, 0x48bcf8de, 0xa08b29e0 }, { 0xfd05c9e5, 0x01b771a2, 0x0f09aef7, 0x95ed44e3, 0x12347094, 0x368e3be9, 0x34f19042, 0x4a982f4f }, { 0x631d4088, 0xf14abb7e, 0x15f66ca0, 0x30c60ae2, 0x4b44c147, 0xc5b67046, 0xffaf5287, 0xe68c6ecc }, { 0x56a4d5a4, 0x45ce5773, 0x00ca4fbd, 0xadd16430, 0x4b849dda, 0x68cea6e8, 0xae183ec8, 0x67255c14 }, { 0xf28cdaa3, 0x20b2601f, 0x16e10ecb, 0x7b846fc2, 0x5806e933, 0x7facced1, 0x9a99949a, 0x1885d1a0 }, // Round 21 (Function0) { 0xa15b5932, 0x67633d9f, 0xd319dd8d, 0xba6b04e4, 0xc01c9a50, 0xab19caf6, 0x46b4a5aa, 0x7eee560b }, { 0xea79b11f, 0x5aac571d, 0x742128a9, 0x76d35075, 0x35f7bde9, 0xfec2463a, 0xee51363b, 0x01707da3 }, { 0xafc135f7, 0x15638341, 0x42d8a498, 0xa8db3aea, 0x20eced78, 0x4d3bc3fa, 0x79676b9e, 0x832c8332 }, { 0x1f3b40a7, 0x6c4e3ee7, 0xf347271c, 0xfd4f21d2, 0x34f04059, 0x398dfdb8, 0x9a762db7, 0xef5957dc }, { 0x490c9b8d, 0xd0ae3b7d, 0xdaeb492b, 0x84558d7a, 0x49d7a25b, 0xf0e9a5f5, 0x0d70f368, 0x658ef8e4 }, { 0xf4a2b8a0, 0x92946891, 0x533b1036, 0x4f88e856, 0x9e07a80c, 0x555cb05b, 0x5aec3e75, 0x4cbcbaf8 }, { 0x993bbbe3, 0x28acae64, 0x7b9487f3, 0x6db334dc, 0xd6f4da75, 0x50a5346c, 0x5d1c6b72, 0x71db28b8 }, // Round 28 (Function0) { 0xf2e261f8, 0xf1bcac1c, 0x2a518d10, 0xa23fce43, 0x3364dbe3, 0x3cd1bb67, 0xfc75dd59, 0xb043e802 }, { 0xca5b0a33, 0xc3943b92, 0x75a12988, 0x1e4d790e, 0x4d19347f, 0xd7757479, 0x5c5316b4, 0x3fafeeb6 }, { 0xf7d4a8ea, 0x5324a326, 0x21391abe, 0xd23c32ba, 0x097ef45c, 0x4a17a344, 0x5127234c, 0xadd5a66d }, { 0xa63e1db5, 0xa17cf84c, 0x08c9f2af, 0x4d608672, 0x983d5983, 0xcc3ee246, 0x563c6b91, 0xf6c76e08 }, { 0xb333982f, 0xe8b6f406, 0x5e76bcb1, 
0x36d4c1be, 0xa566d62b, 0x1582ee74, 0x2ae6c4ef, 0x6321efbc }, { 0x0d4ec1fd, 0x1614c17e, 0x69c953f4, 0x16fae006, 0xc45a7da7, 0x3daf907e, 0x26585806, 0x3f9d6328 }, { 0xe3f2c9d2, 0x16512a74, 0x0cd29b00, 0x9832e0f2, 0x30ceaa5f, 0xd830eb0d, 0x300cd4b7, 0x9af8cee3 }, // Round 35 (Function0) { 0x7b9ec54b, 0x574d239b, 0x9279f1b5, 0x316796e6, 0x6ee651ff, 0xf3a6e6cc, 0xd3688604, 0x05750a17 }, { 0xd98176b1, 0xb3cb2bf4, 0xce6c3213, 0x47154778, 0x8452173c, 0x825446ff, 0x62a205f8, 0x486a9323 }, { 0x0758df38, 0x442e7031, 0x65655e4e, 0x86ca0bd0, 0x897cfcf2, 0xa20940f0, 0x8e5086fc, 0x4e477830 }, { 0x39eea065, 0x26b29721, 0x8338f7d1, 0x6ff81301, 0x37e95ef7, 0xd1ed44a3, 0xbd3a2ce4, 0xe7de9fef }, { 0x15dfa08b, 0x7ceca7d8, 0xd9922576, 0x7eb027ab, 0xf6f7853c, 0xda7d8d53, 0xbe42dc12, 0xdea83eaa }, { 0x93ce25aa, 0xdaef5fc0, 0xd86902bd, 0xa5194a17, 0xfd43f65a, 0x33664d97, 0xf908731a, 0x6a21fd4c }, { 0x3198b435, 0xa163d09a, 0x701541db, 0x72409751, 0xbb0f1eea, 0xbf9d75f6, 0x9b54cded, 0xe26f4791 } // 42 rounds... }; __device__ __forceinline__ static void SWAP4(uint32_t *x) { #pragma nounroll // y is used as tmp register too for (uint32_t y = 0; y<4; y++, ++x) { asm("and.b32 %1, %0, 0xF0F0F0F0;" "xor.b32 %0, %0, %1;" "shr.b32 %1, %1, 4;" "vshl.u32.u32.u32.clamp.add %0, %0, 4, %1;\n\t" : "+r"(*x) : "r"(y)); } } __device__ __forceinline__ static void SWAP2(uint32_t *x) { #pragma nounroll // y is used as tmp register too for (uint32_t y = 0; y<4; y++, ++x) { asm("and.b32 %1, %0, 0xCCCCCCCC;" "xor.b32 %0, %0, %1;" "shr.b32 %1, %1, 2;" "vshl.u32.u32.u32.clamp.add %0, %0, 2, %1;\n\t" : "+r"(*x) : "r"(y)); } } __device__ __forceinline__ static void SWAP1(uint32_t *x) { #pragma nounroll // y is used as tmp register too for (uint32_t y = 0; y<4; y++, ++x) { asm("and.b32 %1, %0, 0xAAAAAAAA;" "xor.b32 %0, %0, %1;" "shr.b32 %1, %1, 1;" "vshl.u32.u32.u32.clamp.add %0, %0, 1, %1;\n\t" : "+r"(*x) : "r"(y)); } } /*swapping bits 16i||16i+1||......||16i+7 with bits 16i+8||16i+9||......||16i+15 of 32-bit x*/ //#define SWAP8(x) (x) = ((((x) & 0x00ff00ffUL) << 8) | (((x) & 0xff00ff00UL) >> 8)); #define SWAP8(x) (x) = __byte_perm(x, x, 0x2301); /*swapping bits 32i||32i+1||......||32i+15 with bits 32i+16||32i+17||......||32i+31 of 32-bit x*/ //#define SWAP16(x) (x) = ((((x) & 0x0000ffffUL) << 16) | (((x) & 0xffff0000UL) >> 16)); #define SWAP16(x) (x) = __byte_perm(x, x, 0x1032); /*The MDS transform*/ #define L(m0,m1,m2,m3,m4,m5,m6,m7) \ (m4) ^= (m1); \ (m5) ^= (m2); \ (m6) ^= (m0) ^ (m3); \ (m7) ^= (m0); \ (m0) ^= (m5); \ (m1) ^= (m6); \ (m2) ^= (m4) ^ (m7); \ (m3) ^= (m4); /*The Sbox*/ #define Sbox(m0,m1,m2,m3,cc) \ m3 = ~(m3); \ m0 ^= ((~(m2)) & (cc)); \ temp0 = (cc) ^ ((m0) & (m1));\ m0 ^= ((m2) & (m3)); \ m3 ^= ((~(m1)) & (m2)); \ m1 ^= ((m0) & (m2)); \ m2 ^= ((m0) & (~(m3))); \ m0 ^= ((m1) | (m3)); \ m3 ^= ((m1) & (m2)); \ m1 ^= (temp0 & (m0)); \ m2 ^= temp0; __device__ __forceinline__ static void Sbox_and_MDS_layer(uint32_t x[8][4], const int rnd) { uint2* cc = (uint2*)&c_E8_bslice32[rnd]; // Sbox and MDS layer #pragma unroll for (int i = 0; i < 4; i++, ++cc) { uint32_t temp0; Sbox(x[0][i], x[2][i], x[4][i], x[6][i], cc->x); Sbox(x[1][i], x[3][i], x[5][i], x[7][i], cc->y); L(x[0][i], x[2][i], x[4][i], x[6][i], x[1][i], x[3][i], x[5][i], x[7][i]); } } static __device__ __forceinline__ void RoundFunction0(uint32_t x[8][4], uint32_t roundnumber) { Sbox_and_MDS_layer(x, roundnumber); #pragma unroll 4 for (int j = 1; j < 8; j = j+2) { SWAP1(x[j]); } } static __device__ __forceinline__ void RoundFunction1(uint32_t x[8][4], uint32_t 
roundnumber) { Sbox_and_MDS_layer(x, roundnumber); #pragma unroll 4 for (int j = 1; j < 8; j = j+2) { SWAP2(x[j]); } } static __device__ __forceinline__ void RoundFunction2(uint32_t x[8][4], uint32_t roundnumber) { Sbox_and_MDS_layer(x, roundnumber); #pragma unroll 4 for (int j = 1; j < 8; j = j+2) { SWAP4(x[j]); } } static __device__ __forceinline__ void RoundFunction3(uint32_t x[8][4], uint32_t roundnumber) { Sbox_and_MDS_layer(x, roundnumber); #pragma unroll 4 for (int j = 1; j < 8; j = j+2) { #pragma unroll 4 for (int i = 0; i < 4; i++) SWAP8(x[j][i]); } } static __device__ __forceinline__ void RoundFunction4(uint32_t x[8][4], uint32_t roundnumber) { Sbox_and_MDS_layer(x, roundnumber); #pragma unroll 4 for (int j = 1; j < 8; j = j+2) { #pragma unroll 4 for (int i = 0; i < 4; i++) SWAP16(x[j][i]); } } static __device__ __forceinline__ void RoundFunction5(uint32_t x[8][4], uint32_t roundnumber) { uint32_t temp0; Sbox_and_MDS_layer(x, roundnumber); #pragma unroll 4 for (int j = 1; j < 8; j = j+2) { #pragma unroll 2 for (int i = 0; i < 4; i = i+2) { temp0 = x[j][i]; x[j][i] = x[j][i+1]; x[j][i+1] = temp0; } } } static __device__ __forceinline__ void RoundFunction6(uint32_t x[8][4], uint32_t roundnumber) { uint32_t temp0; Sbox_and_MDS_layer(x, roundnumber); #pragma unroll 4 for (int j = 1; j < 8; j = j+2) { #pragma unroll 2 for (int i = 0; i < 2; i++) { temp0 = x[j][i]; x[j][i] = x[j][i+2]; x[j][i+2] = temp0; } } } /*The bijective function E8, in bitslice form */ static __device__ __forceinline__ void E8(uint32_t x[8][4]) { /*perform 6 rounds*/ #pragma unroll 1 for (int i = 0; i < 42; i+=7) { RoundFunction0(x, i); RoundFunction1(x, i + 1); RoundFunction2(x, i + 2); RoundFunction3(x, i + 3); RoundFunction4(x, i + 4); RoundFunction5(x, i + 5); RoundFunction6(x, i + 6); } } #define U32TO64_LE(p) \ (((uint64_t)(*p)) | (((uint64_t)(*(p + 1))) << 32)) #define U64TO32_LE(p, v) \ *p = (uint32_t)((v)); *(p+1) = (uint32_t)((v) >> 32); __constant__ uint2 c_keccak_round_constants[24] = { { 0x00000001ul, 0x00000000 }, { 0x00008082ul, 0x00000000 }, { 0x0000808aul, 0x80000000 }, { 0x80008000ul, 0x80000000 }, { 0x0000808bul, 0x00000000 }, { 0x80000001ul, 0x00000000 }, { 0x80008081ul, 0x80000000 }, { 0x00008009ul, 0x80000000 }, { 0x0000008aul, 0x00000000 }, { 0x00000088ul, 0x00000000 }, { 0x80008009ul, 0x00000000 }, { 0x8000000aul, 0x00000000 }, { 0x8000808bul, 0x00000000 }, { 0x0000008bul, 0x80000000 }, { 0x00008089ul, 0x80000000 }, { 0x00008003ul, 0x80000000 }, { 0x00008002ul, 0x80000000 }, { 0x00000080ul, 0x80000000 }, { 0x0000800aul, 0x00000000 }, { 0x8000000aul, 0x80000000 }, { 0x80008081ul, 0x80000000 }, { 0x00008080ul, 0x80000000 }, { 0x80000001ul, 0x00000000 }, { 0x80008008ul, 0x80000000 } }; #define bitselect(a, b, c) ((a) ^ ((c) & ((b) ^ (a)))) __global__ __launch_bounds__(256,3) void quark_jh512Keccak512_gpu_hash_64(uint32_t threads, uint32_t startNounce, uint32_t *g_hash) { const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); if (thread < threads) { const uint32_t nounce = (startNounce + thread); const int hashPosition = nounce - startNounce; uint32_t *Hash = &g_hash[16 * hashPosition]; uint32_t x[8][4] = { { 0x964bd16f, 0x17aa003e, 0x052e6a63, 0x43d5157a }, { 0x8d5e228a, 0x0bef970c, 0x591234e9, 0x61c3b3f2 }, { 0xc1a01d89, 0x1e806f53, 0x6b05a92a, 0x806d2bea }, { 0xdbcc8e58, 0xa6ba7520, 0x763a0fa9, 0xf73bf8ba }, { 0x05e66901, 0x694ae341, 0x8e8ab546, 0x5ae66f2e }, { 0xd0a74710, 0x243c84c1, 0xb1716e3b, 0x99c15a2d }, { 0xecf657cf, 0x56f8b19d, 0x7c8806a7, 0x56b11657 }, { 0xdffcc2e3, 
0xfb1785e6, 0x78465a54, 0x4bdd8ccc } }; uint32_t msg[16]; uint28 *phash = (uint28*)Hash; uint28 *outpt = (uint28*)msg; outpt[0] = phash[0]; outpt[1] = phash[1]; #pragma unroll 16 for (int i = 0; i < 16; i++) x[i >> 2][i & 3] ^= (msg)[i]; E8(x); #pragma unroll 16 for (int i = 0; i < 16; i++) x[(16 + i) >> 2][(16 + i) & 3] ^= (msg)[i]; x[0 >> 2][0 & 3] ^= 0x80; x[15 >> 2][15 & 3] ^= 0x00020000; E8(x); x[(16 + 0) >> 2][(16 + 0) & 3] ^= 0x80; x[(16 + 15) >> 2][(16 + 15) & 3] ^= 0x00020000; uint2 s[25] { {x[4][0], x[4][1]}, {x[4][2], x[4][3]}, {x[5][0], x[5][1]}, {x[5][2], x[5][3]}, {x[6][0], x[6][1]}, {x[6][2], x[6][3]}, {x[7][0], x[7][1]}, {x[7][2], x[7][3]}, {1, 0x80000000}, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } }; uint2 bc[5], tmpxor[5], tmp1, tmp2; tmpxor[0] = s[0] ^ s[5]; tmpxor[1] = s[1] ^ s[6]; tmpxor[2] = s[2] ^ s[7]; tmpxor[3] = s[3] ^ s[8]; tmpxor[4] = s[4]; bc[0] = tmpxor[0] ^ ROL2(tmpxor[2], 1); bc[1] = tmpxor[1] ^ ROL2(tmpxor[3], 1); bc[2] = tmpxor[2] ^ ROL2(tmpxor[4], 1); bc[3] = tmpxor[3] ^ ROL2(tmpxor[0], 1); bc[4] = tmpxor[4] ^ ROL2(tmpxor[1], 1); tmp1 = s[1] ^ bc[0]; s[0] = s[0] ^ bc[4]; s[1] = ROL2(s[6] ^ bc[0], 44); s[6] = ROL2(bc[3], 20); s[9] = ROL2(bc[1], 61); s[22] = ROL2(bc[3], 39); s[14] = ROL2(bc[4], 18); s[20] = ROL2(s[2] ^ bc[1], 62); s[2] = ROL2(bc[1], 43); s[12] = ROL2(bc[2], 25); s[13] = ROL8(bc[3]); s[19] = ROR8(bc[2]); s[23] = ROL2(bc[4], 41); s[15] = ROL2(s[4] ^ bc[3], 27); s[4] = ROL2(bc[3], 14); s[24] = ROL2(bc[0], 2); s[21] = ROL2(s[8] ^ bc[2], 55); s[8] = ROL2(bc[0], 45); s[16] = ROL2(s[5] ^ bc[4], 36); s[5] = ROL2(s[3] ^ bc[2], 28); s[3] = ROL2(bc[2], 21); s[18] = ROL2(bc[1], 15); s[17] = ROL2(bc[0], 10); s[11] = ROL2(s[7] ^ bc[1], 6); s[7] = ROL2(bc[4], 3); s[10] = ROL2(tmp1, 1); tmp1 = s[0]; tmp2 = s[1]; s[0] = bitselect(s[0] ^ s[2], s[0], s[1]); s[1] = bitselect(s[1] ^ s[3], s[1], s[2]); s[2] = bitselect(s[2] ^ s[4], s[2], s[3]); s[3] = bitselect(s[3] ^ tmp1, s[3], s[4]); s[4] = bitselect(s[4] ^ tmp2, s[4], tmp1); s[0].x ^= 1; tmp1 = s[5]; tmp2 = s[6]; s[5] = bitselect(s[5] ^ s[7], s[5], s[6]); s[6] = bitselect(s[6] ^ s[8], s[6], s[7]); s[7] = bitselect(s[7] ^ s[9], s[7], s[8]); s[8] = bitselect(s[8] ^ tmp1, s[8], s[9]); s[9] = bitselect(s[9] ^ tmp2, s[9], tmp1); tmp1 = s[10]; tmp2 = s[11]; s[10] = bitselect(s[10] ^ s[12], s[10], s[11]); s[11] = bitselect(s[11] ^ s[13], s[11], s[12]); s[12] = bitselect(s[12] ^ s[14], s[12], s[13]); s[13] = bitselect(s[13] ^ tmp1, s[13], s[14]); s[14] = bitselect(s[14] ^ tmp2, s[14], tmp1); tmp1 = s[15]; tmp2 = s[16]; s[15] = bitselect(s[15] ^ s[17], s[15], s[16]); s[16] = bitselect(s[16] ^ s[18], s[16], s[17]); s[17] = bitselect(s[17] ^ s[19], s[17], s[18]); s[18] = bitselect(s[18] ^ tmp1, s[18], s[19]); s[19] = bitselect(s[19] ^ tmp2, s[19], tmp1); tmp1 = s[20]; tmp2 = s[21]; s[20] = bitselect(s[20] ^ s[22], s[20], s[21]); s[21] = bitselect(s[21] ^ s[23], s[21], s[22]); s[22] = bitselect(s[22] ^ s[24], s[22], s[23]); s[23] = bitselect(s[23] ^ tmp1, s[23], s[24]); s[24] = bitselect(s[24] ^ tmp2, s[24], tmp1); #pragma nounroll for (int i = 1; i < 23; ++i) { #pragma unroll for (uint32_t x = 0; x < 5; x++) tmpxor[x] = s[x] ^ s[x + 5] ^ s[x + 10] ^ s[x + 15] ^ s[x + 20]; bc[0] = tmpxor[0] ^ ROL2(tmpxor[2], 1); bc[1] = tmpxor[1] ^ ROL2(tmpxor[3], 1); bc[2] = tmpxor[2] ^ ROL2(tmpxor[4], 1); bc[3] = tmpxor[3] ^ ROL2(tmpxor[0], 1); bc[4] = tmpxor[4] ^ ROL2(tmpxor[1], 1); tmp1 = s[1] ^ bc[0]; s[0] ^= bc[4]; 
s[1] = ROL2(s[6] ^ bc[0], 44); s[6] = ROL2(s[9] ^ bc[3], 20); s[9] = ROL2(s[22] ^ bc[1], 61); s[22] = ROL2(s[14] ^ bc[3], 39); s[14] = ROL2(s[20] ^ bc[4], 18); s[20] = ROL2(s[2] ^ bc[1], 62); s[2] = ROL2(s[12] ^ bc[1], 43); s[12] = ROL2(s[13] ^ bc[2], 25); s[13] = ROL8(s[19] ^ bc[3]); s[19] = ROR8(s[23] ^ bc[2]); s[23] = ROL2(s[15] ^ bc[4], 41); s[15] = ROL2(s[4] ^ bc[3], 27); s[4] = ROL2(s[24] ^ bc[3], 14); s[24] = ROL2(s[21] ^ bc[0], 2); s[21] = ROL2(s[8] ^ bc[2], 55); s[8] = ROL2(s[16] ^ bc[0], 45); s[16] = ROL2(s[5] ^ bc[4], 36); s[5] = ROL2(s[3] ^ bc[2], 28); s[3] = ROL2(s[18] ^ bc[2], 21); s[18] = ROL2(s[17] ^ bc[1], 15); s[17] = ROL2(s[11] ^ bc[0], 10); s[11] = ROL2(s[7] ^ bc[1], 6); s[7] = ROL2(s[10] ^ bc[4], 3); s[10] = ROL2(tmp1, 1); tmp1 = s[0]; tmp2 = s[1]; s[0] = bitselect(s[0] ^ s[2], s[0], s[1]); s[0].x ^= c_keccak_round_constants[i].x; s[0].y ^= c_keccak_round_constants[i].y; s[1] = bitselect(s[1] ^ s[3], s[1], s[2]); s[2] = bitselect(s[2] ^ s[4], s[2], s[3]); s[3] = bitselect(s[3] ^ tmp1, s[3], s[4]); s[4] = bitselect(s[4] ^ tmp2, s[4], tmp1); tmp1 = s[5]; tmp2 = s[6]; s[5] = bitselect(s[5] ^ s[7], s[5], s[6]); s[6] = bitselect(s[6] ^ s[8], s[6], s[7]); s[7] = bitselect(s[7] ^ s[9], s[7], s[8]); s[8] = bitselect(s[8] ^ tmp1, s[8], s[9]); s[9] = bitselect(s[9] ^ tmp2, s[9], tmp1); tmp1 = s[10]; tmp2 = s[11]; s[10] = bitselect(s[10] ^ s[12], s[10], s[11]); s[11] = bitselect(s[11] ^ s[13], s[11], s[12]); s[12] = bitselect(s[12] ^ s[14], s[12], s[13]); s[13] = bitselect(s[13] ^ tmp1, s[13], s[14]); s[14] = bitselect(s[14] ^ tmp2, s[14], tmp1); tmp1 = s[15]; tmp2 = s[16]; s[15] = bitselect(s[15] ^ s[17], s[15], s[16]); s[16] = bitselect(s[16] ^ s[18], s[16], s[17]); s[17] = bitselect(s[17] ^ s[19], s[17], s[18]); s[18] = bitselect(s[18] ^ tmp1, s[18], s[19]); s[19] = bitselect(s[19] ^ tmp2, s[19], tmp1); tmp1 = s[20]; tmp2 = s[21]; s[20] = bitselect(s[20] ^ s[22], s[20], s[21]); s[21] = bitselect(s[21] ^ s[23], s[21], s[22]); s[22] = bitselect(s[22] ^ s[24], s[22], s[23]); s[23] = bitselect(s[23] ^ tmp1, s[23], s[24]); s[24] = bitselect(s[24] ^ tmp2, s[24], tmp1); } #pragma unroll for (uint32_t x = 0; x < 5; x++) tmpxor[x] = s[x] ^ s[x + 5] ^ s[x + 10] ^ s[x + 15] ^ s[x + 20]; bc[0] = tmpxor[0] ^ ROL2(tmpxor[2], 1); bc[1] = tmpxor[1] ^ ROL2(tmpxor[3], 1); bc[2] = tmpxor[2] ^ ROL2(tmpxor[4], 1); bc[3] = tmpxor[3] ^ ROL2(tmpxor[0], 1); bc[4] = tmpxor[4] ^ ROL2(tmpxor[1], 1); tmp1 = s[1] ^ bc[0]; s[0] ^= bc[4]; s[1] = ROL2(s[6] ^ bc[0], 44); s[6] = ROL2(s[9] ^ bc[3], 20); s[9] = ROL2(s[22] ^ bc[1], 61); s[22] = ROL2(s[14] ^ bc[3], 39); s[14] = ROL2(s[20] ^ bc[4], 18); s[20] = ROL2(s[2] ^ bc[1], 62); s[2] = ROL2(s[12] ^ bc[1], 43); s[12] = ROL2(s[13] ^ bc[2], 25); s[13] = ROL8(s[19] ^ bc[3]); s[19] = ROR8(s[23] ^ bc[2]); s[23] = ROL2(s[15] ^ bc[4], 41); s[15] = ROL2(s[4] ^ bc[3], 27); s[4] = ROL2(s[24] ^ bc[3], 14); s[24] = ROL2(s[21] ^ bc[0], 2); s[21] = ROL2(s[8] ^ bc[2], 55); s[8] = ROL2(s[16] ^ bc[0], 45); s[16] = ROL2(s[5] ^ bc[4], 36); s[5] = ROL2(s[3] ^ bc[2], 28); s[3] = ROL2(s[18] ^ bc[2], 21); s[18] = ROL2(s[17] ^ bc[1], 15); s[17] = ROL2(s[11] ^ bc[0], 10); s[11] = ROL2(s[7] ^ bc[1], 6); s[7] = ROL2(s[10] ^ bc[4], 3); s[10] = ROL2(tmp1, 1); tmp1 = s[0]; tmp2 = s[1]; s[0] = bitselect(s[0] ^ s[2], s[0], s[1]); s[0].x ^= 0x80008008ul; s[0].y ^= 0x80000000; s[1] = bitselect(s[1] ^ s[3], s[1], s[2]); s[2] = bitselect(s[2] ^ s[4], s[2], s[3]); s[3] = bitselect(s[3] ^ tmp1, s[3], s[4]); s[4] = bitselect(s[4] ^ tmp2, s[4], tmp1); // tmp1 = s[5]; tmp2 = s[6]; s[5] = 
		s[5] = bitselect(s[5] ^ s[7], s[5], s[6]);
		s[6] = bitselect(s[6] ^ s[8], s[6], s[7]);
		s[7] = bitselect(s[7] ^ s[9], s[7], s[8]);
		// lanes s[8..24] are not needed for the 64-byte output, so their chi step is skipped
		// s[8] = bitselect(s[8] ^ tmp1, s[8], s[9]);
		//s[9] = bitselect(s[9] ^ tmp2, s[9], tmp1);
		// tmp1 = s[10]; tmp2 = s[11]; s[10] = bitselect(s[10] ^ s[12], s[10], s[11]); s[11] = bitselect(s[11] ^ s[13], s[11], s[12]); s[12] = bitselect(s[12] ^ s[14], s[12], s[13]); s[13] = bitselect(s[13] ^ tmp1, s[13], s[14]); s[14] = bitselect(s[14] ^ tmp2, s[14], tmp1);
		// tmp1 = s[15]; tmp2 = s[16]; s[15] = bitselect(s[15] ^ s[17], s[15], s[16]); s[16] = bitselect(s[16] ^ s[18], s[16], s[17]); s[17] = bitselect(s[17] ^ s[19], s[17], s[18]); s[18] = bitselect(s[18] ^ tmp1, s[18], s[19]); s[19] = bitselect(s[19] ^ tmp2, s[19], tmp1);
		// tmp1 = s[20]; tmp2 = s[21]; s[20] = bitselect(s[20] ^ s[22], s[20], s[21]); s[21] = bitselect(s[21] ^ s[23], s[21], s[22]); s[22] = bitselect(s[22] ^ s[24], s[22], s[23]); s[23] = bitselect(s[23] ^ tmp1, s[23], s[24]); s[24] = bitselect(s[24] ^ tmp2, s[24], tmp1);

		uint2 *outputhash = (uint2 *)Hash;
		outputhash[0] = s[0];
		outputhash[1] = s[1];
		outputhash[2] = s[2];
		outputhash[3] = s[3];
		outputhash[4] = s[4];
		outputhash[5] = s[5];
		outputhash[6] = s[6];
		outputhash[7] = s[7];
	}
}

__host__
void cuda_jh512Keccak512_cpu_hash_64(uint32_t threads, uint32_t startNounce, uint32_t *d_hash)
{
	const uint32_t threadsperblock = 256;

	// compute how many thread blocks we need
	dim3 grid((threads + threadsperblock - 1) / threadsperblock);
	dim3 block(threadsperblock);

	quark_jh512Keccak512_gpu_hash_64 <<<grid, block>>> (threads, startNounce, d_hash);
}
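/*
 * Usage sketch (added for illustration; not part of the original miner code).
 * It shows how the host wrapper above is typically driven: each of `threads`
 * work items reads and overwrites one 64-byte hash, stored as 16 uint32 words
 * at g_hash[16 * thread], which is the layout the kernel above assumes.
 * The helper name and the dummy input values below are assumptions.
 */
#include <cuda_runtime.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void run_jh512keccak512_batch(uint32_t threads, uint32_t startNounce)
{
	const size_t words = (size_t)threads * 16;       // 16 uint32 = 64 bytes per hash
	uint32_t *h_hash = (uint32_t*) malloc(words * sizeof(uint32_t));
	for (size_t i = 0; i < words; i++)
		h_hash[i] = (uint32_t)i;                     // dummy input hashes

	uint32_t *d_hash = NULL;
	cudaMalloc((void**)&d_hash, words * sizeof(uint32_t));
	cudaMemcpy(d_hash, h_hash, words * sizeof(uint32_t), cudaMemcpyHostToDevice);

	// JH-512 followed by Keccak-512 is applied in place on every 64-byte hash
	cuda_jh512Keccak512_cpu_hash_64(threads, startNounce, d_hash);

	cudaMemcpy(h_hash, d_hash, words * sizeof(uint32_t), cudaMemcpyDeviceToHost);
	printf("first output word: %08x\n", h_hash[0]);

	cudaFree(d_hash);
	free(h_hash);
}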
// ============================================================================
// [the_stack: next source file]
// ============================================================================
* * This example demostrates how to use textures bound to pitch linear memory. * It performs a shift of matrix elements using wrap addressing mode (aka * periodic boundary conditions) on two arrays, a pitch linear and a CUDA array, * in order to highlight the differences in using each. * * Textures binding to pitch linear memory is a new feature in CUDA 2.2, * and allows use of texture features such as wrap addressing mode and * filtering which are not possible with textures bound to regular linear memory */ #include <shrUtils.h> // includes, system #include <stdio.h> // includes, project #include <sdkHelper.h> // helper for shared that are common to CUDA SDK samples #include <shrQATest.h> // This is for automated testing output (--qatest) // includes CUDA #include <cutil.h> #include <cuda_runtime.h> //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors( cudaError err, const char *file, const int line ) { if( cudaSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError( const char *errorMessage, const char *file, const int line ) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n", file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // General GPU Device CUDA Initialization int gpuDeviceInit(int devID) { int deviceCount; checkCudaErrors(cudaGetDeviceCount(&deviceCount)); if (deviceCount == 0) { fprintf(stderr, "gpuDeviceInit() CUDA error: no devices supporting CUDA.\n"); exit(-1); } if (devID < 0) devID = 0; if (devID > deviceCount-1) { fprintf(stderr, "\n"); fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", deviceCount); fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid GPU device. 
<<\n", devID); fprintf(stderr, "\n"); return -devID; } cudaDeviceProp deviceProp; checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) ); if (deviceProp.major < 1) { fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n"); exit(-1); \ } checkCudaErrors( cudaSetDevice(devID) ); printf("> gpuDeviceInit() CUDA device [%d]: %s\n", devID, deviceProp.name); return devID; } // This function returns the best GPU (with maximum GFLOPS) int gpuGetMaxGflopsDeviceId() { int current_device = 0, sm_per_multiproc = 0; int max_compute_perf = 0, max_perf_device = 0; int device_count = 0, best_SM_arch = 0; cudaDeviceProp deviceProp; cudaGetDeviceCount( &device_count ); // Find the best major SM Architecture GPU device while ( current_device < device_count ) { cudaGetDeviceProperties( &deviceProp, current_device ); if (deviceProp.major > 0 && deviceProp.major < 9999) { best_SM_arch = MAX(best_SM_arch, deviceProp.major); } current_device++; } // Find the best CUDA capable GPU device current_device = 0; while( current_device < device_count ) { cudaGetDeviceProperties( &deviceProp, current_device ); if (deviceProp.major == 9999 && deviceProp.minor == 9999) { sm_per_multiproc = 1; } else { sm_per_multiproc = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor); } int compute_perf = deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate; if( compute_perf > max_compute_perf ) { // If we find GPU with SM major > 2, search only these if ( best_SM_arch > 2 ) { // If our device==dest_SM_arch, choose this, or else pass if (deviceProp.major == best_SM_arch) { max_compute_perf = compute_perf; max_perf_device = current_device; } } else { max_compute_perf = compute_perf; max_perf_device = current_device; } } ++current_device; } return max_perf_device; } // Initialization code to find the best CUDA Device int findCudaDevice(int argc, const char **argv) { cudaDeviceProp deviceProp; int devID = 0; // If the command-line has a device number specified, use it if (checkCmdLineFlag(argc, argv, "device")) { devID = getCmdLineArgumentInt(argc, argv, "device="); if (devID < 0) { printf("Invalid command line parameters\n"); exit(-1); } else { devID = gpuDeviceInit(devID); if (devID < 0) { printf("exiting...\n"); shrQAFinishExit(argc, (const char **)argv, QA_FAILED); exit(-1); } } } else { // Otherwise pick the device with highest Gflops/s devID = gpuGetMaxGflopsDeviceId(); checkCudaErrors( cudaSetDevice( devID ) ); checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) ); printf("> Using CUDA device [%d]: %s\n", devID, deviceProp.name); } return devID; } // end of CUDA Helper Functions #define NUM_REPS 100 // number of repetitions performed #define TILE_DIM 16 // tile/block size // Texture references texture<float, 2, cudaReadModeElementType> texRefPL; texture<float, 2, cudaReadModeElementType> texRefArray; // ------- // kernels // ------- // // NB: (1) The second argument "pitch" is in elements, not bytes // (2) normalized coordinates are used (required for wrap address mode) __global__ void shiftPitchLinear(float* odata, int pitch, int width, int height, int shiftX, int shiftY) { int xid = blockIdx.x * blockDim.x + threadIdx.x; int yid = blockIdx.y * blockDim.y + threadIdx.y; odata[yid*pitch+xid] = tex2D(texRefPL, (xid + shiftX)/(float)width, (yid + shiftY)/(float)height); } __global__ void shiftArray(float* odata, int pitch, int width, int height, int shiftX, int shiftY) { int xid = blockIdx.x * blockDim.x + threadIdx.x; int yid = blockIdx.y * blockDim.y + threadIdx.y; 
odata[yid*pitch+xid] = tex2D(texRefArray, (xid + shiftX)/(float)width, (yid + shiftY)/(float)height); } // ---- // main // ---- int main( int argc, char** argv) { // set array size const int nx = 2048; const int ny = 2048; // shifts applied to x and y data const int x_shift = 5; const int y_shift = 7; shrQAStart(argc, argv); if ((nx%TILE_DIM != 0) || (ny%TILE_DIM != 0)) { printf("nx and ny must be multiples of TILE_DIM\n"); shrQAFinishExit(argc, (const char **)argv, QA_WAIVED); } // execution configuration parameters dim3 grid(nx/TILE_DIM, ny/TILE_DIM), threads(TILE_DIM, TILE_DIM); // This will pick the best possible CUDA capable device int devID = findCudaDevice((const int)argc, (const char **)argv); // CUDA events for timing cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // ---------------------------------- // Host allocation and initialization // ---------------------------------- float *h_idata = (float*) malloc(sizeof(float)*nx*ny); float *h_odata = (float*) malloc(sizeof(float)*nx*ny); float *gold = (float*) malloc(sizeof(float)*nx*ny); for(int i = 0; i < nx*ny; ++i) h_idata[i] = (float) i; // ------------------------ // Device memory allocation // ------------------------ // Pitch linear input data float *d_idataPL; size_t d_pitchBytes; checkCudaErrors(cudaMallocPitch((void**) &d_idataPL, &d_pitchBytes, nx*sizeof(float), ny)); // Array input data cudaArray *d_idataArray; cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); checkCudaErrors(cudaMallocArray(&d_idataArray, &channelDesc, nx, ny)); // Pitch linear output data float *d_odata; checkCudaErrors(cudaMallocPitch( (void**) &d_odata, &d_pitchBytes, nx*sizeof(float), ny)); // ------------------------ // copy host data to device // ------------------------ // Pitch linear size_t h_pitchBytes = nx*sizeof(float); checkCudaErrors(cudaMemcpy2D(d_idataPL, d_pitchBytes, h_idata, h_pitchBytes, nx*sizeof(float), ny, cudaMemcpyHostToDevice)); // Array checkCudaErrors(cudaMemcpyToArray(d_idataArray, 0, 0, h_idata, nx*ny*sizeof(float), cudaMemcpyHostToDevice)); // ---------------------- // Bind texture to memory // ---------------------- // Pitch linear texRefPL.normalized = 1; texRefPL.filterMode = cudaFilterModePoint; texRefPL.addressMode[0] = cudaAddressModeWrap; texRefPL.addressMode[1] = cudaAddressModeWrap; checkCudaErrors(cudaBindTexture2D(0, &texRefPL, d_idataPL, &channelDesc, nx, ny, d_pitchBytes)); // Array texRefArray.normalized = 1; texRefArray.filterMode = cudaFilterModePoint; texRefArray.addressMode[0] = cudaAddressModeWrap; texRefArray.addressMode[1] = cudaAddressModeWrap; checkCudaErrors(cudaBindTextureToArray(texRefArray, d_idataArray, channelDesc)); // --------------------- // reference calculation // --------------------- for (int j = 0; j < ny; j++) { int jshift = (j+y_shift)%ny; for (int i = 0; i < nx; i++) { int ishift = (i+x_shift)%nx; gold[j*nx + i] = h_idata[jshift*nx + ishift]; } } // ---------------- // shiftPitchLinear // ---------------- checkCudaErrors(cudaMemset2D(d_odata, d_pitchBytes, 0, nx*sizeof(float), ny)); checkCudaErrors(cudaEventRecord(start, 0)); for (int i=0; i < NUM_REPS; i++) { shiftPitchLinear<<<grid, threads>>>(d_odata, (int)(d_pitchBytes/sizeof(float)), nx, ny, x_shift, y_shift); } checkCudaErrors(cudaEventRecord(stop, 0)); checkCudaErrors(cudaEventSynchronize(stop)); float timePL; checkCudaErrors(cudaEventElapsedTime(&timePL, start, stop)); // check results checkCudaErrors(cudaMemcpy2D(h_odata, h_pitchBytes, d_odata, d_pitchBytes, nx*sizeof(float), ny, 
cudaMemcpyDeviceToHost));

    bool res = compareData(gold, h_odata, nx*ny, 0.0f, 0.15f);
    bool success = true;

    if (res == false) {
        printf("*** shiftPitchLinear failed ***\n");
        success = false;
    }

    // ----------
    // shiftArray
    // ----------

    checkCudaErrors(cudaMemset2D(d_odata, d_pitchBytes, 0, nx*sizeof(float), ny));
    checkCudaErrors(cudaEventRecord(start, 0));

    for (int i=0; i < NUM_REPS; i++) {
        shiftArray<<<grid, threads>>>(d_odata, (int)(d_pitchBytes/sizeof(float)),
                                      nx, ny, x_shift, y_shift);
    }

    checkCudaErrors(cudaEventRecord(stop, 0));
    checkCudaErrors(cudaEventSynchronize(stop));
    float timeArray;
    checkCudaErrors(cudaEventElapsedTime(&timeArray, start, stop));

    // check results
    checkCudaErrors(cudaMemcpy2D(h_odata, h_pitchBytes, d_odata, d_pitchBytes,
                                 nx*sizeof(float), ny, cudaMemcpyDeviceToHost));
    res = compareData(gold, h_odata, nx*ny, 0.0f, 0.15f);

    if (res == false) {
        printf("*** shiftArray failed ***\n");
        success = false;
    }

    // report bandwidth and fetch rate
    float bandwidthPL    = 2.f*1000.f*nx*ny*sizeof(float)/(1.e+9f)/(timePL/NUM_REPS);
    float bandwidthArray = 2.f*1000.f*nx*ny*sizeof(float)/(1.e+9f)/(timeArray/NUM_REPS);

    printf("\nBandwidth (GB/s) for pitch linear: %.2e; for array: %.2e\n",
           bandwidthPL, bandwidthArray);

    float fetchRatePL    = nx*ny/1.e+6f/(timePL/(1000.0f*NUM_REPS));
    float fetchRateArray = nx*ny/1.e+6f/(timeArray/(1000.0f*NUM_REPS));

    printf("\nTexture fetch rate (Mpix/s) for pitch linear: %.2e; for array: %.2e\n\n",
           fetchRatePL, fetchRateArray);

    // cleanup
    free(h_idata);
    free(h_odata);
    free(gold);

    checkCudaErrors(cudaUnbindTexture(texRefPL));
    checkCudaErrors(cudaUnbindTexture(texRefArray));
    checkCudaErrors(cudaFree(d_idataPL));
    checkCudaErrors(cudaFreeArray(d_idataArray));
    checkCudaErrors(cudaFree(d_odata));
    checkCudaErrors(cudaEventDestroy(start));
    checkCudaErrors(cudaEventDestroy(stop));

    cudaDeviceReset();
    shrQAFinishExit(argc, (const char **)argv, success ? QA_PASSED : QA_FAILED);
}
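/*
 * Companion sketch (added for illustration; not part of the original SDK
 * sample): the same wrap-addressed, normalized-coordinate fetch expressed with
 * the texture-object API that superseded the legacy texture references used
 * above.  The names shiftPitchLinearObj and makeShiftTexture are assumptions;
 * d_idataPL, d_pitchBytes, nx and ny are expected to be set up exactly as in
 * main() above.
 */
__global__ void shiftPitchLinearObj(float* odata, int pitch, int width, int height,
                                    int shiftX, int shiftY, cudaTextureObject_t tex)
{
    int xid = blockIdx.x * blockDim.x + threadIdx.x;
    int yid = blockIdx.y * blockDim.y + threadIdx.y;

    // normalized coordinates + wrap addressing give the periodic shift
    odata[yid * pitch + xid] = tex2D<float>(tex, (xid + shiftX) / (float)width,
                                                 (yid + shiftY) / (float)height);
}

static cudaTextureObject_t makeShiftTexture(float* d_idataPL, size_t d_pitchBytes,
                                            int nx, int ny)
{
    // describe the pitch-linear memory backing the texture
    cudaResourceDesc resDesc = {};
    resDesc.resType                  = cudaResourceTypePitch2D;
    resDesc.res.pitch2D.devPtr       = d_idataPL;
    resDesc.res.pitch2D.desc         = cudaCreateChannelDesc<float>();
    resDesc.res.pitch2D.width        = nx;
    resDesc.res.pitch2D.height       = ny;
    resDesc.res.pitch2D.pitchInBytes = d_pitchBytes;

    // same sampling state as texRefPL above: point filtering, wrap, normalized coords
    cudaTextureDesc texDesc = {};
    texDesc.addressMode[0]   = cudaAddressModeWrap;
    texDesc.addressMode[1]   = cudaAddressModeWrap;
    texDesc.filterMode       = cudaFilterModePoint;
    texDesc.readMode         = cudaReadModeElementType;
    texDesc.normalizedCoords = 1;

    cudaTextureObject_t texObj = 0;
    cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL);
    return texObj;
}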
// ============================================================================
// [the_stack: next source file]
// ============================================================================
#define FMI_ALIGNMENT 4u namespace nvbio { namespace io { ///@addtogroup IO ///@{ ///@addtogroup FMIndexIO ///@{ namespace { // anonymous namespace ///@addtogroup FMIndexIODetail ///@{ template <typename T> uint64 block_fread(T* dst, const uint64 n, FILE* file) { #if defined(WIN32) // use blocked reads on Windows, which seems to otherwise become less responsive while reading. const uint64 BATCH_SIZE = 16*1024*1024; for (uint64 batch_begin = 0; batch_begin < n; batch_begin += BATCH_SIZE) { const uint64 batch_end = nvbio::min( batch_begin + BATCH_SIZE, n ); const uint64 batch_size = batch_end - batch_begin; const uint64 n_words = fread( dst + batch_begin, sizeof(T), batch_size, file ); if (n_words != batch_size) return batch_begin + n_words; } return n; #else return fread( dst, sizeof(T), n, file ); #endif } struct file_mismatch {}; struct VectorAllocator { VectorAllocator(nvbio::vector<host_tag,uint32>& vec) : m_vec( vec ) {} uint32* alloc(const uint32 words) { m_vec.resize( words ); return raw_pointer( m_vec ); } nvbio::vector<host_tag,uint32>& m_vec; }; struct MMapAllocator { MMapAllocator( const char* name, ServerMappedFile& mmap) : m_name( name ), m_mmap( mmap ) {} uint32* alloc(const uint32 words) { return (uint32*)m_mmap.init( m_name, words * sizeof(uint32), NULL ); } const char* m_name; ServerMappedFile& m_mmap; }; template <typename Allocator> uint32* load_bwt( const char* bwt_file_name, Allocator& allocator, uint32& seq_length, uint32& seq_words, uint32& primary) { FILE* bwt_file = fopen( bwt_file_name, "rb" ); if (bwt_file == NULL) { log_warning(stderr, "unable to open bwt \"%s\"\n", bwt_file_name); return 0; } uint32 field; if (!fread( &field, sizeof(field), 1, bwt_file )) { log_error(stderr, "error: failed reading bwt \"%s\"\n", bwt_file_name); return 0; } primary = uint32(field); // discard frequencies seq_length = 0; for (uint32 i = 0; i < 4; ++i) { if (!fread( &field, sizeof(field), 1, bwt_file )) { log_error(stderr, "error: failed reading bwt \"%s\"\n", bwt_file_name); return 0; } // the sum of the frequencies gives the total length if (i == 3) seq_length = uint32(field); } // compute the number of words needed to store the sequence seq_words = util::divide_ri( seq_length, FMIndexDataCore::BWT_SYMBOLS_PER_WORD ); // pad the size to a multiple of 4 seq_words = align<4>( seq_words ); // allocate the stream storage uint32* bwt_stream = allocator.alloc( seq_words ); const uint32 n_words = (uint32)block_fread( bwt_stream, seq_words, bwt_file ); if (align<4>( n_words ) != seq_words) { log_error(stderr, "error: failed reading bwt \"%s\"\n", bwt_file_name); return 0; } // initialize the slack due to sequence padding for (uint32 i = n_words; i < seq_words; ++i) bwt_stream[i] = 0u; fclose( bwt_file ); return bwt_stream; } template <typename Allocator> uint32* load_sa( const char* sa_file_name, Allocator& allocator, const uint32 seq_length, const uint32 primary, const uint32 SA_INT) { uint32* ssa = NULL; FILE* sa_file = fopen( sa_file_name, "rb" ); if (sa_file != NULL) { log_info(stderr, "reading SSA... 
started\n"); try { uint32 field; if (!fread( &field, sizeof(field), 1, sa_file )) { log_error(stderr, "error: failed reading SSA \"%s\"\n", sa_file_name); return 0; } if (field != primary) { log_error(stderr, "SA file mismatch \"%s\"\n expected primary %u, got %u\n", sa_file_name, primary, field); throw file_mismatch(); } for (uint32 i = 0; i < 4; ++i) { if (!fread( &field, sizeof(field), 1, sa_file )) { log_error(stderr, "error: failed reading SSA \"%s\"\n", sa_file_name); return 0; } } if (!fread( &field, sizeof(field), 1, sa_file )) { log_error(stderr, "error: failed reading SSA \"%s\"\n", sa_file_name); return 0; } if (field != SA_INT) { log_error(stderr, "unsupported SA interval (found %u, expected %u)\n", field, SA_INT); throw file_mismatch(); } if(!fread( &field, sizeof(field), 1, sa_file )) { log_error(stderr, "error: failed reading SSA \"%s\"\n", sa_file_name); return 0; } if (field != seq_length) { log_error(stderr, "SA file mismatch \"%s\"\n expected length %u, got %u", sa_file_name, seq_length, field); throw file_mismatch(); } const uint32 sa_size = (seq_length + SA_INT) / SA_INT; ssa = allocator.alloc( sa_size ); ssa[0] = uint32(-1); if (!fread( &ssa[1], sizeof(uint32), sa_size-1, sa_file )) { log_error(stderr, "error: failed reading SSA \"%s\"\n", sa_file_name); return 0; } } catch (...) { // just skip the ssa file } fclose( sa_file ); log_info(stderr, "reading SSA... done\n"); } return ssa; } template <typename Allocator> uint32* build_occurrence_table( const uint32 seq_length, const uint32 seq_words, const nvbio::vector<host_tag,uint32>& bwt_vec, Allocator& allocator, uint32& bwt_occ_words, uint32* L2) { typedef PackedStream<const uint32*,uint8,FMIndexDataCore::BWT_BITS,FMIndexDataCore::BWT_BIG_ENDIAN> stream_type; // build a bwt stream stream_type bwt( raw_pointer( bwt_vec ) ); // compute the number of words needed to store the occurrences const uint32 occ_words = util::divide_ri( seq_length, FMIndexDataCore::OCC_INT ) * 4; // build the occurrence table nvbio::vector<host_tag,uint32> occ_vec( occ_words, 0u ); uint32 cnt[4]; nvbio::build_occurrence_table<FMIndexDataCore::BWT_BITS,FMIndexDataCore::OCC_INT>( bwt, bwt + seq_length, raw_pointer( occ_vec ), cnt ); if (occ_words != seq_words) { log_error(stderr, "error: bwt size != occurrence table size!\n words: %u, %u\n", seq_words, occ_words); return 0; } if ((seq_words % 4u) != 0) { log_error(stderr, "error: occ size not a multiple of 4\n words: %u\n", seq_words); return 0; } if ((occ_words % 4u) != 0) { log_error(stderr, "error: occ size not a multiple of 4\n words: %u\n", occ_words); return 0; } // fuse the BWT & OCC vectors bwt_occ_words = seq_words + occ_words; uint32* bwt_occ = allocator.alloc( bwt_occ_words ); #if defined(_OPENMP) #pragma omp parallel for #endif for (int64 w = 0; w < int64( seq_words ); w += 4) { bwt_occ[ w*2+0 ] = bwt_vec[ w+0 ]; bwt_occ[ w*2+1 ] = bwt_vec[ w+1 ]; bwt_occ[ w*2+2 ] = bwt_vec[ w+2 ]; bwt_occ[ w*2+3 ] = bwt_vec[ w+3 ]; bwt_occ[ w*2+4 ] = occ_vec[ w+0 ]; bwt_occ[ w*2+5 ] = occ_vec[ w+1 ]; bwt_occ[ w*2+6 ] = occ_vec[ w+2 ]; bwt_occ[ w*2+7 ] = occ_vec[ w+3 ]; } // compute the L2 table L2[0] = 0; for (uint32 c = 0; c < 4; ++c) L2[c+1] = L2[c] + cnt[c]; return bwt_occ; } ///@} // FMIndexIODetails } // anonymous namespace // constructor // FMIndexData::FMIndexData() { } int FMIndexDataHost::load( const char* genome_prefix, const uint32 flags) { log_visible(stderr, "FMIndexData: loading... 
started\n"); log_visible(stderr, " genome : %s\n", genome_prefix); // initialize the core this->FMIndexDataCore::operator=( FMIndexDataCore() ); // bind pointers to static vectors m_flags = flags; m_count_table = &m_count_table_vec[0]; m_L2 = &m_L2_vec[0]; std::string bwt_string = std::string( genome_prefix ) + ".bwt"; std::string rbwt_string = std::string( genome_prefix ) + ".rbwt"; std::string sa_string = std::string( genome_prefix ) + ".sa"; std::string rsa_string = std::string( genome_prefix ) + ".rsa"; const char* bwt_file_name = bwt_string.c_str(); const char* rbwt_file_name = rbwt_string.c_str(); const char* sa_file_name = sa_string.c_str(); const char* rsa_file_name = rsa_string.c_str(); uint32 seq_length; uint32 seq_words; if (flags & FORWARD) { nvbio::vector<host_tag,uint32> bwt_vec; // read bwt log_info(stderr, "reading bwt... started\n"); { VectorAllocator allocator( bwt_vec ); if (load_bwt( bwt_file_name, allocator, seq_length, seq_words, m_primary ) == NULL) return 0; } log_info(stderr, "reading bwt... done\n"); log_verbose(stderr, " length: %u\n", seq_length); log_info(stderr, "building occurrence table... started\n"); { VectorAllocator allocator( m_bwt_occ_vec ); m_bwt_occ = build_occurrence_table( seq_length, seq_words, bwt_vec, allocator, m_bwt_occ_words, m_L2 ); } log_info(stderr, "building occurrence table... done\n"); log_info(stderr, " size: %u words\n", m_bwt_occ_words ); } if (flags & REVERSE) { nvbio::vector<host_tag,uint32> rbwt_vec; log_info(stderr, "reading rbwt... started\n"); { VectorAllocator allocator( rbwt_vec ); if (load_bwt( rbwt_file_name, allocator, seq_length, seq_words, m_rprimary ) == NULL) return 0; } log_info(stderr, "reading rbwt... done\n"); log_verbose(stderr, " length: %u\n", seq_length); log_info(stderr, "building occurrence table... started\n"); { VectorAllocator allocator( m_rbwt_occ_vec ); m_rbwt_occ = build_occurrence_table( seq_length, seq_words, rbwt_vec, allocator, m_bwt_occ_words, m_L2 ); } log_info(stderr, "building occurrence table... done\n"); } // record the sequence length m_seq_length = seq_length; if (flags & FORWARD) log_visible(stderr, " primary : %u\n", uint32(m_primary)); if (flags & REVERSE) log_visible(stderr, " rprimary : %u\n", uint32(m_rprimary)); // read ssa if (flags & SA) { if (flags & FORWARD) { VectorAllocator allocator( m_ssa_vec ); m_ssa.m_ssa = load_sa( sa_file_name, allocator, seq_length, m_primary, SA_INT ); } // read rssa if (flags & REVERSE) { VectorAllocator allocator( m_rssa_vec ); m_rssa.m_ssa = load_sa( rsa_file_name, allocator, seq_length, m_rprimary, SA_INT ); } // record the number of SA words m_sa_words = (seq_length + SA_INT) / SA_INT; } // generate the count table gen_bwt_count_table( m_count_table ); const uint32 has_fw = (m_flags & FORWARD) ? 1u : 0; const uint32 has_rev = (m_flags & REVERSE) ? 1u : 0; const uint32 has_sa = (m_flags & SA) ? 1u : 0; const uint64 memory_footprint = (has_fw + has_rev) * sizeof(uint32)*m_bwt_occ_words + has_sa * (has_fw + has_rev) * sizeof(uint32)*m_sa_words; log_visible(stderr, " memory : %.1f MB\n", float(memory_footprint)/float(1024*1024)); log_visible(stderr, "FMIndexData: loading... done\n"); return 1; } int FMIndexDataMMAPServer::load(const char* genome_prefix, const char* mapped_name) { log_visible(stderr, "FMIndexData: loading... 
started\n"); log_visible(stderr, " genome : %s\n", genome_prefix); std::string bwt_string = std::string( genome_prefix ) + ".bwt"; std::string rbwt_string = std::string( genome_prefix ) + ".rbwt"; std::string sa_string = std::string( genome_prefix ) + ".sa"; std::string rsa_string = std::string( genome_prefix ) + ".rsa"; const char* bwt_file_name = bwt_string.c_str(); const char* rbwt_file_name = rbwt_string.c_str(); const char* sa_file_name = sa_string.c_str(); const char* rsa_file_name = rsa_string.c_str(); std::string infoName = std::string("nvbio.") + std::string( mapped_name ) + ".info"; std::string bwtName = std::string("nvbio.") + std::string( mapped_name ) + ".bwt_occ"; std::string rbwtName = std::string("nvbio.") + std::string( mapped_name ) + ".rbwt_occ"; std::string saName = std::string("nvbio.") + std::string( mapped_name ) + ".sa"; std::string rsaName = std::string("nvbio.") + std::string( mapped_name ) + ".rsa"; // initialize the core this->FMIndexDataCore::operator=( FMIndexDataCore() ); // bind pointers to static vectors m_count_table = &m_count_table_vec[0]; m_L2 = &m_L2_vec[0]; m_flags = FORWARD | REVERSE | SA; try { uint32 seq_length; uint32 seq_words; // forward BWT { nvbio::vector<host_tag,uint32> bwt_vec; log_info(stderr, "reading bwt... started\n"); { VectorAllocator allocator( bwt_vec ); if (load_bwt( bwt_file_name, allocator, seq_length, seq_words, m_primary ) == NULL) return 0; } log_info(stderr, "reading bwt... done\n"); log_verbose(stderr, " length: %u\n", seq_length); log_info(stderr, "building occurrence table... started\n"); { MMapAllocator allocator( bwtName.c_str(), m_bwt_occ_file ); m_bwt_occ = build_occurrence_table( seq_length, seq_words, bwt_vec, allocator, m_bwt_occ_words, m_L2 ); } log_info(stderr, "building occurrence table... done\n"); } // reverse BWT { nvbio::vector<host_tag,uint32> rbwt_vec; log_info(stderr, "reading bwt... started\n"); { VectorAllocator allocator( rbwt_vec ); if (load_bwt( rbwt_file_name, allocator, seq_length, seq_words, m_rprimary ) == NULL) return 0; } log_info(stderr, "reading bwt... done\n"); log_verbose(stderr, " length: %u\n", seq_length); log_info(stderr, "building occurrence table... started\n"); { MMapAllocator allocator( rbwtName.c_str(), m_rbwt_occ_file ); m_rbwt_occ = build_occurrence_table( seq_length, seq_words, rbwt_vec, allocator, m_bwt_occ_words, m_L2 ); } log_info(stderr, "building occurrence table... done\n"); } log_visible(stderr, " primary : %u\n", uint32(m_primary)); log_visible(stderr, " rprimary : %u\n", uint32(m_rprimary)); // read ssa { MMapAllocator allocator( saName.c_str(), m_sa_file ); m_ssa.m_ssa = load_sa( sa_file_name, allocator, seq_length, m_primary, SA_INT ); } // read rssa { MMapAllocator allocator( rsaName.c_str(), m_rsa_file ); m_rssa.m_ssa = load_sa( rsa_file_name, allocator, seq_length, m_rprimary, SA_INT ); } // record the sequence length m_seq_length = seq_length; // record the number of SA words m_sa_words = has_ssa() ? (seq_length + SA_INT) / SA_INT : 0u; // generate the count table gen_bwt_count_table( m_count_table ); const uint32 has_fw = (m_flags & FORWARD) ? 1u : 0; const uint32 has_rev = (m_flags & REVERSE) ? 1u : 0; const uint32 has_sa = (m_flags & SA) ? 
1u : 0; const uint64 memory_footprint = (has_fw + has_rev) * sizeof(uint32)*m_bwt_occ_words + has_sa * (has_fw + has_rev) * sizeof(uint32)*m_sa_words; log_visible(stderr, " memory : %.1f MB\n", float(memory_footprint)/float(1024*1024)); m_info.sequence_length = m_seq_length; m_info.bwt_occ_words = m_bwt_occ_words; m_info.sa_words = m_sa_words; m_info.primary = m_primary; m_info.rprimary = m_rprimary; for (uint32 i = 0; i < 5; ++i) m_info.L2[i] = m_L2[i]; m_info_file.init( infoName.c_str(), sizeof(Info), &m_info ); } catch (ServerMappedFile::mapping_error error) { log_error(stderr,"could not create file mapping object \"%s\" (error %d)\n", error.m_file_name, error.m_code ); } catch (ServerMappedFile::view_error error) { log_error(stderr, "could not map view file \"%s\" (error %d)\n", error.m_file_name, error.m_code ); } catch (...) { }; log_visible(stderr, "FMIndexData: loading... done\n"); return 1; } int FMIndexDataMMAP::load( const char* file_name) { std::string infoName = std::string("nvbio.") + std::string( file_name ) + ".info"; std::string bwtName = std::string("nvbio.") + std::string( file_name ) + ".bwt_occ"; std::string rbwtName = std::string("nvbio.") + std::string( file_name ) + ".rbwt_occ"; std::string saName = std::string("nvbio.") + std::string( file_name ) + ".sa"; std::string rsaName = std::string("nvbio.") + std::string( file_name ) + ".rsa"; // initialize the core this->FMIndexDataCore::operator=( FMIndexDataCore() ); // bind pointers to static vectors m_count_table = &m_count_table_vec[0]; m_L2 = &m_L2_vec[0]; try { const Info* info = (const Info*)m_info_file.init( infoName.c_str(), sizeof(Info) ); const uint64 bwt_file_size = info->bwt_occ_words * sizeof(uint32); const uint64 sa_file_size = info->sa_words * sizeof(uint32); m_bwt_occ = (uint32*) m_bwt_occ_file.init( bwtName.c_str(), bwt_file_size ); m_rbwt_occ = (uint32*)m_rbwt_occ_file.init( rbwtName.c_str(), bwt_file_size ); if (info->sa_words) { m_ssa.m_ssa = (uint32*) m_sa_file.init( saName.c_str(), sa_file_size ); m_rssa.m_ssa = (uint32*)m_rsa_file.init( rsaName.c_str(), sa_file_size ); m_sa_words = info->sa_words; } else { m_ssa.m_ssa = NULL; m_rssa.m_ssa = NULL; m_sa_words = 0u; } // record the core info m_seq_length = info->sequence_length; m_bwt_occ_words = info->bwt_occ_words; m_primary = info->primary; m_rprimary = info->rprimary; for (uint32 i = 0; i < 5; ++i) m_L2[i] = info->L2[i]; // generate the count table gen_bwt_count_table( m_count_table_vec ); } catch (MappedFile::mapping_error error) { log_error(stderr, "FMIndexDataMMAP: error mapping file \"%s\" (%d)!\n", error.m_file_name, error.m_code); return 0; } catch (MappedFile::view_error error) { log_error(stderr, "FMIndexDataMMAP: error viewing file \"%s\" (%d)!\n", error.m_file_name, error.m_code); return 0; } catch (...) { log_error(stderr, "FMIndexDataMMAP: error mapping file (unknown)!\n"); return 0; } return 1; } void init_ssa( const FMIndexData& driver_data, FMIndexData::ssa_storage_type& ssa, FMIndexData::ssa_storage_type& rssa) { typedef FMIndexData::ssa_storage_type SSA_type; log_info(stderr, "building SSA... started\n"); ssa = SSA_type( driver_data.partial_index() /*, SA_INT*/ ); log_info(stderr, "building SSA... done\n"); log_info(stderr, "building reverse SSA... started\n"); rssa = SSA_type( driver_data.rpartial_index() /*, SA_INT*/ ); log_info(stderr, "building reverse SSA... 
done\n"); } FMIndexDataDevice::FMIndexDataDevice(const FMIndexData& host_data, const uint32 flags) : m_allocated( 0u ) { // initialize the core this->FMIndexDataCore::operator=( FMIndexDataCore() ); m_seq_length = host_data.m_seq_length; m_bwt_occ_words = host_data.m_bwt_occ_words; m_sa_words = host_data.m_sa_words; m_primary = host_data.m_primary; m_rprimary = host_data.m_rprimary; m_L2_vec.resize( 5 ); m_L2 = raw_pointer( m_L2_vec ); m_count_table_vec.resize( 256 ); m_count_table = raw_pointer( m_count_table_vec ); thrust::copy( host_data.m_L2, host_data.m_L2 + 5, m_L2_vec.begin() ); thrust::copy( host_data.m_count_table, host_data.m_count_table + 256, m_count_table_vec.begin() ); if (flags & FORWARD) { if (host_data.m_bwt_occ == NULL) log_warning(stderr, "FMIndexDataDevice: requested forward BWT is not available!\n"); m_bwt_occ_vec.resize( m_bwt_occ_words ); m_bwt_occ = raw_pointer( m_bwt_occ_vec ); thrust::copy( host_data.m_bwt_occ, host_data.m_bwt_occ + m_bwt_occ_words, m_bwt_occ_vec.begin() ); m_allocated += sizeof(uint32)*( m_bwt_occ_words ); if (flags & SA) { if (host_data.m_ssa.m_ssa == NULL) log_warning(stderr, "FMIndexDataDevice: requested forward SSA is not available!\n"); m_ssa_vec.resize( m_sa_words ); m_ssa.m_ssa = raw_pointer( m_ssa_vec ); thrust::copy( host_data.m_ssa.m_ssa, host_data.m_ssa.m_ssa + m_sa_words, m_ssa_vec.begin() ); m_allocated += sizeof(uint32)*( m_sa_words ); } } if (flags & REVERSE) { if (host_data.m_rbwt_occ == NULL) log_warning(stderr, "FMIndexDataDevice: requested reverse BWT is not available!\n"); m_rbwt_occ_vec.resize( m_bwt_occ_words ); m_rbwt_occ = raw_pointer( m_rbwt_occ_vec ); thrust::copy( host_data.m_rbwt_occ, host_data.m_rbwt_occ + m_bwt_occ_words, m_rbwt_occ_vec.begin() ); m_allocated += sizeof(uint32)*( m_bwt_occ_words ); if (flags & SA) { if (host_data.m_rssa.m_ssa == NULL) log_warning(stderr, "FMIndexDataDevice: requested reverse SSA is not available!\n"); m_rssa_vec.resize( m_sa_words ); m_rssa.m_ssa = raw_pointer( m_rssa_vec ); thrust::copy( host_data.m_rssa.m_ssa, host_data.m_rssa.m_ssa + m_sa_words, m_rssa_vec.begin() ); m_allocated += sizeof(uint32)*( m_sa_words ); } } nvbio::cuda::check_error("FMIndexDataDevice"); } void init_ssa( const FMIndexDataDevice& driver_data, FMIndexDataDevice::ssa_storage_type& ssa, FMIndexDataDevice::ssa_storage_type& rssa) { log_info(stderr, "building SSA... started\n"); ssa.init( driver_data.partial_index() ); log_info(stderr, "building SSA... done\n"); log_info(stderr, "building reverse SSA... started\n"); rssa.init( driver_data.rpartial_index() ); log_info(stderr, "building reverse SSA... done\n"); } ///@} // FMIndexIO ///@} // IO } // namespace io } // namespace nvbio
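/*
 * Usage sketch (added for illustration; not part of nvbio): ties together the
 * pieces defined above -- load the forward/reverse BWT plus sampled suffix
 * arrays on the host, then mirror what is needed on the GPU.  The function
 * name, the genome prefix argument and the FMIndexData::FORWARD/REVERSE/SA
 * scoping of the flag constants are assumptions; the load() call, the
 * FMIndexDataDevice constructor and init_ssa() are used as declared above.
 */
static bool load_fm_index_sketch(const char* genome_prefix)
{
    using namespace nvbio;

    // host-side load: BWT + occurrence tables (+ sampled SA) from "<prefix>.bwt", ".rbwt", ".sa", ".rsa"
    io::FMIndexDataHost h_fmi;
    if (!h_fmi.load( genome_prefix,
                     io::FMIndexData::FORWARD | io::FMIndexData::REVERSE | io::FMIndexData::SA ))
        return false;

    // device mirror of the same components
    io::FMIndexDataDevice d_fmi( h_fmi,
                     io::FMIndexData::FORWARD | io::FMIndexData::REVERSE | io::FMIndexData::SA );

    // (re)build the sampled suffix arrays on the device
    io::FMIndexDataDevice::ssa_storage_type ssa, rssa;
    io::init_ssa( d_fmi, ssa, rssa );

    return true;
}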
// ============================================================================
// [the_stack: next source file]
// ============================================================================
#include "common.h" /* SFILE_END */ // #include "./polyquotient.cu" __host__ __device__ void cross_prod ( double a11[], int deg11, double a12[], int deg12, double a21[], int deg21, double a22[], int deg22, double a00[], double toep[], int deg00, double res[], int &dres, BMatrix B, int &current_size ) { // Does a single 2x2 cross multiplication // Do the multiplcation in temporary storage double temp[2*Maxdegree + 1]; // Work out the actual degree int deg1 = deg11 + deg22; int deg2 = deg12 + deg21; int deg = (deg1 > deg2) ? deg1 : deg2; // Clear out the temporary memset (temp, 0, sizeof(temp)); // Now, start multiplying for (int i=0; i<=deg11; i++) for (int j=0; j<=deg22; j++) temp[i+j] += a11[i]*a22[j]; for (int i=0; i<=deg12; i++) for (int j=0; j<=deg21; j++) temp[i+j] -= a12[i]*a21[j]; // Clear out the result -- not really necessary memset (res, 0, (Maxdegree+1)*sizeof(double)); //----------------------------------------------------- // This is the most tricky part of the code, to divide // one polynomial into the other. By theory, the division // should be exact, but it is not, because of roundoff error. // we need to find a way to do this efficiently and accurately. //----------------------------------------------------- #define USE_TOEPLITZ #ifdef USE_TOEPLITZ // Now, divide by a00 - there should be no remainder int sres; polyquotient (temp, deg+1, a00, toep, deg00+1, res, sres, B, current_size); dres = sres-1; #else // Now, divide by a00 - there should be no remainder double *pres = &(res[deg-deg00]); for (int d=deg; d>=deg00; d--) { // Work out the divisor int td = d - deg00; // Degree of current term double val = temp[d] / a00[deg00]; *(pres--) = val; // Do the subtraction involved in the division for (int j=0; j<deg00; j++) temp[j+td] -= val * a00[j]; } #endif #ifdef RH_DEBUG // Print the remainder printf ("Remainder\n"); for (int i=0; i<deg00; i++) printf ("\t%.5e\n", temp[i]); #endif // Set the degree of the term dres = deg - deg00; } __host__ __device__ void det_preprocess_6pt ( PolyMatrix &Q, PolyDegree degree, int n_zero_roots // Number of roots known to be zero ) { // We do row-echelon form decomposition on the matrix to eliminate the // trivial known roots. // What is assumed here is the following. // - the first row of the matrix consists of constants // - the nullity of the matrix of constant terms is n_zero_roots, // so when it is put in row-echelon form, the last n_zero_roots are zero. // Initialize the list of and columns. We will do complete pivoting const int nrows = Nrows - 1; const int ncols = Nrows; int rows[Nrows], cols[Nrows]; for (int i=0; i<nrows; i++) rows[i] = i+1; // Miss the first row for (int i=0; i<ncols; i++) cols[i] = i; // Eliminate one row at a time for (int nr=nrows-1, nc=ncols-1; nr>=n_zero_roots; nr--,nc--) { // We must take the first row first to pivot around double bestval = 0.0; int bestrow = 0, bestcol = 0; // Find the highest value to pivot around for (int i=0; i<=nr; i++) for (int j=0; j<=nc; j++) { double val=Q[rows[i]][cols[j]][0]; if (fabs(val) > bestval) { bestval = fabs(val); bestrow = i; // Actually rows[i] bestcol = j; } } // #define RH_DEBUG #ifdef RH_DEBUG #undef RH_DEBUG // Print out the best value printf ("Pivot %d = %e at position %d %d\n",nr, bestval, rows[bestrow], cols[bestcol]); #endif // Now, select this row as a pivot. 
Also keep track of rows pivoted int prow = rows[bestrow]; rows[bestrow] = rows[nr]; // Replace pivot row by last row rows[nr] = prow; int pcol = cols[bestcol]; cols[bestcol] = cols[nc]; cols[nc] = pcol; // Clear out all the values above and to the right for (int i=0; i<nr; i++) { int iii = rows[i]; double fac = Q[iii][pcol][0] / Q[prow][pcol][0]; // Must do this to all the columns for (int j=0; j<ncols; j++) { int jjj = cols[j]; int deg = degree[prow][jjj]; int dij = degree[iii][jjj]; if (deg>dij) degree[iii][jjj] = deg; for (int d=0; d<=deg; d++) { if (d <= dij) Q[iii][jjj][d] -= Q[prow][jjj][d] * fac; else Q[iii][jjj][d] = -Q[prow][jjj][d] * fac; } } } } // Decrease the degree of the remaining rows for (int i=0; i<n_zero_roots; i++) { int ii = rows[i]; for (int jj=0; jj<ncols; jj++) { // Decrease the degree of this element by one for (int d=1; d<=degree[ii][jj]; d++) Q[ii][jj][d-1] = Q[ii][jj][d]; degree[ii][jj] -= 1; } } // #define RH_DEBUG #ifdef RH_DEBUG #undef RH_DEBUG printf ("Degrees\n"); for (int i=0; i<Nrows; i++) { for (int j=0; j<Nrows; j++) printf ("%1d ", degree[i][j]); printf ("\n"); } printf("\n"); printf ("Equation matrix\n"); for (int i=0; i<nrows; i++) { for (int j=0; j<ncols; j++) printf ("%7.4f ", Q[rows[i]][cols[j]][0]); printf ("\n"); } printf ("\n"); #endif } __host__ __device__ double quick_compute_determinant (double A[Nrows][Nrows], int dim) { // Do row reduction on A to find the determinant (up to sign) // Initialize the list of rows int rows[Nrows]; for (int i=0; i<dim; i++) rows[i] = i; // To accumulate the determinant double sign = 1.0; // Sweep out one row at a time for (int p = dim-1; p>=0; p--) { // Find the highest value to pivot around, in column p double bestval = 0.0; int bestrow = 0; for (int i=0; i<=p; i++) { double val=A[rows[i]][p]; if (fabs(val) > bestval) { bestval = fabs(val); bestrow = i; // Actually rows[i] } } // Return early if the determinant is zero if (bestval == 0.0) return 0.0; // Now, select this row as a pivot. Swap this row with row p if (bestrow != p) { int prow = rows[bestrow]; rows[bestrow] = rows[p]; // Replace pivot row by last row rows[p] = prow; sign = -sign; // Keep track of sign } // Clear out all the values above and to the right for (int i=0; i<p; i++) { int ii = rows[i]; double fac = A[ii][p] / A[rows[p]][p]; // Must do this to all the columns for (int j=0; j<dim; j++) A[ii][j] -= A[rows[p]][j] * fac; } } // Now compute the determinant double det = sign; for (int i=0; i<dim; i++) det *= A[rows[i]][i]; return det; } __host__ __device__ void do_scale ( PolyMatrix &Q, PolyDegree degree, double &scale_factor, // Value that x is multiplied by bool degree_by_row, // Estimate degree from row degrees int dim // Actual dimension of the matrix ) { // Scale the variable so that coefficients of low and high order are equal // There is an assumption made here that the high order term of the // determinant can be computed from the high-order values of each term, // which is not in general true, but is so in the cases that we consider. 
// First step is to compute these values double low_order, high_order; int total_degree; // Find the coefficient of minimum degree term double A[Nrows][Nrows]; for (int i=0; i<dim; i++) for (int j=0; j<dim; j++) A[i][j] = Q[i][j][0]; low_order = quick_compute_determinant (A, dim); // printf ("Low order = %.7e\n", low_order); // Find the coefficient of maximum degree term total_degree = 0; for (int i=0; i<dim; i++) { // Find what the degree of this row is int rowdegree = -1; if (degree_by_row) { for (int j=0; j<dim; j++) if (degree[i][j] > rowdegree) rowdegree = degree[i][j]; for (int j=0; j<dim; j++) if (degree[i][j] < rowdegree) A[i][j] = 0.0; else A[i][j] = Q[i][j][rowdegree]; } else { for (int j=0; j<dim; j++) if (degree[j][i] > rowdegree) rowdegree = degree[j][i]; for (int j=0; j<dim; j++) if (degree[j][i] < rowdegree) A[j][i] = 0.0; else A[j][i] = Q[j][i][rowdegree]; } // Accumulate the row degree total_degree += rowdegree; } high_order = quick_compute_determinant (A, dim); // printf ("High order = %.7e\n", high_order); // Now, work out what the scale factor should be, and scale scale_factor = pow(fabs(low_order/high_order), 1.0 / total_degree); // printf ("Scale factor = %e\n", scale_factor); for (int i=0; i<dim; i++) for (int j=0; j<dim; j++) { double fac = scale_factor; for (int d=1; d<=degree[i][j]; d++) { Q[i][j][d] *= fac; fac *= scale_factor; } } } __host__ __device__ void find_polynomial_determinant ( PolyMatrix &Q, PolyDegree deg, int rows[Nrows], // This keeps the order of rows pivoted on. int dim // Actual dimension of the matrix ) { // Compute the polynomial determinant - we work backwards from // the end of the matrix. Do not bother with pivoting // Polynomial to start with double aa = 1.0; double *a00 = &aa; int deg00 = 0; // Initialize the list of rows for (int i=0; i<dim; i++) rows[i] = dim-1-i; // The row to pivot around. At end of the loop, this will be // the row containing the result. int piv; for (int p = dim-1; p>=1; p--) { // We want to find the element with the biggest high order term to // pivot around #define DO_PARTIAL_PIVOT #ifdef DO_PARTIAL_PIVOT double bestval = 0.0; int bestrow = 0; for (int i=0; i<=p; i++) { double val=Q[rows[i]][p][deg[rows[i]][p]]; if (fabs(val) > bestval) { bestval = fabs(val); bestrow = i; // Actually rows[i] } } // Now, select this row as a pivot. Also keep track of rows pivoted piv = rows[bestrow]; rows[bestrow] = rows[p]; // Replace pivot row by last row rows[p] = piv; #else piv = rows[p]; #endif // #define RH_DEBUG #ifdef RH_DEBUG #undef RH_DEBUG // Print out the pivot printf ("Pivot %d = \n", p); for (int i=0; i<=deg[piv][p]; i++) printf ("\t%16.5e\n", Q[piv][p][i]); #endif // Set up a matrix for Toeplitz BMatrix B; int current_size = 0; // Also the Toeplitz vector double toep[Maxdegree+1]; for (int i=0; i<=deg00; i++) { toep[i] = 0.0; for (int j=0; j+i<=deg00; j++) toep[i] += a00[j] * a00[j+i]; } // Clear out all the values above and to the right for (int i=0; i<p; i++) { int iii = rows[i]; for (int j=0; j<p; j++) cross_prod ( Q[piv][p], deg[piv][p], Q[piv][j], deg[piv][j], Q[iii][p], deg[iii][p], Q[iii][j], deg[iii][j], a00, toep, deg00, Q[iii][j], deg[iii][j], // Replace original value B, current_size ); } // Now, update to the next a00 = &(Q[piv][p][0]); deg00 = deg[piv][p]; } // Now, the polynomial in the position Q(0,0) is the solution }
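/*
 * Illustration (added; not part of the solver above): the elimination in
 * find_polynomial_determinant() is a Bareiss-style fraction-free scheme --
 * each 2x2 cross product is divided by the previous pivot, and for polynomial
 * entries that division is exact up to roundoff, which is what cross_prod()
 * and polyquotient() implement.  The scalar analogue below shows the same
 * recurrence on a plain double matrix; it assumes nonzero leading pivots
 * (the real code pivots on the largest leading coefficient instead).
 */
__host__ __device__ static double bareiss_determinant(double A[Nrows][Nrows], int dim)
{
    double prev_pivot = 1.0;    // plays the role of a00 in cross_prod()
    for (int p = 0; p < dim-1; p++)
    {
        if (A[p][p] == 0.0)
            return 0.0;         // would need the row pivoting done above; treat as singular here
        for (int i = p+1; i < dim; i++)
            for (int j = p+1; j < dim; j++)
                // 2x2 cross product divided exactly by the previous pivot
                A[i][j] = (A[p][p]*A[i][j] - A[i][p]*A[p][j]) / prev_pivot;
        prev_pivot = A[p][p];
    }
    // after full elimination the last entry is the determinant
    return A[dim-1][dim-1];
}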
// ============================================================================
// [the_stack: next source file]
// ============================================================================
__global__ void conv_full_patch_split( float* img, float* kern, float* out, int img_len, int img_wid, int kern_len, int kern_wid, int nb_split) { int __shared__ out_len, out_wid, nb_thread_id; out_len = img_len + kern_len - 1; out_wid = img_wid + kern_wid - 1; nb_thread_id = blockDim.z*blockDim.y*blockDim.x; extern __shared__ float s_data[]; int batch_id = blockIdx.x; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int out_col = tx;//output col int out_row = ty;//output row const int thread_id = out_row*out_wid + out_col; float * d_img=&s_data[0];//size of [IMAGE_LEN * IMAGE_WID]; float * d_kern=&s_data[img_len * img_wid];//size of [KERNEL_LEN * KERNEL_WID]; img+=img_len*img_wid*batch_id;//the good batch load_to_shared(d_img, img, thread_id, nb_thread_id, img_len*img_wid); load_to_shared(d_kern, kern, thread_id, nb_thread_id, kern_len*kern_wid); __syncthreads(); for(int out_row=ty;out_row<out_len;out_row+=out_len/nb_split){ float sum = 0.0f; int img_row = out_row; for (int row=0; row < kern_len; row++) {//loop over row int inverse_row = (img_row-row); if(inverse_row<0 ||inverse_row>=(img_len))continue;//row outside the image const float* idx_in=&d_img[inverse_row*img_wid]; const float* idx_kern=&d_kern[row*kern_wid]; int img_col = out_col; int col=0,last=0; for (col=0,last=img_col; col < kern_wid; col++,last--) {//loop over col if(last<0 ||last>=(img_wid))continue;//col outside the image sum+=idx_in[last]*idx_kern[col]; } } out[batch_id*out_len*out_wid+//the output image out_row*out_wid+out_col] = sum; } } //we store the full image and the full kernel in the shared memory //each thread compute only one value for the output //thread block size=out_wid, out_len //grid block size=batch_id, nkern //dynamic shared memory: img_len*img_wid+kern_len*kern_wid __global__ void conv_full_patch( float* img, float* kern, float* out, int img_len, int img_wid, int kern_len, int kern_wid, int nkern, int nstack) { int __shared__ out_len, out_wid, nb_thread_id; out_len = img_len + kern_len - 1; out_wid = img_wid + kern_wid - 1; nb_thread_id = blockDim.z*blockDim.y*blockDim.x; extern __shared__ float s_data[]; int batch_id = blockIdx.x; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; int out_col = tx;//output col int out_row = ty;//output row const int thread_id = out_row*out_wid + out_col; float * d_img=&s_data[0];//size of [IMAGE_LEN * IMAGE_WID]; float * d_kern=&s_data[img_len * img_wid];//size of [KERNEL_LEN * KERNEL_WID]; kern+=kern_len*kern_wid*nstack*blockIdx.y;//the good nkern img+=img_len*img_wid*batch_id;//the good batch load_to_shared(d_img, img, thread_id, nb_thread_id, img_len*img_wid); load_to_shared(d_kern, kern, thread_id, nb_thread_id, kern_len*kern_wid, true); __syncthreads(); float sum = 0.0f; for (int row=0; row < kern_len; row++) {//loop over row if(row+out_row-kern_len+1<0 || row+out_row-kern_len+1>=img_len)continue; const float* idx_in=&d_img[(row+out_row-kern_len+1)*img_wid+out_col-kern_wid+1]; const float* idx_kern=&d_kern[row*kern_wid]; int col=0; int max_col=kern_wid; int img_col=out_col-kern_wid+1; max_col=min(max_col,img_wid-img_col); if(img_col<0){col=-img_col;img_col+=col;} for (; col < max_col; col++, img_col++) {//loop over col sum+=idx_in[col]*idx_kern[col]; } } out[batch_id*out_wid*out_len*nkern+//the good batch out_wid*out_len*blockIdx.y+//the output image out_row*out_wid+out_col] = sum; } //we store the full image and the full kernel in the shared memory //each thread compute only one value for the output //thread block size=out_wid, out_len //grid 
block size=batch_id, nkern //dynamic shared memory: img_len*img_wid+kern_len*kern_wid //template c_contiguous: if true, the img and kern have are column and row contiguous else we use the stride value from the param. The image need to be c_contiguous in the nbatch and nstack dimensions. template<bool img_c_contiguous_2d, bool kern_c_contiguous_2d> __global__ void conv_full_patch_stack( float* img, float* kern, float* out, int img_len, int img_wid, int kern_len, int kern_wid, int nkern, int nstack, int img_stride_col, int img_stride_row, int kern_stride_col, int kern_stride_row, int kern_stride_stack, int kern_stride_nkern) { int __shared__ out_len, out_wid, nb_thread_id; out_len = img_len + kern_len - 1; out_wid = img_wid + kern_wid - 1; nb_thread_id = blockDim.y*blockDim.x;//blockDim.z* float __shared__ *kern_, *img_; extern __shared__ float s_data[]; const int batch_id = blockIdx.x; const int nkern_id = blockIdx.y; const int out_col = threadIdx.x; const int out_row = threadIdx.y; const int thread_id = threadIdx.y*blockDim.x+ threadIdx.x; float* d_img=&s_data[0];//size of [IMAGE_LEN * IMAGE_WID]; float* d_kern=&s_data[img_len * img_wid];//size of [KERNEL_LEN * KERNEL_WID]; kern_=kern+kern_stride_nkern*nkern_id;//the good nkern img_=img+img_len*img_stride_row*(nstack*batch_id);//the good batch float sum = 0.0f; for (int stack = 0;stack<nstack;stack++){ load_to_shared(d_img, img_+stack*img_len*img_stride_row, thread_id,nb_thread_id,img_wid,img_len,img_stride_col, img_stride_row,false,img_c_contiguous_2d); load_to_shared(d_kern, kern_+stack*kern_stride_stack, thread_id,nb_thread_id,kern_wid,kern_len,kern_stride_col,kern_stride_row,true,kern_c_contiguous_2d); __syncthreads(); for (int row=0; row < kern_len; row++) {//loop over row if(row+out_row-kern_len+1<0 || row+out_row-kern_len+1>=img_len)continue; const float* idx_in=&d_img[(row+out_row-kern_len+1)*img_wid+out_col-kern_wid+1]; const float* idx_kern=&d_kern[row*kern_wid]; int col=0; int max_col=kern_wid; int img_col=out_col-kern_wid+1; max_col=min(max_col,img_wid-img_col); if(img_col<0){col=-img_col;img_col+=col;} for (; col < max_col; col++, img_col++) {//loop over col sum+=idx_in[col]*idx_kern[col]; } } //Needed as not all thread finish at the same time the loop //And we don't want to overwrite the shared memory. __syncthreads(); } out[batch_id*out_wid*out_len*nkern+//the good batch out_wid*out_len*blockIdx.y+//the output image out_row*out_wid+out_col] = sum; } /** * As conv_patch_stack, but used for the full convolution by padding the image in shared memory. * I keep it separated from conv_patch as we take 19-20 register which is more than the 10/16 max for each thread and thus this could lower the occupency. * Implementation of the valid convolution that keep the full image and the full kernel in shared memory * each thread compute only one value for the output if split is true. Otherwise compute ceil((float)out_len/N) pixel. * thread block size=out_wid, nb_rows (optimized value is ceil(out_len/N)) * grid block size=batch_id, nkern * dynamic shared memory: full mem: (img_len+2*kern_len-2)*(img_wid+2*kern_wid-2)+kern_len*kern_wid * dynamic shared memory: low mem:((kern_len+nb_row-1)+2*kern_len-2)*(img_wid+2*kern_wid-2)+kern_len*kern_wid * * nkern: the number of kernel, used to compute the output image to store the result * nstack: the size of the stack, used to compute the image to load. 
* template flipped_kern: if true, we "flip" the kernel as in a real convolution, else we don't * template c_contiguous: if true, the image and kernel have are c_contiguous.(use less registers) * template split: if true, each thread compute more than 1 output pixel. * template low_mem: if true, as split but with use less dynamic shared memory but use more registers. * if you set split and low_mem to true, we will use the low_mem version! */ template<bool flipped_kern, int KERN_WIDTH, bool c_contiguous, bool split, bool low_mem > __global__ void conv_full_patch_stack_padded( float* img, float* kern, float* out, const int img_len, const int img_wid, const int kern_len, const int kern_wid, const int nkern, const int nstack, const int img_stride_col, const int img_stride_row, const int img_stride_stack, const int img_stride_batch, const int kern_stride_col, const int kern_stride_row, const int kern_stride_stack, const int kern_stride_nkern) { int __shared__ out_len, out_wid, nb_thread_id; out_len = img_len + kern_len - 1; out_wid = img_wid + kern_wid - 1; nb_thread_id = blockDim.z*blockDim.y*blockDim.x; extern __shared__ float s_data[]; __shared__ int batch_id, kern_id, img_wid_valid, nb_rows; batch_id = blockIdx.x; kern_id = blockIdx.y; nb_rows = blockDim.y; // Thread index const int tx = threadIdx.x; const int ty = threadIdx.y; int out_col = tx;//output col const int thread_id = ty*blockDim.x + tx; float * d_kern=&s_data[0];//size of [KERNEL_LEN * KERNEL_WID]; float * d_img=&s_data[kern_len*kern_wid];//size of [see fct doc]; kern+=kern_stride_nkern*kern_id;//the good nkern img+=img_stride_batch*batch_id;//the good batch img_wid_valid=img_wid+2*kern_wid-2; if(!split && !low_mem){ fill(d_img,img_wid_valid*(img_len+2*kern_len-2), 0, thread_id, nb_thread_id); const int out_row = ty;//output row float sum = 0.0f; for (int stack = 0;stack<nstack;stack++,kern+=kern_stride_stack, img+=img_stride_stack){ __syncthreads(); load_padded_col_to_shared(d_img+img_wid_valid*(kern_len-1),img, thread_id,nb_thread_id,img_wid,img_len, img_stride_col, img_stride_row, kern_wid-1, c_contiguous); load_to_shared(d_kern, kern, thread_id, nb_thread_id, kern_wid,kern_len, kern_stride_col, kern_stride_row, flipped_kern, c_contiguous); __syncthreads(); for (int row=0; row < kern_len; row++) {//loop over row const float* idx_kern=&d_kern[row*kern_wid]; const float* idx_in=&d_img[(row+out_row)*img_wid_valid+out_col]; convolutionRowNoFlip<KERN_WIDTH>(sum, idx_kern, idx_in, kern_wid); } } out[batch_id*out_wid*out_len*nkern+//the good batch kern_id*out_wid*out_len+//the output image out_row*out_wid+out_col] = sum; }else if(split && !low_mem){ fill(d_img,img_wid_valid*(img_len+2*kern_len-2), 0, thread_id, nb_thread_id); //out_len_max must by higher then out_len as we need all thread when we load the image as the nb_rows is not always a multiple of out_len. 
__shared__ int out_len_max; //TODO pass a parameter nb_split out_len_max = (out_len/blockDim.y+(out_len%blockDim.y==0?0:1))*blockDim.y; for(int out_row = ty;out_row<out_len_max;out_row+=nb_rows){ float sum = 0.0f; for (int stack = 0;stack<nstack;stack++){ __syncthreads(); //TODO: load only the part of the image needed or put the partial result in shared memory load_padded_col_to_shared(d_img+img_wid_valid*(kern_len-1), img+img_stride_stack*stack, thread_id,nb_thread_id,img_wid,img_len, img_stride_col, img_stride_row, kern_wid-1, c_contiguous); load_to_shared(d_kern, kern+kern_stride_stack*stack, thread_id, nb_thread_id, kern_wid,kern_len, kern_stride_col, kern_stride_row, flipped_kern, c_contiguous); __syncthreads(); //The if is needed as on Fermi as reading out of bound index from shared memory generate an error. //Not needed on generation before as they worked anyway. Removing the if generate the good code //as we store the result of only the good thread. //This was with nvcc 3.0 on an GTX470 card. if(out_row<out_len) for (int row=0; row < kern_len; row++) {//loop over row const float* idx_kern=&d_kern[row*kern_wid]; const float* idx_in=&d_img[(row+out_row)*img_wid_valid+out_col]; convolutionRowNoFlip<KERN_WIDTH>(sum, idx_kern, idx_in, kern_wid); } if(out_row<out_len) out[batch_id*out_wid*out_len*nkern+//the good batch out_wid*out_len*kern_id+//the output image out_row*out_wid+out_col] = sum; } } }else{//low_mem version //don't need to fill the last rows padding as this is done later. fill(d_img,img_wid_valid*((kern_len+nb_rows-1)+2*kern_len-2), 0, thread_id, nb_thread_id); //out_len_max must by higher then out_len as we need all thread when we load the image as the nb_rows is not always a multiple of out_len. __shared__ int out_len_max; //TODO pass a parameter nb_split if(thread_id==0) out_len_max = (out_len/nb_rows+(out_len%nb_rows==0?0:1))*nb_rows; __syncthreads(); for(int out_row = ty, out_row_iter=0;out_row<out_len_max; out_row+=nb_rows, out_row_iter++){ float sum = 0.0f; for (int stack = 0;stack<nstack;stack++){ __syncthreads(); const int len_to_load=min(kern_len+nb_rows,img_len-out_row_iter*nb_rows);//nb rows to load, min(nb_rows for this iter, nb rows left in the image) const int empty_row = max(kern_len-1-out_row_iter*nb_rows,0);//number of empty row at the start //we need to reload some row as when we change of out_row we lost the last load du to the stack. const int previous_row = min(out_row_iter*nb_rows,kern_len-1);//number of row from last out_row iteration to reload load_padded_col_to_shared(d_img+(kern_len-1-previous_row)*img_wid_valid, img+img_stride_stack*stack//the good stack image +(out_row_iter*nb_rows-previous_row)*img_stride_row,//the good split top row. thread_id,nb_thread_id,img_wid, len_to_load+previous_row, img_stride_col, img_stride_row, kern_wid-1, c_contiguous); //TODO: fill the last row padding only when needed. //We always fill the last rows padding event when not needed. 
int row_to_fill = 2*kern_len-2+nb_rows- empty_row - previous_row - len_to_load; row_to_fill = min(row_to_fill,kern_len-1); fill(d_img+(kern_len-1+len_to_load)*img_wid_valid, img_wid_valid*row_to_fill, 0, thread_id, nb_thread_id); load_to_shared(d_kern, kern+kern_stride_stack*stack, thread_id, nb_thread_id, kern_wid,kern_len, kern_stride_col, kern_stride_row, flipped_kern, c_contiguous); __syncthreads(); for (int row=0; row < kern_len; row++) {//loop over row const float* idx_kern=&d_kern[row*kern_wid]; const float* idx_in=&d_img[(row+out_row-out_row_iter*nb_rows)*img_wid_valid+out_col]; convolutionRowNoFlip<KERN_WIDTH>(sum, idx_kern, idx_in, kern_wid); } } if(out_row<out_len) out[batch_id*out_wid*out_len*nkern+//the good batch out_wid*out_len*kern_id+//the output image out_row*out_wid+out_col] = sum; } } } template <int i> __device__ float everything_dot(const float * x, const int sx, const float * y, const int sy) { return everything_dot<i/2>(x, sx, y, sy) + everything_dot<(i+1)/2>(x+sy*(i/2), sx, y+sy*(i/2), sy) ; //return x[0] * y[0] + everything_dot<i-1>(x+sx, sx, y+sy, sy); } template <> __device__ float everything_dot<0>(const float * x, const int sx, const float * y, const int sy) { return 0; } template <> __device__ float everything_dot<1>(const float * x, const int sx, const float * y, const int sy) { return x[0] * y[0]; } template<int NSTACK> __global__ void conv_full_load_everything( float* img, float* kern, float* out, int img_len, int img_wid, int kern_len, int kern_wid, int nkern, int nstack, int img_stride_col, int img_stride_row, int img_stride_stack, int img_stride_batch, int kern_stride_col, int kern_stride_row, int kern_stride_stack, int kern_stride_nkern) { int __shared__ out_len, out_wid, nb_thread_id; out_len = img_len + kern_len - 1; out_wid = img_wid + kern_wid - 1; nb_thread_id = blockDim.y*blockDim.x; extern __shared__ float s_data[]; int batch_id = blockIdx.x; const int out_col = threadIdx.x;//output col const int out_row = threadIdx.y;//output row const int thread_id = out_row*out_wid + out_col; float * d_img=&s_data[0]; //size [nstack * IMAGE_LEN * IMAGE_WID]; float * d_kern=&s_data[nstack * img_len * img_wid];//size [nstack * KERNEL_LEN * KERNEL_WID]; img += blockIdx.x * img_stride_batch;//the good batch // load the image to shared memory for (int i = thread_id; i < nstack * img_len * img_wid; i += nb_thread_id) { int stack = i / (img_wid*img_len); int row = (i % (img_wid*img_len)) / img_wid; int col = (i % (img_wid*img_len)) % img_wid; d_img[i] = img[stack*img_stride_stack +row*img_stride_row +col*img_stride_col]; } for (int kern_idx = 0; kern_idx < nkern; ++kern_idx, kern += kern_stride_nkern) { // load the kernel into shared memory and flip it for (int i = thread_id; i < nstack * kern_len * kern_wid; i += nb_thread_id) { int stack = i / (kern_wid*kern_len); int row = (i % (kern_wid*kern_len)) / kern_wid; int col = (i % (kern_wid*kern_len)) % kern_wid; d_kern[stack*kern_len*kern_wid + (kern_len-1-row)*kern_wid + (kern_wid-1-col)] = kern[stack*kern_stride_stack +row*kern_stride_row +col*kern_stride_col]; } __syncthreads(); float sum = 0.0f; for (int row=0; row < kern_len; ++row) { int irow = out_row - kern_len+1+row; if (irow < 0 || irow > img_len) continue; for (int col = 0; col < kern_wid; ++col) { int icol = out_col - kern_wid+1+col; if (icol < 0 || icol > img_wid) continue; if (NSTACK > 0) { sum += everything_dot<NSTACK>(d_img + irow*img_wid + icol, img_len*img_wid, d_kern + row*kern_wid+col, kern_len*kern_wid); } else { for (int stack = 0; stack < 
nstack; ++stack) { sum += d_img[stack*img_len*img_wid + irow*img_wid + icol] * d_kern[stack*kern_len*kern_wid+row*kern_wid+col]; } } } } out[batch_id*out_wid*out_len*nkern+//the good batch out_wid*out_len*kern_idx+//the output image out_row*out_wid+out_col] = sum; __syncthreads(); //don't start loading another kernel until we're done here } } /* Local Variables: mode:c++ c-basic-offset:4 c-file-style:"stroustrup" indent-tabs-mode:nil fill-column:79 End: */ // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:textwidth=79 :
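// ---------------------------------------------------------------------------
// Hedged reference sketch, not part of the original file: a plain host-side
// implementation of the "full" convolution that conv_full_patch and its
// variants above compute, i.e. out_len = img_len + kern_len - 1 and
// out_wid = img_wid + kern_wid - 1, with the kernel flipped as in a true
// convolution and the image implicitly zero-padded. It is only meant for
// validating GPU results on small inputs; the name conv_full_reference and
// its signature are illustrative and do not exist in the original code.
static void conv_full_reference(const float* img, const float* kern, float* out,
                                int img_len, int img_wid,
                                int kern_len, int kern_wid)
{
    const int out_len = img_len + kern_len - 1;
    const int out_wid = img_wid + kern_wid - 1;
    for (int out_row = 0; out_row < out_len; out_row++) {
        for (int out_col = 0; out_col < out_wid; out_col++) {
            float sum = 0.0f;
            for (int row = 0; row < kern_len; row++) {
                const int img_row = out_row - row;               // flipped kernel index
                if (img_row < 0 || img_row >= img_len) continue; // outside the image: zero pad
                for (int col = 0; col < kern_wid; col++) {
                    const int img_col = out_col - col;
                    if (img_col < 0 || img_col >= img_wid) continue;
                    sum += img[img_row*img_wid + img_col] * kern[row*kern_wid + col];
                }
            }
            out[out_row*out_wid + out_col] = sum;
        }
    }
}
// Typical use (illustrative): run it per (batch, kernel) pair and compare the
// result element-wise against the output buffer written by the GPU kernels above.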
* How to compile (assume cuda is installed at /usr/local/cuda/) * nvcc -c -I/usr/local/cuda/include svd_example.cpp * g++ -fopenmp -o a.out svd_example.o -L/usr/local/cuda/lib64 -lcudart -lcublas -lcusolver * * EY : 20170627 This also worked for me * nvcc -std=c++11 -arch='sm_52' -lcudart -lcublas -lcusolver SVD_vectors_unified.cu -o SVD_vectors_unified.exe * */ #include <iostream> // std::cout #include <iomanip> // std::setprecision #include <assert.h> // assert #include <cuda_runtime.h> // cudaError_t #include <cublas_v2.h> #include <cusolverDn.h> // Dn = dense (matrices) constexpr const int m = 3; constexpr const int n = 2; constexpr const int lda = m; __device__ __managed__ float A[lda*n] = { 1.0f, 4.0f, 2.0f, 2.0f, 5.0f, 1.0f }; __device__ __managed__ float U[lda*m]; // m-by-m unitary matrix __device__ __managed__ float VT[lda*n]; // n-by-n unitary matrix __device__ __managed__ float S[n]; // singular value __device__ __managed__ int *devInfo = nullptr; __device__ __managed__ float W[lda*n]; // W = S*VT __device__ __managed__ float *d_rwork = NULL; // Looks like this is for boilerplate and it looks like that /* * lda = stride * it's in "column-major" order; cuSOLVER assumes for dense matrices COLUMN-major order * cf. http://docs.nvidia.com/cuda/cusolver/index.html#format-dense-matrix * */ void printMatrix(int m, int n, const float *A, int lda, const char* name) { std::cout << name << std::endl; for (int row =0; row <m; row++) { for (int col =0 ; col <n ; col++) { float Areg = A[row + col*lda]; std::cout << Areg << " " ; } std::cout << std::endl; } } int main(int argc, char* argv[]) { /** * @name cusolverDnCreate(cusolverDnHandle_t *handle); * @brief This function initializes the cuSolverDN library and creates a handle on the cuSolverDN context. * It must be called before any other cuSolverDN API function is invoked. It allocates hardware resources * necessary for accessing the GPU. * cuSolverDN: dense LAPACK, dense LAPACK functions, as opposed to SP, sparse, RF refactorization * */ cusolverDnHandle_t cusolverH = NULL; cublasHandle_t cublasH = NULL; cublasStatus_t cublas_status = CUBLAS_STATUS_SUCCESS; cusolverStatus_t cusolver_status = CUSOLVER_STATUS_SUCCESS; /* | 1 2 | * A = | 4 5 | * | 2 1 | * */ // working space, <type> array of size lwork float *d_work = NULL; // size of working array work int lwork = 0; const float h_one = 1.f; const float h_minus_one = -1.f; float S_exact[n] = {7.065283497082729f, 1.040081297712078f}; std::cout << " A = (matlab base-1) " << std::endl; printMatrix(m, n, A, lda, "A"); std::cout << " ===== " << std::endl; cudaDeviceSynchronize(); // step 1: create cusolverDn/cublas handle cusolver_status = cusolverDnCreate(&cusolverH); assert(CUSOLVER_STATUS_SUCCESS == cusolver_status); cublas_status = cublasCreate(&cublasH); assert(CUBLAS_STATUS_SUCCESS == cublas_status); // step 2: copy A and B to device is handled automatically by managed device // step 3: query working space of SVD // The S and D data types are real valued single and double precision, respectively /** cusolverDnSgesvd_bufferSize * @brief Calculate size of work buffer used by cusolverDnDgesvd. 
* @ref http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-gesvd * * cusolverStatus_t * cusolverDnSgesvd_bufferSize( * cusolverDnHandle_t handle, * int m, * int n, * int *lwork); * */ // The S and D data types are real valued single and double precision, respectively cusolver_status = cusolverDnSgesvd_bufferSize( cusolverH, m, n, &lwork); assert(cusolver_status == CUSOLVER_STATUS_SUCCESS); std::cout << " \n lwork = " << lwork << std::endl << std::endl; cudaMalloc((void**)&d_work , sizeof(float)*lwork); // step 4: compute SVD /** * jobu, input * @brief specifies options for computing all or part of the matrix U:= 'A': all m columns of U are returned in array * U:='S': the first min(m,n) columns of U (the left singular vectors) are returned in the array * U:='O': the first min(m,n) columns of U (the left singular vectors) are overwritten on the array A; * = 'N': no columns (no left singular vectors) are computed. * * */ signed char jobu = 'A'; // all m columns of U /** * jobvt, input * @brief specifies options for computing all or part of the matrix V**T: * ='A': all N rows of V**T are returned in the array VT; * ='S': the first min(m,n) rows of V**T (the right singular vectors) are returned in the array VT; * ='O': the first min(m,n) rows of V**T (the first singular vectors) are overwritten on the array A; * ='N': no rows of V**T (no right singular vectors) are computed. * */ signed char jobvt = 'A'; // all n columns of VT /** * cusolverDnSgesvd - computes the singular value decomposition (SVD) of * mxn matrix A * and corresponding left and/or right singular vectors * SVD written * A = U S V^H * where * S = m x n matrix which is 0, except for its min(m,n) diagonal elements * U = m x m unitary matrix * V = n x n unitary matrix * Diagonal elements of S are singular values of A; they are real and non-negative, and returned in descending order * The first min(m,n) columns of U and V are left and right singular vectors of A * * @name API of gesvd (partial API) * @brief ldu - input - leading dimension of 2-dim. array used to store matrix U * ldvt - input - leading dim. of 2-dim. array used to store matrix Vt * rwork (here, it is d_rwork) - device - input - real array of dim. min(m,n)-1. It contains the * unconverged superdiagonal elements of an upper bidiagonal matrix if devInfo > 0 * devInfo - device - output - if devInfo = 0, the operation is successful, * if devInfo = -i, the i-th parameter is wrong. 
* if devInfo > 0, devInfo indicates how many superdiagonals of an intermediate bidiagonal form did not converge to 0 * */ // The S and D data types are real valued single and double precision, respectively cusolver_status = cusolverDnSgesvd( cusolverH, jobu, jobvt, m, n, A, lda, S, U, lda, // ldu VT, lda, // ldvt d_work, lwork, d_rwork, devInfo); cudaDeviceSynchronize(); // assert(CUSOLVER_STATUS_SUCCESS == cusolver_status); Assertion failed std::cout << " cusolver_status after SVD : " << cusolver_status << std::endl; std::cout << " after gesvd: info_gpu or devInfo = " << devInfo << std::endl ; assert(0 == devInfo); std::cout << " ====== " << std::endl; std::cout << " S = (matlab base-1) : " << std::endl; printMatrix(n,1,S,lda,"S"); std::cout << " ====== " << std::endl; std::cout << " U = (matlab base-1) : " << std::endl; printMatrix(m,m,U,lda,"U"); std::cout << " ====== " << std::endl; std::cout << " VT = (matlab base-1) : " << std::endl; printMatrix(n,n,VT,lda,"VT"); std::cout << " ====== " << std::endl; // step 5: measure error of singular value float ds_sup = 0.f; for (int j =0; j < n; j++) { float err = fabs( S[j] - S_exact[j]); ds_sup = (ds_sup > err) ? ds_sup : err; } std::cout << " |S-S_exact| = " << std::setprecision(9) << ds_sup << std::endl; // step 6: |A- U*S*VT | // W = S*VT /** * cublas<t>dgmm() * cublasSdgmm single float * cublasDdgmm double float * cublasCdgmm complex number * cublasZdgmm * @brief matrix-matrix multiplication * @ref 2.8.2. cublas<t>dgmm() http://docs.nvidia.com/cuda/cublas/index.html#cublas-lt-t-gt-dgmm * performs * C = A x diag(X) if mode == CUBLAS_SIDE_RIGHT * C = diag(X) x A if mode == CUBLAS_SIDE_LEFT * * where A and C are matrices stored in column-major format with dims. m x n. * X is vector of size n if mode == CUBLAS_SIDE_RIGHT and * of size m if mode == CUBLAS_SIDE_LEFT. * X is gathered from 1-dim. array x with stride incc * * cublasStatust cublasSdgmm(cublasHandle_t handle, cublasSideMode_t mode, int m, int n, * const float * A, int lda, * const float *x, int incx, * float *C, int ldc) * */ cublas_status = cublasSdgmm( cublasH, CUBLAS_SIDE_LEFT, n,n,VT,lda,S,1,W,lda); assert(CUBLAS_STATUS_SUCCESS == cublas_status); /* sanity check cudaDeviceSynchronize(); for (int idx=0;idx<4;idx++) { std::cout << W[idx] << " "; } cudaDeviceSynchronize(); printMatrix(m, n, A, lda, "A"); cudaDeviceSynchronize(); */ /* EY : 20170628 I found that these steps are needed because A changed due to steps above. 
*/ float host_A[lda*n] = { 1.0, 4.0, 2.0, 2.0, 5.0, 1.0 }; cudaMemcpy(A, host_A,sizeof(float)*lda*n,cudaMemcpyHostToDevice); // A := -U*W + A /** * @name cublass<t>gemm() * @ref http://docs.nvidia.com/cuda/cublas/index.html#cublas-lt-t-gt-gemm * cublasStatus_t cublasSgemm(cublasHandle_t handle, * cublasOperation_t transa, cublasOperation_t transb, * int m, int n, int k, * const float *alpha, * const float *A, int lda, * const float *B, int ldb, * const float *beta, * float *C, int ldc) * @brief This function performs the matrix-matrix multiplication * C = \alpha op(A)op(B) + \beta C * * */ cublas_status = cublasSgemm_v2(cublasH, CUBLAS_OP_N, // U CUBLAS_OP_N, // W m, // number of rows of A n, // number of columns of A n, // number of columns of U &h_minus_one, /* host pointer */ U, // U lda, W, // W lda, &h_one, /* host pointer */ A, lda); assert(CUBLAS_STATUS_SUCCESS == cublas_status); float dR_fro = 0.0f; /** * @name cublas<t>nrm2() * @ref http://docs.nvidia.com/cuda/cublas/index.html#cublas-lt-t-gt-nrm2 * @brief This function computes the Euclidean norm of the vector x. * The code uses a multiphase model of accumulation to avoid intermediate underflow and overflow (EY : 20170628 what's under and over flow?) * with the result being equivalent to sqrt( \sum_{i=1}^n (x[j] x x[j]) } * where j = 1+(i-1)*incx in exact arithmetic. * Notice that the last equation reflects 1-based indexing used for compatibility with Fortran. * * cublasStatus_t cublasSnrm2(cublasHandle_t handle, int n, * const float *x, int incx, float *result) * result - host or device - output - the result norm, which is 0.0 if n, incx <=0 * * */ cublas_status = cublasSnrm2_v2(cublasH, lda*n, A, 1, &dR_fro); assert(CUBLAS_STATUS_SUCCESS == cublas_status); std::cout << "|A - U*S*VT| = " << std::setprecision(9) << dR_fro << std::endl; // free resources if (d_work) { cudaFree(d_work); } if (cublasH) cublasDestroy(cublasH); /** * @name cusolverDnDestroy() * cusolverStatus_t * cusolverDnDestroy(cusolverDnHandle_t handle); * * @brief This function release CPU-side resources used by the cuSolverDN library * cuSolverDN dense LAPACK Function, library, as opposed to Sp sparse, RF, refactorization * @ref http://docs.nvidia.com/cuda/cusolver/index.html#cuSolverDNdestroy * * */ if (cusolverH) cusolverDnDestroy(cusolverH); cudaDeviceReset(); return 0; }
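// ---------------------------------------------------------------------------
// Hedged host-side sketch, not part of the original example: after
// cusolverDnSgesvd and a cudaDeviceSynchronize(), the managed arrays U, S and
// VT can also be combined on the CPU to rebuild A and check the factorization
// without cuBLAS. Everything is column-major with leading dimension lda, as
// assumed throughout the example; pass the untouched host copy of A (e.g.
// host_A), since gesvd overwrites the device copy. The helper name
// check_reconstruction is illustrative only.
static float check_reconstruction(const float* U, const float* S, const float* VT,
                                  const float* A_orig, int m, int n, int lda)
{
    float max_err = 0.0f;
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            // A = U(:,1:n) * diag(S) * VT, column-major indexing A(i,j) = A[i + j*lda]
            float aij = 0.0f;
            for (int k = 0; k < n; ++k)
                aij += U[i + k*lda] * S[k] * VT[k + j*lda];
            float err = aij - A_orig[i + j*lda];
            if (err < 0.0f) err = -err;
            if (err > max_err) max_err = err;
        }
    }
    return max_err;  // expected to be of order 1e-6 for this single-precision 3x2 example
}
// Illustrative use: float err = check_reconstruction(U, S, VT, host_A, m, n, lda);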
#pragma once #include <gunrock/app/enactor_base.cuh> #include <gunrock/app/enactor_iteration.cuh> #include <gunrock/app/enactor_loop.cuh> #include <gunrock/oprtr/oprtr.cuh> #include <gunrock/app/geo/geo_problem.cuh> #include <gunrock/app/geo/geo_spatial.cuh> namespace gunrock { namespace app { namespace geo { /** * @brief Speciflying parameters for Geo Enactor * @param parameters The util::Parameter<...> structure holding all parameter * info \return cudaError_t error message(s), if any */ cudaError_t UseParameters_enactor(util::Parameters &parameters) { cudaError_t retval = cudaSuccess; GUARD_CU(app::UseParameters_enactor(parameters)); return retval; } /** * @brief defination of Geo iteration loop * @tparam EnactorT Type of enactor */ template <typename EnactorT> struct GEOIterationLoop : public IterationLoopBase<EnactorT, Use_FullQ | Push> { typedef typename EnactorT::VertexT VertexT; typedef typename EnactorT::SizeT SizeT; typedef typename EnactorT::ValueT ValueT; typedef typename EnactorT::Problem::GraphT::CsrT CsrT; typedef typename EnactorT::Problem::GraphT::GpT GpT; typedef IterationLoopBase<EnactorT, Use_FullQ | Push> BaseIterationLoop; GEOIterationLoop() : BaseIterationLoop() {} /** * @brief Core computation of Geo, one iteration * @param[in] peer_ Which GPU peers to work on, 0 means local * \return cudaError_t error message(s), if any */ cudaError_t Core(int peer_ = 0) { // -- // Alias variables auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0]; auto &enactor_slice = this->enactor ->enactor_slices[this->gpu_num * this->enactor->num_gpus + peer_]; auto &enactor_stats = enactor_slice.enactor_stats; auto &graph = data_slice.sub_graph[0]; auto &frontier = enactor_slice.frontier; auto &oprtr_parameters = enactor_slice.oprtr_parameters; auto &retval = enactor_stats.retval; auto &latitude = data_slice.latitude; auto &longitude = data_slice.longitude; auto &active = data_slice.active; auto &spatial_iter = data_slice.spatial_iter; auto &geo_complete = data_slice.geo_complete; auto &Dinv = data_slice.Dinv; util::Location target = util::DEVICE; // -- // Define operations /** * @brief Compute "center" of a set of points. 
* * For set X -> * if points == 1; center = point; * if points == 2; center = midpoint; * if points > 2; center = spatial median; */ auto spatial_center_op = [graph, latitude, longitude, Dinv, target, spatial_iter] __host__ __device__(VertexT * v_q, const SizeT &pos) { VertexT v = v_q[pos]; // if no predicted location, and neighbor locations exists // Custom spatial center kernel for geolocation if (!util::isValid(latitude[v]) && !util::isValid(longitude[v])) { SizeT start_edge = graph.CsrT::GetNeighborListOffset(v); SizeT num_neighbors = graph.CsrT::GetNeighborListLength(v); SizeT i = 0; ValueT neighbor_lat[2], neighbor_lon[2]; // for length <=2 use registers for (SizeT e = start_edge; e < start_edge + num_neighbors; e++) { VertexT u = graph.CsrT::GetEdgeDest(e); if (util::isValid(latitude[u]) && util::isValid(longitude[u])) { neighbor_lat[i % 2] = latitude[u]; // last valid latitude neighbor_lon[i % 2] = longitude[u]; // last valid longitude i++; } } SizeT valid_neighbors = i; // If one location found, point at that location if (valid_neighbors == 1) { latitude[v] = neighbor_lat[0]; longitude[v] = neighbor_lon[0]; return; } // If two locations found, compute a midpoint else if (valid_neighbors == 2) { midpoint(neighbor_lat[0], neighbor_lon[0], neighbor_lat[1], neighbor_lon[1], latitude.GetPointer(target), longitude.GetPointer(target), v); return; } // if locations more than 2, compute spatial // median. else if (valid_neighbors > 2) { spatial_median( graph, valid_neighbors, latitude.GetPointer(target), longitude.GetPointer(target), v, Dinv.GetPointer(target), false, target, spatial_iter); } // if no valid locations are found else { latitude[v] = util::PreDefinedValues<ValueT>::InvalidValue; longitude[v] = util::PreDefinedValues<ValueT>::InvalidValue; } } // -- median calculation. 
}; auto status_op = [latitude, longitude, active] __host__ __device__( VertexT * v_q, const SizeT &pos) { VertexT v = v_q[pos]; if (util::isValid(latitude[v]) && util::isValid(longitude[v])) { atomicAdd(&active[0], 1); } }; // Run -- GUARD_CU(frontier.V_Q()->ForAll(spatial_center_op, frontier.queue_length, util::DEVICE, oprtr_parameters.stream)); if (geo_complete) { GUARD_CU(frontier.V_Q()->ForAll(status_op, frontier.queue_length, util::DEVICE, oprtr_parameters.stream)); GUARD_CU(data_slice.active.SetPointer(&data_slice.active_, sizeof(SizeT), util::HOST)); GUARD_CU(data_slice.active.Move(util::DEVICE, util::HOST)); } return retval; } /** * @brief Routine to combine received data and local data * @tparam NUM_VERTEX_ASSOCIATES Number of data associated with each * transmition item, typed VertexT * @tparam NUM_VALUE__ASSOCIATES Number of data associated with each * transmition item, typed ValueT * @param received_length The numver of transmition items received * @param[in] peer_ which peer GPU the data came from * \return cudaError_t error message(s), if any */ template <int NUM_VERTEX_ASSOCIATES, int NUM_VALUE__ASSOCIATES> cudaError_t ExpandIncoming(SizeT &received_length, int peer_) { // ================ INCOMPLETE TEMPLATE - MULTIGPU ==================== auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0]; auto &enactor_slice = this->enactor ->enactor_slices[this->gpu_num * this->enactor->num_gpus + peer_]; auto expand_op = [ // TODO: pass data used by the lambda, e.g.: // distances ] __host__ __device__(VertexT & key, const SizeT &in_pos, VertexT *vertex_associate_ins, ValueT *value__associate_ins) -> bool { // TODO: fill in the lambda to combine received and local data, e.g.: // ValueT in_val = value__associate_ins[in_pos]; // ValueT old_val = atomicMin(distances + key, in_val); // if (old_val <= in_val) // return false; return true; }; cudaError_t retval = BaseIterationLoop::template ExpandIncomingBase<NUM_VERTEX_ASSOCIATES, NUM_VALUE__ASSOCIATES>( received_length, peer_, expand_op); return retval; } bool Stop_Condition(int gpu_num = 0) { auto &enactor_slice = this->enactor->enactor_slices[0]; auto &enactor_stats = enactor_slice.enactor_stats; auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0]; auto &graph = data_slice.sub_graph[0]; auto iter = enactor_stats.iteration; // Anymore work to do? // printf("Predictions active in Stop: %u vs. needed %u.\n", // data_slice.active_, graph.nodes); if (data_slice.geo_complete) { if (data_slice.active_ >= graph.nodes) return true; } else { if (iter >= data_slice.geo_iter) return true; } // else, keep running return false; } }; // end of GEOIteration /** * @brief Geolocation enactor class. 
* @tparam _Problem Problem type we process on * @tparam ARRAY_FLAG Flags for util::Array1D used in the enactor * @tparam cudaHostRegisterFlag Flags for util::Array1D used in the enactor */ template <typename _Problem, util::ArrayFlag ARRAY_FLAG = util::ARRAY_NONE, unsigned int cudaHostRegisterFlag = cudaHostRegisterDefault> class Enactor : public EnactorBase< typename _Problem::GraphT, typename _Problem::GraphT::VertexT, typename _Problem::GraphT::ValueT, ARRAY_FLAG, cudaHostRegisterFlag> { public: typedef _Problem Problem; typedef typename Problem::SizeT SizeT; typedef typename Problem::VertexT VertexT; typedef typename Problem::GraphT GraphT; typedef typename GraphT::VertexT LabelT; typedef typename GraphT::ValueT ValueT; typedef EnactorBase<GraphT, LabelT, ValueT, ARRAY_FLAG, cudaHostRegisterFlag> BaseEnactor; typedef Enactor<Problem, ARRAY_FLAG, cudaHostRegisterFlag> EnactorT; typedef GEOIterationLoop<EnactorT> IterationT; Problem *problem; IterationT *iterations; /** * @brief geo constructor */ Enactor() : BaseEnactor("Geolocation"), problem(NULL) { this->max_num_vertex_associates = 0; this->max_num_value__associates = 1; } /** * @brief geo destructor */ virtual ~Enactor() { /*Release();*/ } /* * @brief Releasing allocated memory space * @param target The location to release memory from * \return cudaError_t error message(s), if any */ cudaError_t Release(util::Location target = util::LOCATION_ALL) { cudaError_t retval = cudaSuccess; GUARD_CU(BaseEnactor::Release(target)); delete[] iterations; iterations = NULL; problem = NULL; return retval; } /** * @brief Initialize the problem. * @param[in] problem The problem object. * @param[in] target Target location of data * \return cudaError_t error message(s), if any */ cudaError_t Init(Problem &problem, util::Location target = util::DEVICE) { cudaError_t retval = cudaSuccess; this->problem = &problem; // Lazy initialization GUARD_CU(BaseEnactor::Init(problem, Enactor_None, 2, NULL, target, false)); for (int gpu = 0; gpu < this->num_gpus; gpu++) { GUARD_CU(util::SetDevice(this->gpu_idx[gpu])); auto &enactor_slice = this->enactor_slices[gpu * this->num_gpus + 0]; auto &graph = problem.sub_graphs[gpu]; GUARD_CU(enactor_slice.frontier.Allocate(graph.nodes, graph.edges, this->queue_factors)); } iterations = new IterationT[this->num_gpus]; for (int gpu = 0; gpu < this->num_gpus; gpu++) { GUARD_CU(iterations[gpu].Init(this, gpu)); } GUARD_CU(this->Init_Threads( this, (CUT_THREADROUTINE) & (GunrockThread<EnactorT>))); return retval; } /** * @brief one run of geo, to be called within GunrockThread * @param thread_data Data for the CPU thread * \return cudaError_t error message(s), if any */ cudaError_t Run(ThreadSlice &thread_data) { gunrock::app::Iteration_Loop<0, 1, IterationT>( thread_data, iterations[thread_data.thread_num]); return cudaSuccess; } /** * @brief Reset enactor * @param[in] target Target location of data * \return cudaError_t error message(s), if any */ cudaError_t Reset(util::Location target = util::DEVICE) { typedef typename GraphT::GpT GpT; cudaError_t retval = cudaSuccess; GUARD_CU(BaseEnactor::Reset(target)); SizeT nodes = this->problem->data_slices[0][0].sub_graph[0].nodes; for (int gpu = 0; gpu < this->num_gpus; gpu++) { if (this->num_gpus == 1) { this->thread_slices[gpu].init_size = nodes; for (int peer_ = 0; peer_ < this->num_gpus; peer_++) { auto &frontier = this->enactor_slices[gpu * this->num_gpus + peer_].frontier; frontier.queue_length = (peer_ == 0) ? 
nodes : 0; if (peer_ == 0) { util::Array1D<SizeT, VertexT> tmp; tmp.Allocate(nodes, target | util::HOST); for (SizeT i = 0; i < nodes; ++i) { tmp[i] = (VertexT)i % nodes; } GUARD_CU(tmp.Move(util::HOST, target)); GUARD_CU(frontier.V_Q()->ForEach( tmp, [] __host__ __device__(VertexT & v, VertexT & i) { v = i; }, nodes, target, 0)); tmp.Release(); } } } else { // MULTIGPU INCOMPLETE } } GUARD_CU(BaseEnactor::Sync()); return retval; } /** * @brief Enacts a geo computing on the specified graph. ... * \return cudaError_t error message(s), if any */ cudaError_t Enact() { cudaError_t retval = cudaSuccess; GUARD_CU(this->Run_Threads(this)); util::PrintMsg("GPU Template Done.", this->flag & Debug); return retval; } }; } // namespace geo } // namespace app } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
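// ---------------------------------------------------------------------------
// Hedged sketch, not taken from geo_spatial.cuh: one plausible form of the
// geometry behind the midpoint() helper that spatial_center_op calls in the
// two-neighbor case, namely the great-circle midpoint of two
// (latitude, longitude) pairs given in degrees. The real Gunrock helper has a
// different signature (it writes directly into the latitude/longitude arrays
// at vertex v) and may use a different formulation; treat this purely as an
// illustration of the math.
#include <math.h>

__host__ __device__ inline void midpoint_sketch(float lat1, float lon1,
                                                float lat2, float lon2,
                                                float *out_lat, float *out_lon) {
  const float deg2rad = 3.14159265358979f / 180.0f;
  lat1 *= deg2rad; lon1 *= deg2rad;
  lat2 *= deg2rad; lon2 *= deg2rad;

  // Cartesian components of the second point relative to the first meridian
  const float bx = cosf(lat2) * cosf(lon2 - lon1);
  const float by = cosf(lat2) * sinf(lon2 - lon1);

  const float lat_m = atan2f(sinf(lat1) + sinf(lat2),
                             sqrtf((cosf(lat1) + bx) * (cosf(lat1) + bx) + by * by));
  const float lon_m = lon1 + atan2f(by, cosf(lat1) + bx);

  *out_lat = lat_m / deg2rad;
  *out_lon = lon_m / deg2rad;
}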
#if !defined(ENABLE_GPU) | !defined(ENABLE_CUDNN) #error "nnconv_cudnn.hpp cannot be compiled without GPU and CUDNN support." #endif #include "nnconv.hpp" #include "datacu.hpp" #include "impl/cudnnhelper.hpp" #include <cassert> #include <algorithm> using namespace vl ; using namespace vl::nn ; using namespace vl::impl ; #define CHECK(x) \ { \ cudnnError = x ; \ if (cudnnError != CUDNN_STATUS_SUCCESS) { \ error = op.context.setError(op.context.getCudaHelper().catchCudnnError(cudnnError, \ STRINGIZE(__FILE__) ":" STRINGIZE(__LINE__))) ; \ goto done ; \ } } // ------------------------------------------------------------------- // Forward // ------------------------------------------------------------------- template<DataType dataType> struct ConvolutionForwardCudnn { vl::ErrorCode operator() (Convolution &op, Tensor output, double outputMult, Tensor const& input, double inputMult, Tensor const& filter, Tensor const& bias) { assert(output) ; assert(input) ; assert(filter) ; typedef typename DataTypeTraits<dataType>::type type ; cudnnTensorDescriptor_t outputDesc, biasDesc, dataDesc ; cudnnFilterDescriptor_t filterDesc ; cudnnConvolutionDescriptor_t convDesc ; bool outputDescInitialized = false ; bool biasDescInitialized = false ; bool dataDescInitialized = false ; bool filterDescInitialized = false ; bool convDescInitialized = false ; void* workSpace = NULL ; int numGroups = input.getDepth() / filter.getDepth() ; int numFiltersPerGroup = filter.getSize() / numGroups ; if (op.dilateX != 1 || op.dilateY != 1) return vl::VLE_Unsupported ; if (op.padLeft != op.padRight) return vl::VLE_Unsupported ; if (op.padTop != op.padBottom) return vl::VLE_Unsupported ; if (filter.getHeight() > input.getHeight()) return vl::VLE_Unsupported ; if (filter.getWidth() > input.getWidth()) return vl::VLE_Unsupported ; cudnnStatus_t cudnnError = CUDNN_STATUS_SUCCESS ; vl::ErrorCode error = vl::VLE_Success ; cudnnHandle_t handle ; // Get CuDNN CHECK(op.context.getCudaHelper().getCudnnHandle(&handle)) ; // Get tensor descripotrs CHECK(cudnnCreateTensorDescriptor(&outputDesc)) ; outputDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptorEx(outputDesc, DataTypeToCudnn<dataType>::dataType , output.getSize(), // sizes numFiltersPerGroup, output.getWidth(), output.getHeight(), output.getHeight()*output.getWidth()*output.getDepth(), //strides output.getHeight()*output.getWidth(), output.getHeight(), 1)) ; CHECK(cudnnCreateTensorDescriptor(&dataDesc)) ; dataDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptorEx(dataDesc, DataTypeToCudnn<dataType>::dataType, input.getSize(), input.getDepth() / numGroups, input.getWidth(), input.getHeight(), input.getHeight()*input.getWidth()*input.getDepth(), //strides input.getHeight()*input.getWidth(), input.getHeight(), 1)) ; CHECK(cudnnCreateFilterDescriptor(&filterDesc)) ; filterDescInitialized = true ; CHECK(cudnnSetFilter4dDescriptor(filterDesc, DataTypeToCudnn<dataType>::dataType, IF_CUDNN_GE5(CUDNN_TENSOR_NCHW COMMA) numFiltersPerGroup, filter.getDepth(), filter.getWidth(), filter.getHeight())) ; if (bias) { CHECK(cudnnCreateTensorDescriptor(&biasDesc)) ; biasDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptor(biasDesc, CUDNN_TENSOR_NCHW, DataTypeToCudnn<dataType>::dataType , 1, bias.getNumElements() / numGroups, 1, 1)) ; } // Get convolution descriptor CHECK(cudnnCreateConvolutionDescriptor(&convDesc)) ; convDescInitialized = true ; CHECK(cudnnSetConvolution2dDescriptor(convDesc, op.padLeft, op.padTop, op.strideX, op.strideY, 1,1, // upscale CUDNN_CROSS_CORRELATION 
IF_CUDNN_GE6(COMMA DataTypeToCudnn<dataType>::dataType))) ; // Sanity check #if 1 { int n, c, h, w ; cudnnGetConvolution2dForwardOutputDim(convDesc, dataDesc, filterDesc, &n, &c, &w, &h) ; bool sane = output.getSize() == n && numFiltersPerGroup == c && output.getWidth() == w && output.getHeight() == h ; assert(sane) ; } #endif op.context.getCudaHelper().cudnnConvolutionFwdWorkSpaceUsed = 0 ; op.context.getCudaHelper().cudnnConvolutionBwdFilterWorkSpaceUsed = 0 ; op.context.getCudaHelper().cudnnConvolutionBwdDataWorkSpaceUsed = 0 ; if (!op.context.getCudaHelper().cudnnConvolutionFwdSpecificAlgo) { // Determine algorithm automatically CHECK(cudnnGetConvolutionForwardAlgorithm(handle, dataDesc, filterDesc, convDesc, outputDesc, op.context.getCudaHelper().cudnnConvolutionFwdPreference, op.context.getCudaHelper().cudnnConvolutionFwdWorkSpaceLimit, &op.context.getCudaHelper().cudnnConvolutionFwdAlgo)) ; } // Get workspace size CHECK(cudnnGetConvolutionForwardWorkspaceSize(handle, dataDesc, filterDesc, convDesc, outputDesc, op.context.getCudaHelper().cudnnConvolutionFwdAlgo, &op.context.getCudaHelper().cudnnConvolutionFwdWorkSpaceUsed)) ; // Get workspace if (op.context.getCudaHelper().cudnnConvolutionFwdWorkSpaceUsed > 0) { workSpace = op.context.getWorkspace(vl::VLDT_GPU, op.context.getCudaHelper().cudnnConvolutionFwdWorkSpaceUsed) ; if (workSpace == NULL) { error = op.context.getLastError() ; goto done ; } } // Perform convolution for each filter group for (int g = 0 ; g < numGroups ; ++g) { ptrdiff_t dataGrpOffset = (input.getHeight() * input.getWidth() * filter.getDepth()) * g ; ptrdiff_t filterGrpOffset = (filter.getHeight() * filter.getWidth() * filter.getDepth()) * numFiltersPerGroup * g ; ptrdiff_t outputGrpOffset = (output.getHeight() * output.getWidth() * numFiltersPerGroup) * g ; ptrdiff_t biasGrpOffset = numFiltersPerGroup * g ; type alpha = inputMult ; type beta = outputMult ; CHECK(cudnnConvolutionForward(handle, &alpha, dataDesc, (type const*)input.getMemory() + dataGrpOffset, filterDesc, (type const*)filter.getMemory() + filterGrpOffset, convDesc, op.context.getCudaHelper().cudnnConvolutionFwdAlgo, workSpace, op.context.getCudaHelper().cudnnConvolutionFwdWorkSpaceUsed, &beta, outputDesc, (type*)output.getMemory() + outputGrpOffset)) ; if (bias) { type alpha = 1.0f ; type beta = 1.0f ; #if (CUDNN_VERSION < 4000) CHECK(cudnnAddTensor(handle, CUDNN_ADD_SAME_C, &alpha, biasDesc, (type const*)bias.getMemory() + biasGrpOffset, &beta, outputDesc, (type*)output.getMemory() + outputGrpOffset)) ; #else CHECK(cudnnAddTensor(handle, &alpha, biasDesc, (type const*)bias.getMemory() + biasGrpOffset, &beta, outputDesc, (type*)output.getMemory() + outputGrpOffset)) ; #endif } } /* cleanup */ done: if (convDescInitialized) { cudnnDestroyConvolutionDescriptor(convDesc) ; } if (filterDescInitialized) { cudnnDestroyFilterDescriptor(filterDesc) ; } if (dataDescInitialized) { cudnnDestroyTensorDescriptor(dataDesc) ; } if (biasDescInitialized) { cudnnDestroyTensorDescriptor(biasDesc) ; } if (outputDescInitialized) { cudnnDestroyTensorDescriptor(outputDesc) ; } return op.context.passError(error, __func__) ; } } ; // ------------------------------------------------------------------- // Backward // ------------------------------------------------------------------- template<DataType dataType> struct ConvolutionBackwardCudnn { vl::ErrorCode operator() (Convolution &op, Tensor derInput, Tensor derFilter, Tensor derBias, Tensor const &input, Tensor const &filter, Tensor const &derOutput) { typedef typename 
DataTypeTraits<dataType>::type type ; /* no derInputDesc needed as same as dataDesc */ cudnnTensorDescriptor_t dataDesc, derBiasDesc, derOutputDesc ; cudnnFilterDescriptor_t filterDesc ; cudnnConvolutionDescriptor_t convDesc ; bool dataDescInitialized = false ; bool derBiasDescInitialized = false ; bool derOutputDescInitialized = false ; bool filterDescInitialized = false ; bool convDescInitialized = false ; #if (CUDNN_VERSION >= 3000) void* workSpace = NULL ; size_t workSpaceSize = 0 ; #endif ptrdiff_t numGroups = 1 ; ptrdiff_t numFiltersPerGroup = 0 ; ptrdiff_t filterVolume = 0 ; if (op.dilateX != 1 || op.dilateY != 1) return vl::VLE_Unsupported ; if (op.padLeft != op.padRight) return vl::VLE_Unsupported ; if (op.padTop != op.padBottom) return vl::VLE_Unsupported ; cudnnStatus_t cudnnError = CUDNN_STATUS_SUCCESS ; vl::ErrorCode error = vl::VLE_Success ; cudnnHandle_t handle ; // Get CuDNN CHECK(op.context.getCudaHelper().getCudnnHandle(&handle)) ; // Get the dimensions of the tensrors involved // If derInput is specified (hence comptued as output), use this // tensor as a basis to compute such dimensions, otherwise use derFilter. if (derInput) { assert(filter) ; numGroups = derInput.getDepth() / filter.getDepth() ; numFiltersPerGroup = filter.getSize() / numGroups ; filterVolume = filter.getHeight() * filter.getWidth() * filter.getDepth() ; CHECK(cudnnCreateTensorDescriptor(&dataDesc)) ; dataDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptorEx(dataDesc, DataTypeToCudnn<dataType>::dataType , derInput.getSize(), derInput.getDepth() / numGroups, derInput.getWidth(), derInput.getHeight(), derInput.getHeight()*derInput.getWidth()*derInput.getDepth(), //strides derInput.getHeight()*derInput.getWidth(), derInput.getHeight(), 1)) ; CHECK(cudnnCreateFilterDescriptor(&filterDesc)) ; filterDescInitialized = true ; CHECK(cudnnSetFilter4dDescriptor(filterDesc, DataTypeToCudnn<dataType>::dataType , IF_CUDNN_GE5(CUDNN_TENSOR_NCHW COMMA) numFiltersPerGroup, filter.getDepth(), filter.getWidth(), filter.getHeight())) ; } else if (derFilter) { assert(input) ; numGroups = input.getDepth() / derFilter.getDepth() ; numFiltersPerGroup = derFilter.getSize() / numGroups ; filterVolume = derFilter.getHeight() * derFilter.getWidth() * derFilter.getDepth() ; CHECK(cudnnCreateTensorDescriptor(&dataDesc)) ; dataDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptorEx(dataDesc, DataTypeToCudnn<dataType>::dataType , input.getSize(), input.getDepth() / numGroups, input.getWidth(), input.getHeight(), input.getHeight()*input.getWidth()*input.getDepth(), //strides input.getHeight()*input.getWidth(), input.getHeight(), 1)) ; CHECK(cudnnCreateFilterDescriptor(&filterDesc)) ; filterDescInitialized = true ; CHECK(cudnnSetFilter4dDescriptor(filterDesc, DataTypeToCudnn<dataType>::dataType , IF_CUDNN_GE5(CUDNN_TENSOR_NCHW COMMA) numFiltersPerGroup, derFilter.getDepth(), derFilter.getWidth(), derFilter.getHeight())) ; } CHECK(cudnnCreateConvolutionDescriptor(&convDesc)) ; convDescInitialized = true ; CHECK(cudnnSetConvolution2dDescriptor(convDesc, op.padLeft, op.padTop, op.strideX, op.strideY, 1,1, // upscale CUDNN_CROSS_CORRELATION IF_CUDNN_GE6(COMMA DataTypeToCudnn<dataType>::dataType))) ; // Must have derOutput for all derivatives assert(derOutput) ; CHECK(cudnnCreateTensorDescriptor(&derOutputDesc)) ; derOutputDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptorEx(derOutputDesc, DataTypeToCudnn<dataType>::dataType , derOutput.getSize(), // sizes numFiltersPerGroup, derOutput.getWidth(), derOutput.getHeight(), 
derOutput.getHeight()*derOutput.getWidth()*derOutput.getDepth(), //strides derOutput.getHeight()*derOutput.getWidth(), derOutput.getHeight(), 1)) ; // for derivatives w.r.t. bias if (derBias) { CHECK(cudnnCreateTensorDescriptor(&derBiasDesc)) ; derBiasDescInitialized = true ; CHECK(cudnnSetTensor4dDescriptor(derBiasDesc, CUDNN_TENSOR_NCHW, DataTypeToCudnn<dataType>::dataType , 1, derBias.getNumElements() / numGroups, 1, 1)) ; } op.context.getCudaHelper().cudnnConvolutionFwdWorkSpaceUsed = 0 ; op.context.getCudaHelper().cudnnConvolutionBwdFilterWorkSpaceUsed = 0 ; op.context.getCudaHelper().cudnnConvolutionBwdDataWorkSpaceUsed = 0 ; #if (CUDNN_VERSION >= 3000) if (derFilter) { // Get filter derivatives algorithm CHECK(cudnnGetConvolutionBackwardFilterAlgorithm (handle, dataDesc, derOutputDesc, convDesc, filterDesc, op.context.getCudaHelper().cudnnConvolutionBwdFilterPreference, op.context.getCudaHelper().cudnnConvolutionBwdFilterWorkSpaceLimit, &op.context.getCudaHelper().cudnnConvolutionBwdFilterAlgo)) ; // Get workspace size CHECK(cudnnGetConvolutionBackwardFilterWorkspaceSize (handle, dataDesc, derOutputDesc, convDesc, filterDesc, op.context.getCudaHelper().cudnnConvolutionBwdFilterAlgo, &op.context.getCudaHelper().cudnnConvolutionBwdFilterWorkSpaceUsed)) ; workSpaceSize = std::max(workSpaceSize, op.context.getCudaHelper().cudnnConvolutionBwdFilterWorkSpaceUsed) ; } if (derInput) { // Get data derivatives CHECK(cudnnGetConvolutionBackwardDataAlgorithm (handle, filterDesc, derOutputDesc, convDesc, dataDesc, op.context.getCudaHelper().cudnnConvolutionBwdDataPreference, op.context.getCudaHelper().cudnnConvolutionBwdDataWorkSpaceLimit, &op.context.getCudaHelper().cudnnConvolutionBwdDataAlgo)) ; // Get workspace size CHECK(cudnnGetConvolutionBackwardDataWorkspaceSize (handle, filterDesc, derOutputDesc, convDesc, dataDesc, op.context.getCudaHelper().cudnnConvolutionBwdDataAlgo, &op.context.getCudaHelper().cudnnConvolutionBwdDataWorkSpaceUsed)) ; workSpaceSize = std::max(workSpaceSize, op.context.getCudaHelper().cudnnConvolutionBwdDataWorkSpaceUsed) ; } // Get workspace if (workSpaceSize > 0) { workSpace = op.context.getWorkspace(vl::VLDT_GPU, workSpaceSize) ; if (workSpace == NULL) { error = op.context.getLastError() ; goto done ; } } #endif // Perform backward convolution for each filter group for (int g = 0 ; g < numGroups ; ++g) { ptrdiff_t filterGrpOffset = filterVolume * numFiltersPerGroup * g ; ptrdiff_t derOutputGrpOffset = (derOutput.getHeight() * derOutput.getWidth() * numFiltersPerGroup) * g ; if (derBias) { ptrdiff_t derBiasGrpOffset = numFiltersPerGroup * g ; type alpha = 1 ; type beta = 0 ; CHECK(cudnnConvolutionBackwardBias (handle, &alpha, derOutputDesc, (type const*)derOutput.getMemory() + derOutputGrpOffset, &beta, derBiasDesc, (type*)derBias.getMemory() + derBiasGrpOffset)) ; } if (derFilter) { ptrdiff_t dataGrpOffset = (input.getHeight() * input.getWidth() * derFilter.getDepth()) * g ; type alpha = 1 ; type beta = 0 ; #if (CUDNN_VERSION >= 3000) CHECK( IF_CUDNN_GE4(cudnnConvolutionBackwardFilter) IF_CUDNN_GE3_LT4(cudnnConvolutionBackwardFilter_v3) (handle, &alpha, dataDesc, (type const*)input.getMemory() + dataGrpOffset, derOutputDesc, (type const*)derOutput.getMemory() + derOutputGrpOffset, convDesc, op.context.getCudaHelper().cudnnConvolutionBwdFilterAlgo, workSpace, workSpaceSize, &beta, filterDesc, (type*)derFilter.getMemory() + filterGrpOffset)) ; #else CHECK(cudnnConvolutionBackwardFilter (handle, &alpha, dataDesc, (type const*)input.getMemory() + dataGrpOffset, 
derOutputDesc, (type const*)derOutput.getMemory() + derOutputGrpOffset, convDesc, &beta, filterDesc, (type*)derFilter.getMemory() + filterGrpOffset)) ; #endif } if (derInput) { ptrdiff_t dataGrpOffset = (derInput.getHeight() * derInput.getWidth() * filter.getDepth()) * g ; type alpha = 1 ; type beta = 0 ; #if (CUDNN_VERSION >= 3000) CHECK( IF_CUDNN_GE4(cudnnConvolutionBackwardData) IF_CUDNN_GE3_LT4(cudnnConvolutionBackwardData_v3) (handle, &alpha, filterDesc, (type const*)filter.getMemory() + filterGrpOffset, derOutputDesc, (type const*)derOutput.getMemory() + derOutputGrpOffset, convDesc, op.context.getCudaHelper().cudnnConvolutionBwdDataAlgo, workSpace, workSpaceSize, &beta, dataDesc, (type*)derInput.getMemory() + dataGrpOffset)) ; #else CHECK(cudnnConvolutionBackwardData (handle, &alpha, filterDesc, filter.getMemory() + filterGrpOffset, derOutputDesc, derOutput.getMemory() + derOutputGrpOffset, convDesc, &beta, dataDesc, derInput.getMemory() + dataGrpOffset)) ; #endif } } done: if (convDescInitialized) { cudnnDestroyConvolutionDescriptor(convDesc) ; } if (filterDescInitialized) { cudnnDestroyFilterDescriptor(filterDesc) ; } if (derOutputDescInitialized) { cudnnDestroyTensorDescriptor(derOutputDesc) ; } if (derBiasDescInitialized) { cudnnDestroyTensorDescriptor(derBiasDesc) ; } if (dataDescInitialized) { cudnnDestroyTensorDescriptor(dataDesc) ; } return op.context.passError(error, __func__) ; } } ;
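// -------------------------------------------------------------------
// Hedged sketch, not part of the original file: the output geometry that the
// forward-path sanity check compares against
// cudnnGetConvolution2dForwardOutputDim. For the symmetric padding this file
// requires (padLeft == padRight, padTop == padBottom) and unit dilation,
// cuDNN's cross-correlation output size reduces to
// floor((in + 2*pad - filter)/stride) + 1. The struct and function names
// below are illustrative only.
struct ConvOutputGeometry { int height ; int width ; } ;

static inline ConvOutputGeometry
expectedConvOutput(int inHeight, int inWidth,
                   int filterHeight, int filterWidth,
                   int padTop, int padLeft,
                   int strideY, int strideX)
{
  ConvOutputGeometry g ;
  g.height = (inHeight + 2*padTop  - filterHeight) / strideY + 1 ;
  g.width  = (inWidth  + 2*padLeft - filterWidth ) / strideX + 1 ;
  return g ;
}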
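// -------------------------------------------------------------------
// Hedged sketch with illustrative names, not part of the original file: how
// the per-group pointer offsets used in both the forward and backward loops
// above are formed. With numGroups = inputDepth / filterDepth and
// numFiltersPerGroup = numFilters / numGroups, group g reads a contiguous
// depth slice of the input and writes a contiguous channel slice of the
// output, so every offset is just the per-group volume times g.
// (ptrdiff_t is used here as elsewhere in this file.)
struct GroupOffsets { ptrdiff_t data ; ptrdiff_t filter ; ptrdiff_t output ; ptrdiff_t bias ; } ;

static inline GroupOffsets
groupOffsets(int g,
             int inputHeight, int inputWidth,
             int filterHeight, int filterWidth, int filterDepth,
             int outputHeight, int outputWidth,
             int numFiltersPerGroup)
{
  GroupOffsets o ;
  o.data   = (ptrdiff_t)inputHeight  * inputWidth  * filterDepth * g ;
  o.filter = (ptrdiff_t)filterHeight * filterWidth * filterDepth * numFiltersPerGroup * g ;
  o.output = (ptrdiff_t)outputHeight * outputWidth * numFiltersPerGroup * g ;
  o.bias   = (ptrdiff_t)numFiltersPerGroup * g ;
  return o ;
}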