#include <stdio.h> #include <stdlib.h> #include <nvbio/basic/timer.h> #include <nvbio/basic/console.h> #include <nvbio/basic/vector.h> #include <nvbio/basic/shared_pointer.h> #include <nvbio/basic/dna.h> #include <nvbio/strings/string_set.h> #include <nvbio/strings/infix.h> #include <nvbio/strings/seeds.h> #include <nvbio/fmindex/filter.h> #include <nvbio/io/sequence/sequence.h> #include <nvbio/io/sequence/sequence_encoder.h> #include <nvbio/io/fmindex/fmindex.h> #include <nvbio/alignment/alignment.h> #include <nvbio/alignment/batched.h> #include "util.h" using namespace nvbio; // alignment params // struct Params { uint32 seed_len; uint32 seed_intv; uint32 merge_intv; }; // query stats // struct Stats { Stats() : time(0), extract_time(0), rank_time(0), locate_time(0), align_time(0), reads(0), aligned(0), queries(0), occurrences(0) {} float time; float extract_time; float rank_time; float locate_time; float align_time; uint64 reads; uint64 aligned; uint64 queries; uint64 occurrences; }; // the pipeline state // struct Pipeline { typedef io::FMIndexDataDevice::fm_index_type fm_index_type; typedef FMIndexFilterDevice<fm_index_type> fm_filter_type; Params params; // program options SharedPointer<io::SequenceDataDevice> ref_data; // reference data SharedPointer<io::FMIndexDataDevice> fm_data; // fm-index data fm_filter_type fm_filter; // fm-index filter }; // transform an (index-pos,seed-id) hit into a diagonal (text-pos = index-pos - seed-pos, read-id) struct hit_to_diagonal { typedef uint2 argument_type; typedef uint2 result_type; // constructor NVBIO_FORCEINLINE NVBIO_HOST_DEVICE hit_to_diagonal(const string_set_infix_coord_type* _seed_coords) : seed_coords(_seed_coords) {} // functor operator NVBIO_FORCEINLINE NVBIO_HOST_DEVICE uint2 operator() (const uint2 hit) const { const uint32 index_pos = hit.x; const uint32 seed_id = hit.y; const string_set_infix_coord_type seed = seed_coords[ seed_id ]; const uint32 read_pos = infix_begin( seed ); const uint32 read_id = string_id( seed ); return make_uint2( index_pos - read_pos, read_id ); } const string_set_infix_coord_type* seed_coords; }; // extract a set of uniformly spaced seeds from a string-set and return it as an InfixSet // template <typename system_tag, typename string_set_type> InfixSet<string_set_type, const string_set_infix_coord_type*> extract_seeds( const string_set_type string_set, // the input string-set const uint32 seed_len, // the seeds length const uint32 seed_interval, // the spacing between seeds nvbio::vector<system_tag,string_set_infix_coord_type>& seed_coords) // the output vector of seed coordinates { // enumerate all seeds const uint32 n_seeds = enumerate_string_set_seeds( string_set, uniform_seeds_functor<>( seed_len, seed_interval ), seed_coords ); // and build the output infix-set return InfixSet<string_set_type, const string_set_infix_coord_type*>( n_seeds, string_set, nvbio::plain_view( seed_coords ) ); } // a functor to extract the read infixes from the hit diagonals // struct read_infixes { // constructor NVBIO_HOST_DEVICE read_infixes(const io::ConstSequenceDataView reads) : m_reads( reads ) {} // functor operator NVBIO_HOST_DEVICE string_infix_coord_type operator() (const uint2 diagonal) const { const io::SequenceDataAccess<DNA_N> reads( m_reads ); const uint32 read_id = diagonal.y; // fetch the read range return reads.get_range( read_id ); } const io::ConstSequenceDataView m_reads; }; // a functor to extract the genome infixes from the hit diagonals // template <uint32 BAND_LEN> struct genome_infixes { typedef 
const io::SequenceDataAccess<DNA_N,io::ConstSequenceDataView> read_access_type; // constructor NVBIO_HOST_DEVICE genome_infixes(const uint32 genome_len, const io::ConstSequenceDataView reads) : m_genome_len( genome_len ), m_reads( reads ) {} // functor operator NVBIO_HOST_DEVICE string_infix_coord_type operator() (const uint2 diagonal) const { const io::SequenceDataAccess<DNA_N> reads( m_reads ); const uint32 read_id = diagonal.y; const uint32 text_pos = diagonal.x; // fetch the read range const uint2 read_range = reads.get_range( read_id ); const uint32 read_len = read_range.y - read_range.x; // compute the segment of text to align to const uint32 genome_begin = text_pos > BAND_LEN/2 ? text_pos - BAND_LEN/2 : 0u; const uint32 genome_end = nvbio::min( genome_begin + read_len + BAND_LEN, m_genome_len ); return make_uint2( genome_begin, genome_end ); } const uint32 m_genome_len; const io::ConstSequenceDataView m_reads; }; // a functor to extract the score from a sink // struct sink_score { typedef aln::BestSink<int16> argument_type; typedef int16 result_type; // functor operator NVBIO_HOST_DEVICE int16 operator() (const aln::BestSink<int16>& sink) const { return sink.score; } }; // perform q-gram index mapping // void map( Pipeline& pipeline, const io::SequenceDataDevice& reads, nvbio::vector<device_tag,int16>& best_scores, Stats& stats) { typedef io::SequenceDataAccess<DNA> genome_access_type; typedef genome_access_type::sequence_stream_type genome_string; typedef io::SequenceDataAccess<DNA_N> read_access_type; typedef read_access_type::sequence_string_set_type read_string_set_type; typedef read_access_type::sequence_stream_type read_stream; typedef string_set_infix_coord_type infix_coord_type; typedef nvbio::vector<device_tag,infix_coord_type> infix_vector_type; typedef InfixSet<read_string_set_type, const string_set_infix_coord_type*> seed_string_set_type; // fetch the program options const Params& params = pipeline.params; // fetch the genome string const genome_access_type genome_access( *pipeline.ref_data ); const uint32 genome_len = genome_access.bps(); const genome_string genome( genome_access.sequence_stream() ); // fetch an fm-index view const Pipeline::fm_index_type fm_index = pipeline.fm_data->index(); // fetch the fm-index filter Pipeline::fm_filter_type& fm_filter = pipeline.fm_filter; // prepare some vectors to store the query qgrams infix_vector_type seed_coords; Timer timer; timer.start(); const read_access_type reads_access( reads ); const read_string_set_type read_string_set = reads_access.sequence_string_set(); const seed_string_set_type seed_string_set = extract_seeds( read_string_set, params.seed_len, params.seed_intv, seed_coords ); cudaDeviceSynchronize(); timer.stop(); const float extract_time = timer.seconds(); stats.queries += seed_string_set.size(); stats.extract_time += extract_time; // // search the sorted seeds with the FM-index filter // const uint32 batch_size = 16*1024*1024; typedef uint2 hit_type; // each hit will be an (index-pos,seed-id) coordinate pair // prepare storage for the output hits nvbio::vector<device_tag,hit_type> hits( batch_size ); nvbio::vector<device_tag,uint32> out_reads( batch_size ); nvbio::vector<device_tag,int16> out_scores( batch_size ); nvbio::vector<device_tag,uint8> temp_storage; timer.start(); // first step: rank the query seeds const uint64 n_hits = fm_filter.rank( fm_index, seed_string_set ); cudaDeviceSynchronize(); timer.stop(); stats.rank_time += timer.seconds(); stats.occurrences += n_hits; nvbio::vector<device_tag, 
aln::BestSink<int16> > sinks( batch_size ); nvbio::vector<device_tag,string_infix_coord_type> genome_infix_coords( batch_size ); nvbio::vector<device_tag,string_infix_coord_type> read_infix_coords( batch_size ); static const uint32 BAND_LEN = 31; // loop through large batches of hits and locate & merge them for (uint64 hits_begin = 0; hits_begin < n_hits; hits_begin += batch_size) { const uint64 hits_end = nvbio::min( hits_begin + batch_size, n_hits ); timer.start(); fm_filter.locate( hits_begin, hits_end, hits.begin() ); cudaDeviceSynchronize(); timer.stop(); stats.locate_time += timer.seconds(); // transform the (index-pos,seed-id) hit coordinates into diagonals, (text-pos = index-pos - seed-pos, read-id) thrust::transform( hits.begin(), hits.begin() + hits_end - hits_begin, hits.begin(), hit_to_diagonal( nvbio::plain_view( seed_coords ) ) ); timer.start(); // build the set of read infixes thrust::transform( hits.begin(), hits.begin() + hits_end - hits_begin, read_infix_coords.begin(), read_infixes( nvbio::plain_view( reads ) ) ); // build the set of genome infixes thrust::transform( hits.begin(), hits.begin() + hits_end - hits_begin, genome_infix_coords.begin(), genome_infixes<BAND_LEN>( genome_len, nvbio::plain_view( reads ) ) ); typedef nvbio::vector<device_tag,string_infix_coord_type>::const_iterator infix_iterator; const SparseStringSet<read_stream,infix_iterator> read_infix_set( hits_end - hits_begin, reads_access.sequence_stream(), read_infix_coords.begin() ); const SparseStringSet<genome_string,infix_iterator> genome_infix_set( hits_end - hits_begin, genome, genome_infix_coords.begin() ); typedef aln::MyersTag<5u> myers_dna5_tag; //const aln::SimpleGotohScheme gotoh( 2, -2, -5, -3 ); aln::batch_banded_alignment_score<BAND_LEN>( aln::make_edit_distance_aligner<aln::SEMI_GLOBAL, myers_dna5_tag>(), //aln::make_gotoh_aligner<aln::LOCAL>( gotoh ), read_infix_set, genome_infix_set, sinks.begin(), aln::DeviceThreadScheduler(), reads.max_sequence_len(), reads.max_sequence_len() + BAND_LEN ); cudaDeviceSynchronize(); timer.stop(); stats.align_time += timer.seconds(); // compute the best score for each read in this batch; // note that we divide the string-id by 2 to merge results coming from the forward // and reverse-complemented strands cuda::reduce_by_key( hits_end - hits_begin, thrust::make_transform_iterator( hits.begin(), make_composition_functor( divide_by_two(), component_functor<hit_type>( 1u ) ) ), // take the second component divided by 2 thrust::make_transform_iterator( sinks.begin(), sink_score() ), out_reads.begin(), out_scores.begin(), thrust::maximum<int16>(), temp_storage ); // and keep track of the global best update_scores( hits_end - hits_begin, nvbio::plain_view( out_reads ), nvbio::plain_view( out_scores ), nvbio::plain_view( best_scores ) ); log_verbose(stderr, "\r processed %6.2f %% reads", 100.0f * float( hits_end ) / float( n_hits )); } log_verbose_cont(stderr, "\n"); } // main test entry point // int main(int argc, char* argv[]) { // // perform some basic option parsing // const uint32 batch_reads = 1*1024*1024; const uint32 batch_bps = 100*1024*1024; const char* reads = argv[argc-1]; const char* index = argv[argc-2]; Params params; params.seed_len = 22; params.seed_intv = 10; params.merge_intv = 16; uint32 max_reads = uint32(-1); int16 score_threshold = -20; for (int i = 0; i < argc; ++i) { if (strcmp( argv[i], "-s" ) == 0) { params.seed_len = uint32( atoi( argv[++i] ) ); params.seed_intv = uint32( atoi( argv[++i] ) ); } if (strcmp( argv[i], "-m" ) == 0) 
params.merge_intv = uint32( atoi( argv[++i] ) ); else if (strcmp( argv[i], "-max-reads" ) == 0) max_reads = uint32( atoi( argv[++i] ) ); else if (strcmp( argv[i], "-t" ) == 0) score_threshold = int16( atoi( argv[++i] ) ); } io::SequenceDataHost h_ref; if (!io::load_sequence_file( DNA, &h_ref, index )) { log_error(stderr, " failed loading reference \"%s\"\n", index); return 1u; } io::FMIndexDataHost h_fmi; if (!h_fmi.load( index, io::FMIndexData::FORWARD | io::FMIndexData::SA )) { log_error(stderr, " failed loading index \"%s\"\n", index); return 1u; } Pipeline pipeline; // store the program options pipeline.params = params; // build its device version pipeline.ref_data = new io::SequenceDataDevice( h_ref ); // build its device version pipeline.fm_data = new io::FMIndexDataDevice( h_fmi, io::FMIndexData::FORWARD | io::FMIndexData::SA ); // open a read file log_info(stderr, " opening reads file... started\n"); SharedPointer<io::SequenceDataStream> read_data_file( io::open_sequence_file( reads, io::Phred33, 2*max_reads, uint32(-1), io::SequenceEncoding( io::FORWARD | io::REVERSE_COMPLEMENT ) ) ); // check whether the file opened correctly if (read_data_file == NULL || read_data_file->is_ok() == false) { log_error(stderr, " failed opening file \"%s\"\n", reads); return 1u; } log_info(stderr, " opening reads file... done\n"); // keep stats Stats stats; io::SequenceDataHost h_read_data; while (1) { // load a batch of reads if (io::next( DNA_N, &h_read_data, read_data_file.get(), batch_reads, batch_bps ) == 0) break; log_info(stderr, " loading reads... started\n"); // copy it to the device const io::SequenceDataDevice d_read_data = h_read_data; const uint32 n_reads = d_read_data.size() / 2; log_info(stderr, " loading reads... done\n"); log_info(stderr, " %u reads\n", n_reads); const int16 worst_score = Field_traits<int16>::min(); nvbio::vector<device_tag,int16> best_scores( n_reads, worst_score ); nvbio::vector<device_tag,uint8> temp_storage; Timer timer; timer.start(); map( pipeline, d_read_data, best_scores, stats ); timer.stop(); const float time = timer.seconds(); // accumulate the number of aligned reads stats.reads += n_reads; stats.time += time; // count how many reads have a score >= score_threshold const uint32 n_aligned = cuda::reduce( n_reads, thrust::make_transform_iterator( best_scores.begin(), above_threshold( score_threshold ) ), thrust::plus<uint32>(), temp_storage ); stats.aligned += n_aligned; log_info(stderr, " aligned %6.2f %% reads (%6.2f K reads/s)\n", 100.0f * float( stats.aligned ) / float( stats.reads ), (1.0e-3f * float( stats.reads )) / stats.time); log_verbose(stderr, " breakdown:\n"); log_verbose(stderr, " extract throughput : %.2f B seeds/s\n", (1.0e-9f * float( stats.queries )) / stats.extract_time); log_verbose(stderr, " rank throughput : %6.2f K reads/s\n", (1.0e-3f * float( stats.reads )) / stats.rank_time); log_verbose(stderr, " : %6.2f B seeds/s\n", (1.0e-9f * float( stats.queries )) / stats.rank_time); log_verbose(stderr, " locate throughput : %6.2f K reads/s\n", (1.0e-3f * float( stats.reads )) / stats.locate_time); log_verbose(stderr, " align throughput : %6.2f K reads/s\n", (1.0e-3f * float( stats.reads )) / stats.align_time); log_verbose(stderr, " : %6.2f M hits/s\n", (1.0e-6f * float( stats.occurrences )) / stats.align_time); log_verbose(stderr, " occurrences : %.3f B\n", 1.0e-9f * float( stats.occurrences ) ); } return 0; }
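// ---------------------------------------------------------------------------------------------
// Note: the sample above pulls a few small helpers from "util.h", which is not part of this
// snippet (above_threshold, divide_by_two, update_scores). The two functors below are a
// hypothetical, minimal sketch of what the first two could look like, inferred purely from how
// the sample uses them; the actual util.h shipped with nvbio may differ.
// ---------------------------------------------------------------------------------------------

// returns 1 for reads whose best score clears the threshold, 0 otherwise;
// used with cuda::reduce + thrust::plus<uint32>() to count the aligned reads
struct above_threshold
{
    typedef int16  argument_type;
    typedef uint32 result_type;

    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    above_threshold(const int16 _threshold) : threshold( _threshold ) {}

    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    uint32 operator() (const int16 score) const { return score >= threshold ? 1u : 0u; }

    int16 threshold;
};

// divides a string id by two, merging the forward and reverse-complement strands of the same
// input read (the reader interleaves both strands, so read_id/2 identifies the original read)
struct divide_by_two
{
    typedef uint32 argument_type;
    typedef uint32 result_type;

    NVBIO_FORCEINLINE NVBIO_HOST_DEVICE
    uint32 operator() (const uint32 read_id) const { return read_id / 2u; }
};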
typedef struct { int hashbitlen; unsigned long long databitlen; unsigned long long datasize_in_buffer; uint64_t x[8][2]; unsigned char buffer[64]; } jhHashState; __constant__ unsigned char d_JH256_H0[512] = { 0xeb, 0x98, 0xa3, 0x41, 0x2c, 0x20, 0xd3, 0xeb, 0x92, 0xcd, 0xbe, 0x7b, 0x9c, 0xb2, 0x45, 0xc1, 0x1c, 0x93, 0x51, 0x91, 0x60, 0xd4, 0xc7, 0xfa, 0x26, 0x0, 0x82, 0xd6, 0x7e, 0x50, 0x8a, 0x3, 0xa4, 0x23, 0x9e, 0x26, 0x77, 0x26, 0xb9, 0x45, 0xe0, 0xfb, 0x1a, 0x48, 0xd4, 0x1a, 0x94, 0x77, 0xcd, 0xb5, 0xab, 0x26, 0x2, 0x6b, 0x17, 0x7a, 0x56, 0xf0, 0x24, 0x42, 0xf, 0xff, 0x2f, 0xa8, 0x71, 0xa3, 0x96, 0x89, 0x7f, 0x2e, 0x4d, 0x75, 0x1d, 0x14, 0x49, 0x8, 0xf7, 0x7d, 0xe2, 0x62, 0x27, 0x76, 0x95, 0xf7, 0x76, 0x24, 0x8f, 0x94, 0x87, 0xd5, 0xb6, 0x57, 0x47, 0x80, 0x29, 0x6c, 0x5c, 0x5e, 0x27, 0x2d, 0xac, 0x8e, 0xd, 0x6c, 0x51, 0x84, 0x50, 0xc6, 0x57, 0x5, 0x7a, 0xf, 0x7b, 0xe4, 0xd3, 0x67, 0x70, 0x24, 0x12, 0xea, 0x89, 0xe3, 0xab, 0x13, 0xd3, 0x1c, 0xd7, 0x69 }; __constant__ unsigned char d_E8_rc[42][32] = { {0x72, 0xd5, 0xde, 0xa2, 0xdf, 0x15, 0xf8, 0x67, 0x7b, 0x84, 0x15, 0xa, 0xb7, 0x23, 0x15, 0x57, 0x81, 0xab, 0xd6, 0x90, 0x4d, 0x5a, 0x87, 0xf6, 0x4e, 0x9f, 0x4f, 0xc5, 0xc3, 0xd1, 0x2b, 0x40}, {0xea, 0x98, 0x3a, 0xe0, 0x5c, 0x45, 0xfa, 0x9c, 0x3, 0xc5, 0xd2, 0x99, 0x66, 0xb2, 0x99, 0x9a, 0x66, 0x2, 0x96, 0xb4, 0xf2, 0xbb, 0x53, 0x8a, 0xb5, 0x56, 0x14, 0x1a, 0x88, 0xdb, 0xa2, 0x31}, {0x3, 0xa3, 0x5a, 0x5c, 0x9a, 0x19, 0xe, 0xdb, 0x40, 0x3f, 0xb2, 0xa, 0x87, 0xc1, 0x44, 0x10, 0x1c, 0x5, 0x19, 0x80, 0x84, 0x9e, 0x95, 0x1d, 0x6f, 0x33, 0xeb, 0xad, 0x5e, 0xe7, 0xcd, 0xdc}, {0x10, 0xba, 0x13, 0x92, 0x2, 0xbf, 0x6b, 0x41, 0xdc, 0x78, 0x65, 0x15, 0xf7, 0xbb, 0x27, 0xd0, 0xa, 0x2c, 0x81, 0x39, 0x37, 0xaa, 0x78, 0x50, 0x3f, 0x1a, 0xbf, 0xd2, 0x41, 0x0, 0x91, 0xd3}, {0x42, 0x2d, 0x5a, 0xd, 0xf6, 0xcc, 0x7e, 0x90, 0xdd, 0x62, 0x9f, 0x9c, 0x92, 0xc0, 0x97, 0xce, 0x18, 0x5c, 0xa7, 0xb, 0xc7, 0x2b, 0x44, 0xac, 0xd1, 0xdf, 0x65, 0xd6, 0x63, 0xc6, 0xfc, 0x23}, {0x97, 0x6e, 0x6c, 0x3, 0x9e, 0xe0, 0xb8, 0x1a, 0x21, 0x5, 0x45, 0x7e, 0x44, 0x6c, 0xec, 0xa8, 0xee, 0xf1, 0x3, 0xbb, 0x5d, 0x8e, 0x61, 0xfa, 0xfd, 0x96, 0x97, 0xb2, 0x94, 0x83, 0x81, 0x97}, {0x4a, 0x8e, 0x85, 0x37, 0xdb, 0x3, 0x30, 0x2f, 0x2a, 0x67, 0x8d, 0x2d, 0xfb, 0x9f, 0x6a, 0x95, 0x8a, 0xfe, 0x73, 0x81, 0xf8, 0xb8, 0x69, 0x6c, 0x8a, 0xc7, 0x72, 0x46, 0xc0, 0x7f, 0x42, 0x14}, {0xc5, 0xf4, 0x15, 0x8f, 0xbd, 0xc7, 0x5e, 0xc4, 0x75, 0x44, 0x6f, 0xa7, 0x8f, 0x11, 0xbb, 0x80, 0x52, 0xde, 0x75, 0xb7, 0xae, 0xe4, 0x88, 0xbc, 0x82, 0xb8, 0x0, 0x1e, 0x98, 0xa6, 0xa3, 0xf4}, {0x8e, 0xf4, 0x8f, 0x33, 0xa9, 0xa3, 0x63, 0x15, 0xaa, 0x5f, 0x56, 0x24, 0xd5, 0xb7, 0xf9, 0x89, 0xb6, 0xf1, 0xed, 0x20, 0x7c, 0x5a, 0xe0, 0xfd, 0x36, 0xca, 0xe9, 0x5a, 0x6, 0x42, 0x2c, 0x36}, {0xce, 0x29, 0x35, 0x43, 0x4e, 0xfe, 0x98, 0x3d, 0x53, 0x3a, 0xf9, 0x74, 0x73, 0x9a, 0x4b, 0xa7, 0xd0, 0xf5, 0x1f, 0x59, 0x6f, 0x4e, 0x81, 0x86, 0xe, 0x9d, 0xad, 0x81, 0xaf, 0xd8, 0x5a, 0x9f}, {0xa7, 0x5, 0x6, 0x67, 0xee, 0x34, 0x62, 0x6a, 0x8b, 0xb, 0x28, 0xbe, 0x6e, 0xb9, 0x17, 0x27, 0x47, 0x74, 0x7, 0x26, 0xc6, 0x80, 0x10, 0x3f, 0xe0, 0xa0, 0x7e, 0x6f, 0xc6, 0x7e, 0x48, 0x7b}, {0xd, 0x55, 0xa, 0xa5, 0x4a, 0xf8, 0xa4, 0xc0, 0x91, 0xe3, 0xe7, 0x9f, 0x97, 0x8e, 0xf1, 0x9e, 0x86, 0x76, 0x72, 0x81, 0x50, 0x60, 0x8d, 0xd4, 0x7e, 0x9e, 0x5a, 0x41, 0xf3, 0xe5, 0xb0, 0x62}, {0xfc, 0x9f, 0x1f, 0xec, 0x40, 0x54, 0x20, 0x7a, 0xe3, 0xe4, 0x1a, 0x0, 0xce, 0xf4, 0xc9, 0x84, 0x4f, 0xd7, 0x94, 0xf5, 0x9d, 0xfa, 0x95, 0xd8, 0x55, 0x2e, 0x7e, 0x11, 0x24, 0xc3, 0x54, 0xa5}, {0x5b, 0xdf, 0x72, 0x28, 0xbd, 0xfe, 
0x6e, 0x28, 0x78, 0xf5, 0x7f, 0xe2, 0xf, 0xa5, 0xc4, 0xb2, 0x5, 0x89, 0x7c, 0xef, 0xee, 0x49, 0xd3, 0x2e, 0x44, 0x7e, 0x93, 0x85, 0xeb, 0x28, 0x59, 0x7f}, {0x70, 0x5f, 0x69, 0x37, 0xb3, 0x24, 0x31, 0x4a, 0x5e, 0x86, 0x28, 0xf1, 0x1d, 0xd6, 0xe4, 0x65, 0xc7, 0x1b, 0x77, 0x4, 0x51, 0xb9, 0x20, 0xe7, 0x74, 0xfe, 0x43, 0xe8, 0x23, 0xd4, 0x87, 0x8a}, {0x7d, 0x29, 0xe8, 0xa3, 0x92, 0x76, 0x94, 0xf2, 0xdd, 0xcb, 0x7a, 0x9, 0x9b, 0x30, 0xd9, 0xc1, 0x1d, 0x1b, 0x30, 0xfb, 0x5b, 0xdc, 0x1b, 0xe0, 0xda, 0x24, 0x49, 0x4f, 0xf2, 0x9c, 0x82, 0xbf}, {0xa4, 0xe7, 0xba, 0x31, 0xb4, 0x70, 0xbf, 0xff, 0xd, 0x32, 0x44, 0x5, 0xde, 0xf8, 0xbc, 0x48, 0x3b, 0xae, 0xfc, 0x32, 0x53, 0xbb, 0xd3, 0x39, 0x45, 0x9f, 0xc3, 0xc1, 0xe0, 0x29, 0x8b, 0xa0}, {0xe5, 0xc9, 0x5, 0xfd, 0xf7, 0xae, 0x9, 0xf, 0x94, 0x70, 0x34, 0x12, 0x42, 0x90, 0xf1, 0x34, 0xa2, 0x71, 0xb7, 0x1, 0xe3, 0x44, 0xed, 0x95, 0xe9, 0x3b, 0x8e, 0x36, 0x4f, 0x2f, 0x98, 0x4a}, {0x88, 0x40, 0x1d, 0x63, 0xa0, 0x6c, 0xf6, 0x15, 0x47, 0xc1, 0x44, 0x4b, 0x87, 0x52, 0xaf, 0xff, 0x7e, 0xbb, 0x4a, 0xf1, 0xe2, 0xa, 0xc6, 0x30, 0x46, 0x70, 0xb6, 0xc5, 0xcc, 0x6e, 0x8c, 0xe6}, {0xa4, 0xd5, 0xa4, 0x56, 0xbd, 0x4f, 0xca, 0x0, 0xda, 0x9d, 0x84, 0x4b, 0xc8, 0x3e, 0x18, 0xae, 0x73, 0x57, 0xce, 0x45, 0x30, 0x64, 0xd1, 0xad, 0xe8, 0xa6, 0xce, 0x68, 0x14, 0x5c, 0x25, 0x67}, {0xa3, 0xda, 0x8c, 0xf2, 0xcb, 0xe, 0xe1, 0x16, 0x33, 0xe9, 0x6, 0x58, 0x9a, 0x94, 0x99, 0x9a, 0x1f, 0x60, 0xb2, 0x20, 0xc2, 0x6f, 0x84, 0x7b, 0xd1, 0xce, 0xac, 0x7f, 0xa0, 0xd1, 0x85, 0x18}, {0x32, 0x59, 0x5b, 0xa1, 0x8d, 0xdd, 0x19, 0xd3, 0x50, 0x9a, 0x1c, 0xc0, 0xaa, 0xa5, 0xb4, 0x46, 0x9f, 0x3d, 0x63, 0x67, 0xe4, 0x4, 0x6b, 0xba, 0xf6, 0xca, 0x19, 0xab, 0xb, 0x56, 0xee, 0x7e}, {0x1f, 0xb1, 0x79, 0xea, 0xa9, 0x28, 0x21, 0x74, 0xe9, 0xbd, 0xf7, 0x35, 0x3b, 0x36, 0x51, 0xee, 0x1d, 0x57, 0xac, 0x5a, 0x75, 0x50, 0xd3, 0x76, 0x3a, 0x46, 0xc2, 0xfe, 0xa3, 0x7d, 0x70, 0x1}, {0xf7, 0x35, 0xc1, 0xaf, 0x98, 0xa4, 0xd8, 0x42, 0x78, 0xed, 0xec, 0x20, 0x9e, 0x6b, 0x67, 0x79, 0x41, 0x83, 0x63, 0x15, 0xea, 0x3a, 0xdb, 0xa8, 0xfa, 0xc3, 0x3b, 0x4d, 0x32, 0x83, 0x2c, 0x83}, {0xa7, 0x40, 0x3b, 0x1f, 0x1c, 0x27, 0x47, 0xf3, 0x59, 0x40, 0xf0, 0x34, 0xb7, 0x2d, 0x76, 0x9a, 0xe7, 0x3e, 0x4e, 0x6c, 0xd2, 0x21, 0x4f, 0xfd, 0xb8, 0xfd, 0x8d, 0x39, 0xdc, 0x57, 0x59, 0xef}, {0x8d, 0x9b, 0xc, 0x49, 0x2b, 0x49, 0xeb, 0xda, 0x5b, 0xa2, 0xd7, 0x49, 0x68, 0xf3, 0x70, 0xd, 0x7d, 0x3b, 0xae, 0xd0, 0x7a, 0x8d, 0x55, 0x84, 0xf5, 0xa5, 0xe9, 0xf0, 0xe4, 0xf8, 0x8e, 0x65}, {0xa0, 0xb8, 0xa2, 0xf4, 0x36, 0x10, 0x3b, 0x53, 0xc, 0xa8, 0x7, 0x9e, 0x75, 0x3e, 0xec, 0x5a, 0x91, 0x68, 0x94, 0x92, 0x56, 0xe8, 0x88, 0x4f, 0x5b, 0xb0, 0x5c, 0x55, 0xf8, 0xba, 0xbc, 0x4c}, {0xe3, 0xbb, 0x3b, 0x99, 0xf3, 0x87, 0x94, 0x7b, 0x75, 0xda, 0xf4, 0xd6, 0x72, 0x6b, 0x1c, 0x5d, 0x64, 0xae, 0xac, 0x28, 0xdc, 0x34, 0xb3, 0x6d, 0x6c, 0x34, 0xa5, 0x50, 0xb8, 0x28, 0xdb, 0x71}, {0xf8, 0x61, 0xe2, 0xf2, 0x10, 0x8d, 0x51, 0x2a, 0xe3, 0xdb, 0x64, 0x33, 0x59, 0xdd, 0x75, 0xfc, 0x1c, 0xac, 0xbc, 0xf1, 0x43, 0xce, 0x3f, 0xa2, 0x67, 0xbb, 0xd1, 0x3c, 0x2, 0xe8, 0x43, 0xb0}, {0x33, 0xa, 0x5b, 0xca, 0x88, 0x29, 0xa1, 0x75, 0x7f, 0x34, 0x19, 0x4d, 0xb4, 0x16, 0x53, 0x5c, 0x92, 0x3b, 0x94, 0xc3, 0xe, 0x79, 0x4d, 0x1e, 0x79, 0x74, 0x75, 0xd7, 0xb6, 0xee, 0xaf, 0x3f}, {0xea, 0xa8, 0xd4, 0xf7, 0xbe, 0x1a, 0x39, 0x21, 0x5c, 0xf4, 0x7e, 0x9, 0x4c, 0x23, 0x27, 0x51, 0x26, 0xa3, 0x24, 0x53, 0xba, 0x32, 0x3c, 0xd2, 0x44, 0xa3, 0x17, 0x4a, 0x6d, 0xa6, 0xd5, 0xad}, {0xb5, 0x1d, 0x3e, 0xa6, 0xaf, 0xf2, 0xc9, 0x8, 0x83, 0x59, 0x3d, 0x98, 0x91, 0x6b, 0x3c, 0x56, 0x4c, 0xf8, 0x7c, 0xa1, 
0x72, 0x86, 0x60, 0x4d, 0x46, 0xe2, 0x3e, 0xcc, 0x8, 0x6e, 0xc7, 0xf6}, {0x2f, 0x98, 0x33, 0xb3, 0xb1, 0xbc, 0x76, 0x5e, 0x2b, 0xd6, 0x66, 0xa5, 0xef, 0xc4, 0xe6, 0x2a, 0x6, 0xf4, 0xb6, 0xe8, 0xbe, 0xc1, 0xd4, 0x36, 0x74, 0xee, 0x82, 0x15, 0xbc, 0xef, 0x21, 0x63}, {0xfd, 0xc1, 0x4e, 0xd, 0xf4, 0x53, 0xc9, 0x69, 0xa7, 0x7d, 0x5a, 0xc4, 0x6, 0x58, 0x58, 0x26, 0x7e, 0xc1, 0x14, 0x16, 0x6, 0xe0, 0xfa, 0x16, 0x7e, 0x90, 0xaf, 0x3d, 0x28, 0x63, 0x9d, 0x3f}, {0xd2, 0xc9, 0xf2, 0xe3, 0x0, 0x9b, 0xd2, 0xc, 0x5f, 0xaa, 0xce, 0x30, 0xb7, 0xd4, 0xc, 0x30, 0x74, 0x2a, 0x51, 0x16, 0xf2, 0xe0, 0x32, 0x98, 0xd, 0xeb, 0x30, 0xd8, 0xe3, 0xce, 0xf8, 0x9a}, {0x4b, 0xc5, 0x9e, 0x7b, 0xb5, 0xf1, 0x79, 0x92, 0xff, 0x51, 0xe6, 0x6e, 0x4, 0x86, 0x68, 0xd3, 0x9b, 0x23, 0x4d, 0x57, 0xe6, 0x96, 0x67, 0x31, 0xcc, 0xe6, 0xa6, 0xf3, 0x17, 0xa, 0x75, 0x5}, {0xb1, 0x76, 0x81, 0xd9, 0x13, 0x32, 0x6c, 0xce, 0x3c, 0x17, 0x52, 0x84, 0xf8, 0x5, 0xa2, 0x62, 0xf4, 0x2b, 0xcb, 0xb3, 0x78, 0x47, 0x15, 0x47, 0xff, 0x46, 0x54, 0x82, 0x23, 0x93, 0x6a, 0x48}, {0x38, 0xdf, 0x58, 0x7, 0x4e, 0x5e, 0x65, 0x65, 0xf2, 0xfc, 0x7c, 0x89, 0xfc, 0x86, 0x50, 0x8e, 0x31, 0x70, 0x2e, 0x44, 0xd0, 0xb, 0xca, 0x86, 0xf0, 0x40, 0x9, 0xa2, 0x30, 0x78, 0x47, 0x4e}, {0x65, 0xa0, 0xee, 0x39, 0xd1, 0xf7, 0x38, 0x83, 0xf7, 0x5e, 0xe9, 0x37, 0xe4, 0x2c, 0x3a, 0xbd, 0x21, 0x97, 0xb2, 0x26, 0x1, 0x13, 0xf8, 0x6f, 0xa3, 0x44, 0xed, 0xd1, 0xef, 0x9f, 0xde, 0xe7}, {0x8b, 0xa0, 0xdf, 0x15, 0x76, 0x25, 0x92, 0xd9, 0x3c, 0x85, 0xf7, 0xf6, 0x12, 0xdc, 0x42, 0xbe, 0xd8, 0xa7, 0xec, 0x7c, 0xab, 0x27, 0xb0, 0x7e, 0x53, 0x8d, 0x7d, 0xda, 0xaa, 0x3e, 0xa8, 0xde}, {0xaa, 0x25, 0xce, 0x93, 0xbd, 0x2, 0x69, 0xd8, 0x5a, 0xf6, 0x43, 0xfd, 0x1a, 0x73, 0x8, 0xf9, 0xc0, 0x5f, 0xef, 0xda, 0x17, 0x4a, 0x19, 0xa5, 0x97, 0x4d, 0x66, 0x33, 0x4c, 0xfd, 0x21, 0x6a}, {0x35, 0xb4, 0x98, 0x31, 0xdb, 0x41, 0x15, 0x70, 0xea, 0x1e, 0xf, 0xbb, 0xed, 0xcd, 0x54, 0x9b, 0x9a, 0xd0, 0x63, 0xa1, 0x51, 0x97, 0x40, 0x72, 0xf6, 0x75, 0x9d, 0xbf, 0x91, 0x47, 0x6f, 0xe2} }; #define JH_SWAP1(x) (x) = ((((x) & 0x5555555555555555ULL) << 1) | (((x) & 0xaaaaaaaaaaaaaaaaULL) >> 1)); #define JH_SWAP2(x) (x) = ((((x) & 0x3333333333333333ULL) << 2) | (((x) & 0xccccccccccccccccULL) >> 2)); #define JH_SWAP4(x) (x) = ((((x) & 0x0f0f0f0f0f0f0f0fULL) << 4) | (((x) & 0xf0f0f0f0f0f0f0f0ULL) >> 4)); #define JH_SWAP8(x) (x) = ((((x) & 0x00ff00ff00ff00ffULL) << 8) | (((x) & 0xff00ff00ff00ff00ULL) >> 8)); #define JH_SWAP16(x) (x) = ((((x) & 0x0000ffff0000ffffULL) << 16) | (((x) & 0xffff0000ffff0000ULL) >> 16)); #define JH_SWAP32(x) (x) = (((x) << 32) | ((x) >> 32)); #define JH_L(m0,m1,m2,m3,m4,m5,m6,m7) \ (m4) ^= (m1); \ (m5) ^= (m2); \ (m6) ^= (m0) ^ (m3); \ (m7) ^= (m0); \ (m0) ^= (m5); \ (m1) ^= (m6); \ (m2) ^= (m4) ^ (m7); \ (m3) ^= (m4); #define JH_SS(m0,m1,m2,m3,m4,m5,m6,m7,cc0,cc1) \ m3 = ~(m3); \ m7 = ~(m7); \ m0 ^= ((~(m2)) & (cc0)); \ m4 ^= ((~(m6)) & (cc1)); \ temp0 = (cc0) ^ ((m0) & (m1));\ temp1 = (cc1) ^ ((m4) & (m5));\ m0 ^= ((m2) & (m3)); \ m4 ^= ((m6) & (m7)); \ m3 ^= ((~(m1)) & (m2)); \ m7 ^= ((~(m5)) & (m6)); \ m1 ^= ((m0) & (m2)); \ m5 ^= ((m4) & (m6)); \ m2 ^= ((m0) & (~(m3))); \ m6 ^= ((m4) & (~(m7))); \ m0 ^= ((m1) | (m3)); \ m4 ^= ((m5) | (m7)); \ m3 ^= ((m1) & (m2)); \ m7 ^= ((m5) & (m6)); \ m1 ^= (temp0 & (m0)); \ m5 ^= (temp1 & (m4)); \ m2 ^= temp0; \ m6 ^= temp1; __device__ void cn_jh_E8(jhHashState *state) { uint64_t i,roundnumber,temp0,temp1; for (roundnumber = 0; roundnumber < 42; roundnumber = roundnumber+7) { for (i = 0; i < 2; i++) { 
JH_SS(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i], ((uint64_t *)d_E8_rc[roundnumber+0])[i],((uint64_t *)d_E8_rc[roundnumber+0])[i+2] ); JH_L(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i]); JH_SWAP1(state->x[1][i]); JH_SWAP1(state->x[3][i]); JH_SWAP1(state->x[5][i]); JH_SWAP1(state->x[7][i]); } for (i = 0; i < 2; i++) { JH_SS(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i], ((uint64_t *)d_E8_rc[roundnumber+1])[i],((uint64_t *)d_E8_rc[roundnumber+1])[i+2] ); JH_L(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i]); JH_SWAP2(state->x[1][i]); JH_SWAP2(state->x[3][i]); JH_SWAP2(state->x[5][i]); JH_SWAP2(state->x[7][i]); } for (i = 0; i < 2; i++) { JH_SS(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i], ((uint64_t *)d_E8_rc[roundnumber+2])[i],((uint64_t *)d_E8_rc[roundnumber+2])[i+2] ); JH_L(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i]); JH_SWAP4(state->x[1][i]); JH_SWAP4(state->x[3][i]); JH_SWAP4(state->x[5][i]); JH_SWAP4(state->x[7][i]); } for (i = 0; i < 2; i++) { JH_SS(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i], ((uint64_t *)d_E8_rc[roundnumber+3])[i],((uint64_t *)d_E8_rc[roundnumber+3])[i+2] ); JH_L(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i]); JH_SWAP8(state->x[1][i]); JH_SWAP8(state->x[3][i]); JH_SWAP8(state->x[5][i]); JH_SWAP8(state->x[7][i]); } for (i = 0; i < 2; i++) { JH_SS(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i], ((uint64_t *)d_E8_rc[roundnumber+4])[i],((uint64_t *)d_E8_rc[roundnumber+4])[i+2] ); JH_L(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i]); JH_SWAP16(state->x[1][i]); JH_SWAP16(state->x[3][i]); JH_SWAP16(state->x[5][i]); JH_SWAP16(state->x[7][i]); } for (i = 0; i < 2; i++) { JH_SS(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i], ((uint64_t *)d_E8_rc[roundnumber+5])[i],((uint64_t *)d_E8_rc[roundnumber+5])[i+2] ); JH_L(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i]); JH_SWAP32(state->x[1][i]); JH_SWAP32(state->x[3][i]); JH_SWAP32(state->x[5][i]); JH_SWAP32(state->x[7][i]); } for (i = 0; i < 2; i++) { JH_SS(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i], ((uint64_t *)d_E8_rc[roundnumber+6])[i],((uint64_t *)d_E8_rc[roundnumber+6])[i+2] ); JH_L(state->x[0][i],state->x[2][i],state->x[4][i],state->x[6][i],state->x[1][i],state->x[3][i],state->x[5][i],state->x[7][i]); } for (i = 1; i < 8; i = i+2) { temp0 = state->x[i][0]; state->x[i][0] = state->x[i][1]; state->x[i][1] = temp0; } } } __device__ void cn_jh_F8(jhHashState *state) { uint64_t i; for (i = 0; i < 8; i++) { state->x[i >> 1][i & 1] ^= ((uint64_t *)state->buffer)[i]; } cn_jh_E8(state); for (i = 0; i < 8; i++) { state->x[(8+i) >> 1][(8+i) & 1] ^= ((uint64_t *)state->buffer)[i]; } } 
__device__ void cn_jh_update(jhHashState * __restrict__ state, const uint8_t * __restrict__ data, DataLength databitlen) { DataLength index; state->databitlen += databitlen; index = 0; if ( (state->datasize_in_buffer > 0 ) && (( state->datasize_in_buffer + databitlen) < 512) ) { if ( (databitlen & 7) == 0 ) { memcpy(state->buffer + (state->datasize_in_buffer >> 3), data, 64-(state->datasize_in_buffer >> 3)); } else memcpy(state->buffer + (state->datasize_in_buffer >> 3), data, 64-(state->datasize_in_buffer >> 3)+1); state->datasize_in_buffer += databitlen; databitlen = 0; } if ( (state->datasize_in_buffer > 0 ) && (( state->datasize_in_buffer + databitlen) >= 512) ) { memcpy( state->buffer + (state->datasize_in_buffer >> 3), data, 64-(state->datasize_in_buffer >> 3) ); index = 64-(state->datasize_in_buffer >> 3); databitlen = databitlen - (512 - state->datasize_in_buffer); cn_jh_F8(state); state->datasize_in_buffer = 0; } for ( ; databitlen >= 512; index = index+64, databitlen = databitlen - 512) { memcpy(state->buffer, data+index, 64); cn_jh_F8(state); } if ( databitlen > 0) { if ((databitlen & 7) == 0) memcpy(state->buffer, data+index, (databitlen & 0x1ff) >> 3); else memcpy(state->buffer, data+index, ((databitlen & 0x1ff) >> 3)+1); state->datasize_in_buffer = databitlen; } } /* pad the message, process the padded block(s), truncate the hash value H to obtain the message digest */ __device__ void cn_jh_final(jhHashState * __restrict__ state, uint8_t * __restrict__ hashval) { unsigned int i; //uint32_t *bufptr = (uint32_t *)state->buffer; if ( (state->databitlen & 0x1ff) == 0 ) { /* pad the message when databitlen is multiple of 512 bits, then process the padded block */ memset(state->buffer, 0, 64); //for( i = 0; i < 16; i++ ) *(bufptr+i) = 0x00000000; state->buffer[0] = 0x80; state->buffer[63] = state->databitlen & 0xff; state->buffer[62] = (state->databitlen >> 8) & 0xff; state->buffer[61] = (state->databitlen >> 16) & 0xff; state->buffer[60] = (state->databitlen >> 24) & 0xff; state->buffer[59] = (state->databitlen >> 32) & 0xff; state->buffer[58] = (state->databitlen >> 40) & 0xff; state->buffer[57] = (state->databitlen >> 48) & 0xff; state->buffer[56] = (state->databitlen >> 56) & 0xff; cn_jh_F8(state); } else { /* set the rest of the bytes in the buffer to 0 */ if ( (state->datasize_in_buffer & 7) == 0) { for (i = (state->databitlen & 0x1ff) >> 3; i < 64; i++) state->buffer[i] = 0; } else { for (i = ((state->databitlen & 0x1ff) >> 3)+1; i < 64; i++) state->buffer[i] = 0; } /*pad and process the partial block when databitlen is not multiple of 512 bits, then hash the padded blocks*/ state->buffer[((state->databitlen & 0x1ff) >> 3)] |= 1 << (7- (state->databitlen & 7)); cn_jh_F8(state); memset(state->buffer, 0, 64); //for( i = 0; i < 16; i++ ) *(bufptr+i) = 0x00000000; state->buffer[63] = state->databitlen & 0xff; state->buffer[62] = (state->databitlen >> 8) & 0xff; state->buffer[61] = (state->databitlen >> 16) & 0xff; state->buffer[60] = (state->databitlen >> 24) & 0xff; state->buffer[59] = (state->databitlen >> 32) & 0xff; state->buffer[58] = (state->databitlen >> 40) & 0xff; state->buffer[57] = (state->databitlen >> 48) & 0xff; state->buffer[56] = (state->databitlen >> 56) & 0xff; cn_jh_F8(state); } memcpy(hashval, ((unsigned char*)state->x) + 64 + 32, 32); //MEMCPY4(hashval, ((unsigned char*)state->x) + 64 + 32, 8); } __device__ void cn_jh_init(jhHashState *state, int hashbitlen) { state->databitlen = 0; state->datasize_in_buffer = 0; state->hashbitlen = hashbitlen; 
memcpy(state->x, d_JH256_H0, 128); //MEMCPY8(state->x, d_JH256_H0, 128 / 8); } __device__ void cn_jh(const uint8_t * __restrict__ data, DataLength len, uint32_t * hashval) { const int hashbitlen = 256; DataLength databitlen = len << 3; jhHashState state; cn_jh_init(&state, hashbitlen); cn_jh_update(&state, data, databitlen); cn_jh_final(&state, (uint8_t*) hashval); }
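// ---------------------------------------------------------------------------------------------
// Usage sketch (not part of the original file): cn_jh() is a __device__ routine taking a message
// pointer, its length in bytes, and a pointer receiving a 256-bit digest (8 x uint32_t).
// DataLength is assumed to be the usual 64-bit typedef from the JH reference code, defined in a
// header that is not shown here. A one-thread-per-message batch kernel could look like this:
// ---------------------------------------------------------------------------------------------

__global__ void cn_jh_batch_kernel(
    const uint8_t* __restrict__ messages,   // n_messages fixed-length messages, back to back
    const DataLength            msg_len,    // message length in bytes
    uint32_t* __restrict__      digests,    // 8 x uint32_t output slots per message
    const uint32_t              n_messages)
{
    const uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n_messages)
    {
        // each thread hashes one message independently; the jhHashState lives in local memory
        cn_jh(messages + i * msg_len, msg_len, digests + i * 8);
    }
}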
#include "_reg_resampling_gpu.h" #include "_reg_tools.h" #include "interpolations.h" /* *************************************************************** */ /* *************************************************************** */ template <const bool tIs3D, const resampler_boundary_e tBoundary> __global__ void reg_getImageGradient_spline_kernel(float *p_gradientArray, const float *pc_floating, const float *pc_deformation, const int3 floating_dims, const int3 deformation_dims, const float paddingValue, const int ref_size) { const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; const int nof_dims = 2 + int(tIs3D); const int kernel_size = 4; if(tid<ref_size){ //Get the voxel-based deformation in the floating space float voxeldeformation[nof_dims]; int voxel[nof_dims]; float basis[nof_dims][kernel_size]; float derivative[nof_dims][kernel_size]; for (int d = 0; d < nof_dims; ++d) { float relative; voxeldeformation[d] = pc_deformation[tid+d*ref_size]; voxel[d] = int(voxeldeformation[d]); relative = fabsf(voxeldeformation[d] - voxel[d]); reg_getNiftynetCubicSpline(relative, basis[d]); reg_getNiftynetCubicSplineDerivative(relative, derivative[d]); voxel[d] -= 1; } float4 gradientValue=make_float4(0.0f, 0.0f, 0.0f, 0.0f); if (tIs3D) { for(short c = 0; c < kernel_size; ++c) { int z = reg_applyBoundary<tBoundary>(voxel[2] + c, floating_dims.z); float3 tempValueY = make_float3(0.0f, 0.0f, 0.0f); for(short b = 0; b < kernel_size; ++b){ float2 tempValueX = make_float2(0.0f, 0.0f); int y = reg_applyBoundary<tBoundary>(voxel[1] + b, floating_dims.y); for(short a = 0; a < kernel_size; ++a){ int x = reg_applyBoundary<tBoundary>(voxel[0] + a, floating_dims.x); float intensity = paddingValue; if (reg_checkImageDimensionIndex<tBoundary>(x, floating_dims.x) && reg_checkImageDimensionIndex<tBoundary>(y, floating_dims.y) && reg_checkImageDimensionIndex<tBoundary>(z, floating_dims.z)) { intensity = pc_floating[((z*floating_dims.y)+y)*floating_dims.x+x]; } tempValueX.x += intensity*derivative[0][a]; tempValueX.y += intensity*basis[0][a]; } tempValueY.x += tempValueX.x*basis[1][b]; tempValueY.y += tempValueX.y*derivative[1][b]; tempValueY.z += tempValueX.y*basis[1][b]; } gradientValue.x += tempValueY.x*basis[2][c]; gradientValue.y += tempValueY.y*basis[2][c]; gradientValue.z += tempValueY.z*derivative[2][c]; } } else { for(short b = 0; b < kernel_size; ++b){ float2 tempValueX = make_float2(0.0f, 0.0f); int y = reg_applyBoundary<tBoundary>(voxel[1] + b, floating_dims.y); for(short a = 0; a < kernel_size; ++a){ int x = reg_applyBoundary<tBoundary>(voxel[0] + a, floating_dims.x); float intensity=paddingValue; if (reg_checkImageDimensionIndex<tBoundary>(x, floating_dims.x) && reg_checkImageDimensionIndex<tBoundary>(y, floating_dims.y)) { intensity = pc_floating[y*floating_dims.x+x]; } tempValueX.x += intensity*derivative[0][a]; tempValueX.y += intensity*basis[0][a]; } gradientValue.x += tempValueX.x*basis[1][b]; gradientValue.y += tempValueX.y*derivative[1][b]; } } p_gradientArray[tid] = gradientValue.x; p_gradientArray[ref_size+tid] = gradientValue.y; if (tIs3D) { p_gradientArray[2*ref_size+tid] = gradientValue.z; } } } /* *************************************************************** */ template <const bool tIs3D, const resampler_boundary_e tBoundary> __global__ void reg_getImageGradient_kernel(float *p_gradientArray, const float *pc_floating, const float *pc_deformation, const int3 floating_dims, const int3 deformation_dims, const float paddingValue, const int ref_size) { const int tid= 
(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(tid<ref_size){ //Get the voxel-based deformation in the floating space float3 voxeldeformation; voxeldeformation.x = pc_deformation[tid]; voxeldeformation.y = pc_deformation[ref_size+tid]; if (tIs3D) { voxeldeformation.z = pc_deformation[2*ref_size+tid]; } int3 voxel; voxel.x = (int)(voxeldeformation.x); voxel.y = (int)(voxeldeformation.y); if (tIs3D) { voxel.z = (int)(voxeldeformation.z); } float xBasis[2]; float relative = fabsf(voxeldeformation.x - (float)voxel.x); xBasis[0]=1.0f-relative; xBasis[1]=relative; float yBasis[2]; relative = fabsf(voxeldeformation.y - (float)voxel.y); yBasis[0]=1.0f-relative; yBasis[1]=relative; float zBasis[2]; if (tIs3D) { relative = fabsf(voxeldeformation.z - (float)voxel.z); zBasis[0]=1.0f-relative; zBasis[1]=relative; } float4 gradientValue=make_float4(0.0f, 0.0f, 0.0f, 0.0f); if (tIs3D) { for(short c=0; c<2; c++){ int z = reg_applyBoundary<tBoundary>(voxel.z + c, floating_dims.z); float3 tempValueY=make_float3(0.0f, 0.0f, 0.0f); for(short b=0; b<2; b++){ float2 tempValueX=make_float2(0.0f, 0.0f); int y = reg_applyBoundary<tBoundary>(voxel.y + b, floating_dims.y); for(short a=0; a<2; a++){ int x= reg_applyBoundary<tBoundary>(voxel.x + a, floating_dims.x); float intensity=paddingValue; if (reg_checkImageDimensionIndex<tBoundary>(x, floating_dims.x) && reg_checkImageDimensionIndex<tBoundary>(y, floating_dims.y) && reg_checkImageDimensionIndex<tBoundary>(z, floating_dims.z)) { intensity = pc_floating[((z*floating_dims.y)+y)*floating_dims.x+x]; } tempValueX.x += (1 - 2*int(a == 0))*intensity; tempValueX.y += intensity * xBasis[a]; } tempValueY.x += tempValueX.x * yBasis[b]; tempValueY.y += (1 - 2*int(b == 0))*tempValueX.y; tempValueY.z += tempValueX.y * yBasis[b]; } gradientValue.x += tempValueY.x * zBasis[c]; gradientValue.y += tempValueY.y * zBasis[c]; gradientValue.z += (1 - 2*int(c == 0))*tempValueY.z; } } else { for(short b=0; b<2; b++){ float2 tempValueX=make_float2(0.0f, 0.0f); int y = reg_applyBoundary<tBoundary>(voxel.y + b, floating_dims.y); for(short a=0; a<2; a++){ int x = reg_applyBoundary<tBoundary>(voxel.x + a, floating_dims.x); float intensity=paddingValue; if (reg_checkImageDimensionIndex<tBoundary>(x, floating_dims.x) && reg_checkImageDimensionIndex<tBoundary>(y, floating_dims.y)) { intensity = pc_floating[y*floating_dims.x+x]; } tempValueX.x += intensity*(1 - 2*(a == 0)); tempValueX.y += intensity * xBasis[a]; } gradientValue.x += tempValueX.x * yBasis[b]; gradientValue.y += tempValueX.y*(1 - 2*(b == 0)); } } p_gradientArray[tid] = gradientValue.x; p_gradientArray[ref_size+tid] = gradientValue.y; if (tIs3D) { p_gradientArray[2*ref_size+tid] = gradientValue.z; } } } /* *************************************************************** */ template <const bool tIs3D, const resampler_boundary_e tBoundary> static void _launchGradientKernelBoundary(const nifti_image &sourceImage, const nifti_image &deformationImage, const float *sourceImageArray_d, const float *positionFieldImageArray_d, float *resultGradientArray_d, const float pad, const int interpolation) { int3 floatingDim = make_int3(sourceImage.nx, sourceImage.ny, sourceImage.nz); int3 deformationDim = make_int3(deformationImage.nx, deformationImage.ny, deformationImage.nz); dim3 B1; dim3 G1; int ref_size = deformationImage.nx*deformationImage.ny*deformationImage.nz; cudaCommon_computeGridConfiguration(B1, G1, ref_size); if (interpolation == 3) { reg_getImageGradient_spline_kernel<tIs3D, tBoundary> <<<G1, B1>>> 
(resultGradientArray_d, sourceImageArray_d, positionFieldImageArray_d, floatingDim, deformationDim, pad, ref_size); } else { reg_getImageGradient_kernel<tIs3D, tBoundary> <<<G1, B1>>> (resultGradientArray_d, sourceImageArray_d, positionFieldImageArray_d, floatingDim, deformationDim, pad, ref_size); } } /* *************************************************************** */ template <const bool tIs3D> static void _launchGradientKernelND(const nifti_image &sourceImage, const nifti_image &deformationImage, const float *sourceImageArray_d, const float *positionFieldImageArray_d, float *resultGradientArray_d, const resampler_boundary_e boundary, const int interpolation) { const float pad = reg_getPaddingValue<float>(boundary); switch (boundary) { case resampler_boundary_e::CLAMPING: _launchGradientKernelBoundary<tIs3D, resampler_boundary_e::CLAMPING>(sourceImage, deformationImage, sourceImageArray_d, positionFieldImageArray_d, resultGradientArray_d, pad, interpolation); break; case resampler_boundary_e::REFLECTING: _launchGradientKernelBoundary<tIs3D, resampler_boundary_e::REFLECTING>(sourceImage, deformationImage, sourceImageArray_d, positionFieldImageArray_d, resultGradientArray_d, pad, interpolation); break; default: _launchGradientKernelBoundary<tIs3D, resampler_boundary_e::ZEROPAD>(sourceImage, deformationImage, sourceImageArray_d, positionFieldImageArray_d, resultGradientArray_d, pad, interpolation); } } /* *************************************************************** */ void reg_getImageGradient_gpu(const nifti_image &sourceImage, const nifti_image &deformationImage, const float *sourceImageArray_d, const float *positionFieldImageArray_d, float *resultGradientArray_d, const resampler_boundary_e boundary, const int interpolation) { if (sourceImage.nz > 1 || deformationImage.nz > 1) { _launchGradientKernelND<true>(sourceImage, deformationImage, sourceImageArray_d, positionFieldImageArray_d, resultGradientArray_d, boundary, interpolation); } else { _launchGradientKernelND<false>(sourceImage, deformationImage, sourceImageArray_d, positionFieldImageArray_d, resultGradientArray_d, boundary, interpolation); } } /* *************************************************************** */ /* *************************************************************** */ #endif
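// ---------------------------------------------------------------------------------------------
// Usage sketch (an illustration, not part of the original file): the kernels above write the
// gradient as a structure-of-arrays, i.e. all x components first, then all y, then all z, each
// block holding deformation.nx * deformation.ny * deformation.nz floats. Assuming the two
// nifti_image headers are populated and the device buffers already hold the floating image and
// the deformation field, a minimal host-side call could look like this:
// ---------------------------------------------------------------------------------------------

void example_image_gradient(const nifti_image &floating,
                            const nifti_image &deformation,
                            const float *d_floating,        // floating image (device)
                            const float *d_deformation)     // deformation field (device)
{
    const int ref_size = deformation.nx * deformation.ny * deformation.nz;
    const int n_dims   = (floating.nz > 1 || deformation.nz > 1) ? 3 : 2;

    float *d_gradient = NULL;
    cudaMalloc(&d_gradient, sizeof(float) * size_t(ref_size) * n_dims);

    // interpolation == 3 selects the NiftyNet cubic-spline kernel, anything else the
    // (bi/tri)linear kernel; ZEROPAD pads out-of-bounds samples with the padding value
    reg_getImageGradient_gpu(floating, deformation,
                             d_floating, d_deformation, d_gradient,
                             resampler_boundary_e::ZEROPAD, /*interpolation=*/1);

    cudaDeviceSynchronize();
    cudaFree(d_gradient);
}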
//#include <torch/serialize/tensor.h> //#include <ATen/ATen.h> //#include <ATen/cuda/CUDAContext.h> #define CUDA_NUM_THREADS 256 #define THREADS_PER_BLOCK 64 #define DIM0(TENSOR) ((TENSOR).x) #define DIM1(TENSOR) ((TENSOR).y) #define DIM2(TENSOR) ((TENSOR).z) #define DIM3(TENSOR) ((TENSOR).w) #define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) * (TENSOR##_stride.z)) + ((ww) * (TENSOR##_stride.w))]) #ifdef __cplusplus extern "C" { #endif __global__ void nlf_down_forward(const int n, const float* filters, const int channel, const int height,const int width,const int wsize, float* top_data) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } int step = height * width; int base = index * step; //up->down int fbase = index/channel*wsize*step; for(int row = 0; row < height; row ++){ for(int col = 0; col < width; col++){ float temp = 0; int r = row; int c = col; int shift = 0 * step + row * width + col; temp += top_data[base + r*width + c] * filters[fbase + shift]; r = row - 1; c = col; shift = 1 * step + row * width + col; if(r >= 0) temp += top_data[base + r*width+c]*filters[fbase + shift]; else temp += top_data[base + row*width+col]*filters[fbase + shift]; r = row - 1; c = col - 1; shift = 2 * step + row * width + col; if(r >= 0 && c >= 0) temp += top_data[base + r*width+c]*filters[fbase + shift]; else temp += top_data[base + row*width+col]*filters[fbase + shift]; r = row - 1; c = col + 1; shift = 3 * step + row * width + col; if(r >= 0 && c < width) temp += top_data[base + r*width+c] * filters[fbase + shift]; else temp += top_data[base + row*width+col] * filters[fbase + shift]; r = row; c = col - 1; shift = 4 * step + row * width + col; if(c >= 0) temp += top_data[base + r*width+c] * filters[fbase + shift]; else temp += top_data[base + row*width+col] * filters[fbase + shift]; top_data[base + row*width + col] = temp; } } } __global__ void nlf_up_forward(const int n, const float* filters, const int channel, const int height,const int width,const int wsize, float* top_data) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } int step = height * width; int base = index*step; //down->up int fbase = index / channel * wsize * step; for(int row = height - 1; row >= 0; row --){ for(int col = width-1; col >=0; col--){ float temp = 0; int r = row; int c = col; int shift = 0 * step + row * width + col; temp += top_data[base + r*width+c]*filters[fbase + shift]; r = row + 1; c = col; shift = 1 * step + row * width + col; if(r < height) //changed temp += top_data[base + r*width+c]*filters[fbase + shift]; else temp += top_data[base + row*width+col]*filters[fbase + shift]; r = row + 1; c = col - 1; shift = 2 * step + row * width + col; if(r < height && c >=0)//changed temp += top_data[base + r*width+c]*filters[fbase + shift]; else temp += top_data[base + row*width+col]*filters[fbase + shift]; r = row + 1; c = col + 1; shift = 3 * step + row * width + col; if(r < height && c < width)//changed temp += top_data[base + r*width+c]*filters[fbase + shift]; else temp += top_data[base + row*width+col]*filters[fbase + shift]; r = row; c = col + 1; shift = 4 * step + row * width + col; if(c < width)//changed temp += top_data[base + r*width+c]*filters[fbase + shift]; else temp += top_data[base + row*width+col]*filters[fbase + shift]; top_data[base + row*width + col]=temp; } } } __global__ void nlf_right_forward(const int n, const float* filters, const int channel, const int height,const int 
width, const int wsize, float* top_data) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } int step=height * width; int base = index*step; //left->right int fbase = index / channel * wsize * step; for(int col = 0; col < width; col++){ for(int row = 0; row < height; row ++){ //changed float temp = 0; int r = row; int c = col; int shift = 0 * step + row * width + col; temp += top_data[base + r*width+c]*filters[fbase + shift]; r = row; c = col - 1; //changed shift = 1 * step + row * width + col; if(c >= 0) //changed temp += top_data[base + r*width+c]*filters[fbase + shift]; else temp += top_data[base + row*width+col]*filters[fbase + shift]; r = row - 1; //changed c = col - 1; shift = 2 * step + row * width + col; if(c >= 0 && r >=0)//changed temp += top_data[base + r*width+c]*filters[fbase + shift]; else temp += top_data[base + row*width+col]*filters[fbase + shift]; r = row + 1; c = col - 1; //changed shift = 3 * step + row * width + col; if(c >= 0 && r < height)//changed temp += top_data[base + r*width+c]*filters[fbase + shift]; else temp += top_data[base + row*width+col]*filters[fbase + shift]; r = row - 1; c = col; //changed shift = 4 * step + row * width + col; if(r >= 0)//changed temp += top_data[base + r*width+c]*filters[fbase + shift]; else temp += top_data[base + row*width+col]*filters[fbase + shift]; top_data[base + row*width + col] = temp; } } } __global__ void nlf_left_forward(const int n, const float* filters, const int channel, const int height, const int width, const int wsize, float* top_data) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } int step = height * width; int base = index * step; //right->left int fbase = index / channel * wsize * step; for(int col = width - 1; col >= 0; col --){ for(int row = height-1; row >= 0; row --){ //changed float temp = 0; int r = row; int c = col; int shift = 0 * step + row * width + col; temp += top_data[base + r*width+c] * filters[fbase + shift]; r = row; c = col + 1; //changed shift = 1 * step + row * width + col; if(c < width) //changed temp += top_data[base + r*width+c]*filters[fbase + shift]; else temp += top_data[base + row*width+col]*filters[fbase + shift]; r = row - 1; //changed c = col + 1; shift = 2 * step + row * width + col; if(c < width && r >= 0)//changed temp += top_data[base + r*width+c]*filters[fbase + shift]; else temp += top_data[base + row*width + col]*filters[fbase + shift]; r = row + 1; c = col + 1; //changed shift = 3 * step + row * width + col; if(c < width && r < height)//changed temp += top_data[base + r*width+c]*filters[fbase + shift]; else temp += top_data[base + row*width + col]*filters[fbase + shift]; r = row + 1; c = col; //changed shift = 4 * step + row * width + col; if(r < height)//changed temp += top_data[base + r*width+c]*filters[fbase + shift]; else temp += top_data[base + row*width + col]*filters[fbase + shift]; top_data[base + row*width + col] = temp; } } } __global__ void nlf_down_backward(const int n, const float* filters, float* top_diff, const int channel, const int height, const int width, const int wsize,float* bottom_diff){ int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } int step = height * width; int base = index * step; //up->down int fbase = index/channel * step * wsize; for(int row = height - 1; row >= 0; row --){ for(int col = width - 1; col >= 0; col --){ float temp = top_diff[base + row * width + col]; // int r = row; // int c = col; // int shift = 0 * step + row * width + col; // temp += 
top_data[base + r*width+c]*filters[fbase + shift]; int r = row + 1; int c = col; int shift = 1 * step + r * width + c; if(r < height) temp += top_diff[base + r*width+c]*filters[fbase + shift]; r = row + 1; c = col + 1; shift = 2 * step + r * width + c; if(r < height && c < width) temp += top_diff[base + r*width+c]*filters[fbase + shift]; r = row + 1; c = col - 1; shift = 3 * step + r * width + c; if(r < height && c >= 0) temp += top_diff[base + r*width+c]*filters[fbase + shift]; r = row; c = col + 1; shift = 4 * step + r * width + c; if(c < width) temp += top_diff[base + r*width+c]*filters[fbase + shift]; shift = row * width + col; top_diff[base + shift] = temp; bottom_diff[base + shift] += temp * filters[fbase + shift]; } } for(int col=0; col<width; col++){ int location = base + col; int shift = fbase + col; bottom_diff[location] += top_diff[location] * filters[shift + step]; } for (int row = 0; row < height; row++) { int location = base + row * width; int shift = fbase + row * width; bottom_diff[location] += top_diff[location] * filters[shift + 2 * step]; bottom_diff[location] += top_diff[location] * filters[shift + 4 * step]; location += width-1; shift += width-1; bottom_diff[location] += top_diff[location] * filters[shift + 3 * step]; } } __global__ void nlf_up_backward(const int n, const float* filters, float* top_diff, const int channel, const int height, const int width, const int wsize,float* bottom_diff){ int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } int step = height * width; int base = index * step; //up->down int fbase = index/channel * step * wsize; for(int row = 0; row < height; row ++){ for(int col =0; col< width; col++){ float temp = top_diff[base + row * width + col]; // int r = row; // int c = col; // int shift = 0 * step + row * width + col; // temp += top_data[base + r*width+c]*filters[fbase + shift]; int r = row - 1; int c = col; int shift = 1 * step + r * width + c; if(r >= 0) temp += top_diff[base + r*width+c]*filters[fbase + shift]; r = row - 1; c = col + 1; shift = 2 * step + r * width + c; if(r >= 0 && c < width) temp += top_diff[base + r*width+c]*filters[fbase + shift]; r = row - 1; c = col - 1; shift = 3 * step + r * width + c; if(r >= 0 && c >= 0) temp += top_diff[base + r*width+c]*filters[fbase + shift]; r = row; c = col - 1; shift = 4 * step + r * width + c; if(c >= 0) temp += top_diff[base + r*width+c]*filters[fbase + shift]; shift = row * width + col; top_diff[base + shift] = temp; bottom_diff[base + shift] += temp * filters[fbase + shift]; } } for(int col=0; col<width; col++){ int location = base + (height-1)*width + col; int shift = fbase + (height-1)*width + col; bottom_diff[location] += top_diff[location] * filters[shift + step]; } for (int row = 0; row < height; row++) { int location = base + row * width; int shift = fbase + row * width; bottom_diff[location] += top_diff[location] * filters[shift + 2 * step]; location += width-1; shift += width-1; bottom_diff[location] += top_diff[location] * filters[shift + 3 * step]; bottom_diff[location] += top_diff[location] * filters[shift + 4 * step]; } } __global__ void nlf_right_backward(const int n, const float* filters, float* top_diff, const int channel, const int height, const int width, const int wsize,float* bottom_diff){ int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } int step = height * width; int base = index * step; //up->down int fbase = index/channel * step * wsize; for(int col = width - 1; col >= 0; col--){ for(int row = height-1; row 
>= 0; row --){ float temp = top_diff[base + row * width + col]; // int r = row; // int c = col; // int shift = 0 * step + row * width + col; // temp += top_data[base + r*width+c]*filters[fbase + shift]; int r = row; int c = col + 1; int shift = 1 * step + r * width + c; if(c < width) temp += top_diff[base + r*width+c]*filters[fbase + shift]; r = row + 1; c = col + 1; shift = 2 * step + r * width + c; if(c < width && r < height) temp += top_diff[base + r*width+c]*filters[fbase + shift]; r = row - 1; c = col + 1; shift = 3 * step + r * width + c; if(c < width && r >= 0) temp += top_diff[base + r*width+c]*filters[fbase + shift]; r = row + 1; c = col; shift = 4 * step + r * width + c; if(r < height) temp += top_diff[base + r*width+c]*filters[fbase + shift]; shift = row * width + col; top_diff[base + shift] = temp; bottom_diff[base + shift] += temp * filters[fbase + shift]; } } for(int row=0; row<height; row++){ int location = base + row*width; int shift = fbase + row*width; bottom_diff[location] += top_diff[location] * filters[shift + step]; } for (int col = 0; col < width; col ++) { int location = base + col; int shift = fbase + col; bottom_diff[location] += top_diff[location] * filters[shift + 2 * step]; bottom_diff[location] += top_diff[location] * filters[shift + 4 * step]; location += (height - 1) * width; shift += (height - 1) * width; bottom_diff[location] += top_diff[location] * filters[shift + 3 * step]; } } __global__ void nlf_left_backward(const int n, const float* filters, float* top_diff, const int channel, const int height, const int width, const int wsize, float* bottom_diff){ int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } int step = height * width; int base = index * step; //up->down int fbase = index/channel * step * wsize; for(int col = 0; col < width; col ++){ for(int row = 0; row < height; row ++){ float temp = top_diff[base + row * width + col]; // int r = row; // int c = col; // int shift = 0 * step + row * width + col; // temp += top_data[base + r*width+c]*filters[fbase + shift]; int r = row; int c = col - 1; int shift = 1 * step + r * width + c; if(c >= 0) temp += top_diff[base + r*width+c]*filters[fbase + shift]; r = row + 1; c = col - 1; shift = 2 * step + r * width + c; if(c >= 0 && r < height) temp += top_diff[base + r*width+c]*filters[fbase + shift]; r = row - 1; c = col - 1; shift = 3 * step + r * width + c; if(c >= 0 && r >= 0) temp += top_diff[base + r*width+c]*filters[fbase + shift]; r = row - 1; c = col; shift = 4 * step + r * width + c; if(r >= 0) temp += top_diff[base + r*width+c]*filters[fbase + shift]; shift = row * width + col; top_diff[base + shift] = temp; bottom_diff[base + shift] += temp * filters[fbase + shift]; } } for(int row=0; row<height; row++){ int location = base + row*width + width-1; int shift = fbase + row*width + width-1; bottom_diff[location] += top_diff[location] * filters[shift + step]; } for (int col = 0; col < width; col ++) { int location = base + col; int shift = fbase + col; bottom_diff[location] += top_diff[location] * filters[shift + 2 * step]; location += (height - 1) * width; shift += (height - 1) * width; bottom_diff[location] += top_diff[location] * filters[shift + 3 * step]; bottom_diff[location] += top_diff[location] * filters[shift + 4 * step]; } } __global__ void nlf_filter_down_backward(const int n, const float* bottom_data, const float* top_data, const float* temp_diff, const int channel, const int height, const int width, const int wsize, float* filters_diff){ int index = blockIdx.x * 
blockDim.x + threadIdx.x; if (index >= n) { return; } int step = height * width; // int base = index; int base = index/step*step*channel+index%step; //up->down int fbase = index/step*step*wsize+index%step; int row = index%step/width; int col = index%step%width; for(int i = 0; i < channel; i++){ filters_diff[fbase] += temp_diff[base + i * step] * bottom_data[base + i * step]; if(row - 1 >= 0) filters_diff[fbase + step] += temp_diff[base + i*step] * top_data[base - width + i*step]; else filters_diff[fbase + step] += temp_diff[base + i*step] * bottom_data[base + i*step]; if(row - 1 >= 0 && col - 1 >= 0) filters_diff[fbase + 2*step] += temp_diff[base + i*step] * top_data[base - width - 1 + i*step]; else filters_diff[fbase + 2*step] += temp_diff[base + i*step] * bottom_data[base + i*step]; if(row - 1 >= 0 && col + 1 < width) filters_diff[fbase + 3*step] += temp_diff[base + i*step] * top_data[base - width + 1 + i*step]; else filters_diff[fbase + 3*step] += temp_diff[base + i*step] * bottom_data[base + i*step]; if(col - 1 >= 0) filters_diff[fbase + 4*step] += temp_diff[base + i*step] * top_data[base - 1 + i*step]; else filters_diff[fbase + 4*step] += temp_diff[base + i*step] * bottom_data[base + i*step]; } } __global__ void nlf_filter_up_backward(const int n, const float* bottom_data, const float* top_data, const float* temp_diff, const int channel, const int height, const int width, const int wsize, float* filters_diff){ int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } int step = height * width; // int base = index; int base = index/step*step*channel+index%step; //up->down int fbase = index/step*step*wsize+index%step; int row = index%step/width; int col = index%step%width; for(int i=0; i< channel; i++){ filters_diff[fbase] += temp_diff[base + i*step] * bottom_data[base + i*step]; if(row + 1 < height) filters_diff[fbase + step] += temp_diff[base + i*step] * top_data[base + width + i*step]; else filters_diff[fbase + step] += temp_diff[base + i*step] * bottom_data[base + i*step]; if(row + 1 < height && col - 1 >= 0) filters_diff[fbase + 2*step] += temp_diff[base + i*step] * top_data[base + width - 1 + i*step]; else filters_diff[fbase + 2*step] += temp_diff[base + i*step] * bottom_data[base + i*step]; if(row + 1 < height && col + 1 < width) filters_diff[fbase + 3*step] += temp_diff[base + i*step] * top_data[base + width + 1 + i*step]; else filters_diff[fbase + 3*step] += temp_diff[base + i*step] * bottom_data[base + i*step]; if(col + 1 < width) filters_diff[fbase + 4*step] += temp_diff[base + i*step] * top_data[base + 1 + i*step]; else filters_diff[fbase + 4*step] += temp_diff[base + i*step] * bottom_data[base + i*step]; } } __global__ void nlf_filter_right_backward(const int n, const float* bottom_data, const float* top_data, const float* temp_diff, const int channel, const int height, const int width, const int wsize, float* filters_diff){ int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } int step = height * width; // int base = index; int base = index/step*step*channel+index%step; //up->down int fbase = index/step*step*wsize+index%step; int row = index%step/width; int col = index%step%width; for(int i=0; i < channel; i++){ filters_diff[fbase] += temp_diff[base + i*step] * bottom_data[base + i*step]; if(col - 1 >= 0) filters_diff[fbase + step] += temp_diff[base + i*step] * top_data[base - 1 + i*step]; else filters_diff[fbase + step] += temp_diff[base + i*step] * bottom_data[base + i*step]; if(col - 1 >= 0 && row - 1 >= 0) 
filters_diff[fbase + 2*step] += temp_diff[base + i*step] * top_data[base - width - 1 + i*step]; else filters_diff[fbase + 2*step] += temp_diff[base + i*step] * bottom_data[base + i*step]; if(col -1 >= 0 && row + 1 < height) filters_diff[fbase + 3*step] += temp_diff[base + i*step] * top_data[base + width - 1 + i*step]; else filters_diff[fbase + 3*step] += temp_diff[base + i*step] * bottom_data[base + i*step]; if(row - 1 >= 0) filters_diff[fbase + 4*step] += temp_diff[base + i*step] * top_data[base - width + i*step]; else filters_diff[fbase + 4*step] += temp_diff[base + i*step] * bottom_data[base + i*step]; } } __global__ void nlf_filter_left_backward(const int n, const float* bottom_data, const float* top_data, const float* temp_diff, const int channel, const int height, const int width, const int wsize, float* filters_diff){ int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } int step = height * width; // int base = index; int base = index/step*step*channel+index%step; //up->down int fbase = index/step*step*wsize+index%step; int row = index%step/width; int col = index%step%width; for(int i = 0; i < channel; i++){ filters_diff[fbase] += temp_diff[base + i*step] * bottom_data[base + i*step]; if(col + 1 < width) filters_diff[fbase + step] += temp_diff[base + i*step] * top_data[base + 1 + i*step]; else filters_diff[fbase + step] += temp_diff[base + i*step] * bottom_data[base + i*step]; if(col + 1 < width && row - 1 >= 0) filters_diff[fbase + 2*step] += temp_diff[base + i*step] * top_data[base - width + 1 + i*step]; else filters_diff[fbase + 2*step] += temp_diff[base + i*step] * bottom_data[base + i*step]; if(col + 1 < width && row + 1 < height) filters_diff[fbase + 3*step] += temp_diff[base + i*step] * top_data[base + width + 1 + i*step]; else filters_diff[fbase + 3*step] += temp_diff[base + i*step] * bottom_data[base + i*step]; if(row + 1 < height) filters_diff[fbase + 4*step] += temp_diff[base + i*step] * top_data[base + width + i*step]; else filters_diff[fbase + 4*step] += temp_diff[base + i*step] * bottom_data[base + i*step]; } } void nlf_down_kernel_forward (at::Tensor input, at::Tensor guidance_down, at::Tensor output_down){ int num = input.size(0); int channel = input.size(1); int height = input.size(2); int width = input.size(3); if(input.dim()>4){ channel = input.size(1) * input.size(2); height = input.size(3); width = input.size(4); } int wsize = guidance_down.size(1); float *top_down = output_down.data<float>(); const float *bottom_data = input.data<float>(); const float *g0 = guidance_down.data<float>(); int n = num * channel; int threads = (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; int N = input.numel(); cudaMemcpy (top_down, bottom_data, sizeof (float) * N, cudaMemcpyDeviceToDevice); nlf_down_forward <<< threads, CUDA_NUM_THREADS >>> (n, g0, channel, height, width, wsize, top_down); } void nlf_up_kernel_forward (at::Tensor input, at::Tensor guidance_up, at::Tensor output_up){ int num = input.size(0); int channel = input.size(1); int height = input.size(2); int width = input.size(3); if(input.dim()>4){ channel = input.size(1) * input.size(2); height = input.size(3); width = input.size(4); } int wsize = guidance_up.size(1); float *top_up = output_up.data<float>(); const float *bottom_data = input.data<float>(); const float *g1 = guidance_up.data<float>(); int n = num * channel; int threads = (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; int N = input.numel(); cudaMemcpy (top_up, bottom_data, sizeof (float) * N, cudaMemcpyDeviceToDevice); 
nlf_up_forward <<< threads, CUDA_NUM_THREADS >>> (n, g1, channel, height, width, wsize, top_up); } void nlf_right_kernel_forward (at::Tensor input, at::Tensor guidance_right, at::Tensor output_right){ int num = input.size(0); int channel = input.size(1); int height = input.size(2); int width = input.size(3); if(input.dim()>4){ channel = input.size(1) * input.size(2); height = input.size(3); width = input.size(4); } int wsize = guidance_right.size(1); float *top_right = output_right.data<float>(); const float *bottom_data = input.data<float>(); const float *g2 = guidance_right.data<float>(); int n = num * channel; int threads = (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; int N = input.numel(); cudaMemcpy (top_right, bottom_data, sizeof (float) * N, cudaMemcpyDeviceToDevice); nlf_right_forward <<< threads, CUDA_NUM_THREADS >>> (n, g2, channel, height, width, wsize, top_right); } void nlf_left_kernel_forward (at::Tensor input, at::Tensor guidance_left, at::Tensor output_left){ int num = input.size(0); int channel = input.size(1); int height = input.size(2); int width = input.size(3); if(input.dim()>4){ channel = input.size(1) * input.size(2); height = input.size(3); width = input.size(4); } int wsize = guidance_left.size(1); float *top_left = output_left.data<float>(); const float *bottom_data = input.data<float>(); const float *g3 = guidance_left.data<float>(); int n = num * channel; int threads = (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; int N = input.numel(); cudaMemcpy (top_left, bottom_data, sizeof (float) * N, cudaMemcpyDeviceToDevice); nlf_left_forward <<< threads, CUDA_NUM_THREADS >>> (n, g3, channel, height, width, wsize, top_left); } void nlf_down_kernel_backward (at::Tensor input, at::Tensor guidance_down, at::Tensor output_down, at::Tensor gradOutput, at::Tensor gradInput, at::Tensor grad_down){ int num = input.size (0); int channel = input.size (1); int height = input.size (2); int width = input.size (3); if(input.dim()>4){ channel = input.size(1) * input.size(2); height = input.size(3); width = input.size(4); } int wsize = guidance_down.size (1); const float *bottom_data = input.data<float>(); float *grad_output = gradOutput.data<float>(); float *grad_input = gradInput.data<float>(); const float *top_down = output_down.data<float>(); const float *g0 = guidance_down.data<float>(); float *grad0 = grad_down.data<float>(); int N = input.numel(); int n = num * channel; cudaMemset (grad_input, 0, sizeof (float) * N); cudaMemset (grad0, 0, sizeof (float) * num*wsize*height*width ); nlf_down_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (n, g0, grad_output, channel, height, width, wsize, grad_input); n = num*height*width; nlf_filter_down_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (n, bottom_data, top_down, grad_output, channel, height, width, wsize, grad0); } void nlf_up_kernel_backward (at::Tensor input, at::Tensor guidance_up, at::Tensor output_up, at::Tensor gradOutput, at::Tensor gradInput, at::Tensor grad_up){ int num = input.size (0); int channel = input.size (1); int height = input.size (2); int width = input.size (3); if(input.dim()>4){ channel = input.size(1) * input.size(2); height = input.size(3); width = input.size(4); } int wsize = guidance_up.size (1); const float *bottom_data = input.data<float>(); float *grad_output = gradOutput.data<float>(); float *grad_input = gradInput.data<float>(); const float *top_up = output_up.data<float>(); const float *g1 = guidance_up.data<float>(); float *grad1 
= grad_up.data<float>(); int N = input.numel(); int n = num * channel; cudaMemset (grad_input, 0, sizeof (float) * N); cudaMemset (grad1, 0, sizeof (float) * num*wsize*height*width ); nlf_up_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (n, g1, grad_output, channel, height, width, wsize, grad_input); n = num*height*width; nlf_filter_up_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (n, bottom_data, top_up, grad_output, channel, height, width, wsize, grad1); } void nlf_right_kernel_backward (at::Tensor input, at::Tensor guidance_right, at::Tensor output_right, at::Tensor gradOutput, at::Tensor gradInput, at::Tensor grad_right){ int num = input.size (0); int channel = input.size (1); int height = input.size (2); int width = input.size (3); if(input.dim()>4){ channel = input.size(1) * input.size(2); height = input.size(3); width = input.size(4); } int wsize = guidance_right.size (1); const float *bottom_data = input.data<float>(); float *grad_output = gradOutput.data<float>(); float *grad_input = gradInput.data<float>(); const float *top_right = output_right.data<float>(); const float *g2 = guidance_right.data<float>(); float *grad2 = grad_right.data<float>(); int N = input.numel(); int n = num * channel; cudaMemset (grad_input, 0, sizeof (float) * N); cudaMemset (grad2, 0, sizeof (float) * num*wsize*height*width ); nlf_right_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (n, g2, grad_output, channel, height, width, wsize, grad_input); n = num*height*width; nlf_filter_right_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (n, bottom_data, top_right, grad_output, channel, height, width, wsize, grad2); } void nlf_left_kernel_backward (at::Tensor input, at::Tensor guidance_left, at::Tensor output_left, at::Tensor gradOutput, at::Tensor gradInput, at::Tensor grad_left){ int num = input.size (0); int channel = input.size (1); int height = input.size (2); int width = input.size (3); if(input.dim()>4){ channel = input.size(1) * input.size(2); height = input.size(3); width = input.size(4); } int wsize = guidance_left.size (1); const float *bottom_data = input.data<float>(); float *grad_output = gradOutput.data<float>(); float *grad_input = gradInput.data<float>(); const float *top_left = output_left.data<float>(); const float *g3 = guidance_left.data<float>(); float *grad3 = grad_left.data<float>(); int N = input.numel(); int n = num * channel; cudaMemset (grad_input, 0, sizeof (float) * N); cudaMemset (grad3, 0, sizeof (float) * num*wsize*height*width ); nlf_left_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (n, g3, grad_output, channel, height, width, wsize, grad_input); n = num*height*width; nlf_filter_left_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (n, bottom_data, top_left, grad_output, channel, height, width, wsize, grad3); } void nlf_kernel_forward (at::Tensor input, at::Tensor guidance_down, at::Tensor guidance_up, at::Tensor guidance_right, at::Tensor guidance_left, at::Tensor output_down, at::Tensor output_up, at::Tensor output_right, at::Tensor output_left){ int num = input.size(0); int channel = input.size(1); int height = input.size(2); int width = input.size(3); int wsize = guidance_down.size(1); float *top_down = output_down.data<float>(); float *top_up = output_up.data<float>(); float *top_right = output_right.data<float>(); float *top_left = output_left.data<float>(); const float 
*bottom_data = input.data<float>(); const float *g0 = guidance_down.data<float>(); const float *g1 = guidance_up.data<float>(); const float *g2 = guidance_right.data<float>(); const float *g3 = guidance_left.data<float>(); int n = num * channel; int threads = (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; int N = input.numel(); // printf("%d %d %d %d %d %d %d %d\n", num, channel, height, width, wsize, n, threads, N); cudaMemcpy (top_down, bottom_data, sizeof (float) * N, cudaMemcpyDeviceToDevice); nlf_down_forward <<< threads, CUDA_NUM_THREADS >>> (n, g0, channel, height, width, wsize, top_down); // printf("sgf down done...\n"); cudaMemcpy (top_up, top_down, sizeof (float) * N, cudaMemcpyDeviceToDevice); nlf_up_forward <<< threads, CUDA_NUM_THREADS >>> (n, g1, channel, height, width, wsize, top_up); // printf("sgf up done...\n"); cudaMemcpy (top_right, top_up, sizeof (float) * N, cudaMemcpyDeviceToDevice); nlf_right_forward <<< threads, CUDA_NUM_THREADS >>> (n, g2, channel, height, width, wsize, top_right); // printf("sgf right done...\n"); cudaMemcpy (top_left, top_right, sizeof (float) * N, cudaMemcpyDeviceToDevice); nlf_left_forward <<< threads, CUDA_NUM_THREADS >>> (n, g3, channel, height, width, wsize, top_left); // printf("sgf left done...\n"); } void nlf_kernel_backward (at::Tensor input, at::Tensor guidance_down, at::Tensor guidance_up, at::Tensor guidance_right, at::Tensor guidance_left, at::Tensor output_down, at::Tensor output_up, at::Tensor output_right, at::Tensor output_left, at::Tensor gradOutput, at::Tensor gradInput, at::Tensor grad_down, at::Tensor grad_up, at::Tensor grad_right, at::Tensor grad_left){ int num = input.size (0); int channel = input.size (1); int height = input.size (2); int width = input.size (3); int wsize = guidance_down.size (1); const float *bottom_data = input.data<float>(); float *grad_output = gradOutput.data<float>(); float *grad_input = gradInput.data<float>(); const float *top_down = output_down.data<float>(); const float *top_up = output_up.data<float>(); const float *top_right = output_right.data<float>(); const float *top_left = output_left.data<float>(); const float *g0 = guidance_down.data<float>(); const float *g1 = guidance_up.data<float>(); const float *g2 = guidance_right.data<float>(); const float *g3 = guidance_left.data<float>(); float *grad0 = grad_down.data<float>(); float *grad1 = grad_up.data<float>(); float *grad2 = grad_right.data<float>(); float *grad3 = grad_left.data<float>(); int N = input.numel(); int n = num * channel; cudaMemset (grad_input, 0, sizeof (float) * N); nlf_left_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (n, g3, grad_output, channel, height, width, wsize, grad_input); nlf_filter_left_backward <<< (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (N, bottom_data, top_left, grad_output, channel, height, width, wsize, grad3); // printf("backward left done...\n"); cudaMemcpy (grad_output, grad_input, sizeof (float) * N, cudaMemcpyDeviceToDevice); cudaMemset (grad_input, 0, sizeof (float) * N); nlf_right_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (n, g2, grad_output, channel, height, width, wsize, grad_input); nlf_filter_right_backward <<< (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (N, bottom_data, top_right, grad_output, channel, height, width, wsize, grad2); // printf("backward right done...\n"); cudaMemcpy (grad_output, grad_input, sizeof (float) * N, cudaMemcpyDeviceToDevice); cudaMemset 
(grad_input, 0, sizeof (float) * N); nlf_up_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (n, g1, grad_output, channel, height, width, wsize, grad_input); nlf_filter_up_backward <<< (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (N, bottom_data, top_up, grad_output, channel, height, width, wsize, grad1); cudaMemcpy (grad_output, grad_input, sizeof (float) * N, cudaMemcpyDeviceToDevice); cudaMemset (grad_input, 0, sizeof (float) * N); nlf_down_backward <<< (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (n, g0, grad_output, channel, height, width, wsize, grad_input); nlf_filter_down_backward <<< (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS, CUDA_NUM_THREADS >>> (N, bottom_data, top_down, grad_output, channel, height, width, wsize, grad0); } #ifdef __cplusplus } #endif
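// ---------------------------------------------------------------------------
// Usage sketch (added for illustration, not part of the original extension):
// how the host entry points above might be driven from C++ with ATen. The
// tensor shapes are assumptions inferred from the kernel indexing: input is
// (num, channel, height, width) and each guidance tensor is
// (num, wsize, height, width) with wsize = 5, since every kernel reads filter
// taps at offsets 0*step .. 4*step. Everything except the nlf_* entry points
// themselves is hypothetical.
#include <torch/extension.h>   // redundant if the ATen headers are already pulled in above

void nlf_forward_usage_sketch() {
  const int64_t num = 1, channel = 8, height = 64, width = 64, wsize = 5;
  auto opts = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA);

  torch::Tensor input = torch::randn({num, channel, height, width}, opts);

  // One guidance tensor per propagation direction; the values are assumed to
  // be per-pixel filter weights produced elsewhere (softmax is only a stand-in
  // to get something normalised along the wsize dimension).
  torch::Tensor g_down  = torch::softmax(torch::randn({num, wsize, height, width}, opts), 1);
  torch::Tensor g_up    = torch::softmax(torch::randn({num, wsize, height, width}, opts), 1);
  torch::Tensor g_right = torch::softmax(torch::randn({num, wsize, height, width}, opts), 1);
  torch::Tensor g_left  = torch::softmax(torch::randn({num, wsize, height, width}, opts), 1);

  // The four outputs hold the intermediate result after each directional pass;
  // output_left is the final, fully propagated tensor.
  torch::Tensor out_down  = torch::empty_like(input);
  torch::Tensor out_up    = torch::empty_like(input);
  torch::Tensor out_right = torch::empty_like(input);
  torch::Tensor out_left  = torch::empty_like(input);

  nlf_kernel_forward(input, g_down, g_up, g_right, g_left,
                     out_down, out_up, out_right, out_left);
}
// ---------------------------------------------------------------------------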
#include <thrust/device_vector.h> #include <thrust/host_vector.h> #include "../gpu/arima/arima.h" #include "cuda_utils2.h" TEST(ARIMA, differencing) { const int length = 10; thrust::device_vector<float> data(length); for (auto i = 0; i < length; ++i) data[i] = float(i / 2); thrust::device_vector<float> differenced_data(length); h2o4gpu::ARIMAModel<float>::Difference( thrust::raw_pointer_cast(differenced_data.data()), thrust::raw_pointer_cast(data.data()), length); OK(cudaDeviceSynchronize()); thrust::host_vector<float> h_differenced_data = differenced_data; ASSERT_FLOAT_EQ(0, h_differenced_data[0]); ASSERT_FLOAT_EQ(-1, h_differenced_data[1]); ASSERT_FLOAT_EQ(0, h_differenced_data[2]); ASSERT_FLOAT_EQ(-1, h_differenced_data[3]); ASSERT_FLOAT_EQ(0, h_differenced_data[4]); ASSERT_FLOAT_EQ(-1, h_differenced_data[5]); ASSERT_FLOAT_EQ(0, h_differenced_data[6]); ASSERT_FLOAT_EQ(-1, h_differenced_data[7]); ASSERT_FLOAT_EQ(0, h_differenced_data[8]); ASSERT_TRUE(std::isnan(h_differenced_data[9])); } TEST(ARIMA, ts_data_to_matrix) { const int length = 7; const int depth = 3; const int lda = 6; thrust::device_vector<float> ts_data(length); for (auto i = 0; i < length; ++i) ts_data[i] = float(i); thrust::device_vector<float> A(depth * lda, -1.0); h2o4gpu::ARIMAModel<float>::AsMatrix(thrust::raw_pointer_cast(ts_data.data()), thrust::raw_pointer_cast(A.data()), depth, lda, length); OK(cudaDeviceSynchronize()); thrust::host_vector<float> h_A = A; ASSERT_FLOAT_EQ(0.0f, h_A[0]); ASSERT_FLOAT_EQ(1.0f, h_A[1]); ASSERT_FLOAT_EQ(2.0f, h_A[2]); ASSERT_FLOAT_EQ(3.0f, h_A[3]); ASSERT_FLOAT_EQ(4.0f, h_A[4]); ASSERT_FLOAT_EQ(-1.0f, h_A[5]); ASSERT_FLOAT_EQ(1.0f, h_A[6]); ASSERT_FLOAT_EQ(2.0f, h_A[7]); ASSERT_FLOAT_EQ(3.0f, h_A[8]); ASSERT_FLOAT_EQ(4.0f, h_A[9]); ASSERT_FLOAT_EQ(5.0f, h_A[10]); ASSERT_FLOAT_EQ(-1.0f, h_A[11]); ASSERT_FLOAT_EQ(2.0f, h_A[12]); ASSERT_FLOAT_EQ(3.0f, h_A[13]); ASSERT_FLOAT_EQ(4.0f, h_A[14]); ASSERT_FLOAT_EQ(5.0f, h_A[15]); ASSERT_FLOAT_EQ(6.0f, h_A[16]); ASSERT_FLOAT_EQ(-1.0f, h_A[17]); } TEST(ARIMA, double_ts_data_to_matrix1) { const int length = 7; const int a_depth = 2; const int b_depth = 3; const int lda = 10; thrust::device_vector<float> ts_a(length); for (auto i = 0; i < length; ++i) ts_a[i] = float(i); thrust::device_vector<float> ts_b(length); for (auto i = 0; i < length; ++i) ts_b[i] = float(i + 1000); thrust::device_vector<float> A((a_depth + b_depth) * lda, NAN); h2o4gpu::ARIMAModel<float>::AsMatrix(thrust::raw_pointer_cast(ts_a.data()), thrust::raw_pointer_cast(ts_b.data()), thrust::raw_pointer_cast(A.data()), a_depth, b_depth, lda, length); OK(cudaDeviceSynchronize()); thrust::host_vector<float> h_A = A; ASSERT_FLOAT_EQ(0.000000, h_A[0]); ASSERT_FLOAT_EQ(1.000000, h_A[1]); ASSERT_FLOAT_EQ(2.000000, h_A[2]); ASSERT_FLOAT_EQ(3.000000, h_A[3]); ASSERT_FLOAT_EQ(4.000000, h_A[4]); ASSERT_FLOAT_EQ(5.000000, h_A[5]); ASSERT_TRUE(std::isnan(h_A[6])); ASSERT_TRUE(std::isnan(h_A[7])); ASSERT_TRUE(std::isnan(h_A[8])); ASSERT_TRUE(std::isnan(h_A[9])); ASSERT_FLOAT_EQ(1.000000, h_A[10]); ASSERT_FLOAT_EQ(2.000000, h_A[11]); ASSERT_FLOAT_EQ(3.000000, h_A[12]); ASSERT_FLOAT_EQ(4.000000, h_A[13]); ASSERT_FLOAT_EQ(5.000000, h_A[14]); ASSERT_FLOAT_EQ(6.000000, h_A[15]); ASSERT_TRUE(std::isnan(h_A[16])); ASSERT_TRUE(std::isnan(h_A[17])); ASSERT_TRUE(std::isnan(h_A[18])); ASSERT_TRUE(std::isnan(h_A[19])); ASSERT_FLOAT_EQ(1000.000000, h_A[20]); ASSERT_FLOAT_EQ(1001.000000, h_A[21]); ASSERT_FLOAT_EQ(1002.000000, h_A[22]); ASSERT_FLOAT_EQ(1003.000000, h_A[23]); ASSERT_FLOAT_EQ(1004.000000, 
h_A[24]); ASSERT_TRUE(std::isnan(h_A[25])); ASSERT_TRUE(std::isnan(h_A[26])); ASSERT_TRUE(std::isnan(h_A[27])); ASSERT_TRUE(std::isnan(h_A[28])); ASSERT_TRUE(std::isnan(h_A[29])); ASSERT_FLOAT_EQ(1001.000000, h_A[30]); ASSERT_FLOAT_EQ(1002.000000, h_A[31]); ASSERT_FLOAT_EQ(1003.000000, h_A[32]); ASSERT_FLOAT_EQ(1004.000000, h_A[33]); ASSERT_FLOAT_EQ(1005.000000, h_A[34]); ASSERT_TRUE(std::isnan(h_A[35])); ASSERT_TRUE(std::isnan(h_A[36])); ASSERT_TRUE(std::isnan(h_A[37])); ASSERT_TRUE(std::isnan(h_A[38])); ASSERT_TRUE(std::isnan(h_A[39])); ASSERT_FLOAT_EQ(1002.000000, h_A[40]); ASSERT_FLOAT_EQ(1003.000000, h_A[41]); ASSERT_FLOAT_EQ(1004.000000, h_A[42]); ASSERT_FLOAT_EQ(1005.000000, h_A[43]); ASSERT_FLOAT_EQ(1006.000000, h_A[44]); ASSERT_TRUE(std::isnan(h_A[45])); ASSERT_TRUE(std::isnan(h_A[46])); ASSERT_TRUE(std::isnan(h_A[47])); ASSERT_TRUE(std::isnan(h_A[48])); ASSERT_TRUE(std::isnan(h_A[49])); } TEST(ARIMA, double_ts_data_to_matrix2) { const int length = 7; const int a_depth = 2; const int b_depth = 3; const int lda = 5; thrust::device_vector<float> ts_a(length); for (auto i = 0; i < length; ++i) ts_a[i] = float(i); thrust::device_vector<float> ts_b(length); for (auto i = 0; i < length; ++i) ts_b[i] = float(i + 1000); thrust::device_vector<float> A((a_depth + b_depth) * lda, NAN); h2o4gpu::ARIMAModel<float>::AsMatrix(thrust::raw_pointer_cast(ts_a.data()), thrust::raw_pointer_cast(ts_b.data()), thrust::raw_pointer_cast(A.data()), a_depth, b_depth, lda, length); OK(cudaDeviceSynchronize()); thrust::host_vector<float> h_A = A; ASSERT_FLOAT_EQ(0.000000, h_A[0]); ASSERT_FLOAT_EQ(1.000000, h_A[1]); ASSERT_FLOAT_EQ(2.000000, h_A[2]); ASSERT_FLOAT_EQ(3.000000, h_A[3]); ASSERT_FLOAT_EQ(4.000000, h_A[4]); ASSERT_FLOAT_EQ(1.000000, h_A[5]); ASSERT_FLOAT_EQ(2.000000, h_A[6]); ASSERT_FLOAT_EQ(3.000000, h_A[7]); ASSERT_FLOAT_EQ(4.000000, h_A[8]); ASSERT_FLOAT_EQ(5.000000, h_A[9]); ASSERT_FLOAT_EQ(1000.000000, h_A[10]); ASSERT_FLOAT_EQ(1001.000000, h_A[11]); ASSERT_FLOAT_EQ(1002.000000, h_A[12]); ASSERT_FLOAT_EQ(1003.000000, h_A[13]); ASSERT_FLOAT_EQ(1004.000000, h_A[14]); ASSERT_FLOAT_EQ(1001.000000, h_A[15]); ASSERT_FLOAT_EQ(1002.000000, h_A[16]); ASSERT_FLOAT_EQ(1003.000000, h_A[17]); ASSERT_FLOAT_EQ(1004.000000, h_A[18]); ASSERT_FLOAT_EQ(1005.000000, h_A[19]); ASSERT_FLOAT_EQ(1002.000000, h_A[20]); ASSERT_FLOAT_EQ(1003.000000, h_A[21]); ASSERT_FLOAT_EQ(1004.000000, h_A[22]); ASSERT_FLOAT_EQ(1005.000000, h_A[23]); ASSERT_FLOAT_EQ(1006.000000, h_A[24]); } TEST(ARIMA, applyAR) { const int length = 10; thrust::device_vector<float> ts_data(length); for (auto i = 0; i < length; ++i) ts_data[i] = float(i); const int p = 2; thrust::device_vector<float> phi(p); phi[0] = 1.0; phi[1] = 0.5; thrust::device_vector<float> res(length * p, 0); h2o4gpu::ARIMAModel<float>::Apply(thrust::raw_pointer_cast(res.data()), thrust::raw_pointer_cast(ts_data.data()), thrust::raw_pointer_cast(phi.data()), p, nullptr, nullptr, 0, length); thrust::host_vector<float> h_res = res; ASSERT_FLOAT_EQ(-2, h_res[0]); ASSERT_FLOAT_EQ(-2.5, h_res[1]); ASSERT_FLOAT_EQ(-3, h_res[2]); ASSERT_FLOAT_EQ(-3.5, h_res[3]); ASSERT_FLOAT_EQ(-4, h_res[4]); ASSERT_FLOAT_EQ(-4.5, h_res[5]); ASSERT_FLOAT_EQ(-5, h_res[6]); ASSERT_FLOAT_EQ(-5.5, h_res[7]); ASSERT_FLOAT_EQ(0, h_res[8]); ASSERT_FLOAT_EQ(0, h_res[9]); } TEST(ARIMA, applyMA) { const int length = 10; thrust::device_vector<float> last_residual(length); thrust::device_vector<float> ts_data(length, 0); for (auto i = 0; i < length; ++i) last_residual[i] = float(i % 3); const int q = 3; 
thrust::device_vector<float> theta(q); theta[0] = 1.0; theta[1] = -0.5; theta[2] = 0.1; thrust::device_vector<float> res(length, 0); h2o4gpu::ARIMAModel<float>::Apply( thrust::raw_pointer_cast(res.data()), thrust::raw_pointer_cast(ts_data.data()), nullptr, 0, thrust::raw_pointer_cast(last_residual.data()), thrust::raw_pointer_cast(theta.data()), q, length); OK(cudaGetLastError()); OK(cudaDeviceSynchronize()); thrust::host_vector<float> h_res = res; ASSERT_FLOAT_EQ(0.0, h_res[0]); ASSERT_FLOAT_EQ(-2.1, h_res[1]); ASSERT_FLOAT_EQ(0.3, h_res[2]); ASSERT_FLOAT_EQ(0.0, h_res[3]); ASSERT_FLOAT_EQ(-2.1, h_res[4]); ASSERT_FLOAT_EQ(0.3, h_res[5]); ASSERT_FLOAT_EQ(0.0, h_res[6]); ASSERT_FLOAT_EQ(0.0, h_res[7]); ASSERT_FLOAT_EQ(0, h_res[8]); ASSERT_FLOAT_EQ(0, h_res[9]); } TEST(ARIMA, applyARMA) { const int length = 10; thrust::device_vector<float> last_residual(length); thrust::device_vector<float> ts_data(length, 0); for (auto i = 0; i < length; ++i) { ts_data[i] = float(i % 4); last_residual[i] = float(i % 3); } const int p = 2; thrust::device_vector<float> phi(p); phi[0] = 0.8; phi[1] = -0.1; const int q = 3; thrust::device_vector<float> theta(q); theta[0] = 1.0; theta[1] = -0.5; theta[2] = 0.1; thrust::device_vector<float> res(length, 0); h2o4gpu::ARIMAModel<float>::Apply( thrust::raw_pointer_cast(res.data()), thrust::raw_pointer_cast(ts_data.data()), thrust::raw_pointer_cast(phi.data()), p, thrust::raw_pointer_cast(last_residual.data()), thrust::raw_pointer_cast(theta.data()), q, length); OK(cudaGetLastError()); OK(cudaDeviceSynchronize()); thrust::host_vector<float> h_res = res; ASSERT_FLOAT_EQ(-0.6, h_res[0]); ASSERT_FLOAT_EQ(-2.4, h_res[1]); ASSERT_NEAR(-0.1, h_res[2], 1e-6); ASSERT_FLOAT_EQ(3.1, h_res[3]); ASSERT_FLOAT_EQ(-2.7, h_res[4]); ASSERT_NEAR(0.0, h_res[5], 1e-7); ASSERT_FLOAT_EQ(-0.4, h_res[6]); ASSERT_NEAR(0.0, h_res[7], 1e-7); ASSERT_FLOAT_EQ(0, h_res[8]); ASSERT_FLOAT_EQ(0, h_res[9]); } TEST(ARIMA, d_0_p_2_q_0) { const int length = 10; thrust::device_vector<float> ts_data(length); for (auto i = 0; i < length; ++i) ts_data[i] = float(i % 3); h2o4gpu::ARIMAModel<float> model(2, 0, 0, length); model.Fit(thrust::raw_pointer_cast(ts_data.data())); ASSERT_FLOAT_EQ(0.34482756, model.Phi()[0]); ASSERT_FLOAT_EQ(0.13793102, model.Phi()[1]); } TEST(ARIMA, d_0_p_0_q_2_iter_1) { const int length = 10; thrust::device_vector<float> ts_data(length); for (auto i = 0; i < length; ++i) ts_data[i] = float(i % 3); h2o4gpu::ARIMAModel<float> model(0, 0, 2, length); model.Fit(thrust::raw_pointer_cast(ts_data.data())); ASSERT_FLOAT_EQ(0.34482756f, model.Theta()[0]); ASSERT_FLOAT_EQ(0.13793102f, model.Theta()[1]); } TEST(ARIMA, d_0_p_2_q_2_iter_1) { const int length = 7; thrust::host_vector<float> h_ts_data(length); for (auto i = 0; i < length; ++i) h_ts_data[i] = float(i % 5) + 0.1 * float(i % 7 + 1); thrust::host_vector<float> ts_data = h_ts_data; h2o4gpu::ARIMAModel<float> model(2, 0, 2, length); model.Fit(thrust::raw_pointer_cast(ts_data.data())); ASSERT_FLOAT_EQ(-2.9589546f, model.Phi()[0]); ASSERT_FLOAT_EQ(2.8828485f, model.Phi()[1]); ASSERT_FLOAT_EQ(3.9598641f, model.Theta()[0]); ASSERT_FLOAT_EQ(-0.61601555f, model.Theta()[1]); } // TEST(ARIMA, d_0_p_2_q_2_iter_2) { // const int length = 7; // thrust::host_vector<float> h_ts_data(length); // for (auto i = 0; i < length; ++i) // h_ts_data[i] = float(i % 5) + 0.1 * float(i % 7 + 1); // thrust::host_vector<float> ts_data = h_ts_data; // h2o4gpu::ARIMAModel<float> model(2, 0, 2, length); // model.Fit(thrust::raw_pointer_cast(ts_data.data()), 2); // 
ASSERT_FLOAT_EQ(-2.9589546f, model.Phi()[0]); // ASSERT_FLOAT_EQ(2.8828485f, model.Phi()[1]); // ASSERT_FLOAT_EQ(3.9598641f, model.Theta()[0]); // ASSERT_FLOAT_EQ(-0.61601555f, model.Theta()[1]); // } TEST(ARIMA, d_1_p_1_q_1_iter_1) { const int length = 10; thrust::device_vector<float> ts_data(length); for (auto i = 0; i < length; ++i) ts_data[i] = float(i + i % 3); h2o4gpu::ARIMAModel<float> model(1, 1, 1, length); model.Fit(thrust::raw_pointer_cast(ts_data.data())); ASSERT_FLOAT_EQ(-1.0369391f, model.Phi()[0]); ASSERT_FLOAT_EQ(1.1154615f, model.Theta()[0]); }
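// ---------------------------------------------------------------------------
// Reference sketch (added for illustration, not part of the original tests):
// host-side models of the two helpers exercised above, written purely from
// the expected values in the assertions. From those, Difference appears to
// compute d[i] = x[i] - x[i+1] with the last slot left as NaN, and AsMatrix
// appears to lay `depth` shifted windows of the series into a column-major
// matrix with leading dimension `lda`, leaving untouched rows at their
// initial (pad) value. Both are assumptions about the intended behaviour,
// not the library implementation.
#include <limits>
#include <vector>

static std::vector<float> difference_ref(const std::vector<float>& x) {
  std::vector<float> d(x.size(), std::numeric_limits<float>::quiet_NaN());
  for (size_t i = 0; i + 1 < x.size(); ++i) d[i] = x[i] - x[i + 1];
  return d;
}

static std::vector<float> as_matrix_ref(const std::vector<float>& ts,
                                        int depth, int lda, float pad) {
  // Column j (0 <= j < depth) holds ts[j], ts[j+1], ..., ts[j + length - depth],
  // truncated to lda rows; remaining rows keep the pad value (-1 or NaN above).
  const int length = static_cast<int>(ts.size());
  std::vector<float> A(static_cast<size_t>(depth) * lda, pad);
  for (int j = 0; j < depth; ++j)
    for (int i = 0; i <= length - depth && i < lda; ++i)
      A[static_cast<size_t>(j) * lda + i] = ts[j + i];
  return A;
}
// ---------------------------------------------------------------------------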
#include "NavierStokesSolver.h" #include "io/io.h" //############################################################################## // INITIALISE //############################################################################## /** * \brief Constructor. Copies the database and information about the computational grid. * * \param pDB database that contains all the simulation parameters * \param dInfo information related to the computational grid */ template <typename memoryType> NavierStokesSolver<memoryType>::NavierStokesSolver(parameterDB *pDB, domain *dInfo) { paramDB = pDB; domInfo = dInfo; } // NavierStokesSolver /** * \brief Initializes parameters, arrays and matrices required for the simulation. */ template <typename memoryType> void NavierStokesSolver<memoryType>::initialise() { printf("Initializing Navier-Stokes solver ...\n"); int nx = domInfo->nx, ny = domInfo->ny; int numUV = (nx-1)*ny + nx*(ny-1), numP = nx*ny; initialiseCommon(); initialiseArrays(numUV, numP); assembleMatrices(); } // initialise /** * \brief Initializes parameters common to all Navier-Stokes solvers. */ template <typename memoryType> void NavierStokesSolver<memoryType>::initialiseCommon() { printf("Initializing common parts ...\n"); logger.startTimer("initialiseCommon"); QCoeff = 1.0; subStep = 0; timeScheme convScheme = (*paramDB)["simulation"]["convTimeScheme"].get<timeScheme>(), diffScheme = (*paramDB)["simulation"]["diffTimeScheme"].get<timeScheme>(); intgSchm.initialise(convScheme, diffScheme); // set initial timeStep timeStep = (*paramDB)["simulation"]["startStep"].get<int>(); // get folder path std::string folder = (*paramDB)["inputs"]["caseFolder"].get<std::string>(); // writes the grids information to a file io::writeGrid(folder, *domInfo); // opens the file to which the number of iterations at every step is written std::stringstream out; out << folder << "/iterations"; if (timeStep == 0) { iterationsFile.open(out.str().c_str()); } else { iterationsFile.open(out.str().c_str(), std::ofstream::app); } logger.stopTimer("initialiseCommon"); } // initialiseCommon /** * \brief Initializes all arrays required to solve the Navier-Stokes equations. * * \param numQ total number velocity (or flux) unknowns (x- and y- directions) * \param numLambda number of pressure unknowns (plus number of body force unknowns) */ template <typename memoryType> void NavierStokesSolver<memoryType>::initialiseArrays(int numQ, int numLambda) { printf("Initializing arrays ...\n"); logger.startTimer("initialiseArrays"); q.resize(numQ); qStar.resize(numQ); qOld.resize(numQ); rn.resize(numQ); H.resize(numQ); bc1.resize(numQ); rhs1.resize(numQ); temp1.resize(numQ); cusp::blas::fill(rn, 0.0); cusp::blas::fill(H, 0.0); cusp::blas::fill(bc1, 0.0); cusp::blas::fill(rhs1, 0.0); cusp::blas::fill(temp1, 0.0); lambda.resize(numLambda); bc2.resize(numLambda); rhs2.resize(numLambda); temp2.resize(numLambda); cusp::blas::fill(lambda, 0.0); cusp::blas::fill(bc2, 0.0); cusp::blas::fill(rhs2, 0.0); cusp::blas::fill(temp2, 0.0); initialiseFluxes(); initialiseBoundaryArrays(); generateRN(); cusp::blas::scal(H, 1.0/intgSchm.gamma[subStep]); logger.stopTimer("initialiseArrays"); } // initialiseArrays /** * \brief Initializes velocity flux vectors (on the device). * * It creates a raw pointer before calling a method to initialize the flux vector. 
* */ template<> void NavierStokesSolver <device_memory>::initialiseFluxes() { int nx = domInfo->nx, ny = domInfo->ny; vecH qHost((nx-1)*ny+nx*(ny-1)); // creating raw pointers real *qHost_r = thrust::raw_pointer_cast(&(qHost[0])); initialiseFluxes(qHost_r); q = qHost; qStar=q; } // initialiseFluxes /** * \brief Initializes velocity flux vectors. * * \param q the velocity flux vector */ template <typename memoryType> void NavierStokesSolver <memoryType>::initialiseFluxes(real *q) { if (timeStep != 0) { // case directory std::string caseFolder = (*paramDB)["inputs"]["caseFolder"].get<std::string>(); // read velocity fluxes from file io::readData(caseFolder, timeStep, q, "q"); return; } int nx = domInfo->nx, ny = domInfo->ny, numU = (nx-1)*ny; real xmin = domInfo->x[0], xmax = domInfo->x[nx], ymin = domInfo->y[0], ymax = domInfo->y[ny]; real uInitial = (*paramDB)["flow"]["uInitial"].get<real>(), uPerturb = (*paramDB)["flow"]["uPerturb"].get<real>(), vInitial = (*paramDB)["flow"]["vInitial"].get<real>(), vPerturb = (*paramDB)["flow"]["vPerturb"].get<real>(); for(int j=0; j<ny; j++) { for(int i=0; i<nx-1; i++) { q[j*(nx-1) + i] = ( uInitial + uPerturb * cos( 0.5*M_PI*(2*domInfo->xu[i]-xmax-xmin)/(xmax-xmin) ) * sin( M_PI * (2*domInfo->yu[j]-ymax-ymin)/(ymax-ymin) ) ) * domInfo->dy[j]; } } for(int j=0; j<ny-1; j++) { for(int i=0; i<nx; i++) { q[j*nx + i + numU] = ( vInitial + vPerturb * cos( 0.5*M_PI*(2*domInfo->yv[j]-ymax-ymin)/(ymax-ymin) ) * sin( M_PI * (2*domInfo->xv[i]-xmax-xmin)/(xmax-xmin) ) ) * domInfo->dx[i]; } } } // initialiseFluxes /** * \brief Initializes boundary velocity arrays with values stored in the database. */ template <typename memoryType> void NavierStokesSolver<memoryType>::initialiseBoundaryArrays() { int nx = domInfo->nx, ny = domInfo->ny; boundaryCondition **bcInfo = (*paramDB)["flow"]["boundaryConditions"].get<boundaryCondition **>(); // resize boundary arrays by the number of velocity points on boundaries (u and v points) bc[XMINUS].resize(2*ny-1); bc[XPLUS].resize(2*ny-1); bc[YMINUS].resize(2*nx-1); bc[YPLUS].resize(2*nx-1); /// Top and Bottom for(int i=0; i<nx-1; i++) { bc[YMINUS][i] = bcInfo[YMINUS][0].value; bc[YPLUS][i] = bcInfo[YPLUS][0].value; bc[YMINUS][i+nx-1] = bcInfo[YMINUS][1].value; bc[YPLUS][i+nx-1] = bcInfo[YPLUS][1].value; } bc[YMINUS][2*nx-2] = bcInfo[YMINUS][1].value; bc[YPLUS][2*nx-2] = bcInfo[YPLUS][1].value; /// Left and Right for(int i=0; i<ny-1; i++) { bc[XMINUS][i] = bcInfo[XMINUS][0].value; bc[XPLUS][i] = bcInfo[XPLUS][0].value; bc[XMINUS][i+ny] = bcInfo[XMINUS][1].value; bc[XPLUS][i+ny] = bcInfo[XPLUS][1].value; } bc[XMINUS][ny-1] = bcInfo[XMINUS][0].value; bc[XPLUS][ny-1] = bcInfo[XPLUS][0].value; } // initialiseBoundaryArrays //############################################################################## // TIME STEPPING //############################################################################## /** * \brief Calculates the variables at the next time step. */ template <typename memoryType> void NavierStokesSolver<memoryType>::stepTime() { qOld = q; for(subStep=0; subStep < intgSchm.subSteps; subStep++) { updateSolverState(); // Set up and solve the first system for the intermediate velocity generateRN(); generateBC1(); assembleRHS1(); solveIntermediateVelocity(); // Set up and solve the Poisson system generateBC2(); assembleRHS2(); solvePoisson(); // Projection step projectionStep(); } timeStep++; } // stepTime /** * \brief Doing nothing. Used in immersed boundary methods when the body moves. 
*/ template <typename memoryType> void NavierStokesSolver<memoryType>::updateSolverState() { // generateA(intgSchm.alphaImplicit[subStep]); // updateQ(intgSchm.gamma[i]); // updateBoundaryConditions(); } // updateSolverState /** * \brief Doing nothing. * * \param gamma coefficient of the convection term at the current time step */ template <typename memoryType> void NavierStokesSolver<memoryType>::updateQ(real gamma) { // cusp::blas::scal(Q.values, gamma/QCoeff); // QCoeff = gamma; } // updateQ /** * \brief Doing nothing. */ template <typename memoryType> void NavierStokesSolver<memoryType>::updateBoundaryConditions() { } // updateBoundaryConditions /** * \brief Evaluates the condition required to stop the simulation. * * \return a Boolean to continue or stop the simulation */ template <typename memoryType> bool NavierStokesSolver<memoryType>::finished() { int startStep = (*paramDB)["simulation"]["startStep"].get<int>(); int nt = (*paramDB)["simulation"]["nt"].get<int>(); return (timeStep < startStep+nt) ? false : true; } // finished //############################################################################## // ASSEMBLE MATRICES //############################################################################## /** * \brief Assembles matrices of the intermediate flux solver and the Poisson solver. */ template <typename memoryType> void NavierStokesSolver<memoryType>::assembleMatrices() { printf("Initializing matrices ...\n"); logger.startTimer("assembleMatrices"); generateM(); generateL(); generateA(intgSchm.alphaImplicit[subStep]); PC1 = new preconditioner< cusp::coo_matrix<int, real, memoryType> >(A, (*paramDB)["velocitySolve"]["preconditioner"].get<preconditionerType>()); generateBN(); logger.stopTimer("assembleMatrices"); generateQT(); generateC(); // QT*BN*Q logger.startTimer("preconditioner2"); PC2 = new preconditioner< cusp::coo_matrix<int, real, memoryType> >(C, (*paramDB)["PoissonSolve"]["preconditioner"].get<preconditionerType>()); logger.stopTimer("preconditioner2"); } // assembleMatrices /** * \brief Generates approximate inverse of the matrix resulting from implicit velocity terms. * * It computes the N-th order Taylor expansion of the inverse matrix. * Currently, the order is N=1. * */ template <typename memoryType> void NavierStokesSolver<memoryType>::generateBN() { BN = Minv; // 1st-order } // generateBN /* template <typename memoryType> template <> void NavierStokesSolver<memoryType>::generateBN<3>() { Matrix temp1, temp2; cusp::multiply(Minv, L, temp1); cusp::multiply(temp1, Minv, BN); cusp::add(Minv, BN, BN); cusp::multiply(temp1, BN, temp2); cusp::add(Minv, temp2, BN); }*/ /** * \brief Generates the matrix of the Poisson solver. */ template <> void NavierStokesSolver<device_memory>::generateC() { logger.startTimer("generateC"); cooD temp; // Should this temp matrix be created each time step? cusp::multiply(QT, BN, temp); cusp::multiply(temp, Q, C); C.values[0] += C.values[0]; logger.stopTimer("generateC"); } // generateC //############################################################################## // GENERATE VECTORS //############################################################################## /** * \brief Assembles the right hand-side of the system for the intermediate flux. */ template <typename memoryType> void NavierStokesSolver<memoryType>::assembleRHS1() { logger.startTimer("assembleRHS1"); cusp::blas::axpby(rn, bc1, rhs1, 1.0, 1.0); logger.stopTimer("assembleRHS1"); } // assembleRHS1 /** * \brief Assembles the right hand-side of the Poisson system. 
*/ template <typename memoryType> void NavierStokesSolver<memoryType>::assembleRHS2() { logger.startTimer("assembleRHS2"); cusp::multiply(QT, qStar, temp2); cusp::blas::axpby(temp2, bc2, rhs2, 1.0, -1.0); logger.stopTimer("assembleRHS2"); } // assembleRHS2 //############################################################################## // LINEAR SOLVES //############################################################################## /** * \brief Solves for the intermediate flux velocity. */ template <typename memoryType> void NavierStokesSolver<memoryType>::solveIntermediateVelocity() { logger.startTimer("solveIntermediateVel"); std::string krylov = (*paramDB)["velocitySolve"]["solver"].get<std::string>(); int maxIte = (*paramDB)["velocitySolve"]["maxIterations"].get<int>(); real rTol = (*paramDB)["velocitySolve"]["rTol"].get<real>(); real aTol = (*paramDB)["velocitySolve"]["aTol"].get<real>(); bool monitor = (*paramDB)["velocitySolve"]["monitor"].get<bool>(); cusp::monitor<real> sys1Mon(rhs1, maxIte, rTol, aTol, monitor); if (krylov == "CG") cusp::krylov::cg(A, qStar, rhs1, sys1Mon, *PC1); else if (krylov == "BICGSTAB") cusp::krylov::bicgstab(A, qStar, rhs1, sys1Mon, *PC1); else if (krylov == "GMRES") { int restart = (*paramDB)["velocitySolve"]["restart"].get<int>(); cusp::krylov::gmres(A, qStar, rhs1, restart, sys1Mon, *PC1); } else { printf("Error: Unknown Krylov solver '%s' for velocity system!\n", krylov.c_str()); exit(-1); } iterationCount1 = sys1Mon.iteration_count(); if (!sys1Mon.converged()) { std::cout << "Error: Solve for q* failed at time step " << timeStep << std::endl; std::cout << "Iterations : " << iterationCount1 << std::endl; std::cout << "Residual norm: " << sys1Mon.residual_norm() << std::endl; std::cout << "Tolerance : " << sys1Mon.tolerance() << std::endl; exit(-1); } logger.stopTimer("solveIntermediateVel"); } // solveIntermediateVelocity /** * \brief Solves the Poisson system. */ template <typename memoryType> void NavierStokesSolver<memoryType>::solvePoisson() { logger.startTimer("solvePoisson"); std::string krylov = (*paramDB)["PoissonSolve"]["solver"].get<std::string>(); int maxIte = (*paramDB)["PoissonSolve"]["maxIterations"].get<int>(); real rTol = (*paramDB)["PoissonSolve"]["rTol"].get<real>(); real aTol = (*paramDB)["PoissonSolve"]["aTol"].get<real>(); bool monitor = (*paramDB)["PoissonSolve"]["monitor"].get<bool>(); cusp::monitor<real> sys2Mon(rhs2, maxIte, rTol, aTol, monitor); if (krylov == "CG") cusp::krylov::cg(C, lambda, rhs2, sys2Mon, *PC2); else if (krylov == "BICGSTAB") cusp::krylov::bicgstab(C, lambda, rhs2, sys2Mon, *PC2); else if (krylov == "GMRES") { int restart = (*paramDB)["PoissonSolve"]["restart"].get<int>(); cusp::krylov::gmres(C, lambda, rhs2, restart, sys2Mon, *PC2); } else { printf("Error: Unknown Krylov solver '%s' for Poisson system!\n", krylov.c_str()); exit(-1); } iterationCount2 = sys2Mon.iteration_count(); if (!sys2Mon.converged()) { std::cout << "Error: Solve for Lambda failed at time step " << timeStep << std::endl; std::cout << "Iterations : " << iterationCount2 << std::endl; std::cout << "Residual norm: " << sys2Mon.residual_norm() << std::endl; std::cout << "Tolerance : " << sys2Mon.tolerance() << std::endl; exit(-1); } logger.stopTimer("solvePoisson"); } // solvePoisson /** * \brief Projects the flux onto the divergence-free field. 
*/ template <typename memoryType> void NavierStokesSolver<memoryType>::projectionStep() { logger.startTimer("projectionStep"); cusp::multiply(Q, lambda, temp1); cusp::multiply(BN, temp1, q); cusp::blas::axpby(qStar, q, q, 1.0, -1.0); logger.stopTimer("projectionStep"); } // projectionStep //############################################################################## // OUTPUT //############################################################################## /** * \brief Writes numerical solution at current time-step, * as well as the number of iterations performed in each solver. */ template <typename memoryType> void NavierStokesSolver<memoryType>::writeCommon() { int nsave = (*paramDB)["simulation"]["nsave"].get<int>(); std::string folder = (*paramDB)["inputs"]["caseFolder"].get<std::string>(); // write the velocity fluxes and the pressure values if (timeStep % nsave == 0) { io::writeData(folder, timeStep, q, lambda, *domInfo); } // write the number of iterations for each solve iterationsFile << timeStep << '\t' << iterationCount1 << '\t' << iterationCount2 << std::endl; } // writeCommon /** * \brief Writes data into files. */ template <typename memoryType> void NavierStokesSolver<memoryType>::writeData() { logger.startTimer("output"); writeCommon(); logger.stopTimer("output"); } // writeData /** * \brief Prints timing information and closes the different files. */ template <typename memoryType> void NavierStokesSolver<memoryType>::shutDown() { io::printTimingInfo(logger); iterationsFile.close(); } // shutDown // include inline files #include "NavierStokes/generateM.inl" #include "NavierStokes/generateL.inl" #include "NavierStokes/generateA.inl" #include "NavierStokes/generateQT.inl" #include "NavierStokes/generateRN.inl" #include "NavierStokes/generateBC1.inl" #include "NavierStokes/generateBC2.inl" // specialization of the class NavierStokesSolver template class NavierStokesSolver<device_memory>;
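// ---------------------------------------------------------------------------
// Summary sketch (added for illustration, not part of the original solver):
// the linear algebra carried out each sub-step by the methods above, read off
// from assembleRHS1/assembleRHS2, solveIntermediateVelocity, solvePoisson and
// projectionStep. With A the implicit velocity matrix, BN the approximate
// inverse of M (first-order Taylor term, see generateBN), Q the gradient
// operator and QT its transpose:
//
//   1. intermediate velocity:  A q* = rn + bc1                 (Krylov solve with PC1)
//   2. pressure-like system:   (QT BN Q) lambda = QT q* - bc2  (Krylov solve with PC2)
//   3. projection:             q = q* - BN Q lambda
//
// generateC assembles exactly the step-2 operator, C = QT * BN * Q; the
// doubling of C.values[0] is presumably there to remove the constant null
// space of the pressure system (an interpretation, not stated in the code).
// ---------------------------------------------------------------------------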
#include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <math.h> #include <float.h> #include <inttypes.h> #include <stdarg.h> #include <ctype.h> #include <assert.h> #include <cuda.h> #include "sleefquadinline_cuda.h" #include "sleefquadinline_purec_scalar.h" #define STDIN_FILENO 0 // static int startsWith(const char *str, const char *prefix) { while(*prefix != '\0') if (*str++ != *prefix++) return 0; return *prefix == '\0'; } static double u2d(uint64_t u) { union { double f; uint64_t i; } tmp; tmp.i = u; return tmp.f; } static uint64_t d2u(double d) { union { double f; uint64_t i; } tmp; tmp.f = d; return tmp.i; } // __global__ void xaddq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_addq1_u05cuda(*a0, *a1); } __global__ void xsubq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_subq1_u05cuda(*a0, *a1); } __global__ void xmulq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_mulq1_u05cuda(*a0, *a1); } __global__ void xdivq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_divq1_u05cuda(*a0, *a1); } __global__ void xnegq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_negq1_cuda(*a0); } __global__ void xicmpltq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpltq1_cuda(*a0, *a1); } __global__ void xicmpgtq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpgtq1_cuda(*a0, *a1); } __global__ void xicmpleq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpleq1_cuda(*a0, *a1); } __global__ void xicmpgeq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpgeq1_cuda(*a0, *a1); } __global__ void xicmpeqq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpeqq1_cuda(*a0, *a1); } __global__ void xicmpneq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpneq1_cuda(*a0, *a1); } __global__ void xicmpq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_icmpq1_cuda(*a0, *a1); } __global__ void xiunordq(int *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_iunordq1_cuda(*a0, *a1); } __global__ void xcast_from_doubleq(Sleef_quadx1 *r0, double *d0) { *r0 = Sleef_cast_from_doubleq1_cuda(*d0); } __global__ void xcast_to_doubleq(double *r0, Sleef_quadx1 *a0) { *r0 = Sleef_cast_to_doubleq1_cuda(*a0); } __global__ void xcast_from_int64q(Sleef_quadx1 *r0, int64_t *i0) { *r0 = Sleef_cast_from_int64q1_cuda(*i0); } __global__ void xcast_to_int64q(int64_t *r0, Sleef_quadx1 *a0) { *r0 = Sleef_cast_to_int64q1_cuda(*a0); } __global__ void xcast_from_uint64q(Sleef_quadx1 *r0, uint64_t *u0) { *r0 = Sleef_cast_from_uint64q1_cuda(*u0); } __global__ void xcast_to_uint64q(uint64_t *r0, Sleef_quadx1 *a0) { *r0 = Sleef_cast_to_uint64q1_cuda(*a0); } __global__ void xsqrtq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_sqrtq1_u05cuda(*a0); } __global__ void xcbrtq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_cbrtq1_u10cuda(*a0); } __global__ void xsinq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_sinq1_u10cuda(*a0); } __global__ void xcosq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_cosq1_u10cuda(*a0); } __global__ void xtanq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_tanq1_u10cuda(*a0); } __global__ void xasinq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_asinq1_u10cuda(*a0); } __global__ void xacosq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_acosq1_u10cuda(*a0); } __global__ void xatanq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_atanq1_u10cuda(*a0); } __global__ void 
xatan2q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_atan2q1_u10cuda(*a0, *a1); } __global__ void xexpq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_expq1_u10cuda(*a0); } __global__ void xexp2q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_exp2q1_u10cuda(*a0); } __global__ void xexp10q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_exp10q1_u10cuda(*a0); } __global__ void xexpm1q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_expm1q1_u10cuda(*a0); } __global__ void xlogq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_logq1_u10cuda(*a0); } __global__ void xlog2q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_log2q1_u10cuda(*a0); } __global__ void xlog10q_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_log10q1_u10cuda(*a0); } __global__ void xlog1pq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_log1pq1_u10cuda(*a0); } __global__ void xpowq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_powq1_u10cuda(*a0, *a1); } __global__ void xsinhq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_sinhq1_u10cuda(*a0); } __global__ void xcoshq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_coshq1_u10cuda(*a0); } __global__ void xtanhq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_tanhq1_u10cuda(*a0); } __global__ void xasinhq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_asinhq1_u10cuda(*a0); } __global__ void xacoshq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_acoshq1_u10cuda(*a0); } __global__ void xatanhq_u10(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_atanhq1_u10cuda(*a0); } __global__ void xfabsq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_fabsq1_cuda(*a0); } __global__ void xcopysignq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_copysignq1_cuda(*a0, *a1); } __global__ void xfmaxq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_fmaxq1_cuda(*a0, *a1); } __global__ void xfminq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_fminq1_cuda(*a0, *a1); } __global__ void xfdimq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_fdimq1_u05cuda(*a0, *a1); } __global__ void xfmodq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_fmodq1_cuda(*a0, *a1); } __global__ void xremainderq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_remainderq1_cuda(*a0, *a1); } __global__ void xfrexpq(Sleef_quadx1 *r, Sleef_quadx1 *a0, int *i0) { *r = Sleef_frexpq1_cuda(*a0, i0); } __global__ void xmodfq(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_modfq1_cuda(*a0, a1); } __global__ void xfmaq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1, Sleef_quadx1 *a2) { *r = Sleef_fmaq1_u05cuda(*a0, *a1, *a2); } __global__ void xhypotq_u05(Sleef_quadx1 *r, Sleef_quadx1 *a0, Sleef_quadx1 *a1) { *r = Sleef_hypotq1_u05cuda(*a0, *a1); } __global__ void xilogbq(int *r, Sleef_quadx1 *a0) { *r = Sleef_ilogbq1_cuda(*a0); } __global__ void xldexpq(Sleef_quadx1 *r, Sleef_quadx1 *a0, int *i0) { *r = Sleef_ldexpq1_cuda(*a0, *i0); } __global__ void xtruncq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_truncq1_cuda(*a0); } __global__ void xfloorq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_floorq1_cuda(*a0); } __global__ void xceilq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_ceilq1_cuda(*a0); } __global__ void xroundq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_roundq1_cuda(*a0); } __global__ void xrintq(Sleef_quadx1 *r, Sleef_quadx1 *a0) { *r = Sleef_rintq1_cuda(*a0); } // typedef 
union { Sleef_quad q; struct { uint64_t l, h; }; } cnv128; #define BUFSIZE 1024 #define func_q_q(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ funcName<<<1, 1>>>(r, a0); \ cudaDeviceSynchronize(); \ c0.q = Sleef_getq1_cuda(*r, 0); \ printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_q_q_q(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0, c1; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64 " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l, &c1.h, &c1.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ *a1 = Sleef_setq1_cuda(*a1, 0, c1.q); \ funcName<<<1, 1>>>(r, a0, a1); \ cudaDeviceSynchronize(); \ c0.q = Sleef_getq1_cuda(*r, 0); \ printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_q_q_q_q(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0, c1, c2; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64 " %" PRIx64 ":%" PRIx64 " %" PRIx64 ":%" PRIx64, \ &c0.h, &c0.l, &c1.h, &c1.l, &c2.h, &c2.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ *a1 = Sleef_setq1_cuda(*a1, 0, c1.q); \ *a2 = Sleef_setq1_cuda(*a2, 0, c2.q); \ funcName<<<1, 1>>>(r, a0, a1, a2); \ cudaDeviceSynchronize(); \ c0.q = Sleef_getq1_cuda(*r, 0); \ printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_i_q(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ funcName<<<1, 1>>>(i0, a0); \ cudaDeviceSynchronize(); \ printf("%d\n", *i0); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_i_q_q(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0, c1; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64 " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l, &c1.h, &c1.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ *a1 = Sleef_setq1_cuda(*a1, 0, c1.q); \ funcName<<<1, 1>>>(i0, a0, a1); \ cudaDeviceSynchronize(); \ printf("%d\n", *i0); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_q_q_i(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0; \ int k; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64 " %d", &c0.h, &c0.l, &k); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ *i0 = k; \ funcName<<<1, 1>>>(r, a0, i0); \ cudaDeviceSynchronize(); \ c0.q = Sleef_getq1_cuda(*r, 0); \ printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_d_q(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ funcName<<<1, 1>>>(d0, a0); \ cudaDeviceSynchronize(); \ printf("%" PRIx64 "\n", d2u(*d0)); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_q_d(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ uint64_t u; \ sscanf(buf, funcStr " %" PRIx64, &u); \ *d0 = u2d(u); \ funcName<<<1, 1>>>(r, d0); \ cudaDeviceSynchronize(); \ cnv128 c0; \ c0.q = 
Sleef_getq1_cuda(*r, 0); \ printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_i64_q(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ funcName<<<1, 1>>>(i64, a0); \ cudaDeviceSynchronize(); \ printf("%" PRIx64 "\n", *i64); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_q_i64(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ sscanf(buf, funcStr " %" PRIx64, i64); \ funcName<<<1, 1>>>(r, i64); \ cudaDeviceSynchronize(); \ cnv128 c0; \ c0.q = Sleef_getq1_cuda(*r, 0); \ printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_u64_q(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ funcName<<<1, 1>>>(u64, a0); \ cudaDeviceSynchronize(); \ printf("%" PRIx64 "\n", *u64); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_q_u64(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ sscanf(buf, funcStr " %" PRIx64, u64); \ funcName<<<1, 1>>>(r, u64); \ cudaDeviceSynchronize(); \ cnv128 c0; \ c0.q = Sleef_getq1_cuda(*r, 0); \ printf("%" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_q_q_pi(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ funcName<<<1, 1>>>(r, a0, i0); \ cudaDeviceSynchronize(); \ c0.q = Sleef_getq1_cuda(*r, 0); \ printf("%" PRIx64 ":%" PRIx64 " %d\n", c0.h, c0.l, *i0); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_q_q_pq(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ sentinel = 0; \ cnv128 c0, c1; \ sscanf(buf, funcStr " %" PRIx64 ":%" PRIx64, &c0.h, &c0.l); \ *a0 = Sleef_setq1_cuda(*a0, 0, c0.q); \ funcName<<<1, 1>>>(r, a0, a1); \ cudaDeviceSynchronize(); \ c0.q = Sleef_getq1_cuda(*r, 0); \ c1.q = Sleef_getq1_cuda(*a1, 0); \ printf("%" PRIx64 ":%" PRIx64 " %" PRIx64 ":%" PRIx64 "\n", c0.h, c0.l, c1.h, c1.l); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } int main(int argc, char **argv) { #if 0 cuInit(0); int ndevice; cuDeviceGetCount(&ndevice); if (ndevice == 0) { fprintf(stderr, "No cuda device available\n"); exit(0); } CUdevice device; char deviceName[1024]; cuDeviceGet(&device, 0); cuDeviceGetName(deviceName, 1000, device); fprintf(stderr, "Device : %s\n", deviceName); #endif cudaSetDeviceFlags(cudaDeviceScheduleSpin); Sleef_quadx1 *r, *a0, *a1, *a2; double *d0; int *i0; int64_t *i64; uint64_t *u64; cudaMallocManaged(&r , 1*sizeof(Sleef_quadx1)); cudaMallocManaged(&a0, 1*sizeof(Sleef_quadx1)); cudaMallocManaged(&a1, 1*sizeof(Sleef_quadx1)); cudaMallocManaged(&a2, 1*sizeof(Sleef_quadx1)); cudaMallocManaged(&d0, 1*sizeof(double)); cudaMallocManaged(&i0, 1*sizeof(int)); cudaMallocManaged(&i64, 1*sizeof(int64_t)); cudaMallocManaged(&u64, 1*sizeof(uint64_t)); // printf("1\n"); fflush(stdout); // { *a0 = Sleef_setq1_cuda(*a0, 0, SLEEF_M_PIq); *a1 = Sleef_setq1_cuda(*a1, 0, 
Sleef_strtoq("2.718281828459045235360287471352662498", NULL)); xmulq_u05<<<1, 1>>>(r, a0, a1); cudaDeviceSynchronize(); Sleef_quad v0 = Sleef_getq1_cuda(*r, 0); if (Sleef_icmpneq1_purec(v0, sleef_q(+0x1114580b45d47LL, 0x49e6108579a2d0caULL, 3))) { fprintf(stderr, "Testing with Sleef_mulq1_u05cuda failed\n"); exit(-1); } } // char buf[BUFSIZE]; if (fgets(buf, BUFSIZE-1, stdin)) {} int sentinel = 0; while(!feof(stdin) && sentinel < 2) { func_q_q_q("addq_u05", xaddq_u05); func_q_q_q("subq_u05", xsubq_u05); func_q_q_q("mulq_u05", xmulq_u05); func_q_q_q("divq_u05", xdivq_u05); func_q_q("sqrtq_u05", xsqrtq_u05); func_q_q("cbrtq_u10", xcbrtq_u10); func_q_q("sinq_u10", xsinq_u10); func_q_q("cosq_u10", xcosq_u10); func_q_q("tanq_u10", xtanq_u10); func_q_q("asinq_u10", xasinq_u10); func_q_q("acosq_u10", xacosq_u10); func_q_q("atanq_u10", xatanq_u10); func_q_q_q("atan2q_u10", xatan2q_u10); func_q_q("expq_u10", xexpq_u10); func_q_q("exp2q_u10", xexp2q_u10); func_q_q("exp10q_u10", xexp10q_u10); func_q_q("expm1q_u10", xexpm1q_u10); func_q_q("logq_u10", xlogq_u10); func_q_q("log2q_u10", xlog2q_u10); func_q_q("log10q_u10", xlog10q_u10); func_q_q("log1pq_u10", xlog1pq_u10); func_q_q_q("powq_u10", xpowq_u10); func_q_q("sinhq_u10", xsinhq_u10); func_q_q("coshq_u10", xcoshq_u10); func_q_q("tanhq_u10", xtanhq_u10); func_q_q("asinhq_u10", xasinhq_u10); func_q_q("acoshq_u10", xacoshq_u10); func_q_q("atanhq_u10", xatanhq_u10); func_q_q("negq", xnegq); func_q_q("fabsq", xfabsq); func_q_q_q("copysignq", xcopysignq); func_q_q_q("fmaxq", xfmaxq); func_q_q_q("fminq", xfminq); func_q_q_q("fdimq_u05", xfdimq_u05); func_q_q_q("fmodq", xfmodq); func_q_q_q("remainderq", xremainderq); func_q_q_pi("frexpq", xfrexpq); func_q_q_pq("modfq", xmodfq); func_i_q("ilogbq", xilogbq); func_q_q_i("ldexpq", xldexpq); func_q_q_q_q("fmaq_u05", xfmaq_u05); func_q_q_q("hypotq_u05", xhypotq_u05); func_q_q("truncq", xtruncq); func_q_q("floorq", xfloorq); func_q_q("ceilq", xceilq); func_q_q("roundq", xroundq); func_q_q("rintq", xrintq); func_q_d("cast_from_doubleq", xcast_from_doubleq); func_d_q("cast_to_doubleq", xcast_to_doubleq); func_q_i64("cast_from_int64q", xcast_from_int64q); func_i64_q("cast_to_int64q", xcast_to_int64q); func_q_u64("cast_from_uint64q", xcast_from_uint64q); func_u64_q("cast_to_uint64q", xcast_to_uint64q); func_i_q_q("icmpltq", xicmpltq); func_i_q_q("icmpgtq", xicmpgtq); func_i_q_q("icmpleq", xicmpleq); func_i_q_q("icmpgeq", xicmpgeq); func_i_q_q("icmpeqq", xicmpeqq); func_i_q_q("icmpneq", xicmpneq); func_i_q_q("icmpq", xicmpq); func_i_q_q("iunordq", xiunordq); sentinel++; } // return 0; }
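// ---------------------------------------------------------------------------
// Protocol sketch (added for illustration, not part of the original tester):
// the harness above is driven line-by-line over stdin with commands of the
// form "<name> <hi64>:<lo64> [<hi64>:<lo64> ...]" and replies with results in
// the same hex encoding, as the func_* macros show. A driver-side pair of
// helpers using the same cnv128 layout might look like this (the helper names
// are hypothetical):
static void encode_quad(char *buf, size_t bufsize, Sleef_quad q) {
  cnv128 c;
  c.q = q;
  // high 64 bits, a colon, then the low 64 bits, both in hex
  snprintf(buf, bufsize, "%" PRIx64 ":%" PRIx64, c.h, c.l);
}

static Sleef_quad decode_quad(const char *str) {
  cnv128 c;
  sscanf(str, "%" PRIx64 ":%" PRIx64, &c.h, &c.l);
  return c.q;
}
// For example, sending the line "addq_u05 <x> <y>" makes the tester run
// xaddq_u05 on the device and print encode_quad(x + y).
// ---------------------------------------------------------------------------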
using namespace std; namespace amgx { namespace idr_solver { // Constructor template< class T_Config> IDR_Solver_Base<T_Config>::IDR_Solver_Base( AMG_Config &cfg, const std::string &cfg_scope) : Solver<T_Config>( cfg, cfg_scope), m_buffer_N(0) { std::string solverName, new_scope, tmp_scope; cfg.getParameter<std::string>( "preconditioner", solverName, cfg_scope, new_scope ); s = cfg.AMG_Config::getParameter<int>("subspace_dim_s", cfg_scope); if (solverName.compare("NOSOLVER") == 0) { no_preconditioner = true; m_preconditioner = NULL; } else { no_preconditioner = false; m_preconditioner = SolverFactory<T_Config>::allocate( cfg, cfg_scope, "preconditioner" ); } } template<class T_Config> IDR_Solver_Base<T_Config>::~IDR_Solver_Base() { if (!no_preconditioner) { delete m_preconditioner; } } template<class T_Config> void IDR_Solver_Base<T_Config>::solver_setup(bool reuse_matrix_structure) { AMGX_CPU_PROFILER( "IDR_Solver::solver_setup " ); Operator<T_Config> &A = *this->m_A; ViewType oldView = A.currentView(); A.setViewExterior(); // The number of elements in temporary vectors. this->m_buffer_N = static_cast<int>( this->m_A->get_num_cols() * this->m_A->get_block_dimy() ); const int N = this->m_buffer_N; s = this->s; // Allocate memory needed for iterating. m_z.resize(N); m_Ax.resize(N); m_v.resize(N); tempg.resize(N); tempu.resize(N); temp.resize(N); t_idr.resize(N); c.resize(s); m_f.resize(s); h_chk.resize(N * s); svec_chk.resize(s); G.resize(N * s); G.set_lda(N); G.set_num_rows(N); G.set_num_cols(s); U.resize(N * s); U.set_lda(N); U.set_num_rows(N); U.set_num_cols(s); P.resize(N * s); P.set_lda(N); P.set_num_rows(N); P.set_num_cols(s); M.resize(s * s); M.set_lda(s); M.set_num_rows(s); M.set_num_cols(s); m_Ax.set_block_dimy(this->m_A->get_block_dimy()); m_Ax.set_block_dimx(1); m_Ax.dirtybit = 1; m_Ax.delayed_send = 1; m_Ax.tag = this->tag * 100 + 2; m_z.set_block_dimy(this->m_A->get_block_dimy()); m_z.set_block_dimx(1); m_z.dirtybit = 1; m_z.delayed_send = 1; m_z.tag = this->tag * 100 + 3; m_v.set_block_dimy(this->m_A->get_block_dimy()); m_v.set_block_dimx(1); m_v.dirtybit = 1; m_v.delayed_send = 1; m_v.tag = this->tag * 100 + 3; m_f.set_block_dimx(1); m_f.set_block_dimy(1); c.set_block_dimx(1); c.set_block_dimy(1); temp.set_block_dimx(1); temp.set_block_dimy(this->m_A->get_block_dimy()); tempu.set_block_dimx(1); tempu.set_block_dimy(this->m_A->get_block_dimy()); tempg.set_block_dimx(1); tempg.set_block_dimy(this->m_A->get_block_dimy()); t_idr.set_block_dimx(1); t_idr.set_block_dimy(this->m_A->get_block_dimy()); U.set_block_dimx(1); U.set_block_dimy(this->m_A->get_block_dimy()); G.set_block_dimx(1); G.set_block_dimy(this->m_A->get_block_dimy()); P.set_block_dimx(1); P.set_block_dimy(this->m_A->get_block_dimy()); M.set_block_dimx(1); M.set_block_dimy(1); // Setup the preconditionner if (!no_preconditioner) { m_preconditioner->setup(A, reuse_matrix_structure); } A.setView(oldView); } template<class T_Config> void IDR_Solver_Base<T_Config>::solve_init( VVector &b, VVector &x, bool xIsZero ) { AMGX_CPU_PROFILER( "IDR_Solver::solve_init " ); int s; Operator<T_Config> &A = *this->m_A; ViewType oldView = A.currentView(); A.setViewExterior(); int offset, size, N; A.getOffsetAndSizeForView(A.getViewExterior(), &offset, &size); N = this->m_buffer_N; s = this->s; this->omega = 1; // M is identity fill(G, (ValueTypeB)0, 0, N * s); fill(U, (ValueTypeB)0, 0, N * s); fill(M, (ValueTypeB)0, 0, s * s); fill(P, (ValueTypeB)0, 0, N * s); fill(tempg, (ValueTypeB)0, 0, N); fill(tempu, (ValueTypeB)0, 0, N); 
fill(temp, (ValueTypeB)0, 0, N); fill(t_idr, (ValueTypeB)0, 0, N); fill(m_f, (ValueTypeB)0, 0, s); fill(c, (ValueTypeB)0, 0, s); fill(m_v, (ValueTypeB)0, 0, N); setup_arrays(P, M, b, x, h_chk, s, N); A.setView(oldView); } template<class T_Config> bool IDR_Solver_Base<T_Config>::solve_iteration( VVector &b, VVector &x, bool xIsZero ) { AMGX_CPU_PROFILER( "IDR_Solver::solve_iteration " ); Operator<T_Config> &A = *this->m_A; ViewType oldView = A.currentView(); A.setViewExterior(); bool transposed = false; int offset, i, s, k, N, size; ValueTypeB alpha_blas(1), beta_blas(0), malpha_blas(-1);// malpha_blas=-1.0f; ValueTypeB alpha, beta; ValueTypeB ns, nt, ts, rho, angle(0.7); A.getOffsetAndSizeForView(A.getViewExterior(), &offset, &size); N = A.get_num_rows(); s = this->s; if (s == 1) { angle = (ValueTypeB) 0; } // New right-hand size for small system: // f = (r'*P)'; dot_ina_loop(P, *this->m_r, 0, 0, m_f, svec_chk, 0, N, 0, s); // solving the small system and making v orth. to P for (k = 0; k < s; k++) { // c = M(k:s,k:s)\f(k:s); getrf+trsv_v2 copy_ext(m_f, c, k, 0, s - k ); transposed = false; trsv_extnd(transposed, M, s, c, s - k, 1, k + s * k); // v = r - G(:,k:s)*c; dense matvec then vector update transposed = false; gemv_extnd(transposed, G, c, temp, N, s - k, alpha_blas, beta_blas, 1, 1, N, k * N, 0, 0); axpby(*this->m_r, temp, m_v, alpha_blas, malpha_blas, 0, N); if (no_preconditioner) { ; } else { m_z.delayed_send = 1; m_v.delayed_send = 1; m_preconditioner->solve( m_v, m_z, true ); m_z.delayed_send = 1; m_z.delayed_send = 1; copy(m_z, m_v, 0, N); } // U(:,k) = U(:,k:s)*c + om*v; matvec + axpy transposed = false; gemv_extnd(transposed, U, c, temp, N, s - k, alpha_blas, beta_blas, 1, 1, N, k * N, 0, 0); copy_ext(temp, U, 0, k * N, N); axpy(m_v, U, this->omega, 0, k * N, N); // G(:,k) = A*U(:,k); matvec copy_ext(U, tempu, k * N, 0, N); A.apply(tempu, tempg); copy_ext(tempg, G, 0, k * N, N); // Bi-Orthogonalise the new basis vectors: for (i = 0; i < k; i++) { //( P(:,i)'*G(:,k) )/M(i,i); dotc_div(P, G, i * N, k * N, N, M, i, s, &alpha); if (alpha == (ValueTypeB) 0) { FatalError("M(i,i)=0 breakdown condition (alpha):IDR", AMGX_ERR_INTERNAL); } // G(:,k) = G(:,k) - alpha*G(:,i); axpy(G, G, -alpha, i * N, k * N, N); // U(:,k) = U(:,k) - alpha*U(:,i); axpy(U, U, -alpha, i * N, k * N, N); } // New column of M = P'*G (first k-1 entries are zero) // M(k:s,k) = (G(:,k)'*P(:,k:s))'; transposed = true; gemv_div(transposed, P, G, M, N, s - k, alpha_blas, beta_blas, 1, 1, N, k * N, k * N, k * s + k, m_f, k, s, &beta, svec_chk); if (beta == (ValueTypeB) 0) { FatalError("M(k,k)=0 breakdown condition (beta):IDR", AMGX_ERR_INTERNAL); } // r = r - beta*G(:,k); axpy(G, *this->m_r, -beta, k * N, 0, N); // x = x + beta*U(:,k); axpy(U, x, beta, k * N, 0, N); // Do we converge ? this->m_curr_iter = this->m_curr_iter + 1; if ( this->m_monitor_convergence && this->compute_norm_and_converged() ) { A.setView(oldView); return true; } //Early exit: last iteration, no need to prepare the next one. if ( this->is_last_iter() ) { A.setView(oldView); return !this->m_monitor_convergence; } // New f = P'*r (first k components are zero) // if ( k < s ) // f(k+1:s) = f(k+1:s) - beta*M(k+1:s,k); // end if (k < s - 1) { axpy(M, m_f, -beta, k * s + k + 1, k + 1, s - k - 1); } }/// for ends for smaller space //check for convergence once again. 
If converged just leave the function if ( this->m_monitor_convergence && this->compute_norm_and_converged() ) { A.setView(oldView); return true; } copy( *this->m_r, m_v, 0, N); if (no_preconditioner) { ; } else { m_z.delayed_send = 1; m_v.delayed_send = 1; m_preconditioner->solve( m_v, m_z, true ); m_z.delayed_send = 1; m_v.delayed_send = 1; copy( m_z, m_v, 0, N); } A.apply(m_v, t_idr ); // calculate new omega ns = nrm2(*this->m_r, 0, N); nt = nrm2(t_idr, 0, N); ts = dotc(t_idr, *this->m_r, 0, N); rho = abs(ts / (nt * ns)); this->omega = ts / (nt * nt); if (rho < angle) { this->omega = this->omega * angle / rho; } if (this->omega == 0) { cout << "Error happened in this->omega==0" << endl; exit(1); } // r = r - omega*t; axpy( t_idr, *this->m_r, -(this->omega), 0, N ); axpy( m_v, x, this->omega, 0, N ); // No convergence so far. A.setView(oldView); return !this->m_monitor_convergence; } template<class T_Config> void IDR_Solver_Base<T_Config>::solve_finalize( VVector &b, VVector &x ) {} template<class T_Config> void IDR_Solver_Base<T_Config>::printSolverParameters() const { if (!no_preconditioner) { std::cout << "preconditioner: " << this->m_preconditioner->getName() << " with scope name: " << this->m_preconditioner->getScope() << std::endl; } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void IDR_Solver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::dot_ina_loop(const VVector &a, const VVector &b, int offseta, int offsetb, VVector &res, VVector &hres, int offsetres, int size, int k, int s) { int i; for (i = k; i < s; i++) { (res.raw())[i + offsetres] = dotc(a, b, offseta + i * size, offsetb, size); } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void IDR_Solver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::dot_ina_loop(const VVector &a, const VVector &b, int offseta, int offsetb, VVector &res, Vector_h &hres, int offsetres, int size, int k, int s) { int i; for (i = k; i < s; i++) { hres.raw()[i + offsetres] = dotc(a, b, offseta + i * size, offsetb, size); } cudaMemcpy((void *) res.raw(), (void *) hres.raw(), (s - k)*sizeof(ValueTypeB), cudaMemcpyHostToDevice); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void IDR_Solver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::gemv_div(bool trans, const VVector &A, const VVector &x, VVector &y, int m, int n, ValueTypeB alpha, ValueTypeB beta, int incx, int incy, int lda, int offsetA, int offsetx, int offsety, VVector &nume, int k, int s, ValueTypeB *ratio, Vector_h &svec_chk) { int j, col; ValueTypeB numer, gemv_res; ValueTypeB denom; if (s == 1) { gemv_res = dotc(A, x, 0, k * m, m); cudaMemcpy((void *) & (y.raw()[k * s + k]), (void *) &gemv_res, sizeof(ValueTypeB), cudaMemcpyHostToDevice); if (gemv_res != (ValueTypeB) 0) { cudaMemcpy((void *) &numer, (void *) & ((nume.raw())[k]), sizeof(ValueTypeB), cudaMemcpyDeviceToHost); *ratio = numer / gemv_res; } else { *ratio = (ValueTypeB) 0; } } else { for (col = k, j = 0; j < s - k; j++, col++) { (svec_chk.raw())[j] = dotc(A, x, col * m, k * m, m); } cudaMemcpy((void *) & (y.raw())[k * s + k], (void *) svec_chk.raw(), (s - k)*sizeof(ValueTypeB), cudaMemcpyHostToDevice); cudaMemcpy((void *) &denom, (void *) & (y.raw())[k + s * k], sizeof(ValueTypeB), cudaMemcpyDeviceToHost); if (denom != (ValueTypeB) 0) { cudaMemcpy((void *) &numer, (void *) & ((nume.raw())[k]), sizeof(ValueTypeB), 
cudaMemcpyDeviceToHost); *ratio = numer / denom; } else { *ratio = (ValueTypeB) 0; } } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void IDR_Solver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::gemv_div(bool trans, const VVector &A, const VVector &x, VVector &y, int m, int n, ValueTypeB alpha, ValueTypeB beta, int incx, int incy, int lda, int offsetA, int offsetx, int offsety, VVector &nume, int k, int s, ValueTypeB *ratio, Vector_h &svec) { int j, col; ValueTypeB gemv_res; ValueTypeB denom; if (s == 1) { gemv_res = dotc(A, x, 0, k * m, m); (y.raw()[k * s + k]) = gemv_res; if (gemv_res != (ValueTypeB) 0) { *ratio = ((nume.raw())[k]) / gemv_res; } else { *ratio = (ValueTypeB) 0; } } else { for (col = k, j = 0; j < s - k; j++, col++) { (y.raw())[k + s * k + j] = dotc(A, x, col * m, k * m, m); } denom = (y.raw())[k + s * k]; if (denom != (ValueTypeB) 0) { *ratio = ((nume.raw())[k]) / denom; } else { *ratio = (ValueTypeB) 0; } } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void IDR_Solver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::dotc_div(VVector &a, VVector &b, int offseta, int offsetb, int size, VVector &denom, int i, int s, ValueTypeB *ratio) { ValueTypeB dnr; cudaMemcpy((void *) &dnr, (void *) & (denom.raw())[i + s * i], sizeof(ValueTypeB), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); if (dnr != (ValueTypeB) 0) { *ratio = dotc(a, b, offseta, offsetb, size) / dnr; } else { *ratio = (ValueTypeB) 0; } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void IDR_Solver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::dotc_div(VVector &a, VVector &b, int offseta, int offsetb, int size, VVector &denom, int i, int s, ValueTypeB *ratio) { ValueTypeB alpha_iter; if ((denom.raw())[i * s + i] != (ValueTypeB) 0) { alpha_iter = dotc(a, b, offseta, offsetb, size) / denom[i * s + i]; *ratio = alpha_iter; } else { *ratio = (ValueTypeB) 0; } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void IDR_Solver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::setup_arrays(VVector &P, VVector &M, VVector &b, VVector &x, Vector_h &hbuff, int s, int N) { int i; for (i = 0; i < s; i++) { (hbuff.raw())[i * s + i] = (ValueTypeB) 1.0; } cudaMemcpy((void *)M.raw(), (void *)hbuff.raw(), s * s * sizeof(ValueTypeB), cudaMemcpyHostToDevice); if (s == 1) { cudaMemcpy((void *)P.raw(), (void *)b.raw(), N * s * sizeof(ValueTypeB), cudaMemcpyDeviceToDevice); } else { srand(0); for (i = 0; i < N * s; i++) { (hbuff.raw())[i] = (ValueTypeB) rand() / (ValueTypeB (RAND_MAX)); } cudaMemcpy((void *)P.raw(), (void *)hbuff.raw(), N * s * sizeof(ValueTypeB), cudaMemcpyHostToDevice); } // } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void IDR_Solver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::setup_arrays(VVector &P, VVector &M, VVector &b, VVector &x, VVector &hbuff, int s, int N) { int i; for (i = 0; i < s; i++) { (M.raw())[i * s + i] = (ValueTypeB) 1.0; } if (s == 1) { copy(b, P, 0, N); // copying b into P if s=1; } else { srand(0); for (i = 0; i < N * s; i++) { (P.raw())[i] = (ValueTypeB) rand() / (ValueTypeB (RAND_MAX)); } } } /**************************************** * Explict instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class 
IDR_Solver_Base<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class IDR_Solver<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } // namespace idr_solver } // namespace amgx
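// Host-side reference sketch (plain C++, double precision) of the omega update
// performed near the end of solve_iteration() above: omega = (t,r)/(t,t),
// damped whenever |cos(t,r)| falls below the "angle" threshold (0.7 in the
// solver, 0 when s == 1).  The std::vector interface and the function name are
// illustrative only, not part of AMGX.
#include <cmath>
#include <numeric>
#include <vector>

static double idr_omega_ref(const std::vector<double>& t,
                            const std::vector<double>& r,
                            double angle = 0.7) {
  const double ts = std::inner_product(t.begin(), t.end(), r.begin(), 0.0);            // (t,r)
  const double nt = std::sqrt(std::inner_product(t.begin(), t.end(), t.begin(), 0.0)); // ||t||
  const double ns = std::sqrt(std::inner_product(r.begin(), r.end(), r.begin(), 0.0)); // ||r||
  const double rho = std::fabs(ts / (nt * ns));   // |cosine of the angle between t and r|
  double omega = ts / (nt * nt);
  if (rho < angle) omega *= angle / rho;          // keep omega away from near-breakdown
  return omega;
}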
#if ( MODEL == HYDRO ) // external functions and GPU-related set-up #ifdef __CUDACC__ #include "CUAPI.h" #include "CUFLU_Shared_FluUtility.cu" #include "CUDA_ConstMemory.h" extern real (*d_SrcDlepProf_Data)[SRC_DLEP_PROF_NBINMAX]; extern real *d_SrcDlepProf_Radius; #endif // #ifdef __CUDACC__ // local function prototypes #ifndef __CUDACC__ void Src_SetAuxArray_Deleptonization( double [], int [] ); void Src_SetFunc_Deleptonization( SrcFunc_t & ); void Src_SetConstMemory_Deleptonization( const double AuxArray_Flt[], const int AuxArray_Int[], double *&DevPtr_Flt, int *&DevPtr_Int ); void Src_PassData2GPU_Deleptonization(); #endif /******************************************************** 1. Deleptonization source term --> Enabled by the runtime option "SRC_DELEPTONIZATION" 2. This file is shared by both CPU and GPU CUSRC_Src_Deleptonization.cu -> CPU_Src_Deleptonization.cpp 3. Four steps are required to implement a source term I. Set auxiliary arrays II. Implement the source-term function III. [Optional] Add the work to be done every time before calling the major source-term function IV. Set initialization functions 4. The source-term function must be thread-safe and not use any global variable ********************************************************/ // ======================= // I. Set auxiliary arrays // ======================= //------------------------------------------------------------------------------------------------------- // Function : Src_SetAuxArray_Deleptonization // Description : Set the auxiliary arrays AuxArray_Flt/Int[] // // Note : 1. Invoked by Src_Init_Deleptonization() // 2. AuxArray_Flt/Int[] have the size of SRC_NAUX_DLEP defined in Macro.h (default = 5) // 3. Add "#ifndef __CUDACC__" since this routine is only useful on CPU // // Parameter : AuxArray_Flt/Int : Floating-point/Integer arrays to be filled up // // Return : AuxArray_Flt/Int[] //------------------------------------------------------------------------------------------------------- #ifndef __CUDACC__ void Src_SetAuxArray_Deleptonization( double AuxArray_Flt[], int AuxArray_Int[] ) { // TBF } // FUNCTION : Src_SetAuxArray_Deleptonization #endif // #ifndef __CUDACC__ // ====================================== // II. Implement the source-term function // ====================================== //------------------------------------------------------------------------------------------------------- // Function : Src_Deleptonization // Description : Major source-term function // // Note : 1. Invoked by CPU/GPU_SrcSolver_IterateAllCells() // 2. See Src_SetAuxArray_Deleptonization() for the values stored in AuxArray_Flt/Int[] // 3. 
Shared by both CPU and GPU // // Parameter : fluid : Fluid array storing both the input and updated values // --> Including both active and passive variables // B : Cell-centered magnetic field // SrcTerms : Structure storing all source-term variables // dt : Time interval to advance solution // dh : Grid size // x/y/z : Target physical coordinates // TimeNew : Target physical time to reach // TimeOld : Physical time before update // --> This function updates physical time from TimeOld to TimeNew // MinDens/Pres/Eint : Density, pressure, and internal energy floors // EoS : EoS object // AuxArray_* : Auxiliary arrays (see the Note above) // // Return : fluid[] //----------------------------------------------------------------------------------------- GPU_DEVICE_NOINLINE static void Src_Deleptonization( real fluid[], const real B[], const SrcTerms_t *SrcTerms, const real dt, const real dh, const double x, const double y, const double z, const double TimeNew, const double TimeOld, const real MinDens, const real MinPres, const real MinEint, const EoS_t *EoS, const double AuxArray_Flt[], const int AuxArray_Int[] ) { // check # ifdef GAMER_DEBUG if ( AuxArray_Flt == NULL ) printf( "ERROR : AuxArray_Flt == NULL in %s !!\n", __FUNCTION__ ); if ( AuxArray_Int == NULL ) printf( "ERROR : AuxArray_Int == NULL in %s !!\n", __FUNCTION__ ); # endif // TBF // profiles are stored in SrcTerms->Dlep_Profile_DataDevPtr/Dlep_Profile_RadiusDevPtr/Dlep_Profile_NBin // --> see "include/SrcTerms.h" } // FUNCTION : Src_Deleptonization // ================================================== // III. [Optional] Add the work to be done every time // before calling the major source-term function // ================================================== //------------------------------------------------------------------------------------------------------- // Function : Src_WorkBeforeMajorFunc_Deleptonization // Description : Specify work to be done every time before calling the major source-term function // // Note : 1. Invoked by Src_WorkBeforeMajorFunc() // 2. 
Add "#ifndef __CUDACC__" since this routine is only useful on CPU // // Parameter : lv : Target refinement level // TimeNew : Target physical time to reach // TimeOld : Physical time before update // --> The major source-term function will update the system from TimeOld to TimeNew // dt : Time interval to advance solution // --> Physical coordinates : TimeNew - TimeOld == dt // Comoving coordinates : TimeNew - TimeOld == delta(scale factor) != dt // AuxArray_Flt/Int : Auxiliary arrays // --> Can be used and/or modified here // --> Must call Src_SetConstMemory_Deleptonization() after modification // // Return : AuxArray_Flt/Int[] //------------------------------------------------------------------------------------------------------- #ifndef __CUDACC__ void Src_WorkBeforeMajorFunc_Deleptonization( const int lv, const double TimeNew, const double TimeOld, const double dt, double AuxArray_Flt[], int AuxArray_Int[] ) { // TBF /* // compute profiles // --> here is just an example; see GREP for a more efficient implementation // --> SRC_DLEP_PROF_NVAR and SRC_DLEP_PROF_NBINMAX are defined in Macro.h (default = 6 and 4000, respectively) // --> be careful about the issue of center drifting const double Center[3] = { amr->BoxCenter[0], amr->BoxCenter[1], amr->BoxCenter[2] }; const double MaxRadius = 0.5*amr->BoxSize[0]; const double MinBinSize = amr->dh[MAX_LEVEL]; const bool LogBin = true; const double LogBinRatio = 1.25; const bool RemoveEmptyBin = true; const long TVar[] = { _DENS, _MOMX, _ENGY, _PRES, _VELR, _EINT_DER }; const int NProf = SRC_DLEP_PROF_NVAR; const int SingleLv = -1; const int MaxLv = -1; const PatchType_t PatchType = PATCH_LEAF; const double PrepTime = TimeNew; Profile_t *Prof[SRC_DLEP_PROF_NVAR]; for (int v=0; v<SRC_DLEP_PROF_NVAR; v++) Prof[v] = new Profile_t(); Aux_ComputeProfile( Prof, Center, MaxRadius, MinBinSize, LogBin, LogBinRatio, RemoveEmptyBin, TVar, NProf, SingleLv, MaxLv, PatchType, PrepTime ); // check and store the number of radial bins if ( Prof[0]->NBin > SRC_DLEP_PROF_NBINMAX ) Aux_Error( ERROR_INFO, "Number of radial bins (%d) exceeds the maximum size (%d) !!\n", Prof[0]->NBin, SRC_DLEP_PROF_NBINMAX ); SrcTerms.Dlep_Profile_NBin = Prof[0]->NBin; // store profiles in the host arrays // --> note the typecasting from double to real for (int v=0; v<SRC_DLEP_PROF_NVAR; v++) for (int b=0; b<Prof[v]->NBin; b++) h_SrcDlepProf_Data[v][b] = (real)Prof[v]->Data[b]; for (int b=0; b<Prof[0]->NBin; b++) h_SrcDlepProf_Radius[b] = (real)Prof[0]->Radius[b]; // pass profiles to GPU # ifdef GPU Src_PassData2GPU_Deleptonization(); # endif // uncomment the following lines if the auxiliary arrays have been modified //# ifdef GPU // Src_SetConstMemory_Deleptonization( AuxArray_Flt, AuxArray_Int, // SrcTerms.Dlep_AuxArrayDevPtr_Flt, SrcTerms.Dlep_AuxArrayDevPtr_Int ); //# endif // free memory for (int v=0; v<SRC_DLEP_PROF_NVAR; v++) delete Prof[v]; */ } // FUNCTION : Src_WorkBeforeMajorFunc_Deleptonization #endif #ifdef __CUDACC__ //------------------------------------------------------------------------------------------------------- // Function : Src_PassData2GPU_Deleptonization // Description : Transfer data to GPU // // Note : 1. Invoked by Src_WorkBeforeMajorFunc_Deleptonization() // 2. 
Use synchronous transfer // // Parameter : None // // Return : None //------------------------------------------------------------------------------------------------------- void Src_PassData2GPU_Deleptonization() { const long Size_Data = sizeof(real)*SRC_DLEP_PROF_NVAR*SRC_DLEP_PROF_NBINMAX; const long Size_Radius = sizeof(real)* SRC_DLEP_PROF_NBINMAX; // use synchronous transfer CUDA_CHECK_ERROR( cudaMemcpy( d_SrcDlepProf_Data, h_SrcDlepProf_Data, Size_Data, cudaMemcpyHostToDevice ) ); CUDA_CHECK_ERROR( cudaMemcpy( d_SrcDlepProf_Radius, h_SrcDlepProf_Radius, Size_Radius, cudaMemcpyHostToDevice ) ); } // FUNCTION : Src_PassData2GPU_Deleptonization #endif // #ifdef __CUDACC__ // ================================ // IV. Set initialization functions // ================================ #ifdef __CUDACC__ # define FUNC_SPACE __device__ static #else # define FUNC_SPACE static #endif FUNC_SPACE SrcFunc_t SrcFunc_Ptr = Src_Deleptonization; //----------------------------------------------------------------------------------------- // Function : Src_SetFunc_Deleptonization // Description : Return the function pointer of the CPU/GPU source-term function // // Note : 1. Invoked by Src_Init_Deleptonization() // 2. Call-by-reference // 3. Use either CPU or GPU but not both of them // // Parameter : SrcFunc_CPU/GPUPtr : CPU/GPU function pointer to be set // // Return : SrcFunc_CPU/GPUPtr //----------------------------------------------------------------------------------------- #ifdef __CUDACC__ __host__ void Src_SetFunc_Deleptonization( SrcFunc_t &SrcFunc_GPUPtr ) { CUDA_CHECK_ERROR( cudaMemcpyFromSymbol( &SrcFunc_GPUPtr, SrcFunc_Ptr, sizeof(SrcFunc_t) ) ); } #elif ( !defined GPU ) void Src_SetFunc_Deleptonization( SrcFunc_t &SrcFunc_CPUPtr ) { SrcFunc_CPUPtr = SrcFunc_Ptr; } #endif // #ifdef __CUDACC__ ... elif ... #ifdef __CUDACC__ //------------------------------------------------------------------------------------------------------- // Function : Src_SetConstMemory_Deleptonization // Description : Set the constant memory variables on GPU // // Note : 1. Adopt the suggested approach for CUDA version >= 5.0 // 2. Invoked by Src_Init_Deleptonizatio() and, if necessary, Src_WorkBeforeMajorFunc_Deleptonizatio() // 3. 
SRC_NAUX_DLEP is defined in Macro.h // // Parameter : AuxArray_Flt/Int : Auxiliary arrays to be copied to the constant memory // DevPtr_Flt/Int : Pointers to store the addresses of constant memory arrays // // Return : c_Src_Dlep_AuxArray_Flt[], c_Src_Dlep_AuxArray_Int[], DevPtr_Flt, DevPtr_Int //--------------------------------------------------------------------------------------------------- void Src_SetConstMemory_Deleptonization( const double AuxArray_Flt[], const int AuxArray_Int[], double *&DevPtr_Flt, int *&DevPtr_Int ) { // copy data to constant memory CUDA_CHECK_ERROR( cudaMemcpyToSymbol( c_Src_Dlep_AuxArray_Flt, AuxArray_Flt, SRC_NAUX_DLEP*sizeof(double) ) ); CUDA_CHECK_ERROR( cudaMemcpyToSymbol( c_Src_Dlep_AuxArray_Int, AuxArray_Int, SRC_NAUX_DLEP*sizeof(int ) ) ); // obtain the constant-memory pointers CUDA_CHECK_ERROR( cudaGetSymbolAddress( (void **)&DevPtr_Flt, c_Src_Dlep_AuxArray_Flt ) ); CUDA_CHECK_ERROR( cudaGetSymbolAddress( (void **)&DevPtr_Int, c_Src_Dlep_AuxArray_Int ) ); } // FUNCTION : Src_SetConstMemory_Deleptonization #endif // #ifdef __CUDACC__ #ifndef __CUDACC__ //----------------------------------------------------------------------------------------- // Function : Src_Init_Deleptonization // Description : Initialize the deleptonization source term // // Note : 1. Set auxiliary arrays by invoking Src_SetAuxArray_*() // --> Copy to the GPU constant memory and store the associated addresses // 2. Set the source-term function by invoking Src_SetFunc_*() // --> Unlike other modules (e.g., EoS), here we use either CPU or GPU but not // both of them // 3. Invoked by Src_Init() // 4. Add "#ifndef __CUDACC__" since this routine is only useful on CPU // // Parameter : None // // Return : None //----------------------------------------------------------------------------------------- void Src_Init_Deleptonization() { // set the auxiliary arrays Src_SetAuxArray_Deleptonization( Src_Dlep_AuxArray_Flt, Src_Dlep_AuxArray_Int ); // copy the auxiliary arrays to the GPU constant memory and store the associated addresses # ifdef GPU Src_SetConstMemory_Deleptonization( Src_Dlep_AuxArray_Flt, Src_Dlep_AuxArray_Int, SrcTerms.Dlep_AuxArrayDevPtr_Flt, SrcTerms.Dlep_AuxArrayDevPtr_Int ); # else SrcTerms.Dlep_AuxArrayDevPtr_Flt = Src_Dlep_AuxArray_Flt; SrcTerms.Dlep_AuxArrayDevPtr_Int = Src_Dlep_AuxArray_Int; # endif // set the major source-term function Src_SetFunc_Deleptonization( SrcTerms.Dlep_FuncPtr ); } // FUNCTION : Src_Init_Deleptonization //----------------------------------------------------------------------------------------- // Function : Src_End_Deleptonization // Description : Release the resources used by the deleptonization source term // // Note : 1. Invoked by Src_End() // 2. Add "#ifndef __CUDACC__" since this routine is only useful on CPU // // Parameter : None // // Return : None //----------------------------------------------------------------------------------------- void Src_End_Deleptonization() { // TBF } // FUNCTION : Src_End_Deleptonization #endif // #ifndef __CUDACC__ #endif // #if ( MODEL == HYDRO )
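// Stand-alone CUDA sketch of the function-pointer plumbing used above: the
// address of a __device__ function is stored in a __device__ symbol, fetched
// on the host with cudaMemcpyFromSymbol(), and handed to a kernel that calls
// through it.  All names here are illustrative, not GAMER's.
#include <cstdio>
#include <cuda_runtime.h>

typedef void (*SrcFuncDemo_t)( float fluid[], const float dt );

__device__ static void SrcDemo_AddConst( float fluid[], const float dt )
{  fluid[0] += dt;  }

// device-side symbol holding the pointer, set at compile time
__device__ static SrcFuncDemo_t SrcDemo_FuncPtr = SrcDemo_AddConst;

__global__ void SrcDemo_Kernel( SrcFuncDemo_t Func, float fluid[], const float dt )
{
   Func( fluid, dt );   // indirect call through the fetched pointer
}

int main()
{
   SrcFuncDemo_t h_Func;
   cudaMemcpyFromSymbol( &h_Func, SrcDemo_FuncPtr, sizeof(SrcFuncDemo_t) );

   float *d_fluid;
   cudaMalloc( &d_fluid, sizeof(float) );
   cudaMemset( d_fluid, 0, sizeof(float) );

   SrcDemo_Kernel<<<1,1>>>( h_Func, d_fluid, 0.5f );
   cudaDeviceSynchronize();

   float h_fluid;
   cudaMemcpy( &h_fluid, d_fluid, sizeof(float), cudaMemcpyDeviceToHost );
   printf( "fluid = %f\n", h_fluid );   // expected: 0.500000
   cudaFree( d_fluid );
   return 0;
}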
#include <cstdio> #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) const int CUDA_NUM_THREADS = 1024; inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } template <typename DType> __device__ DType get_gradient_weight(DType argmax_h, DType argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) { // empty return 0; } argmax_h = max(argmax_h, (DType)0.0f); argmax_w = max(argmax_w, (DType)0.0f); int argmax_h_low = (int)argmax_h; int argmax_w_low = (int)argmax_w; int argmax_h_high; int argmax_w_high; if (argmax_h_low >= height - 1) { argmax_h_high = argmax_h_low = height - 1; argmax_h = (DType)argmax_h_low; } else { argmax_h_high = argmax_h_low + 1; } if (argmax_w_low >= width - 1) { argmax_w_high = argmax_w_low = width - 1; argmax_w = (DType)argmax_w_low; } else { argmax_w_high = argmax_w_low + 1; } DType weight = 0; if (h == argmax_h_low) { if (w == argmax_w_low) { weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); } else if (w == argmax_w_high) { weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); } } else if (h == argmax_h_high) { if (w == argmax_w_low) { weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); } else if (w == argmax_w_high) { weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); } } return weight; } template <typename DType> __global__ void depthconv_im2col_gpu_kernel( const int n, const DType *data_im, const DType *data_depth, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int height_col, const int width_col, DType *data_col) { // CxHxW --> (khxkw)x(CxHxW) CUDA_KERNEL_LOOP(index, n) { const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int c_im = (index / width_col) / height_col; const int c_col = c_im * kernel_h * kernel_w; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; DType *data_col_ptr = data_col + (c_col * height_col + h_col) * width_col + w_col; const DType *data_im_ptr = data_im + (c_im * height + h_in) * width + w_in; const DType *data_depth_ptr = data_depth + h_in * width + w_in; DType Di = 0.; bool valid = true; if ((h_in + dilation_h * (kernel_h - 1) / 2)>=0 && w_in + dilation_w * (kernel_w - 1) / 2 >= 0 && (h_in + dilation_h * (kernel_h - 1) / 2) < height && w_in + dilation_w * (kernel_w - 1) / 2 < width) Di = data_depth[(h_in + dilation_h * (kernel_h - 1) / 2) * width + w_in + dilation_w * (kernel_w - 1) / 2]; else valid = false; //const DType Di = data_depth[(h_in + (kernel_h - 1) / 2 + dilation_h - 1) * width + (w_in + (kernel_w - 1) / 2 + dilation_w - 1)]; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { DType val = static_cast<DType>(0); DType Dval = static_cast<DType>(0); const int h_im = h_in + i * dilation_h; const int w_im = w_in + j * dilation_w; if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { const int map_h = i * dilation_h; const int map_w = j * dilation_w; val = data_im_ptr[map_h * width + map_w]; if (valid) Dval = data_depth_ptr[map_h * width + map_w]; //printf("%f,%d\n",Dval,h_in * width + w_in+map_h * width + map_w - ((h_in + (kernel_h - 1) / 2 + dilation_h - 1) * width + (w_in + (kernel_w - 1) / 2 + dilation_w - 1))); // printf("Di-Dval: %f, %f\n", Di, Dval); // if (exp(-abs(Di - 
Dval))<0.2) // printf("Di-Dval: %f\n", exp(-abs(Di - Dval))); val *= exp(-abs(Di - Dval)); } *data_col_ptr = val; data_col_ptr += height_col * width_col; } } } } template <typename DType> void depthconv_im2col(cudaStream_t stream, const DType *data_im, const DType *data_depth, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, DType *data_col) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col; // Launch depthconv_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_im, data_depth, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col, width_col, data_col); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in depthconv_im2col: %s\n", cudaGetErrorString(err)); // TODO(BZ) panic } } template void depthconv_im2col<float>( cudaStream_t stream, const float *data_im, const float *data_depth, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, float *data_col); /*template void depthconv_im2col<double>( cudaStream_t stream, const double *data_im, const double *data_depth, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, double *data_col);*/ template <typename DType> __global__ void depthconv_col2im_gpu_kernel( const int n, const DType *data_col, const DType *data_depth, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int height_col, const int width_col, DType *grad_im) { CUDA_KERNEL_LOOP(index, n) { for (int ii = 0; ii < kernel_h * kernel_w; ii++){ int ii_index = ii + index * kernel_h * kernel_w; const int j = (ii_index / width_col / height_col) % kernel_w; const int i = (ii_index / width_col / height_col / kernel_w) % kernel_h; const int c = ii_index / width_col / height_col / kernel_w / kernel_h; // compute the start and end of the output int w_out = ii_index % width_col; int h_out = (ii_index / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; //const DType cur_inv_h_data = h_in + i * dilation_h; //const DType cur_inv_w_data = w_in + j * dilation_w; const DType cur_top_grad = data_col[ii_index]; const int cur_h = h_in + i * dilation_h;//(int)cur_inv_h_data; const int cur_w = w_in + j * dilation_w;//(int)cur_inv_w_data; DType Di = 0.; bool valid = true; if ((h_in + dilation_h * (kernel_h - 1) / 2)>=0 && w_in + dilation_w * (kernel_w - 1) / 2 >= 0 && (h_in + dilation_h * (kernel_h - 1) / 2) < height && w_in + dilation_w * (kernel_w - 1) / 2 < width) Di = data_depth[(h_in + dilation_h * (kernel_h - 1) / 2) * width + w_in + dilation_w * (kernel_w - 1) / 2]; else valid = false; // const DType 
Di = data_depth[(h_in + dilation_h * (kernel_h - 1) / 2) * width + w_in + dilation_w * (kernel_w - 1) / 2]; //const DType Di = data_depth[(h_in + (kernel_h - 1) / 2 + dilation_h - 1) * width + w_in + (kernel_w - 1) / 2 + dilation_w - 1]; //printf("%d\n",(h_in + dilation_h * (kernel_h - 1) / 2) * width + w_in + dilation_w * (kernel_w - 1) / 2); //data_depth[cur_h * width + cur_w]; // data_depth[(h_in + (kernel_h - 1) / 2 + dilation_h - 1) * width + w_in + (kernel_w - 1) / 2 + dilation_w - 1]; int cur_bottom_grad_pos = (c * height + cur_h) * width + cur_w; int cur_bottom_depth_pos= (cur_h) * width + cur_w; //printf("%d,%d,%d,%d\n",i,j,((h_in + dilation_h * (kernel_h - 1) / 2) * width + w_in + dilation_w * (kernel_w - 1) / 2-cur_bottom_depth_pos),dilation_h); //printf("%d\n",((h_in + dilation_h * (kernel_h - 1) / 2) * width + w_in + dilation_w * (kernel_w - 1) / 2-cur_bottom_depth_pos)); DType Dval = 0.; if (valid) Dval = data_depth[cur_bottom_depth_pos]; if (cur_h >= 0 && cur_h < height && cur_w >= 0 && cur_w < width) atomicAdd(grad_im + cur_bottom_grad_pos, cur_top_grad * exp(-abs(Di - Dval))); } } } template <typename DType> void depthconv_col2im(cudaStream_t stream, const DType *data_col, const DType *data_depth, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, DType *grad_im) { int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col; // int channel_per_depthconv_group = channels / depthconv_group; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. depthconv_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_col, data_depth, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col, width_col, grad_im); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in depthconv_col2im: %s\n", cudaGetErrorString(err)); // TODO(BZ) panic } } template void depthconv_col2im<float>( cudaStream_t stream, const float *data_col, const float *data_depth, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, float *grad_im); /*template void depthconv_col2im<double>( cudaStream_t stream, const double *data_col, const double *data_depth, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, double *grad_im);*/
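// Host-side reference of the two scalar rules the kernels above rely on: the
// im2col output extent and the depth-similarity weight exp(-|Di - Dval|) that
// scales every sampled value (and every gradient in col2im).  Function names
// are illustrative only.
#include <cmath>

inline int conv_out_size(int in_size, int pad, int ksize, int stride, int dilation) {
  // same formula as height_col / width_col in depthconv_im2col / depthconv_col2im
  return (in_size + 2 * pad - (dilation * (ksize - 1) + 1)) / stride + 1;
}

inline float depth_weight(float depth_center, float depth_neighbor) {
  // weight of a neighboring pixel given the depth at the kernel center
  return std::exp(-std::fabs(depth_center - depth_neighbor));
}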
#pragma once #include <cstdint> #include "data_spec_packed.cuh" #include "random_util.cuh" namespace { namespace device { template<class data_type_t, class voxel_index_t> __device__ __inline__ float trilerp_one( const data_type_t* __restrict__ data, int reso, int stride, const voxel_index_t* __restrict__ l, const float* __restrict__ pos, const int idx) { const int offz = stride; const int offy = reso * stride; const int offx = reso * offy; const data_type_t* __restrict__ data_ptr = data + (offx * l[0] + offy * l[1] + offz * l[2] + idx); const float ix0y0 = lerp(data_ptr[0], data_ptr[offz], pos[2]); const float ix0y1 = lerp(data_ptr[offy], data_ptr[offy + offz], pos[2]); const float ix0 = lerp(ix0y0, ix0y1, pos[1]); const float ix1y0 = lerp(data_ptr[offx], data_ptr[offx + offz], pos[2]); const float ix1y1 = lerp(data_ptr[offy + offx], data_ptr[offy + offx + offz], pos[2]); const float ix1 = lerp(ix1y0, ix1y1, pos[1]); return lerp(ix0, ix1, pos[0]); } template<class data_type_t, class voxel_index_t> __device__ __inline__ void trilerp_backward_one( data_type_t* __restrict__ grad_data, int reso, int stride, const voxel_index_t* __restrict__ l, const float* __restrict__ pos, float grad_out, const int idx) { const float ay = 1.f - pos[1], az = 1.f - pos[2]; float xo = (1.0f - pos[0]) * grad_out; const int offz = stride; const int offy = reso * stride; const int offx = reso * offy; data_type_t* __restrict__ grad_data_ptr = grad_data + (offx * l[0] + offy * l[1] + offz * l[2] + idx); #define ADD_WT(u, val) atomicAdd(&grad_data_ptr[u], val) ADD_WT(0, ay * az * xo); ADD_WT(offz, ay * pos[2] * xo); ADD_WT(offy, pos[1] * az * xo); ADD_WT(offy + offz, pos[1] * pos[2] * xo); xo = pos[0] * grad_out; ADD_WT(offx, ay * az * xo); ADD_WT(offx + offz, ay * pos[2] * xo); ADD_WT(offx + offy, pos[1] * az * xo); ADD_WT(offx + offy + offz, pos[1] * pos[2] * xo); #undef ADD_WT } // trilerp with links template<class data_type_t, class voxel_index_t> __device__ __inline__ float trilerp_cuvol_one( const int32_t* __restrict__ links, const data_type_t* __restrict__ data, int offx, int offy, size_t stride, const voxel_index_t* __restrict__ l, const float* __restrict__ pos, const int idx) { const int32_t* __restrict__ link_ptr = links + (offx * l[0] + offy * l[1] + l[2]); #define MAYBE_READ_LINK(u) ((link_ptr[u] >= 0) ? 
data[link_ptr[u] * stride + idx] : 0.f) const float ix0y0 = lerp(MAYBE_READ_LINK(0), MAYBE_READ_LINK(1), pos[2]); const float ix0y1 = lerp(MAYBE_READ_LINK(offy), MAYBE_READ_LINK(offy + 1), pos[2]); const float ix0 = lerp(ix0y0, ix0y1, pos[1]); const float ix1y0 = lerp(MAYBE_READ_LINK(offx), MAYBE_READ_LINK(offx + 1), pos[2]); const float ix1y1 = lerp(MAYBE_READ_LINK(offy + offx), MAYBE_READ_LINK(offy + offx + 1), pos[2]); const float ix1 = lerp(ix1y0, ix1y1, pos[1]); return lerp(ix0, ix1, pos[0]); #undef MAYBE_READ_LINK } template<class data_type_t, class voxel_index_t> __device__ __inline__ void trilerp_backward_cuvol_one( const int32_t* __restrict__ links, data_type_t* __restrict__ grad_data, int offx, int offy, size_t stride, const voxel_index_t* __restrict__ l, const float* __restrict__ pos, float grad_out, const int idx) { const float ay = 1.f - pos[1], az = 1.f - pos[2]; float xo = (1.0f - pos[0]) * grad_out; const int32_t* __restrict__ link_ptr = links + (offx * l[0] + offy * l[1] + l[2]); #define MAYBE_ADD_LINK(u, val) if (link_ptr[u] >= 0) { \ atomicAdd(&grad_data[link_ptr[u] * stride + idx], val); \ } MAYBE_ADD_LINK(0, ay * az * xo); MAYBE_ADD_LINK(1, ay * pos[2] * xo); MAYBE_ADD_LINK(offy, pos[1] * az * xo); MAYBE_ADD_LINK(offy + 1, pos[1] * pos[2] * xo); xo = pos[0] * grad_out; MAYBE_ADD_LINK(offx + 0, ay * az * xo); MAYBE_ADD_LINK(offx + 1, ay * pos[2] * xo); MAYBE_ADD_LINK(offx + offy, pos[1] * az * xo); MAYBE_ADD_LINK(offx + offy + 1, pos[1] * pos[2] * xo); #undef MAYBE_ADD_LINK } template<class data_type_t, class voxel_index_t> __device__ __inline__ void trilerp_backward_cuvol_one_density( const int32_t* __restrict__ links, data_type_t* __restrict__ grad_data_out, bool* __restrict__ mask_out, int offx, int offy, const voxel_index_t* __restrict__ l, const float* __restrict__ pos, float grad_out) { const float ay = 1.f - pos[1], az = 1.f - pos[2]; float xo = (1.0f - pos[0]) * grad_out; const int32_t* __restrict__ link_ptr = links + (offx * l[0] + offy * l[1] + l[2]); #define MAYBE_ADD_LINK_DEN(u, val) if (link_ptr[u] >= 0) { \ atomicAdd(&grad_data_out[link_ptr[u]], val); \ if (mask_out != nullptr) \ mask_out[link_ptr[u]] = true; \ } MAYBE_ADD_LINK_DEN(0, ay * az * xo); MAYBE_ADD_LINK_DEN(1, ay * pos[2] * xo); MAYBE_ADD_LINK_DEN(offy, pos[1] * az * xo); MAYBE_ADD_LINK_DEN(offy + 1, pos[1] * pos[2] * xo); xo = pos[0] * grad_out; MAYBE_ADD_LINK_DEN(offx + 0, ay * az * xo); MAYBE_ADD_LINK_DEN(offx + 1, ay * pos[2] * xo); MAYBE_ADD_LINK_DEN(offx + offy, pos[1] * az * xo); MAYBE_ADD_LINK_DEN(offx + offy + 1, pos[1] * pos[2] * xo); #undef MAYBE_ADD_LINK_DEN } // Trilerp with xy links & wrapping (background) template<class data_type_t, class voxel_index_t> __device__ __inline__ float trilerp_bg_one( const int32_t* __restrict__ links, const data_type_t* __restrict__ data, int reso, int nlayers, int nchannels, const voxel_index_t* __restrict__ l, const float* __restrict__ pos, const int idx) { #define MAYBE_READ_LINK2(varname, u) \ float varname; \ { \ int link = links[u]; \ if (link >= 0) { \ const float* __restrict__ dptr = &data[(link * nlayers + l[2]) * nchannels + idx]; \ varname = lerp(dptr[0], dptr[nchannels], pos[2]); \ } else { \ varname = 0.f; \ } \ } const int ny = l[1] < (reso - 1) ? (l[1] + 1) : 0; MAYBE_READ_LINK2(ix0y0, reso * l[0] + l[1]); MAYBE_READ_LINK2(ix0y1, reso * l[0] + ny); const float ix0 = lerp(ix0y0, ix0y1, pos[1]); const int nx = l[0] < (2 * reso - 1) ? 
(l[0] + 1) : 0; MAYBE_READ_LINK2(ix1y0, reso * nx + l[1]); MAYBE_READ_LINK2(ix1y1, reso * nx + ny); const float ix1 = lerp(ix1y0, ix1y1, pos[1]); return lerp(ix0, ix1, pos[0]); #undef MAYBE_READ_LINK2 } template<class data_type_t, class voxel_index_t> __device__ __inline__ void trilerp_backward_bg_one( const int32_t* __restrict__ links, data_type_t* __restrict__ grad_data_out, bool* __restrict__ mask_out, int reso, int nlayers, int nchannels, const voxel_index_t* __restrict__ l, const float* __restrict__ pos, float grad_out, const int idx) { const float ay = 1.f - pos[1], az = 1.f - pos[2]; #define MAYBE_ADD_LINK2(u, valexpr) \ { \ int link = links[u]; \ if (link >= 0) { \ link *= nlayers; \ float* __restrict__ gdptr = &grad_data_out[(link + l[2]) \ * nchannels + idx]; \ const float val = (valexpr); \ atomicAdd(gdptr, val * az); \ atomicAdd(gdptr + nchannels, val * pos[2]); \ if (mask_out != nullptr) { \ bool* __restrict__ mptr = &mask_out[link + l[2]]; \ mptr[0] = mptr[1] = true; \ } \ } \ } const int ny = l[1] < (reso - 1) ? (l[1] + 1) : 0; float xo = (1.0f - pos[0]) * grad_out; MAYBE_ADD_LINK2(reso * l[0] + l[1], ay * xo); MAYBE_ADD_LINK2(reso * l[0] + ny, pos[1] * xo); xo = pos[0] * grad_out; const int nx = l[0] < (2 * reso - 1) ? (l[0] + 1) : 0; MAYBE_ADD_LINK2(reso * nx + l[1], ay * xo); MAYBE_ADD_LINK2(reso * nx + ny, pos[1] * xo); #undef MAYBE_READ_LINK2 } // Compute the amount to skip for negative link values __device__ __inline__ float compute_skip_dist( SingleRaySpec& __restrict__ ray, const int32_t* __restrict__ links, int offx, int offy, int pos_offset = 0) { const int32_t link_val = links[offx * (ray.l[0] + pos_offset) + offy * (ray.l[1] + pos_offset) + (ray.l[2] + pos_offset)]; if (link_val >= -1) return 0.f; // Not worth const uint32_t dist = -link_val; const uint32_t cell_ul_shift = (dist - 1); const uint32_t cell_side_len = (1 << cell_ul_shift) - 1.f; // AABB intersection // Consider caching the invdir for the ray float tmin = 0.f; float tmax = 1e9f; #pragma unroll for (int i = 0; i < 3; ++i) { int ul = (((ray.l[i] + pos_offset) >> cell_ul_shift) << cell_ul_shift); ul -= ray.l[i] + pos_offset; const float invdir = 1.0 / ray.dir[i]; const float t1 = (ul - ray.pos[i] + pos_offset) * invdir; const float t2 = (ul + cell_side_len - ray.pos[i] + pos_offset) * invdir; if (ray.dir[i] != 0.f) { tmin = max(tmin, min(t1, t2)); tmax = min(tmax, max(t1, t2)); } } // const uint32_t cell_ul_shift = 1 - dist; // const uint32_t cell_br_shift = -cell_ul_shift; // // // AABB intersection // // Consider caching the invdir for the ray // float tmin = 0.f; // float tmax = 1e9f; // #pragma unroll // for (int i = 0; i < 3; ++i) { // const float invdir = 1.0 / ray.dir[i]; // const float t1 = (cell_ul_shift - ray.pos[i] + pos_offset) * invdir; // const float t2 = (cell_br_shift - ray.pos[i] + pos_offset) * invdir; // if (ray.dir[i] != 0.f) { // tmin = max(tmin, min(t1, t2)); // tmax = min(tmax, max(t1, t2)); // } // } if (tmin > 0.f) { // Somehow the origin is not in the cube // Should not happen for distance transform // If using geometric distances: // will happen near the lowest vertex of a cell, // since l is always the lowest neighbor return 0.f; } return tmax; } // Spherical functions // SH Coefficients from https://github.com/google/spherical-harmonics __device__ __constant__ const float C0 = 0.28209479177387814; __device__ __constant__ const float C1 = 0.4886025119029199; __device__ __constant__ const float C2[] = { 1.0925484305920792, -1.0925484305920792, 0.31539156525252005, 
-1.0925484305920792, 0.5462742152960396 }; __device__ __constant__ const float C3[] = { -0.5900435899266435, 2.890611442640554, -0.4570457994644658, 0.3731763325901154, -0.4570457994644658, 1.445305721320277, -0.5900435899266435 }; // __device__ __constant__ const float C4[] = { // 2.5033429417967046, // -1.7701307697799304, // 0.9461746957575601, // -0.6690465435572892, // 0.10578554691520431, // -0.6690465435572892, // 0.47308734787878004, // -1.7701307697799304, // 0.6258357354491761, // }; __device__ __inline__ void calc_sh( const int basis_dim, const float* __restrict__ dir, float* __restrict__ out) { out[0] = C0; const float x = dir[0], y = dir[1], z = dir[2]; const float xx = x * x, yy = y * y, zz = z * z; const float xy = x * y, yz = y * z, xz = x * z; switch (basis_dim) { // 16 not supported rn due to warp size // case 16: // out[9] = C3[0] * y * (3 * xx - yy); // out[10] = C3[1] * xy * z; // out[11] = C3[2] * y * (4 * zz - xx - yy); // out[12] = C3[3] * z * (2 * zz - 3 * xx - 3 * yy); // out[13] = C3[4] * x * (4 * zz - xx - yy); // out[14] = C3[5] * z * (xx - yy); // out[15] = C3[6] * x * (xx - 3 * yy); // [[fallthrough]]; case 9: out[4] = C2[0] * xy; out[5] = C2[1] * yz; out[6] = C2[2] * (2.0 * zz - xx - yy); out[7] = C2[3] * xz; out[8] = C2[4] * (xx - yy); [[fallthrough]]; case 4: out[1] = -C1 * y; out[2] = C1 * z; out[3] = -C1 * x; } } __device__ __inline__ void calc_sphfunc( const PackedSparseGridSpec& grid, const int lane_id, const int ray_id, const float* __restrict__ dir, // Pre-normalized float* __restrict__ out) { // Placeholder if (grid.basis_type == BASIS_TYPE_3D_TEXTURE) { float p[3]; int32_t l[3]; for (int j = 0; j < 3; ++j) { // Note: this is align_corners=True behavior // (vs align_corners=False in the sigma/coeff grid trilerp) p[j] = (dir[j] * 0.5f + 0.5f) * (grid.basis_reso - 1.f); p[j] = min(max(p[j], 0.f), grid.basis_reso - 1.f); l[j] = min(static_cast<int32_t>(p[j]), grid.basis_reso - 2); p[j] -= static_cast<float>(l[j]); } if (lane_id < grid.basis_dim) { out[lane_id] = fmaxf( trilerp_one( grid.basis_data, grid.basis_reso, grid.basis_dim, l, p, lane_id), 0.f); } } else if (grid.basis_type == BASIS_TYPE_MLP) { const float* __restrict__ basis_ptr = grid.basis_data + grid.basis_dim * ray_id; if (lane_id < grid.basis_dim) { out[lane_id] = _SIGMOID(basis_ptr[lane_id]); } } else { calc_sh(grid.basis_dim, dir, out); } } __device__ __inline__ void calc_sphfunc_backward( const PackedSparseGridSpec& grid, const int lane_id, const int ray_id, const float* __restrict__ dir, // Pre-normalized const float* __restrict__ output_saved, const float* __restrict__ grad_output, float* __restrict__ grad_basis_data) { if (grad_basis_data == nullptr) return; // Placeholder if (grid.basis_type == BASIS_TYPE_3D_TEXTURE) { float p[3]; int32_t l[3]; for (int j = 0; j < 3; ++j) { // Note: this is align_corners=True behavior // (vs align_corners=False in the sigma/coeff grid trilerp) p[j] = (dir[j] * 0.5f + 0.5f) * (grid.basis_reso - 1.f); p[j] = min(max(p[j], 0.f), grid.basis_reso - 1.f); l[j] = min(static_cast<int32_t>(p[j]), grid.basis_reso - 2); p[j] -= static_cast<float>(l[j]); } __syncwarp((1U << grid.sh_data_dim) - 1); if (lane_id < grid.basis_dim && output_saved[lane_id] > 0.f) { trilerp_backward_one<float, int32_t>(grad_basis_data, grid.basis_reso, grid.basis_dim, l, p, grad_output[lane_id], lane_id); } } else if (grid.basis_type == BASIS_TYPE_MLP) { float* __restrict__ grad_basis_ptr = grad_basis_data + grid.basis_dim * ray_id; if (lane_id < grid.basis_dim) { const float sig = 
output_saved[lane_id]; grad_basis_ptr[lane_id] = sig * (1.f - sig) * grad_output[lane_id]; } } else { // nothing needed } } __device__ __inline__ float _intersect_aabb_unit( const float* __restrict__ cen, const float* __restrict__ invdir) { // Intersect unit AABB float tmax = 1e9f; float t1, t2; #pragma unroll for (int i = 0; i < 3; ++i) { t1 = - cen[i] * invdir[i]; t2 = t1 + invdir[i]; tmax = min(tmax, max(t1, t2)); } return tmax; } __device__ __inline__ float _get_delta_scale( const float* __restrict__ scaling, float* __restrict__ dir) { dir[0] *= scaling[0]; dir[1] *= scaling[1]; dir[2] *= scaling[2]; float delta_scale = _rnorm(dir); dir[0] *= delta_scale; dir[1] *= delta_scale; dir[2] *= delta_scale; return delta_scale; } __device__ __inline__ static void _normalize( float* __restrict__ dir) { const float rnorm = _rnorm(dir); dir[0] *= rnorm; dir[1] *= rnorm; dir[2] *= rnorm; } __device__ __inline__ static void _unitvec2equirect( const float* __restrict__ unit_dir, int reso, float* __restrict__ xy) { const float lat = asinf(unit_dir[1]); const float lon = atan2f(unit_dir[0], unit_dir[2]); xy[0] = reso * 2 * (0.5 + lon * 0.5 * M_1_PI); xy[1] = reso * (0.5 - lat * M_1_PI); } __device__ __inline__ static void _equirect2unitvec( float x, float y, int reso, float* __restrict__ unit_dir) { const float lon = (x * (1.0 / (reso * 2)) - 0.5) * (2 * M_PI); const float lat = -(y * (1.0 / reso) - 0.5) * M_PI; const float coslat = cosf(lat); unit_dir[0] = coslat * sinf(lon); unit_dir[1] = sinf(lat); unit_dir[2] = coslat * cosf(lon); } __device__ __inline__ static void world2ndc( const PackedCameraSpec& __restrict__ cam, float* __restrict__ dir, float* __restrict__ cen, float near = 1.f) { // Shift ray origins to near plane, not sure if needed const float t = (near - cen[2]) / dir[2]; #pragma unroll 3 for (int i = 0; i < 3; ++i) { cen[i] = fmaf(t, dir[i], cen[i]); } dir[0] = cam.ndc_coeffx * (dir[0] / dir[2] - cen[0] / cen[2]); dir[1] = cam.ndc_coeffy * (dir[1] / dir[2] - cen[1] / cen[2]); dir[2] = 2 * near / cen[2]; cen[0] = cam.ndc_coeffx * (cen[0] / cen[2]); cen[1] = cam.ndc_coeffy * (cen[1] / cen[2]); cen[2] = 1 - 2 * near / cen[2]; _normalize(dir); } __device__ __inline__ void cam2world_ray( int ix, int iy, const PackedCameraSpec& __restrict__ cam, // Outputs float* __restrict__ dir, float* __restrict__ origin) { // OpenCV convention (contrary to svox 1, which uses OpenGL) float x = (ix + 0.5f - cam.cx) / cam.fx; float y = (iy + 0.5f - cam.cy) / cam.fy; float z = sqrtf(x * x + y * y + 1.0); x /= z; y /= z; z = 1.0f / z; dir[0] = cam.c2w[0][0] * x + cam.c2w[0][1] * y + cam.c2w[0][2] * z; dir[1] = cam.c2w[1][0] * x + cam.c2w[1][1] * y + cam.c2w[1][2] * z; dir[2] = cam.c2w[2][0] * x + cam.c2w[2][1] * y + cam.c2w[2][2] * z; origin[0] = cam.c2w[0][3]; origin[1] = cam.c2w[1][3]; origin[2] = cam.c2w[2][3]; if (cam.ndc_coeffx > 0.f) world2ndc(cam, dir, origin); } struct ConcentricSpheresIntersector { __device__ ConcentricSpheresIntersector( const float* __restrict__ origin, const float* __restrict__ dir) { q2a = 2 * _dot(dir, dir); qb = 2 * _dot(origin, dir); f = qb * qb - 2 * q2a * _dot(origin, origin); } // Get the far intersection, which we want for rendering MSI __device__ bool intersect(float r, float* __restrict__ out, bool near=false) { float det = _det(r); if (det < 0) return false; if (near) { *out = (-qb - sqrtf(det)) / q2a; } else { *out = (-qb + sqrtf(det)) / q2a; } return true; } __device__ __host__ float _det (float r) { return f + 2 * q2a * r * r; } float q2a, qb, f; }; __device__ 
__inline__ void ray_find_bounds( SingleRaySpec& __restrict__ ray, const PackedSparseGridSpec& __restrict__ grid, const RenderOptions& __restrict__ opt, uint32_t ray_id) { // Warning: modifies ray.origin transform_coord(ray.origin, grid._scaling, grid._offset); // Warning: modifies ray.dir ray.world_step = _get_delta_scale(grid._scaling, ray.dir) * opt.step_size; if (opt.use_spheric_clip) { // Horrible hack const float sphere_scaling[3] { 2.f / float(grid.size[0]), 2.f / float(grid.size[1]), 2.f / float(grid.size[2]) }; float sph_origin[3], sph_dir[3]; #pragma unroll 3 for (int i = 0; i < 3; ++i) { sph_origin[i] = fmaf(ray.origin[i] + 0.5f, sphere_scaling[i], -1.f); sph_dir[i] = ray.dir[i] * sphere_scaling[i]; } ConcentricSpheresIntersector csi(sph_origin, sph_dir); if (!csi.intersect(1.f, &ray.tmax) || !csi.intersect(1.f - opt.near_clip, &ray.tmin, true)) { ray.tmin = 1e-9f; ray.tmax = 0.f; } } else { ray.tmin = opt.near_clip / ray.world_step * opt.step_size; ray.tmax = 2e3f; for (int i = 0; i < 3; ++i) { const float invdir = 1.0 / ray.dir[i]; const float t1 = (-0.5f - ray.origin[i]) * invdir; const float t2 = (grid.size[i] - 0.5f - ray.origin[i]) * invdir; if (ray.dir[i] != 0.f) { ray.tmin = max(ray.tmin, min(t1, t2)); ray.tmax = min(ray.tmax, max(t1, t2)); } } } // if (opt.randomize && opt.random_sigma_std > 0.0) { // // Seed the RNG // ray.rng.x = opt._m1 ^ ray_id; // ray.rng.y = opt._m2 ^ ray_id; // ray.rng.z = opt._m3 ^ ray_id; // } } __device__ __inline__ void ray_find_bounds_bg( SingleRaySpec& __restrict__ ray, const PackedSparseGridSpec& __restrict__ grid, const RenderOptions& __restrict__ opt, uint32_t ray_id) { // Warning: modifies ray.origin transform_coord(ray.origin, grid._scaling, grid._offset); // Warning: modifies ray.dir ray.world_step = _get_delta_scale(grid._scaling, ray.dir);// * opt.step_size; const float sphere_scaling[3] { 2.f / float(grid.size[0]), 2.f / float(grid.size[1]), 2.f / float(grid.size[2]) }; #pragma unroll 3 for (int i = 0; i < 3; ++i) { ray.origin[i] = fmaf(ray.origin[i] + 0.5f, sphere_scaling[i], -1.f); ray.dir[i] = ray.dir[i] * sphere_scaling[i]; } const float inorm = _rnorm(ray.dir); ray.world_step *= inorm; #pragma unroll 3 for (int i = 0; i < 3; ++i) { ray.dir[i] *= inorm; } // float q2a = 2 * _dot(ray.dir, ray.dir); // float qb = 2 * _dot(ray.origin, ray.dir); // float f = qb * qb - 2 * q2a * _dot(ray.origin, ray.origin); // const float det = f + 2 * q2a * opt.background_msi_scale * opt.background_msi_scale; // // if (det < 0.f) { // ray.tmin = opt.background_msi_scale; // } else { // ray.tmin = (-qb + sqrtf(det)) / q2a; // } // if (opt.randomize && opt.random_sigma_std_background > 0) { // // Seed the RNG (hacks) // ray.rng.x = opt._m2 ^ (ray_id - 1); // ray.rng.y = opt._m3 ^ (ray_id - 1); // ray.rng.z = opt._m1 ^ (ray_id - 1); // } } } // namespace device } // namespace
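// Plain C++ reference of the trilinear-interpolation pattern shared by the
// trilerp_* helpers above: lerp along z, then y, then x over the eight corner
// values c[x][y][z].  Purely illustrative; the device versions differ only in
// how the corner values are fetched (dense strides, link indirection, or the
// wrapped background grid).
static inline float lerp_ref(float a, float b, float t) { return a + t * (b - a); }

static float trilerp_ref(const float c[2][2][2], const float pos[3]) {
  const float ix0y0 = lerp_ref(c[0][0][0], c[0][0][1], pos[2]);
  const float ix0y1 = lerp_ref(c[0][1][0], c[0][1][1], pos[2]);
  const float ix0   = lerp_ref(ix0y0, ix0y1, pos[1]);
  const float ix1y0 = lerp_ref(c[1][0][0], c[1][0][1], pos[2]);
  const float ix1y1 = lerp_ref(c[1][1][0], c[1][1][1], pos[2]);
  const float ix1   = lerp_ref(ix1y0, ix1y1, pos[1]);
  return lerp_ref(ix0, ix1, pos[0]);
}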
namespace lightseq { namespace cuda { /** @brief: ker_norm_layer_prepost layer normalization, modify input according to is_post_ln @thread gridDim.x = batch_size * batch_seq_len blockDim.x = max_thread_per_block @param input: [batch_size, batch_seq_len, hidden_size] output: [batch_size, batch_seq_len, hidden_size] scale: [hidden_size] bias: [hidden_size] */ template <typename T> __global__ void ker_norm_layer_prepost(T* input, T* output, const T* scale, const T* bias, const int hidden_size, bool is_post_ln) { uint block_start = blockIdx.x * hidden_size; uint start = block_start + threadIdx.x; uint end = block_start + hidden_size; float val = 0.0; for (uint i = start; i < end; i += blockDim.x) { val += input[i]; } // step 0. compute mean __shared__ float s_mean; float reduce_res = blockReduceSum<float>(val); if (threadIdx.x == 0) s_mean = reduce_res / float(hidden_size); __syncthreads(); // step 1. compute variance val = 0.0; for (uint i = start; i < end; i += blockDim.x) { float tmp = input[i] - s_mean; val += tmp * tmp; } __shared__ float s_var; reduce_res = blockReduceSum(val); if (threadIdx.x == 0) s_var = rsqrtf(reduce_res / float(hidden_size) + epsilon); __syncthreads(); // step 2. layer norm for (uint i = start; i < end; i += blockDim.x) { val = input[i] - s_mean; output[i] = val * s_var * __ldg(&scale[i - block_start]) + __ldg(&bias[i - block_start]); if (is_post_ln) { input[i] = output[i]; } } } template <> __global__ void ker_norm_layer_prepost<__half>(__half* input, __half* output, const __half* scale, const __half* bias, const int half_hidden_size, bool is_post_ln) { uint block_start = blockIdx.x * half_hidden_size; uint start = block_start + threadIdx.x; uint end = blockIdx.x * half_hidden_size + half_hidden_size; half2* pinput = (half2*)input; half2* poutput = (half2*)output; const half2* pscale = (const half2*)scale; const half2* pbias = (const half2*)bias; float mean_dim = float(half_hidden_size) * 2.f; float val = 0.0; // step 0. compute mean for (uint i = start; i < end; i += blockDim.x) { float2 local_f2 = safe_half2_to_float2(pinput[i]); val += local_f2.x + local_f2.y; } __shared__ float s_mean; float reduce_res = blockReduceSum<float>(val); if (threadIdx.x == 0) s_mean = reduce_res / mean_dim; __syncthreads(); // step 1. compute variance val = 0.0; for (uint i = start; i < end; i += blockDim.x) { float2 local_f2 = safe_half2_to_float2(pinput[i]); float tmpx = local_f2.x - s_mean; float tmpy = local_f2.y - s_mean; val += tmpx * tmpx + tmpy * tmpy; } __shared__ float s_var; reduce_res = blockReduceSum(val); if (threadIdx.x == 0) s_var = rsqrtf(reduce_res / mean_dim + epsilon); __syncthreads(); // step 2. 
layer norm for (uint i = start; i < end; i += blockDim.x) { float2 scale_val = __half22float2(__ldg(&pscale[i - block_start])); float2 bias_val = __half22float2(__ldg(&pbias[i - block_start])); float2 local_f2 = safe_half2_to_float2(pinput[i]); local_f2.x = (local_f2.x - s_mean) * s_var * scale_val.x + bias_val.x; local_f2.y = (local_f2.y - s_mean) * s_var * scale_val.y + bias_val.y; poutput[i] = __float22half2_rn(local_f2); if (is_post_ln) { pinput[i] = poutput[i]; } } } template <typename T> void ker_norm_layer_prepost_launcher(int token_num, int hidden_size, cudaStream_t stream, T* input, T* output, const T* scale, const T* bias, const int max_thread_per_block, bool is_post_ln) { ker_norm_layer_prepost<T><<<token_num, max_thread_per_block, 0, stream>>>( input, output, scale, bias, hidden_size, is_post_ln); } template <> void ker_norm_layer_prepost_launcher<__half>( int token_num, int hidden_size, cudaStream_t stream, __half* input, __half* output, const __half* scale, const __half* bias, const int max_thread_per_block, bool is_post_ln) { ker_norm_layer_prepost<__half> <<<token_num, max_thread_per_block, 0, stream>>>( input, output, scale, bias, hidden_size / 2, is_post_ln); } template void ker_norm_layer_prepost_launcher<float>( int token_num, int hidden_size, cudaStream_t stream, float* input, float* output, const float* scale, const float* bias, const int max_thread_per_block, bool is_post_ln); template void ker_norm_layer_prepost_launcher<__half>( int token_num, int hidden_size, cudaStream_t stream, __half* input, __half* output, const __half* scale, const __half* bias, const int max_thread_per_block, bool is_post_ln); /** @brief: ker_softmax_topk_router softmax of gate output and route each token to topk experts Currently, support topk = 1, 2 @thread gridDim.x = batch_token_num blockDim.x = first multiple of WARP_SIZE greater than expert_num @param gate_out: [batch_token_num, expert_num] score_routed: [expert_num, max_token_num] score if the token is routed to the expert, else -1.0 expert_routed: [max_token_num * topk] ids of two routed experts. */ template <typename T> __global__ void ker_softmax_topk_router(const T* gate_out, float* score_routed, int* expert_routed, int batch_token_num, int expert_num, int max_token_num, int topk) { int token_id = blockIdx.x, expert_id = threadIdx.x; // softmax float val = expert_id < expert_num ? (float)__ldg(&gate_out[token_id * expert_num + expert_id]) : CUDA_FLOAT_INF_NEG; float max_val = blockReduceMax(val); __shared__ float smax; if (threadIdx.x == 0) smax = max_val; __syncthreads(); float score = expert_id < expert_num ? 
expf(val - smax) : 0.f; float rsum = blockReduceSum(score); __shared__ float ssum; if (threadIdx.x == 0) ssum = rsum; __syncthreads(); score /= ssum; // routing int idx = expert_id * max_token_num + token_id; score_routed[idx] = -1.0f; __shared__ int first_expert; __shared__ float first_score; if (val == smax) { first_expert = expert_id; } __syncthreads(); if (expert_id == first_expert) { first_score = score; expert_routed[token_id] = expert_id; val = CUDA_FLOAT_INF_NEG; } if (topk == 1) { if (expert_id == first_expert) { score_routed[idx] = first_score; } return; } max_val = blockReduceMax(val); if (threadIdx.x == 0) smax = max_val; __syncthreads(); __shared__ int second_expert; if (val == smax) { second_expert = expert_id; } __syncthreads(); if (expert_id == second_expert) { expert_routed[token_id + max_token_num] = expert_id; score_routed[first_expert * max_token_num + token_id] = first_score / (first_score + score); score_routed[idx] = score / (first_score + score); } } template <typename T> void ker_softmax_topk_router_launcher(int batch_token_num, int expert_num, int max_token_num, int topk, cudaStream_t stream, const T* gate_out, float* score_routed, int* expert_routed) { int block_dim = (expert_num + 31) >> 5 << 5; ker_softmax_topk_router<T><<<batch_token_num, block_dim, 0, stream>>>( gate_out, score_routed, expert_routed, batch_token_num, expert_num, max_token_num, topk); } template void ker_softmax_topk_router_launcher<float>( int batch_token_num, int expert_num, int max_token_num, int topk, cudaStream_t stream, const float* gate_out, float* score_routed, int* expert_routed); template void ker_softmax_topk_router_launcher<__half>( int batch_token_num, int expert_num, int max_token_num, int topk, cudaStream_t stream, const __half* gate_out, float* score_routed, int* expert_routed); /** @brief: ker_reorder_tokens reorder tokens by expert routing @thread gridDim.x = expert_num gridDim.y = batch_token_num blockDim.x = max_thread_per_block @param input: [batch_token_num, hidden_size] score: [expert_num, max_token_num] output: [expert_num, max_token_num, hidden_size] */ template <typename T> __global__ void ker_reorder_tokens(const T* input, const float* score, T* output, int max_token_num, int hidden_size) { int expert_id = blockIdx.x, token_id = blockIdx.y; int score_pos = expert_id * max_token_num + token_id; if (__ldg(&score[score_pos]) > 0.) 
{ for (std::size_t i = threadIdx.x; i < hidden_size; i += blockDim.x) { output[score_pos * hidden_size + i] = __ldg(&input[token_id * hidden_size + i]); } } } template <typename T> void ker_reorder_tokens_launcher(int batch_token_num, int expert_num, int max_token_num, int hidden_size, int max_thread_per_block, cudaStream_t stream, const T* input, const float* score, T* output) { ker_reorder_tokens<T> <<<dim3(expert_num, batch_token_num), max_thread_per_block, 0, stream>>>( input, score, output, max_token_num, hidden_size); } template void ker_reorder_tokens_launcher<float>( int batch_token_num, int expert_num, int max_token_num, int hidden_size, int max_thread_per_block, cudaStream_t stream, const float* input, const float* score, float* output); template void ker_reorder_tokens_launcher<__half>( int batch_token_num, int expert_num, int max_token_num, int hidden_size, int max_thread_per_block, cudaStream_t stream, const __half* input, const float* score, __half* output); /** @brief: ker_strided_bias_gelu activated by gelu, add bias, each expert has unique bias @thread gridDim.x = expert_num gridDim.y = batch_token_num blockDim.x = max_thread_per_block @param input: [expert_num, max_token_num, feature_dim] bias: [expert_num, feature_dim] feature_dim: the dim of input feature */ template <typename T> __global__ void ker_strided_bias_gelu(T* input, const T* bias, int feature_dim, int max_token_num) { int offset = (blockIdx.x * max_token_num + blockIdx.y) * feature_dim; for (int idx = threadIdx.x; idx < feature_dim; idx += blockDim.x) { int cur_offset = offset + idx; input[cur_offset] = gelu<float>( input[cur_offset] + __ldg(&bias[blockIdx.x * feature_dim + idx])); } } /* fp16 version */ template <> __global__ void ker_strided_bias_gelu<__half>(__half* input, const __half* bias, int feature_dim, int max_token_num) { int offset = (blockIdx.x * max_token_num + blockIdx.y) * feature_dim; half2* pinput = (half2*)input; const half2* pbias = (const half2*)bias; for (int idx = threadIdx.x; idx < feature_dim; idx += blockDim.x) { int cur_offset = offset + idx; pinput[cur_offset] = gelu<half2>(__hadd2( pinput[cur_offset], __ldg(&pbias[blockIdx.x * feature_dim + idx]))); } } template <typename T> void ker_strided_bias_gelu_launcher(int batch_token_num, int expert_num, int max_token_num, int feature_dim, int block_dim, cudaStream_t stream, T* input, const T* bias) { ker_strided_bias_gelu<T> <<<dim3(expert_num, batch_token_num), block_dim, 0, stream>>>( input, bias, feature_dim, max_token_num); } template <> void ker_strided_bias_gelu_launcher<__half>(int batch_token_num, int expert_num, int max_token_num, int feature_dim, int block_dim, cudaStream_t stream, __half* input, const __half* bias) { ker_strided_bias_gelu<__half> <<<dim3(expert_num, batch_token_num), block_dim, 0, stream>>>( input, bias, feature_dim / 2, max_token_num); } template void ker_strided_bias_gelu_launcher<float>( int batch_token_num, int expert_num, int max_token_num, int feature_dim, int block_dim, cudaStream_t stream, float* input, const float* bias); template void ker_strided_bias_gelu_launcher<__half>( int batch_token_num, int expert_num, int max_token_num, int feature_dim, int block_dim, cudaStream_t stream, __half* input, const __half* bias); /** @brief: ker_strided_bias_relu activated by relu, add bias, each expert has unique bias @thread gridDim.x = expert_num gridDim.y = batch_token_num blockDim.x = max_thread_per_block @param input: [expert_num, max_token_num, feature_dim] bias: [expert_num, feature_dim] feature_dim: the 
dim of input feature */ template <typename T> __global__ void ker_strided_bias_relu(T* input, const T* bias, int feature_dim, int max_token_num) { int offset = (blockIdx.x * max_token_num + blockIdx.y) * feature_dim; for (int idx = threadIdx.x; idx < feature_dim; idx += blockDim.x) { int cur_offset = offset + idx; input[cur_offset] = max(input[cur_offset] + __ldg(&bias[blockIdx.x * feature_dim + idx]), (T)0.f); } } template <> __global__ void ker_strided_bias_relu<__half>(__half* input, const __half* bias, int feature_dim, int max_token_num) { int offset = (blockIdx.x * max_token_num + blockIdx.y) * feature_dim; half2* pinput = (half2*)input; const half2* pbias = (const half2*)bias; for (int idx = threadIdx.x; idx < feature_dim; idx += blockDim.x) { int cur_offset = offset + idx; float2 f2_inp = __half22float2(pinput[cur_offset]); float2 f2_bias = __half22float2(__ldg(&pbias[blockIdx.x * feature_dim + idx])); f2_inp.x = fmaxf(f2_inp.x + f2_bias.x, 0.f); f2_inp.y = fmaxf(f2_inp.y + f2_bias.y, 0.f); pinput[cur_offset] = __float22half2_rn(f2_inp); } } template <typename T> void ker_strided_bias_relu_launcher(int batch_token_num, int expert_num, int max_token_num, int feature_dim, int block_dim, cudaStream_t stream, T* input, const T* bias) { ker_strided_bias_relu<T> <<<dim3(expert_num, batch_token_num), block_dim, 0, stream>>>( input, bias, feature_dim, max_token_num); } template <> void ker_strided_bias_relu_launcher<__half>(int batch_token_num, int expert_num, int max_token_num, int feature_dim, int block_dim, cudaStream_t stream, __half* input, const __half* bias) { ker_strided_bias_relu<__half> <<<dim3(expert_num, batch_token_num), block_dim, 0, stream>>>( input, bias, feature_dim / 2, max_token_num); } template void ker_strided_bias_relu_launcher<float>( int batch_token_num, int expert_num, int max_token_num, int feature_dim, int block_dim, cudaStream_t stream, float* input, const float* bias); template void ker_strided_bias_relu_launcher<__half>( int batch_token_num, int expert_num, int max_token_num, int feature_dim, int block_dim, cudaStream_t stream, __half* input, const __half* bias); /** @brief: ker_bias_redirect_tokens add second bias, each expert has unique bias, redirect tokens to original positions, combine by score @thread gridDim.x = batch_token_num blockDim.x = max_thread_per_block @param input: [expert_num, max_token_num, feature_dim] bias: [expert_num, feature_dim] score: [expert_num, max_token_num] expert_routed: [max_token_num * topk] output: [batch_token_num, feature_dim] */ template <typename T> __global__ void ker_bias_redirect_residual(const T* input, const T* bias, const float* score, const int* expert_routed, T* output, int feature_dim, int max_token_num, int topk) { int expert_id = -1, token_id = blockIdx.x; float input_val, score_val, bias_val, output_val; for (int idx = threadIdx.x; idx < feature_dim; idx += blockDim.x) { output_val = 0.0; for (int k = 0; k < topk; ++k) { expert_id = __ldg(&expert_routed[k * max_token_num + token_id]); score_val = __ldg(&score[expert_id * max_token_num + token_id]); input_val = __ldg( &input[(expert_id * max_token_num + token_id) * feature_dim + idx]); bias_val = __ldg(&bias[expert_id * feature_dim + idx]); output_val += ((input_val + bias_val) * score_val); } output[token_id * feature_dim + idx] += output_val; } } template <> __global__ void ker_bias_redirect_residual<__half>( const __half* input, const __half* bias, const float* score, const int* expert_routed, __half* output, int feature_dim, int max_token_num, int topk) { 
int expert_id = -1, token_id = blockIdx.x; const half2 *pinput = (const half2*)input, *pbias = (const half2*)bias; half2* poutput = (half2*)output; float2 f2_input_val, f2_bias_val, f2_output_val; float score_val; for (int idx = threadIdx.x; idx < feature_dim; idx += blockDim.x) { f2_output_val.x = 0.f; f2_output_val.y = 0.f; for (int k = 0; k < topk; ++k) { expert_id = __ldg(&expert_routed[k * max_token_num + token_id]); score_val = __ldg(&score[expert_id * max_token_num + token_id]); f2_input_val = __half22float2(__ldg( &pinput[(expert_id * max_token_num + token_id) * feature_dim + idx])); f2_bias_val = __half22float2(__ldg(&pbias[expert_id * feature_dim + idx])); f2_output_val.x += ((f2_input_val.x + f2_bias_val.x) * score_val); f2_output_val.y += ((f2_input_val.y + f2_bias_val.y) * score_val); } poutput[token_id * feature_dim + idx] = __hadd2(poutput[token_id * feature_dim + idx], __float22half2_rn(f2_output_val)); } } template <typename T> void ker_bias_redirect_residual_launcher(int hidden_size, int max_token_num, int topk, int batch_token_num, int block_dim, cudaStream_t stream, const T* input, const T* bias, const float* score, const int* expert_routed, T* output) { ker_bias_redirect_residual<T><<<batch_token_num, block_dim, 0, stream>>>( input, bias, score, expert_routed, output, hidden_size, max_token_num, topk); } template <> void ker_bias_redirect_residual_launcher<__half>( int hidden_size, int max_token_num, int topk, int batch_token_num, int block_dim, cudaStream_t stream, const __half* input, const __half* bias, const float* score, const int* expert_routed, __half* output) { ker_bias_redirect_residual<__half><<<batch_token_num, block_dim, 0, stream>>>( input, bias, score, expert_routed, output, hidden_size / 2, max_token_num, topk); } template void ker_bias_redirect_residual_launcher<float>( int hidden_size, int max_token_num, int topk, int batch_token_num, int block_dim, cudaStream_t stream, const float* input, const float* bias, const float* score, const int* expert_routed, float* output); template void ker_bias_redirect_residual_launcher<__half>( int hidden_size, int max_token_num, int topk, int batch_token_num, int block_dim, cudaStream_t stream, const __half* input, const __half* bias, const float* score, const int* expert_routed, __half* output); } // namespace cuda } // namespace lightseq
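// --- Illustrative sketch, not part of the original source ---
// How the MoE kernels above chain together for one expert FFN layer on the host.
// Buffer names, the 1024-thread block size and the two per-expert GEMMs (performed
// with cuBLAS or similar elsewhere) are placeholders; error handling is omitted.
void moe_ffn_forward_sketch(const float* gate_out,     // [batch_token_num, expert_num]
                            const float* ffn_in,       // [batch_token_num, hidden_size]
                            float* expert_in,          // [expert_num, max_token_num, hidden_size]
                            float* expert_mid,         // [expert_num, max_token_num, inner_size]
                            float* expert_out,         // [expert_num, max_token_num, hidden_size]
                            float* ffn_out,            // [batch_token_num, hidden_size], holds the residual
                            float* score,              // [expert_num, max_token_num]
                            int* expert_routed,        // [max_token_num * topk]
                            const float* inner_bias,   // [expert_num, inner_size]
                            const float* out_bias,     // [expert_num, hidden_size]
                            int batch_token_num, int expert_num, int max_token_num,
                            int hidden_size, int inner_size, int topk,
                            cudaStream_t stream) {
  using namespace lightseq::cuda;
  const int max_thread_per_block = 1024;
  // 1. softmax the gate logits and pick the top-k experts per token
  ker_softmax_topk_router_launcher<float>(batch_token_num, expert_num, max_token_num,
                                          topk, stream, gate_out, score, expert_routed);
  // 2. gather each routed token into its expert's slot
  ker_reorder_tokens_launcher<float>(batch_token_num, expert_num, max_token_num,
                                     hidden_size, max_thread_per_block, stream,
                                     ffn_in, score, expert_in);
  // 3. per-expert GEMM expert_in -> expert_mid (not shown), then bias + GeLU
  ker_strided_bias_gelu_launcher<float>(batch_token_num, expert_num, max_token_num,
                                        inner_size, max_thread_per_block, stream,
                                        expert_mid, inner_bias);
  // 4. per-expert GEMM expert_mid -> expert_out (not shown), then add the second
  //    bias, weight by the routing score and scatter back onto the residual
  ker_bias_redirect_residual_launcher<float>(hidden_size, max_token_num, topk,
                                             batch_token_num, max_thread_per_block,
                                             stream, expert_out, out_bias, score,
                                             expert_routed, ffn_out);
}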
#define _SIZE_T_DEFINED #include <cuda.h> #include <device_launch_parameters.h> #include <texture_fetch_functions.h> #include "float.h" #include <builtin_types.h> #include <vector_functions.h> #include <math.h> #include "../NeuralNetwork/Activation/ActivationFunction.cu" #include "../Common/Reduction/Reduction.cu" extern "C" { __device__ float Clip(float value, float clip) { return (clip == 0) * value + (clip != 0) * ((value > clip) * clip + (value < -clip) * -clip + (value >= -clip && value <= clip) * value); /* avoids thread divergence, equivalent to: if (clip == 0) return value; else if (value > clip) return clip; else if (value < -clip) return -clip; else return value; */ } __global__ void GetNetInput( float* netInput, float* temporary, int cellsPerBlock, float* weights, float* input, int inputCount, float* previousOutput, int previousOutputCount, float* cellStates, int peephole, int bias ) { const int THREAD_CNT = 512; int size = inputCount + previousOutputCount + peephole * cellsPerBlock + bias; int memoryBlockId = blockIdx.x; int blockOffset = memoryBlockId * size; int tid = threadIdx.x; int weightOffset = blockOffset; // signal from external input for (int i = tid; i < inputCount; i += THREAD_CNT) { temporary[weightOffset + i] = weights[weightOffset + i] * input[i]; } weightOffset += inputCount; //// signal from previous output of memory blocks for (int i = tid; i < previousOutputCount; i += THREAD_CNT) { temporary[weightOffset + i] = weights[weightOffset + i] * previousOutput[i]; } weightOffset += previousOutputCount; // signal from peephole connections if (peephole) { for (int i = tid; i < cellsPerBlock; i += THREAD_CNT) { temporary[weightOffset + i] = weights[weightOffset + i] * cellStates[memoryBlockId * cellsPerBlock + i]; } weightOffset += cellsPerBlock; } if (bias) { temporary[weightOffset] = weights[weightOffset]; } DReduction<f_Sum_f, float, THREAD_CNT>((void*)netInput, (void*)temporary, nullptr, size, memoryBlockId, memoryBlockId * size, 1, true); } __global__ void CellStateFeedForwardKernelBPTT( ActivationFunctionEnum inputActivationFunction, ActivationFunctionEnum gateActivationFunction, float* previousCellStates, float* cellStates, float* cellStatesActivations, float* cellStateActivationDerivatives, float* cellInputNetInput, float* cellInputActivations, float* cellInputActivationDerivatives, float* inputGateNetInput, float* inputGateActivations, float* inputGateActivationDerivatives, float* forgetGateNetInput, float* forgetGateActivations, float* forgetGateActivationDerivatives, int cellCount, int cellsPerBlock, float clipCellState ) { int memoryBlockId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; if (memoryBlockId < cellCount / cellsPerBlock) { // activation function of all gates must be in range [0,1], sigmoid activation function is used float inputGateActivation = Evaluate(gateActivationFunction, inputGateNetInput[memoryBlockId]); inputGateActivations[memoryBlockId] = inputGateActivation; inputGateActivationDerivatives[memoryBlockId] = EvaluateDerivative(gateActivationFunction, inputGateNetInput[memoryBlockId]); float forgetGateActivation = Evaluate(gateActivationFunction, forgetGateNetInput[memoryBlockId]); forgetGateActivations[memoryBlockId] = forgetGateActivation; forgetGateActivationDerivatives[memoryBlockId] = EvaluateDerivative(gateActivationFunction, forgetGateNetInput[memoryBlockId]); // step 2: calculate activation of memory block's cells for (int 
cellId = memoryBlockId * cellsPerBlock; cellId < (memoryBlockId + 1) * cellsPerBlock; cellId++) { float cellInputActivation = Evaluate(inputActivationFunction, cellInputNetInput[cellId]); cellInputActivations[cellId] = cellInputActivation; cellInputActivationDerivatives[cellId] = EvaluateDerivative(inputActivationFunction, cellInputNetInput[cellId]); cellStates[cellId] = Clip(forgetGateActivation * previousCellStates[cellId] + inputGateActivation * cellInputActivation, clipCellState); cellStatesActivations[cellId] = Evaluate(inputActivationFunction, cellStates[cellId]); cellStateActivationDerivatives[cellId] = EvaluateDerivative(inputActivationFunction, cellStates[cellId]); } } } __global__ void OutputStateFeedForwardKernelBPTT( ActivationFunctionEnum gateActivationFunction, float* cellStatesActivations, float* output, float* outputGateNetInput, float* outputGateActivations, float* outputGateActivationDerivatives, int cellCount, int cellsPerBlock, float clipCellState ) { int memoryBlockId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; if (memoryBlockId < cellCount / cellsPerBlock) { // step 3: calculate output gate activation float outputGateActivation = Evaluate(gateActivationFunction, outputGateNetInput[memoryBlockId]); outputGateActivations[memoryBlockId] = outputGateActivation; outputGateActivationDerivatives[memoryBlockId] = EvaluateDerivative(gateActivationFunction, outputGateNetInput[memoryBlockId]); // step 4: calculate output of all memory block's cells for (int cellId = memoryBlockId * cellsPerBlock; cellId < (memoryBlockId + 1) * cellsPerBlock; cellId++) { output[cellId] = outputGateActivation * cellStatesActivations[cellId]; } } } /*****************************************************************************************************************************************************************/ /*****************************************************************************************************************************************************************/ /*****************************************************************************************************************************************************************/ /*****************************************************************************************************************************************************************/ /* /* ORIGINAL FROM KAREL */ /*****************************************************************************************************************************************************************/ /*****************************************************************************************************************************************************************/ /*****************************************************************************************************************************************************************/ __device__ float DeviceGetNetInput( int memoryBlockId, int cellsPerBlock, float* weights, int weightsOffset, float *input, int inputCount, float *previousOutput, int previousOutputCount, float *cellStates, bool peephole, bool bias ) { int weightId = weightsOffset; float netInput = 0; // signal from external input for (int i = 0; i < inputCount; i++) { netInput += weights[weightId] * input[i]; weightId++; } // signal from previous output of memory blocks for (int i = 0; i < previousOutputCount; i++) { netInput += weights[weightId] * previousOutput[i]; weightId++; } // signal from peephole 
connections if (peephole) { for (int i = 0; i < cellsPerBlock; i++) { netInput += weights[weightId] * cellStates[memoryBlockId * cellsPerBlock + i]; weightId++; } } if (bias) { netInput += weights[weightId]; } return netInput; } __global__ void LSTMFeedForwardKernel( ActivationFunctionEnum inputActivationFunction, ActivationFunctionEnum gateActivationFunction, ActivationFunctionEnum activationFunction, float *input, float *output, float *previousOutput, float *cellStates, float *previousCellStates, float *cellInputActivations, float *cellInputActivationDerivatives, float *inputGateActivations, float *inputGateActivationDerivatives, float *forgetGateActivations, float *forgetGateActivationDerivatives, float *outputGateActivations, float *outputGateActivationDerivatives, float *cellInputWeights, float *inputGateWeights, float *forgetGateWeights, float *outputGateWeights, float clipCellState, int inputCount, int cellCount, int cellsPerBlock ) { int memoryBlockId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; if (memoryBlockId < cellCount / cellsPerBlock) { // step 1: calculate activations of input and forget gate float inputGateNetInput = DeviceGetNetInput( memoryBlockId, cellsPerBlock, inputGateWeights, memoryBlockId * (inputCount + cellCount + cellsPerBlock + 1), input, inputCount, previousOutput, cellCount, previousCellStates, true, true ); float forgetGateNetInput = DeviceGetNetInput( memoryBlockId, cellsPerBlock, forgetGateWeights, memoryBlockId * (inputCount + cellCount + cellsPerBlock + 1), input, inputCount, previousOutput, cellCount, previousCellStates, true, true ); // activation function of all gates must be in range [0,1], sigmoid activation function is used float inputGateActivation = Evaluate(gateActivationFunction, inputGateNetInput); float forgetGateActivation = Evaluate(gateActivationFunction, forgetGateNetInput); inputGateActivations[memoryBlockId] = inputGateActivation; forgetGateActivations[memoryBlockId] = forgetGateActivation; inputGateActivationDerivatives[memoryBlockId] = EvaluateDerivative(gateActivationFunction, inputGateNetInput); forgetGateActivationDerivatives[memoryBlockId] = EvaluateDerivative(gateActivationFunction, forgetGateNetInput); // step 2: calculate activation of memory block's cells for (int cellId = memoryBlockId * cellsPerBlock; cellId < (memoryBlockId + 1) * cellsPerBlock; cellId++) { float cellNetInput = DeviceGetNetInput( memoryBlockId, cellsPerBlock, cellInputWeights, cellId * (inputCount + cellCount + 1), input, inputCount, previousOutput, cellCount, NULL, false, true ); float cellInputActivation = Evaluate(inputActivationFunction, cellNetInput); cellInputActivations[cellId] = cellInputActivation; cellInputActivationDerivatives[cellId] = EvaluateDerivative(inputActivationFunction, cellNetInput); cellStates[cellId] = Clip(forgetGateActivation * previousCellStates[cellId] + inputGateActivation * cellInputActivation, clipCellState); } // step 3: calculate output gate activation float outputGateNetInput = DeviceGetNetInput( memoryBlockId, cellsPerBlock, outputGateWeights, memoryBlockId * (inputCount + cellCount + cellsPerBlock + 1), input, inputCount, previousOutput, cellCount, cellStates, true, true ); float outputGateActivation = Evaluate(gateActivationFunction, outputGateNetInput); outputGateActivations[memoryBlockId] = outputGateActivation; outputGateActivationDerivatives[memoryBlockId] = EvaluateDerivative(gateActivationFunction, 
outputGateNetInput); // step 4: calculate output of all memory block's cells for (int cellId = memoryBlockId * cellsPerBlock; cellId < (memoryBlockId + 1) * cellsPerBlock; cellId++) { output[cellId] = outputGateActivation * cellStates[cellId]; //Evaluate(activationFunction, cellStates[cellId]); } } } }
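// --- Illustrative sketch, not part of the original source ---
// The kernels above index memory blocks as
//   memoryBlockId = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x,
// so a plain 1-D grid (blockIdx.y == 0, gridDim.y == 1) is a valid launch
// configuration. The helper and the commented launch below are hypothetical
// host-side code; the project's real launch code is not shown in this file.
static inline dim3 lstm_block_grid(int memoryBlockCount, int threadsPerBlock = 256) {
    // one thread per memory block, rounded up to whole blocks
    return dim3((memoryBlockCount + threadsPerBlock - 1) / threadsPerBlock, 1, 1);
}
// Example (all device buffers and activation enums assumed to exist):
// const int memoryBlocks = cellCount / cellsPerBlock;
// CellStateFeedForwardKernelBPTT<<<lstm_block_grid(memoryBlocks), 256>>>(
//     inputActFn, gateActFn, previousCellStates, cellStates, cellStatesActivations,
//     cellStateActivationDerivatives, cellInputNetInput, cellInputActivations,
//     cellInputActivationDerivatives, inputGateNetInput, inputGateActivations,
//     inputGateActivationDerivatives, forgetGateNetInput, forgetGateActivations,
//     forgetGateActivationDerivatives, cellCount, cellsPerBlock, clipCellState);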
#include <vector> #define MAX_BLOCKS 256 #define NUM_THREADS 256 #define MAX(a, b) ((a) > (b)) ? (a) : (b) #define MIN(a, b) ((a) < (b)) ? (a) : (b) #define HARDTANH(x) ((x) < (-1.0)) ? (-1.0) : (((x) <= (1.0)) ? (x) : (1.0)) const int WARP_SIZE = 32; // Crude benchmarks suggest 256 is better than 512 and 1024 // TODO: Autotune/use better heuristics, improve speed more. const int MAX_BLOCK_SIZE = 256; static int getGradParamsNumThreads(int batchSize) { //warp per item in a batch, up to a maximum return std::min(batchSize * WARP_SIZE, MAX_BLOCK_SIZE); } int get_blocks(int n) { // return MAX(1, MIN(MAX_BLOCKS, (n - NUM_THREADS + 1) / NUM_THREADS)); return MIN(MAX_BLOCKS, (n - NUM_THREADS + 1) / NUM_THREADS) + 1; } template <typename scalar_t> __global__ void adder_forward_kernel( const scalar_t const *input, const scalar_t const *weight, // const scalar_t const *bias, scalar_t *output, const int num_elem, const int out_channels, const int in_channels, const int IW, const int IH, const int OW, const int OH, const int KW, const int KH, const int SW, const int SH, const int PW, const int PH) { for (int index = blockDim.x * blockIdx.x + threadIdx.x; index < num_elem; index += gridDim.x * blockDim.x) { const int n = index / OW / OH / out_channels; const int m = index / OW / OH % out_channels; const int h = index / OW % OH; const int w = index % OW; const scalar_t *p_weight = weight + m * in_channels * KH * KW; // scalar_t value = bias[m]; scalar_t value = 0; // #pragma unroll for (int cc = 0; cc < in_channels; cc++) { // #pragma unroll const int image_offset0 = (n * in_channels + cc) * IH * IW; for (int kh = 0; kh < KH; kh++) { // #pragma unroll for (int kw = 0; kw < KW; kw++) { const int ih = h * SH - PH + kh; const int iw = w * SW - PW + kw; bool boundary_condition = (ih >= 0) && (ih < IH) && (iw >= 0) && (iw < IW); if (boundary_condition) { // value += input[image_offset0 + ih * IW + iw] * (*p_weight); value -= abs(input[image_offset0 + ih * IW + iw] - (*p_weight)); } else // padded area { value -= abs(*p_weight); } p_weight++; } } } output[index] = value; } } template <typename scalar_t> __global__ void adder_backward_grad_in_kernel( scalar_t *grad_out, scalar_t *input, scalar_t *weight, scalar_t *grad_in, const int num_elem, const int out_channels, const int in_channels, const int IW, const int IH, const int OW, const int OH, const int KW, const int KH, const int SW, const int SH, const int PW, const int PH) { for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < num_elem; index += gridDim.x * blockDim.x) { const int n = index / IW / IH / in_channels; const int c = index / IW / IH % in_channels; const int h = index / IW % IH; const int w = index % IW; scalar_t value = 0; for (int mm = 0; mm < out_channels; mm++) { const int grad_out_offset0 = (n * out_channels + mm) * OH * OW; scalar_t *p_weight = weight + (mm * in_channels + c) * KH * KW; for (int kh = 0; kh < KH; kh++) { for (int kw = 0; kw < KW; kw++) { int oh = h + PH - kh; int ow = w + PW - kw; if ((oh % SH == 0) && (ow % SW == 0)) { const bool boundary_condition = (oh >= 0) && (oh < OH) && (ow >= 0) && (ow < OW); if (boundary_condition) { oh = oh / SH; ow = ow / SW; // value += grad_out[grad_out_offset0 + oh * OW + ow] * (*p_weight); scalar_t ht = HARDTANH(*p_weight - input[index]); value += grad_out[grad_out_offset0 + oh * OW + ow] * ht; } } p_weight++; } } } grad_in[index] = value; } } template <typename scalar_t> __global__ void adder_backward_grad_weight_kernel( scalar_t *grad_out, scalar_t *input, scalar_t *weight, 
scalar_t *grad_weight, const int batch_size, const int out_channels, const int in_channels, const int IW, const int IH, const int OW, const int OH, const int KW, const int KH, const int SW, const int SH, const int PW, const int PH) { SharedMem<scalar_t> smem; int bidx = blockIdx.x; int kW = bidx % KW; int kH = bidx / KW % KH; int ch = bidx / KW / KH % in_channels; int mh = bidx / KW / KH / in_channels; scalar_t grad = 0; const int laneId = threadIdx.x % WARP_SIZE; const int batch = threadIdx.x / WARP_SIZE; const int nwarps = blockDim.x / WARP_SIZE; const int imageElements = OW * OH; for (int batchIdx = batch; batchIdx < batch_size; batchIdx += nwarps) { // Warp-stride loop over elements in a batch item for (int idx = laneId; idx < imageElements; idx += WARP_SIZE) { // Need to calculate the following: batch position, and offset into the gradOutput // in height, and width. We can intuit the corresponding position in the input from // the other parameters we have int go_w_offset = idx % OW; int go_h_offset = (idx / OW); int i_w_offset = go_w_offset * SW + kW - PW; int i_h_offset = go_h_offset * SH + kH - PH; int outputOffset = ((batchIdx * out_channels + mh) * OH) * OW + idx; if (i_w_offset >= 0 && i_h_offset >= 0 && i_w_offset < IW && i_h_offset < IH) { int inputOffset = ((batchIdx * in_channels + ch) * IH + i_h_offset) * IW + i_w_offset; // int outputOffset = ((batchIdx * out_channels + mh) * OH) * OW + idx; // grad += input[inputOffset] * grad_out[outputOffset]; grad += (input[inputOffset] - weight[bidx]) * grad_out[outputOffset]; } else // padded area { grad += - weight[bidx] * grad_out[outputOffset]; } } } __syncthreads(); scalar_t *buf = smem.getPointer(); scalar_t tval = reduceBlock<scalar_t, ReduceAdd<scalar_t>>( buf, blockDim.x, grad, ReduceAdd<scalar_t>(), 0); // After reduction, first thread in the block has the gradient, so its responsible // for writing it to gradWeight if (threadIdx.x == 0) { int weightOffset = kW + (KW * kH) + (KW * KH * ch) + (KW * KH * in_channels * mh); grad_weight[weightOffset] = tval; } } //////////////////////////////////////////////////////////////////////// ////////////////////////////END OF KERNEL/////////////////////////////// //////////////////////////////////////////////////////////////////////// int adder_cuda_forward( const at::Tensor &input, const at::Tensor &weight, // const at::Tensor &bias, at::Tensor &output, int KW, int KH, int SW, int SH, int PW, int PH) { const int batch_size = output.size(0); const int in_channels = input.size(1); const int out_channels = output.size(1); const int IW = input.size(3); const int IH = input.size(2); const int OW = output.size(3); const int OH = output.size(2); const int num_elem = batch_size * out_channels * OH * OW; const int num_blocks = get_blocks(num_elem); AT_DISPATCH_FLOATING_TYPES(output.type(), "adder_cuda_forward", ([&] { adder_forward_kernel<scalar_t><<<num_blocks, NUM_THREADS>>>( input.data<scalar_t>(), weight.data<scalar_t>(), // bias.data<scalar_t>(), output.data<scalar_t>(), num_elem, out_channels, in_channels, IW, IH, OW, OH, KW, KH, SW, SH, PW, PH); })); THCudaCheck(cudaGetLastError()); return 1; } /* scalar_t *grad_out, scalar_t *weight, scalar_t *grad_in, const int num_elem, const int out_channels, const int in_channels, const int IW, const int IH, const int OW, const int OH, const int KW, const int KH, const int SW, const int SH, const int PW, const int PH */ int adder_cuda_backward_grad_in( at::Tensor &grad_out, at::Tensor &input, at::Tensor &weight, at::Tensor &grad_in, int KW, int KH, 
int SW, int SH, int PW, int PH) { const int batch_size = grad_in.size(0); const int in_channels = grad_in.size(1); const int out_channels = grad_out.size(1); const int IW = grad_in.size(3); const int IH = grad_in.size(2); const int OW = grad_out.size(3); const int OH = grad_out.size(2); const int num_elem = batch_size * in_channels * IH * IW; const int num_blocks = get_blocks(num_elem); AT_DISPATCH_FLOATING_TYPES(grad_in.type(), "adder_cuda_backward_grad_in", ([&] { adder_backward_grad_in_kernel<scalar_t><<<num_blocks, NUM_THREADS>>>( grad_out.data<scalar_t>(), input.data<scalar_t>(), weight.data<scalar_t>(), grad_in.data<scalar_t>(), num_elem, out_channels, in_channels, IW, IH, OW, OH, KW, KH, SW, SH, PW, PH); })); THCudaCheck(cudaGetLastError()); return 1; } int adder_cuda_backward_grad_weight( at::Tensor &grad_out, at::Tensor &input, at::Tensor &weight, at::Tensor &grad_weight, int KW, int KH, int SW, int SH, int PW, int PH) { const int batch_size = input.size(0); const int in_channels = input.size(1); const int out_channels = grad_out.size(1); const int IW = input.size(3); const int IH = input.size(2); const int OW = grad_out.size(3); const int OH = grad_out.size(2); int blocks = out_channels * in_channels * KH * KW; // Make sure we have enough threads to perform the reduction, and use this number // to create the shared memory size for the reduction dim3 grid(blocks); dim3 block(getGradParamsNumThreads(batch_size)); // int smem = block.x * sizeof(accreal); AT_DISPATCH_FLOATING_TYPES(grad_weight.type(), "adder_cuda_backward_grad_weight", ([&] { adder_backward_grad_weight_kernel<scalar_t><<<grid, block, block.x * sizeof(scalar_t)>>>( grad_out.data<scalar_t>(), input.data<scalar_t>(), weight.data<scalar_t>(), grad_weight.data<scalar_t>(), batch_size, out_channels, in_channels, IW, IH, OW, OH, KW, KH, SW, SH, PW, PH); })); THCudaCheck(cudaGetLastError()); return 1; } /* scalar_t *grad_out, scalar_t *input, scalar_t *grad_weight, const int batch_size, const int out_channels, const int in_channels, const int IW, const int IH, const int OW, const int OH, const int KW, const int KH, const int SW, const int SH, const int PW, const int PH */
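// --- Illustrative sketch, not part of the original source ---
// A hypothetical host wrapper showing how the forward entry point above could be
// driven from a PyTorch extension: allocate the output with the usual
// cross-correlation geometry and hand everything to adder_cuda_forward.
// Assumes the ATen headers this translation unit already relies on; the wrapper
// name is a placeholder.
at::Tensor adder2d_forward_sketch(const at::Tensor& input,   // (N, C_in, IH, IW)
                                  const at::Tensor& weight,  // (C_out, C_in, KH, KW)
                                  int SW, int SH, int PW, int PH) {
  const int N = input.size(0);
  const int C_out = weight.size(0);
  const int KH = weight.size(2), KW = weight.size(3);
  const int IH = input.size(2), IW = input.size(3);
  const int OH = (IH + 2 * PH - KH) / SH + 1;   // standard output geometry
  const int OW = (IW + 2 * PW - KW) / SW + 1;
  at::Tensor output = at::empty({N, C_out, OH, OW}, input.options());
  adder_cuda_forward(input, weight, output, KW, KH, SW, SH, PW, PH);
  return output;
}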
#define CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ if (error != cudaSuccess) { \ std::cout << cudaGetErrorString(error) << std::endl; \ } \ } while (0) #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) # define M_PI 3.14159265358979323846 const int block_num = 512; #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) const int threadsPerBlock = sizeof(unsigned long long) * 8; __device__ static int point_inside_box_3d(float x, float y, float z, float cx, float by, float cz, float l, float h, float w, float ry, float max_distance){ float cos_ry, sin_ry; float canonical_x, canonical_z; int inside; if ((fabsf(x - cx) > max_distance) || (y > by) || ((by - y) > h) || (fabsf(z - cz) > max_distance)){ return 0; } cos_ry = cos(ry); sin_ry = sin(ry); canonical_x = (x - cx) * cos_ry - (z - cz) * sin_ry; canonical_z = (x - cx) * sin_ry + (z - cz) * cos_ry; inside = (canonical_x >= -l / 2.0) & (canonical_x <= l / 2.0) & (canonical_z >= -w / 2.0) & (canonical_z <= w / 2.0); return inside; } /* query boxes 3d points */ // input: nsample (1), xyz (b,n,3), proposals (b,m,7) // output: idx (b,m,nsample), pts_cnt (b,m) __global__ void query_boxes_3d_points_gpu(int b, int n, int m, int nsample, const float *xyz, const float *proposals, int *idx, int *pts_cnt) { int total_idx = b * m; CUDA_1D_KERNEL_LOOP(point_inds, total_idx){ int batch_index = point_inds / m; const float* cur_xyz; const float* cur_proposal; cur_xyz = xyz + n*3*batch_index; cur_proposal = proposals + point_inds * 7; int* cur_idx; int* cur_pts_cnt; cur_idx = idx + nsample*point_inds; cur_pts_cnt = pts_cnt + point_inds; // counting how many unique points selected in local region float cx= cur_proposal[0]; float by= cur_proposal[1]; float cz= cur_proposal[2]; float l = cur_proposal[3]; float h = cur_proposal[4]; float w = cur_proposal[5]; float ry= cur_proposal[6]; float max_distance = max(sqrtf((l / 2.) * (l / 2.)+(w / 2.)*(w / 2.)),1e-20f); float x, y, z; int inside; int cnt = 0; for (int k=0;k<n;++k) { if (cnt == nsample) break; // only pick the FIRST nsample points in the ball x=cur_xyz[k*3+0]; y=cur_xyz[k*3+1]; z=cur_xyz[k*3+2]; inside = point_inside_box_3d(x, y, z, cx, by, cz, l, h, w, ry, max_distance); if (inside) { if (cnt==0) { for (int l=0;l<nsample;++l) cur_idx[l] = k; } cur_idx[cnt] = k; cnt+=1; } } cur_pts_cnt[0] = cnt; } } /* query boxes 3d mask */ // input: xyz (b,n,3), boxes_3d (b,m,7) // output: mask (b,m,n) __global__ void query_boxes_3d_mask_gpu(int b, int n, int m, const float *xyz, const float *boxes_3d, int *mask){ int total_idx = b * m * n; CUDA_1D_KERNEL_LOOP(point_inds, total_idx){ int batch_index = point_inds / (m * n); int box_index = point_inds / n; int point_index = point_inds % n; const float* cur_xyz; const float* cur_boxes_3d; cur_xyz = xyz + batch_index * n * 3 + point_index * 3; cur_boxes_3d = boxes_3d + box_index * 7; int* cur_mask; cur_mask = mask + point_inds; float cx= cur_boxes_3d[0]; float by= cur_boxes_3d[1]; float cz= cur_boxes_3d[2]; float l = cur_boxes_3d[3]; float h = cur_boxes_3d[4]; float w = cur_boxes_3d[5]; float ry= cur_boxes_3d[6]; float max_distance = max(sqrtf((l / 2.) 
* (l / 2.)+(w / 2.)*(w / 2.)),1e-20f); float x = cur_xyz[0]; float y = cur_xyz[1]; float z = cur_xyz[2]; int inside; inside = point_inside_box_3d(x, y, z, cx, by, cz, l, h, w, ry, max_distance); cur_mask[0] = inside; } } /* query points iou */ // input: xyz (b,n,3), anchors_3d (b,anchors_num,7), gt_boxes_3d (b, gt_num, 7) // input: iou_matrix (b, anchors_num, gt_num) // output: iou_points(b, anchors_num, gt_num) __global__ void query_points_iou_gpu(int b, int n, int anchors_num, int gt_num, const float* xyz, const float* anchors_3d, const float* gt_boxes_3d, const float* iou_matrix, float* iou_points){ int total_idx = b * anchors_num * gt_num; CUDA_1D_KERNEL_LOOP(point_inds, total_idx){ float iou_value = iou_matrix[point_inds]; if (iou_value < 1e-3f){ // if no overlaps around two boxes_3d, then directly return 0 iou_points[point_inds] = 0.; continue; } // has overlaps, then calculate PointIoU int batch_index = point_inds / (anchors_num * gt_num); int anchor_index = point_inds / gt_num; int gt_index = point_inds % gt_num; const float* cur_xyz; const float* cur_anchors_3d; const float* cur_gt_boxes_3d; cur_xyz = xyz + batch_index * n * 3; cur_anchors_3d = anchors_3d + anchor_index * 7; cur_gt_boxes_3d = gt_boxes_3d + batch_index * gt_num * 7 + gt_index * 7; float* cur_iou_points; cur_iou_points = iou_points + point_inds; int in = 0, un = 0; float gt_boxes_cx= cur_gt_boxes_3d[0]; float gt_boxes_by= cur_gt_boxes_3d[1]; float gt_boxes_cz= cur_gt_boxes_3d[2]; float gt_boxes_l = cur_gt_boxes_3d[3]; float gt_boxes_h = cur_gt_boxes_3d[4]; float gt_boxes_w = cur_gt_boxes_3d[5]; float gt_boxes_ry= cur_gt_boxes_3d[6]; float gt_boxes_max_distance = max(sqrtf((gt_boxes_l / 2.) * (gt_boxes_l / 2.) + (gt_boxes_w / 2.) * (gt_boxes_w / 2.)),1e-20f); float anchors_cx= cur_anchors_3d[0]; float anchors_by= cur_anchors_3d[1]; float anchors_cz= cur_anchors_3d[2]; float anchors_l = cur_anchors_3d[3]; float anchors_h = cur_anchors_3d[4]; float anchors_w = cur_anchors_3d[5]; float anchors_ry= cur_anchors_3d[6]; float anchors_max_distance = max(sqrtf((anchors_l / 2.) * (anchors_l / 2.) + (anchors_w / 2.) 
* (anchors_w / 2.)),1e-20f); float x, y, z; int inside_anchors, inside_gt; for (int k=0;k<n;++k) { x=cur_xyz[k*3+0]; y=cur_xyz[k*3+1]; z=cur_xyz[k*3+2]; inside_anchors = point_inside_box_3d(x, y, z, anchors_cx, anchors_by, anchors_cz, anchors_l, anchors_h, anchors_w, anchors_ry, anchors_max_distance); inside_gt = point_inside_box_3d(x, y, z, gt_boxes_cx, gt_boxes_by, gt_boxes_cz, gt_boxes_l, gt_boxes_h, gt_boxes_w, gt_boxes_ry, gt_boxes_max_distance); un += (inside_gt | inside_anchors); in += (inside_gt & inside_anchors); } un = max(un, 1); cur_iou_points[0] = float(in) / float(un); } } // input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3) // output: idx (b,m,nsample), pts_cnt (b,m) __global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) { int total_idx = b * m; CUDA_1D_KERNEL_LOOP(point_inds, total_idx){ int batch_index = point_inds / m; const float* cur_xyz1; const float* cur_xyz2; cur_xyz1 = xyz1 + n*3*batch_index; cur_xyz2 = xyz2 + point_inds * 3; int* cur_idx; int* cur_pts_cnt; cur_idx = idx + nsample*point_inds; cur_pts_cnt = pts_cnt + point_inds; // counting how many unique points selected in local region float x2=cur_xyz2[0]; float y2=cur_xyz2[1]; float z2=cur_xyz2[2]; float x1, y1, z1, d; int cnt = 0; for (int k=0;k<n;++k) { if (cnt == nsample) break; // only pick the FIRST nsample points in the ball x1=cur_xyz1[k*3+0]; y1=cur_xyz1[k*3+1]; z1=cur_xyz1[k*3+2]; d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f); if (d<radius) { if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices for (int l=0;l<nsample;++l) cur_idx[l] = k; } cur_idx[cnt] = k; cnt+=1; } } cur_pts_cnt[0] = cnt; } } // input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3), sort_idx (b, m, n) // output: idx (b,m,nsample), pts_cnt (b,m) __global__ void query_ball_point_withidx_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, const int* sort_idx, int *idx, int *pts_cnt) { int total_idx = b * m; CUDA_1D_KERNEL_LOOP(point_inds, total_idx){ int batch_index = point_inds / m; const float* cur_xyz1; const float* cur_xyz2; const int* cur_sort_idx; cur_xyz1 = xyz1 + n*3*batch_index; cur_xyz2 = xyz2 + point_inds * 3; cur_sort_idx = sort_idx + point_inds * n; int* cur_idx; int* cur_pts_cnt; cur_idx = idx + nsample*point_inds; cur_pts_cnt = pts_cnt + point_inds; // counting how many unique points selected in local region float x2=cur_xyz2[0]; float y2=cur_xyz2[1]; float z2=cur_xyz2[2]; float x1, y1, z1, d; int cnt = 0; int k; for (int i=0;i<n;++i) { if (cnt == nsample) break; // only pick the FIRST nsample points in the ball k = cur_sort_idx[i]; x1=cur_xyz1[k*3+0]; y1=cur_xyz1[k*3+1]; z1=cur_xyz1[k*3+2]; d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f); if (d<radius) { if (cnt==0) { // set ALL indices to k, s.t. 
if there are less points in ball than nsample, we still have valid (repeating) indices for (int l=0;l<nsample;++l) cur_idx[l] = k; } cur_idx[cnt] = k; cnt+=1; } } cur_pts_cnt[0] = cnt; } } // input: min_radius (1), max_radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3) // output: idx (b,m,nsample), pts_cnt (b,m) __global__ void query_ball_point_dilated_gpu(int b, int n, int m, float min_radius, float max_radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) { int total_idx = b * m; CUDA_1D_KERNEL_LOOP(point_inds, total_idx){ int batch_index = point_inds / m; const float* cur_xyz1; const float* cur_xyz2; cur_xyz1 = xyz1 + n*3*batch_index; cur_xyz2 = xyz2 + point_inds * 3; int* cur_idx; int* cur_pts_cnt; cur_idx = idx + nsample*point_inds; cur_pts_cnt = pts_cnt + point_inds; // counting how many unique points selected in local region float x2=cur_xyz2[0]; float y2=cur_xyz2[1]; float z2=cur_xyz2[2]; float x1, y1, z1, d; int cnt = 0; for (int k=0;k<n;++k) { if (cnt == nsample) break; // only pick the FIRST nsample points in the ball x1=cur_xyz1[k*3+0]; y1=cur_xyz1[k*3+1]; z1=cur_xyz1[k*3+2]; d=sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)); if (d == 0){ // x2, y2, z2: set all indices to k if (cnt == 0){ for (int l=0;l<nsample;++l) cur_idx[l] = k; } cur_idx[cnt] = k; cnt += 1; } else if (d >= min_radius && d < max_radius) { if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices for (int l=0;l<nsample;++l) cur_idx[l] = k; } cur_idx[cnt] = k; cnt+=1; } } cur_pts_cnt[0] = cnt; } } // input: points (b,n,c), idx (b,m,nsample) // output: out (b,m,nsample,c) __global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) { int total_idx = b * m * nsample * c; CUDA_1D_KERNEL_LOOP(point_inds, total_idx){ int batch_inds = point_inds / (m * nsample * c); int idx_inds = point_inds / c; int cur_channel = point_inds % c; const float* cur_points = points + batch_inds * n * c; int cur_idx = idx[idx_inds]; float *cur_out = out + point_inds; if (cur_idx == -1){ cur_out[0] = float(0); } else{ cur_out[0] = cur_points[cur_idx * c + cur_channel]; } } } // input: grad_out (b,m,nsample,c), idx (b,m,nsample), // output: grad_points (b,n,c) __global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points) { int total_idx = b * m * nsample * c; CUDA_1D_KERNEL_LOOP(point_inds, total_idx){ int batch_index = point_inds / (m * nsample * c); int idx_inds = point_inds / c; int cur_channel = point_inds % c; const float* cur_grad_out = grad_out + point_inds; int cur_idx = idx[idx_inds]; float* cur_grad_points = grad_points + batch_index * n * c; if (cur_idx != -1){ atomicAdd(&cur_grad_points[cur_idx * c + cur_channel], cur_grad_out[0]); } } } // input: k (1), distance matrix dist (b,m,n) // output: idx (b,m,n), dist_out (b,m,n) // only the top k results within n are useful __global__ void selection_sort_gpu(int b, int n, int m, int k, const float *dist, int *outi, float *out) { int batch_index = blockIdx.x; dist+=m*n*batch_index; outi+=m*n*batch_index; out+=m*n*batch_index; int index = threadIdx.x; int stride = blockDim.x; // copy from dist to dist_out for (int j=index;j<m;j+=stride) { for (int s=0;s<n;++s) { out[j*n+s] = dist[j*n+s]; outi[j*n+s] = s; } } float *p_dist; for (int j=index;j<m;j+=stride) { p_dist = out+j*n; // selection sort for the first k elements for (int s=0;s<k;++s) { int 
min=s; // find the min for (int t=s+1;t<n;++t) { if (p_dist[t]<p_dist[min]) { min = t; } } // swap min-th and i-th element if (min!=s) { float tmp = p_dist[min]; p_dist[min] = p_dist[s]; p_dist[s] = tmp; int tmpi = outi[j*n+min]; outi[j*n+min] = outi[j*n+s]; outi[j*n+s] = tmpi; } } } } void queryBoxes3dPointsLauncher(int b, int n, int m, int nsample, const float *xyz, const float *proposals, int *idx, int *pts_cnt){ query_boxes_3d_points_gpu<<<block_num, threadsPerBlock>>>(b,n,m,nsample,xyz,proposals,idx,pts_cnt); } void queryBoxes3dMaskLauncher(int b, int n, int m, const float *xyz, const float *boxes_3d, int *mask){ query_boxes_3d_mask_gpu<<<block_num, threadsPerBlock>>>(b,n,m,xyz,boxes_3d,mask); } void queryPointsIouLauncher(int b, int n, int anchors_num, int gt_num, const float* xyz, const float* anchors_3d, const float* gt_boxes_3d, const float* iou_matrix, float* iou_points){ query_points_iou_gpu<<<block_num, threadsPerBlock>>>(b,n,anchors_num,gt_num, xyz, anchors_3d, gt_boxes_3d, iou_matrix, iou_points); } void queryBallPointLauncher(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) { query_ball_point_gpu<<<block_num,threadsPerBlock>>>(b,n,m,radius,nsample,xyz1,xyz2,idx,pts_cnt); //cudaDeviceSynchronize(); } void queryBallPointDilatedLauncher(int b, int n, int m, float min_radius, float max_radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) { query_ball_point_dilated_gpu<<<block_num,threadsPerBlock>>>(b,n,m,min_radius,max_radius,nsample,xyz1,xyz2,idx,pts_cnt); //cudaDeviceSynchronize(); } void queryBallPointWithidxLauncher(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, const int* sort_idx, int *idx, int *pts_cnt){ query_ball_point_withidx_gpu<<<block_num,threadsPerBlock>>>(b,n,m,radius,nsample,xyz1,xyz2,sort_idx,idx,pts_cnt); } void selectionSortLauncher(int b, int n, int m, int k, const float *dist, int *outi, float *out) { selection_sort_gpu<<<b,256>>>(b,n,m,k,dist,outi,out); //cudaDeviceSynchronize(); } void groupPointLauncher(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out){ group_point_gpu<<<block_num,threadsPerBlock>>>(b,n,c,m,nsample,points,idx,out); //cudaDeviceSynchronize(); } void groupPointGradLauncher(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points){ group_point_grad_gpu<<<block_num,threadsPerBlock>>>(b,n,c,m,nsample,grad_out,idx,grad_points); //group_point_grad_gpu<<<1,1>>>(b,n,c,m,nsample,grad_out,idx,grad_points); //cudaDeviceSynchronize(); }
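// --- Illustrative sketch, not part of the original source ---
// The classic "query & group" step built from the launchers above: find up to
// nsample neighbours inside `radius` of each query point, then gather their
// features. All device pointers are assumed to be pre-allocated with the shapes
// given in the kernel comments; the wrapper name is a placeholder.
void query_and_group_sketch(int b, int n, int m, int c, float radius, int nsample,
                            const float* d_xyz,      // (b, n, 3) source points
                            const float* d_new_xyz,  // (b, m, 3) query centers
                            const float* d_points,   // (b, n, c) per-point features
                            int* d_idx,              // (b, m, nsample)
                            int* d_pts_cnt,          // (b, m)
                            float* d_grouped) {      // (b, m, nsample, c)
    queryBallPointLauncher(b, n, m, radius, nsample, d_xyz, d_new_xyz, d_idx, d_pts_cnt);
    groupPointLauncher(b, n, c, m, nsample, d_points, d_idx, d_grouped);
}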
namespace anakin { namespace saber { static void cudnn_gemm(cublasHandle_t handle, const bool TransA, const bool TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (!TransA/* == CblasNoTrans*/) ? K : M; int ldb = (!TransB/* == CblasNoTrans*/) ? N : K; cublasOperation_t cuTransA = (!TransA/* == CblasNoTrans*/) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (!TransB/* == CblasNoTrans*/) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemm(handle, cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <typename Dtype> __global__ void cal_reset_kernel(Dtype* w_x_r,Dtype* w_h_r,const Dtype* b_r,int hidden_size,int batch_size, Dtype* output, const Dtype* hidden_pre,const ActiveType gate_activity) { const int thread_id = blockIdx.x*blockDim.x+threadIdx.x; const int batch_id = thread_id/hidden_size; const int index=thread_id%hidden_size; if (index < hidden_size&&batch_id<batch_size) { int w_base_index = batch_id * hidden_size * 3 + index; int u_base_index = batch_id * hidden_size * 2 + index; int h_base_index = batch_id * hidden_size + index; Dtype hidden_pre_value = hidden_pre[h_base_index]; Dtype before_act_r = w_x_r[w_base_index] + w_h_r[u_base_index] + b_r[index]; Dtype act_r = activate_cuda_float(before_act_r,gate_activity); output[h_base_index] = hidden_pre_value * act_r; } }; template <typename Dtype> __global__ void cal_final_kernel( Dtype* w_x_z, Dtype* w_x_o,Dtype* w_h_z,const Dtype* b_z, const Dtype* b_o, int hidden_size, int batch_size,Dtype* output, const Dtype* hidden_pre,const Dtype* w_h_o, const ActiveType gate_activity,const ActiveType h_activity) { const int thread_id = blockIdx.x*blockDim.x+threadIdx.x; const int batch_id = thread_id/hidden_size; const int index=thread_id%hidden_size; if (index < hidden_size&&batch_id<batch_size) { int w_base_index = batch_id* hidden_size * 3 + index; int u_base_index = batch_id* hidden_size * 2 + index; int h_base_index = batch_id* hidden_size + index; Dtype hidden_pre_value = hidden_pre[h_base_index]; Dtype before_act_z = w_x_z[w_base_index] + w_h_z[u_base_index] + b_z[index]; Dtype act_z = activate_cuda_float(before_act_z, gate_activity); Dtype before_act_h = w_x_o[w_base_index] + w_h_o[h_base_index] + b_o[index]; Dtype acted = activate_cuda_float(before_act_h, h_activity); output[h_base_index] = (static_cast<Dtype>(1.0) - act_z) * hidden_pre_value + act_z * acted; } } template <typename Dtype> __global__ void cal_cudnn_kernel( const Dtype* w_x_r,const Dtype* w_x_z, const Dtype* w_x_o, const Dtype* w_h_r,const Dtype* w_h_z,const Dtype* w_h_o, const Dtype* b_r,const Dtype* b_z, const Dtype* b_o, int hidden_size, int batch_size,Dtype* output, const Dtype* hidden_pre) { const int thread_id = blockIdx.x*blockDim.x+threadIdx.x; const int batch_id = thread_id/hidden_size; const int index=thread_id%hidden_size; if (index < hidden_size&&batch_id<batch_size) { int w_base_index = batch_id* hidden_size * 3 + index; int h_base_index = batch_id* hidden_size + index; Dtype hidden_pre_value = hidden_pre[h_base_index]; Dtype r= Sigmoid(w_x_r[w_base_index] + w_h_r[w_base_index] + b_r[index]); Dtype z = Sigmoid(w_x_z[w_base_index] + w_h_z[w_base_index] + b_z[index]); Dtype _h = Tanh(w_x_o[w_base_index] + w_h_o[w_base_index]*r+ b_o[index]); output[h_base_index] = (static_cast<Dtype>(1.0) - z) * _h + z * hidden_pre_value; } } template<> SaberStatus SaberGru<NV, AK_FLOAT>::dispatch(\ const 
std::vector<OpTensor*>& inputs, std::vector<OpTensor*>& outputs, GruParam <NV>& param) { // CHECK_GE(param.formula,GRU_ORIGIN)<<"ONLY SUPPORT GRU_ORIGIN NOW"; OpTensor* x = inputs[0]; std::vector<std::vector<int>> offset_vec_vec = x->get_seq_offset(); std::vector<int> offset = offset_vec_vec[offset_vec_vec.size()-1]; const OpDataType* x_data = static_cast<const OpDataType*>(x->data()); const OpDataType* h; OpTensor* dout = outputs[0]; OpDataType* dout_data = static_cast<OpDataType*>(dout->mutable_data()); const OpDataType* weights_i2h=static_cast<const OpDataType*>(param.weight()->data()); const OpDataType* weights_h2h=weights_i2h+3*_hidden_size*_word_size; const OpDataType* weights_bias=static_cast<const OpDataType*>(param.bias()->data()); int batch_size = offset.size() - 1; int seq_sum = x->num(); bool is_batched = offset.size() > 2; int o_offset = 0; int r_offset = 1; int z_offset = 2; utils::try_expand_tensor(_temp_map_dev,seq_sum); is_batched = _seq_util.get_sorted_map(offset, this->_ctx->get_compute_stream()); std::vector<int> emit_offset_vec=_seq_util.get_emit_offset_vec(); int emit_length = emit_offset_vec.size()-1; if (is_batched) { Shape seq_shape({1, 1, seq_sum, _word_size}); utils::try_expand_tensor(_temp_tensor_in,seq_shape); Shape seq_out_shape({1, 1, seq_sum, _hidden_size}); utils::try_expand_tensor(_temp_tensor_out,seq_out_shape); _seq_util.seq_2_sorted_seq(x_data, static_cast<OpDataType*>(_temp_tensor_in.mutable_data()), _word_size, _ctx->get_compute_stream()); x_data = static_cast<const OpDataType*>(_temp_tensor_in.data()); dout_data = static_cast<OpDataType*>(_temp_tensor_out.mutable_data()); } Shape shape_wx({seq_sum, 1, 3, _hidden_size}); utils::try_expand_tensor(_temp_wx,shape_wx); Shape shape_wh({1, batch_size, 2, _hidden_size}); utils::try_expand_tensor(_temp_wh,shape_wh); Shape shape_whr({1, batch_size, 1, _hidden_size}); utils::try_expand_tensor(_temp_whr,shape_whr); // _gemm_wx(seq_sum, 3 * _hidden_size, _word_size, 1.f, x_data, 0.f, weights_i2h, // static_cast<OpDataType*>(_temp_wx.mutable_data()), _ctx->get_compute_stream()); cudnn_gemm(_handle,false,false,seq_sum, 3 * _hidden_size, _word_size,1.f, x_data,weights_i2h,0.f,static_cast<OpDataType*>(_temp_wx.mutable_data())); const OpDataType* b_r = weights_bias + r_offset * _hidden_size; const OpDataType* b_z = weights_bias + z_offset * _hidden_size; const OpDataType* b_o = weights_bias + o_offset * _hidden_size; if (inputs.size() == 1) { if (_temp_zero.valid_size() < batch_size * _hidden_size) { utils::try_expand_tensor(_temp_zero,batch_size * _hidden_size); CUDA_CHECK(cudaMemsetAsync(_temp_zero.mutable_data(), 0, sizeof(OpDataType)*batch_size * _hidden_size, _ctx->get_compute_stream())); } h = static_cast<const OpDataType*>(_temp_zero.data()); } else { h = static_cast<const OpDataType*>(inputs[1]->data()); } for (int word_id = 0; word_id < emit_length; word_id++) { int real_word_id = word_id; int last_word_id = word_id - 1; if (param.is_reverse && batch_size == 1) { real_word_id = emit_length - word_id - 1; last_word_id = real_word_id + 1; } int emit_word_id_start = emit_offset_vec[real_word_id]; int emit_word_id_end = emit_offset_vec[real_word_id + 1]; int emit_word_length = emit_word_id_end - emit_word_id_start; const OpDataType* hidden_in; OpDataType* hidden_out = dout_data + emit_offset_vec[real_word_id] * _hidden_size; if (word_id == 0) { hidden_in = h; } else { hidden_in = dout_data + emit_offset_vec[last_word_id] * _hidden_size; } OpDataType* w_x_r = static_cast<OpDataType*>(_temp_wx.mutable_data()) + 
r_offset * _hidden_size + emit_word_id_start * _hidden_size * 3; OpDataType* w_x_z = static_cast<OpDataType*>(_temp_wx.mutable_data()) + z_offset * _hidden_size + emit_word_id_start * _hidden_size * 3; OpDataType* w_x_o = static_cast<OpDataType*>(_temp_wx.mutable_data()) + o_offset * _hidden_size + emit_word_id_start * _hidden_size * 3; if(param.formula==GRU_ORIGIN) { OpDataType* w_h_r = static_cast<OpDataType*>(_temp_wh.mutable_data()) + 0 * _hidden_size; OpDataType* w_h_z = static_cast<OpDataType*>(_temp_wh.mutable_data()) + 1 * _hidden_size; // _gemm_wh_2(emit_word_length, 2 * _hidden_size, _hidden_size, 1.f, hidden_in, 0.f, // weights_h2h + _hidden_size * _hidden_size, static_cast<OpDataType *>( _temp_wh.mutable_data()), // _ctx->get_compute_stream()); cudnn_gemm(_handle,false,false,emit_word_length, 2 * _hidden_size, _hidden_size, 1.f, hidden_in, weights_h2h + _hidden_size * _hidden_size,0.f, static_cast<OpDataType *>( _temp_wh.mutable_data())); const OpDataType *w_o = weights_h2h; const int block_dim = 512; const int grid_dim = utils::div_up(emit_word_length * _hidden_size, block_dim); cal_reset_kernel << < grid_dim, block_dim, 0 , _ctx->get_compute_stream() >> > ( w_x_r, w_h_r , b_r, _hidden_size, emit_word_length, hidden_out, hidden_in, param.gate_activity); // _gemm_wh_o(emit_word_length, _hidden_size, _hidden_size, 1.f, hidden_out, 0.f, w_o, // static_cast<OpDataType *>(_temp_whr.mutable_data()), _ctx->get_compute_stream()); cudnn_gemm(_handle,false,false,emit_word_length, _hidden_size, _hidden_size,1.f,hidden_out, w_o,0.f,static_cast<OpDataType *>(_temp_whr.mutable_data())); cal_final_kernel << < grid_dim, block_dim, 0 , _ctx->get_compute_stream() >> > ( w_x_z, w_x_o, w_h_z, b_z, b_o, _hidden_size, emit_word_length, hidden_out, hidden_in, static_cast<const OpDataType *>(_temp_whr.data()), param.gate_activity, param.h_activity); } else{ OpDataType* w_h_r = static_cast<OpDataType*>(_temp_wh.mutable_data()) + r_offset * _hidden_size; OpDataType* w_h_z = static_cast<OpDataType*>(_temp_wh.mutable_data()) + z_offset * _hidden_size; OpDataType* w_h_o = static_cast<OpDataType*>(_temp_wh.mutable_data()) + o_offset * _hidden_size; // _gemm_wh_2(emit_word_length, 3 * _hidden_size, _hidden_size, 1.f, hidden_in, 0.f, // static_cast<const OpDataType *>(_temp_weights_h2h.data()), static_cast<OpDataType *>( _temp_wh.mutable_data()), // _ctx->get_compute_stream()); cudnn_gemm(_handle,false,false,emit_word_length, 3 * _hidden_size, _hidden_size, 1.f,hidden_in, static_cast<const OpDataType *>(_temp_weights_h2h.data()),0.f,static_cast<OpDataType *>( _temp_wh.mutable_data())); const OpDataType *w_o = weights_h2h; const int block_dim = 512; const int grid_dim = utils::div_up(emit_word_length * _hidden_size, block_dim); cal_cudnn_kernel<< < grid_dim, block_dim, 0 , _ctx->get_compute_stream() >> >( w_x_r, w_x_z, w_x_o, w_h_r, w_h_z, w_h_o,b_r, b_z, b_o,_hidden_size, emit_word_length, hidden_out, hidden_in); } } if (is_batched) { _seq_util.sorted_seq_2_seq(static_cast<const OpDataType*>(_temp_tensor_out.data()), static_cast<OpDataType*>(dout->mutable_data()), _hidden_size, _ctx->get_compute_stream()); } outputs[0]->set_seq_offset(inputs[0]->get_seq_offset()); return SaberSuccess; } template class SaberGru<NV, AK_FLOAT>; DEFINE_OP_TEMPLATE(SaberGru, GruParam, NV, AK_HALF); DEFINE_OP_TEMPLATE(SaberGru, GruParam, NV, AK_INT8); } }
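// ---------------------------------------------------------------------------
// Added illustrative note (not part of the original Anakin source): the
// cudnn_gemm helper above swaps its operands because cuBLAS assumes
// column-major (Fortran) storage. Computing the row-major product C = A * B
// is done by asking cuBLAS for C^T = B^T * A^T, which in memory is exactly
// the row-major C. The minimal, self-contained sketch below demonstrates that
// mapping; the matrix sizes and values are made up for the demo, and only
// cublasSgemm / cudaMalloc / cudaMemcpy are real API calls.
// Build (assumed): nvcc gemm_demo.cu -lcublas
#include <cstdio>
#include <cuda_runtime.h>
#include <cublas_v2.h>

int main() {
    const int M = 2, K = 3, N = 2;                 // C(MxN) = A(MxK) * B(KxN), row-major
    const float A[M * K] = {1, 2, 3, 4, 5, 6};
    const float B[K * N] = {7, 8, 9, 10, 11, 12};
    float C[M * N] = {0};

    float *dA, *dB, *dC;
    cudaMalloc(&dA, sizeof(A));
    cudaMalloc(&dB, sizeof(B));
    cudaMalloc(&dC, sizeof(C));
    cudaMemcpy(dA, A, sizeof(A), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, B, sizeof(B), cudaMemcpyHostToDevice);

    cublasHandle_t handle;
    cublasCreate(&handle);
    const float alpha = 1.f, beta = 0.f;
    // Same trick as cudnn_gemm: pass B first, A second, and the dimensions as
    // (N, M, K), so the column-major result cuBLAS writes is the row-major C.
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                N, M, K, &alpha, dB, N, dA, K, &beta, dC, N);

    cudaMemcpy(C, dC, sizeof(C), cudaMemcpyDeviceToHost);
    printf("C = [%g %g; %g %g]\n", C[0], C[1], C[2], C[3]);  // expected: [58 64; 139 154]

    cublasDestroy(handle);
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    return 0;
}
// ---------------------------------------------------------------------------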
typedef double Matches[][3]; /* SFILE_END */ //============================================================================= // Various operators on the polynomial classes //============================================================================= // const int poly3_1::size_ = 3; // const int poly3_2::size_ = 6; // const int poly3_3::size_ = 10; __host__ __device__ poly3_2 poly3_1::operator * (poly3_1 p2) { poly3_1 &p1 = *this; poly3_2 prod; prod[zz_] = p1[z_]*p2[z_]; prod[zx_] = p1[z_]*p2[x_] + p1[x_]*p2[z_]; prod[zy_] = p1[z_]*p2[y_] + p1[y_]*p2[z_]; prod[xx_] = p1[x_]*p2[x_]; prod[xy_] = p1[x_]*p2[y_] + p1[y_]*p2[x_]; prod[yy_] = p1[y_]*p2[y_]; return prod; } __host__ __device__ poly3_3 poly3_2::operator * (poly3_1 p2) { poly3_2 &p1 = *this; poly3_3 prod; prod[zzz_] = p1[zz_]*p2[z_]; prod[zzx_] = p1[zz_]*p2[x_] + p1[zx_]*p2[z_]; prod[zzy_] = p1[zz_]*p2[y_] + p1[zy_]*p2[z_]; prod[zxx_] = p1[zx_]*p2[x_] + p1[xx_]*p2[z_]; prod[zxy_] = p1[zx_]*p2[y_] + p1[zy_]*p2[x_] + p1[xy_]*p2[z_]; prod[zyy_] = p1[zy_]*p2[y_] + p1[yy_]*p2[z_]; prod[xxx_] = p1[xx_]*p2[x_]; prod[xxy_] = p1[xx_]*p2[y_] + p1[xy_]*p2[x_]; prod[xyy_] = p1[xy_]*p2[y_] + p1[yy_]*p2[x_]; prod[yyy_] = p1[yy_]*p2[y_]; #ifdef RH_DEBUG printf ("In poly3_2 * poly3_1\n"); printf ("poly3_2 = \n"); p1.print(); printf ("poly3_1 = \n"); p2.print(); printf ("poly3_2 * poly3_2 = \n"); prod.print(); #endif return prod; } __host__ __device__ poly3_3 poly3_3::operator * (double k) { poly3_3 &p1 = *this; poly3_3 prod; for (int i=0; i<size_(); i++) prod[i] = p1[i]*k; return prod; } __host__ __device__ poly3_3 poly3_3::operator + (poly3_3 p2) { poly3_3 &p1 = *this; poly3_3 sum; for (int i=0; i<size_(); i++) sum[i] = p1[i] + p2[i]; return sum; } __host__ __device__ void poly3_3::operator += (poly3_3 p2) { poly3_3 &p1 = *this; for (int i=0; i<size_(); i++) p1[i] += p2[i]; } __host__ __device__ poly3_3 poly3_3::operator - (poly3_3 p2) { poly3_3 &p1 = *this; poly3_3 dif; for (int i=0; i<size_(); i++) dif[i] = p1[i] - p2[i]; return dif; } __host__ __device__ poly3_2 poly3_2::operator + (poly3_2 p2) { poly3_2 &p1 = *this; poly3_2 sum; for (int i=0; i<size_(); i++) sum[i] = p1[i] + p2[i]; return sum; } __host__ __device__ void poly3_2::operator += (poly3_2 p2) { poly3_2 &p1 = *this; for (int i=0; i<size_(); i++) p1[i] += p2[i]; } __host__ __device__ poly3_2 poly3_2::operator - (poly3_2 p2) { poly3_2 &p1 = *this; poly3_2 dif; for (int i=0; i<size_(); i++) dif[i] = p1[i] - p2[i]; return dif; } __host__ __device__ poly3_1 poly3_1::operator + (poly3_1 p2) { poly3_1 &p1 = *this; poly3_1 sum; for (int i=0; i<size_(); i++) sum[i] = p1[i] + p2[i]; return sum; } __host__ __device__ poly3_1 poly3_1::operator - (poly3_1 p2) { poly3_1 &p1 = *this; poly3_1 dif; for (int i=0; i<size_(); i++) dif[i] = p1[i] - p2[i]; return dif; } //============================================================================= __host__ __device__ poly3_3 polydet3 (EmatrixSet_6pt E) { // Takes the determinant of a polynomial poly3_3 det = (E(1,1)*E(2,2) - E(2,1)*E(1,2)) * E(0,0) + (E(2,1)*E(0,2) - E(0,1)*E(2,2)) * E(1,0) + (E(0,1)*E(1,2) - E(1,1)*E(0,2)) * E(2,0); #ifdef RH_DEBUG printf ("Det =\n"); det.print(); #endif return det; } __host__ __device__ static poly3_2 traceEEt (EmatrixSet_6pt E, int deg) { // Takes the trace of E E' -- returns a quadratic polynomial, // giving the values of the trace in degree deg. 
// Trace of product is the elementwise product of the elements poly3_2 tr; switch (deg) { case 0: tr = E(2,2) * E(2, 2); break; case 1: tr = E(0,2) * E(0, 2) + E(1,2) * E(1, 2) + E(2,0) * E(2, 0) + E(2,1) * E(2, 1); break; case 2: tr = E(0,0) * E(0, 0) + E(0,1) * E(0, 1) + E(1,0) * E(1, 0) + E(1,1) * E(1, 1); break; } #ifdef RH_DEBUG printf ("Trace is:\n"); tr.print(); #endif return tr; } __host__ __device__ static void mono_coeff (poly3_3 B, PolyMatrix A, int n, int deg) { // Extracts the monomial coefficients in x and y (with z = 1) from // a cubic homogeneous polynomial. Returns 4 vectors (degrees 0 to 3 in w) for (int i=0; i<B.size_(); i++) A[n][i][deg] = B[i]; } __host__ __device__ static void EEeqns (EmatrixSet_6pt E, PolyMatrix A, PolyDegree degrees) { // // Computes the equations that will be used to input to polyeig. // void EEeqns(E, A) // where E has dimensions E(3, 3, 4). The output is a matrix // of dimension A(4, 10, 10, where A(i, :, :) is the coeffient of w^{i-1} // // Makes all the equations from the essential matrix E // First of all, set the equations to zero memset (&(A[0][0][0]), 0, sizeof(PolyMatrix)); // First equation is from the determinant mono_coeff (polydet3(E), A, 0, 0); // Other equations from the equation 2 E*E'*E - tr(E*E') E = 0 // In the following loop, we compute EE'E(i,j) = sum_pq E(i,p)*E(q,p)*E(q,j) // The way this is done is optimized for speed. We compute first the matrix // EE'(i, q) and then use this to accumulate EE'E(i, j) // Find the trace - this is a quadratic polynomial poly3_2 tr0 = traceEEt(E, 0); // Degree 0 in w poly3_2 tr1 = traceEEt(E, 1); // Degree 1 in w poly3_2 tr2 = traceEEt(E, 2); // Degree 2 in w // Constant term for (int i=0; i<3; i++) { // An array of cubic polynomials, one for each j = 0 ... 2 poly3_3 EEE_i[3]; // Will hold (EE'E)(i,j) for (int j=0; j<3; j++) EEE_i[j].clear(); // Compute each EE'(i,q) = sum_p E(i,p) E(q,p) // Only term that counts is p=2, q=2 poly3_2 EE_i2 = E(i,2)*E(2,2); for (int j=0; j<3; j++) EEE_i[j] += EE_i2 * E(2,j); // Now, EE'E(i,j) is computed for this i and all j // We can complete the computation of the coefficients from EE'E(i, j) for (int j=0; j<3; j++) mono_coeff(EEE_i[j]*2.0 - tr0*E(i,j), A, 3*i+j+1, 0); } // Term in w for (int i=0; i<3; i++) { // An array of cubic polynomials, one for each j = 0 ... 2 poly3_3 EEE_i[3]; // Will hold (EE'E)(i,j) for (int j=0; j<3; j++) EEE_i[j].clear(); // Compute each EE'(i,q) = sum_p E(i,p) E(q,p) // Only term that counts is p=2, q=2 poly3_2 EE_i0 = E(i,2)*E(0,2); for (int j=0; j<3; j++) EEE_i[j] += EE_i0 * E(0,j); poly3_2 EE_i1 = E(i,2)*E(1,2); for (int j=0; j<3; j++) EEE_i[j] += EE_i1 * E(1,j); poly3_2 EE_i2 = E(i,0)*E(2,0) + E(i,1)*E(2,1); for (int j=0; j<3; j++) EEE_i[j] += EE_i2 * E(2,j); // Now, EE'E(i,j) is computed for this i and all j // We can complete the computation of the coefficients from EE'E(i, j) for (int j=0; j<3; j++) mono_coeff(EEE_i[j]*2.0 - tr1*E(i,j), A, 3*i+j+1, 1); } // Term in w^2 for (int i=0; i<3; i++) { // An array of cubic polynomials, one for each j = 0 ... 
2 poly3_3 EEE_i[3]; // Will hold (EE'E)(i,j) for (int j=0; j<3; j++) EEE_i[j].clear(); // Compute each EE'(i,q) = sum_p E(i,p) E(q,p) // Only term that counts is p=2, q=2 poly3_2 EE_i0 = E(i,0)*E(0,0) + E(i,1)*E(0,1); for (int j=0; j<3; j++) EEE_i[j] += EE_i0 * E(0,j); poly3_2 EE_i1 = E(i,0)*E(1,0) + E(i,1)*E(1,1); for (int j=0; j<3; j++) EEE_i[j] += EE_i1 * E(1,j); // Now, EE'E(i,j) is computed for this i and all j // We can complete the computation of the coefficients from EE'E(i, j) for (int j=0; j<3; j++) mono_coeff(EEE_i[j]*2.0 - tr2*E(i,j), A, 3*i+j+1, 2); } // Return also the degrees of the equations for (int j=0; j<Ncols; j++) { degrees[0][j] = 0; // Equations from determinant have no w for (int i=1; i<Nrows; i++) degrees[i][j] = 2; // Other equations have degree 2 } } __host__ __device__ static void null_space_solve_6x9 ( double A[NMatches][9], EmatrixSet_6pt &E) { // This will compute the set of solutions for the equations // Sweep out one column at a time, starting with highest column number // We do Gaussian elimination to convert M to the form M = [X | I] // Then the null space will be [-I | X]. // For present, this is done without pivoting. // Mostly, do not need to actually change right hand part (that becomes I) const int lastrow = NMatches-1; // Last real row in the matrix const int firstcol = Nvar; // First column to do elimination to make I const int lastcol = 8; // First sweep is to get rid of the above diagonal parts for (int col=lastcol; col>firstcol; col--) // No need to do first col { // Remove column col const int row = col-firstcol; // Row to pivot around const double pivot = A[row][col]; // Sweep out all rows up to the current one for (int i=0; i<row; i++) { // This factor of the pivot row is to subtract from row i const double fac = A[i][col] / pivot; // Constant terms for (int j=0; j<col; j++) A[i][j] -= fac * A[row][j]; } } // Now, do backward sweep to clear below the diagonal for (int col=firstcol; col<lastcol; col++) // No need to do lastcol { // Remove column col const int row = col-firstcol; // Row to pivot around const double pivot = A[row][col]; // Sweep out all rows up to the current one for (int i=row+1; i<=lastrow; i++) { // This factor of the pivot row is to subtract from row i const double fac = A[i][col] / pivot; // Constant terms for (int j=0; j<firstcol; j++) A[i][j] -= fac * A[row][j]; } } // Make this into a matrix of solutions // This code is specific to 6x9. 
Change for 5x9 double fac; E(0, 0) = poly3_1(1.0, 0.0, 0.0); // z E(0, 1) = poly3_1(0.0, 1.0, 0.0); // x E(0, 2) = poly3_1(0.0, 0.0, 1.0); // y fac = -1.0/A[0][3]; E(1, 0) = poly3_1(fac*A[0][0], fac*A[0][1], fac*A[0][2]); fac = -1.0/A[1][4]; E(1, 1) = poly3_1(fac*A[1][0], fac*A[1][1], fac*A[1][2]); fac = -1.0/A[2][5]; E(1, 2) = poly3_1(fac*A[2][0], fac*A[2][1], fac*A[2][2]); fac = -1.0/A[3][6]; E(2, 0) = poly3_1(fac*A[3][0], fac*A[3][1], fac*A[3][2]); fac = -1.0/A[4][7]; E(2, 1) = poly3_1(fac*A[4][0], fac*A[4][1], fac*A[4][2]); fac = -1.0/A[5][8]; E(2, 2) = poly3_1(fac*A[5][0], fac*A[5][1], fac*A[5][2]); // Now make them orthogonal for (int i=0; i<Nvar; i++) { // Take the inner product of the others for (int j=0; j<i; j++) { // Form the inner product of i-th and j-th layers double val = 0.0; for (int k=0; k<3; k++) for (int l=0; l<3; l++) val += E(k,l)[i] * E(k,l)[j]; // Subtract it from E(.,.)[i] for (int k=0; k<3; k++) for (int l=0; l<3; l++) E(k,l)[i] -= val * E(k,l)[j]; } // Now, normalize this one to length 1 double lensq = 0.0; for (int k=0; k<3; k++) for (int l=0; l<3; l++) lensq += E(k,l)[i] * E(k,l)[i]; double fac = 1.0 / sqrt(lensq); for (int k=0; k<3; k++) for (int l=0; l<3; l++) E(k,l)[i] *= fac; } //#define USE_TEST_VALUES #ifdef USE_TEST_VALUES // Put an artificial value in E(0,0)[0] = 2; E(0,1)[0] = 4; E(0,2)[0] = -1; E(1,0)[0] = 4; E(1,1)[0] = 5; E(1,2)[0] = -8; E(2,0)[0] = 2; E(2,1)[0] = -11; E(2,2)[0] = 8; E(0,0)[1] = 0; E(0,1)[1] = -1; E(0,2)[1] = 2; E(1,0)[1] = 1; E(1,1)[1] = 7; E(1,2)[1] = 1; E(2,0)[1] = -2; E(2,1)[1] = 6; E(2,2)[1] = 7; E(0,0)[2] = 2; E(0,1)[2] = -3; E(0,2)[2] = 7; E(1,0)[2] = 1; E(1,1)[2] = -3; E(1,2)[2] = -9; E(2,0)[2] = 4; E(2,1)[2] = 1; E(2,2)[2] = -9; #endif } // Forward declaration __host__ __device__ void print_polymatrix (PolyMatrix A, PolyDegree degrees, int maxdegree); __host__ __device__ static void compute_E_A( Matches q, Matches qp, // Input points EmatrixSet_6pt &E, // Basis for E-matrix (returned) PolyMatrix &A, PolyDegree &degrees // Matrix of polynomials ) { // Computes the E-matrix from match inputs // A matrix to solve linearly for the ematrix double M[NMatches][9]; memset (&(M[0][0]), 0, sizeof (M)); for (int i=0; i<NMatches; i++) { M[i][0] = qp[i][0]*q[i][0]; M[i][1] = qp[i][0]*q[i][1]; M[i][2] = qp[i][0]*q[i][2]; M[i][3] = qp[i][1]*q[i][0]; M[i][4] = qp[i][1]*q[i][1]; M[i][5] = qp[i][1]*q[i][2]; M[i][6] = qp[i][2]*q[i][0]; M[i][7] = qp[i][2]*q[i][1]; M[i][8] = qp[i][2]*q[i][2]; } // Solve using null_space_solve to get a basis for E null_space_solve_6x9 (M, E); // # define RH_DEBUG # ifdef RH_DEBUG printf ("E = \n"); E.print(); // Check that this is right for (int m=0; m<Nvar; m++) { printf ("Matrix %d\n", m); for (int pt=0; pt<NMatches; pt++) { double val = 0.0; for (int i=0; i<3; i++) for (int j=0; j<3; j++) val += qp[pt][i] * E(i,j)[m] * q[pt][j]; printf ("Point %d : %12.4e\n", pt, val); } } # endif // #define USE_TEST_DATA #ifdef USE_TEST_DATA // Override by getting the input data E(1,1) = poly3_1 ( 0, 2, 2); E(1,1) = poly3_1 (-1, -3, 4); E(1,1) = poly3_1 ( 2, 7, -1); E(1,1) = poly3_1 ( 1, 1, 4); E(1,1) = poly3_1 ( 7, -3, 5); E(1,1) = poly3_1 ( 1, -9, -8); E(1,1) = poly3_1 (-2, 4, 2); E(1,1) = poly3_1 ( 6, 1, -11); E(1,1) = poly3_1 ( 7, -9, 8); #endif // Now, get the polynomial equations in A EEeqns(E, A, degrees); // #define RH_DEBUG # ifdef RH_DEBUG # undef RH_DEBUG print_polymatrix (A, degrees, 2); # endif } __host__ __device__ void compute_E_A_6pt ( Matches q, Matches qp, double EE[3][3][3], double AA[3][10][10]) { // This 
is used by the Matlab interface. // It takes the matches and returns the basis for the E-matrices (EE) // along with a 3x3 matrix of polynomials, which allows us to solve // for w. // Get the matrix set PolyMatrix A; PolyDegree degrees; EmatrixSet_6pt E; compute_E_A (q, qp, E, A, degrees); // print_polymatrix (A, degrees, 2); // Finally, get the 10-th degree polynomial out of this // if (poly) compute_determinant (A, poly); // Now, copy to the simple arrays if (EE) for (int d=0; d<3; d++) for (int i=0; i<3; i++) for (int j=0; j<3; j++) EE[d][i][j] = E(i,j)[d]; // Do not transpose - we want Ematrices thus if (AA) for (int d=0; d<3; d++) for (int i=0; i<10; i++) for (int j=0; j<10; j++) AA[d][i][j] = A[j][i][d]; // Transpose } __host__ __device__ void print_polymatrix (PolyMatrix A, PolyDegree degrees, int maxdegree) { // Print out the matrix printf ("Equation matrix\n"); for (int degree=0; degree<=maxdegree; degree++) { printf ("A%1d = {", degree); for (int i=0; i<10; i++) { if (i != 0) printf (","); printf ("{"); for (int j=0; j<10; j++) { if (j != 0) printf (","); if (degree <= degrees[i][j]) printf ("%.12f ", A[i][j][degree]); else printf ("%.12f ", 0.0); } printf ("\n"); printf ("}"); } printf ("};\n"); } } // Declaration of the function to find roots __host__ __device__ int find_real_roots_sturm( double *p, int order, double *roots, int *nroots, bool non_neg = true); __host__ __device__ static inline double pval (double *p, int deg, double x) { // Evaluates a polynomial at a given point x. Assumes deg >= 0 double val = p[deg]; for (int i=deg-1; i>=0; i--) val = x*val + p[i]; return val; } __host__ __device__ static void compute_E_matrix ( EmatrixSet_6pt &Es, PolyMatrix A, PolyDegree deg, // Degree of each entry in A int rows[Nrows], double w, Ematrix &E ) { // Compute the essential matrix corresponding to this root from // the matrix of equations A, assumed to be in row-echelon form // as defined by the array rows. double a10 = pval(A[rows[1]][0], deg[rows[1]][0], w); double a11 = pval(A[rows[1]][1], deg[rows[1]][1], w); double a20 = pval(A[rows[2]][0], deg[rows[2]][0], w); double a21 = pval(A[rows[2]][1], deg[rows[2]][1], w); double a22 = pval(A[rows[2]][2], deg[rows[2]][2], w); double x = -a10/a11; double y = -(a20 + x*a21) / a22; // #define RH_DEBUG #ifdef RH_DEBUG #undef RH_DEBUG printf ("In c_E_m: %10.3e %10.3e %10.3e %10.3e %10.3e %10.3e %10.3e\n", a10, a11, a20, a21, a22, x, y); #endif // Multiply out the solution to get the essential matrix for (int i=0; i<3; i++) for (int j=0; j<3; j++) { poly3_1 &p = Es(i, j); E[i][j] = p[z_] + x*p[x_] + y*p[y_]; } } __host__ __device__ void compute_F_matrices_6pt ( Matches q, Matches qp, Ematrix Ematrices[Maxdegree], double *flengths, int &nroots) { // Compute the F-matrix and focal lengths from matches. // What is returned here are not exactly E-matrices, but rather the // matrices such that qp * E * q = 0. That is, they are F-matrices. 
// To get the E-matrices, one should scale rows and columns by f, // as is done in compute_E_matrices_6pt // Declare and clear the matrix of equations // Get the matrix set EmatrixSet_6pt E; PolyMatrix A; PolyDegree degrees; compute_E_A(q, qp, E, A, degrees); // #define PRINT_RESULTS #ifdef PRINT_RESULTS printf ("Polymatrix\n"); print_polymatrix (A, degrees, 2); #endif // Go ahead and find the polynomial determinant int rows[Nrows]; double scale_factor = 1.0; # define PRE_PROCESS # ifdef PRE_PROCESS det_preprocess_6pt (A, degrees, 3); do_scale (A, degrees, scale_factor, true); // printf ("Scaled: scale_factor = %f\n", scale_factor); // printf ("\nPolymatrix after preprocessing\n"); // print_polymatrix (A, degrees, 2); #endif find_polynomial_determinant (A, degrees, rows); double *poly = A[rows[0]][0]; int poly_degree = degrees[rows[0]][0]; // Find the positive real roots #ifdef PRE_PROCESS double roots[Maxdegree]; find_real_roots_sturm(poly, poly_degree, roots, &nroots); #else double roots[Maxdegree]; find_real_roots_sturm(poly+3, poly_degree-3, roots, &nroots); #endif // Only accept roots that are beyond a threshold - also multiply by scale int goodroots = 0; for (int i=0; i<nroots; i++) if (roots[i] > 1.0e-8) roots[goodroots++] = roots[i]; nroots = goodroots; // Put their square roots in the array flengths for (int i=0; i<nroots; i++) flengths[i] = sqrt(roots[i]*scale_factor); // Now, get the ematrices for (int i=0; i<nroots; i++) compute_E_matrix (E, A, degrees, rows, roots[i], Ematrices[i]); // #define PRINT_RESULTS #ifdef PRINT_RESULTS #undef PRINT_RESULTS // printf ("Polynomial[0] = %13.6e\n", poly[0]); printf ("Polynomial\n"); for (int i=0; i<=poly_degree; i++) printf ("\t%14.6f\n", poly[i]/poly[0]); // printf ("\t%14.6e\n", poly[i]); #endif // #define PRINT_RESULTS #ifdef PRINT_RESULTS #undef PRINT_RESULTS // Print out the roots printf ("Roots\n"); for (int i=0; i<nroots; i++) printf ("\t%14.6f\n", roots[i]); #endif // #define PRINT_RESULTS #ifdef PRINT_RESULTS #undef PRINT_RESULTS // Print out the essential matrices printf ("Ematrices\n"); for (int m=0; m<nroots; m++) { const Ematrix &E = Ematrices[m]; for (int i=0; i<3; i++) printf ("\t%12.5f %12.5f %12.5f\n", E[i][0], E[i][1], E[i][2]); printf ("\n"); // Now, compute to see if it has worked printf ("Verify: "); for (int pt=0; pt<NMatches; pt++) { double sum = 0.0; for (int i=0; i<3; i++) for (int j=0; j<3; j++) sum += qp[pt][i] * E[i][j] * q[pt][j]; printf ("%11.3e ", sum); } printf ("\n\n"); } #endif } __host__ __device__ void compute_E_matrices_6pt ( Matches q, Matches qp, Ematrix E[Maxdegree], double *flengths, int &nroots) { // Same as compute_F_matrices_6pt, but returns true E-matrices // First, compute the E-matrices compute_F_matrices_6pt (q, qp, E, flengths, nroots); // Now, convert the F-matrices to E-matrices for (int m=0; m<nroots; m++) { // Get the focal length double f = flengths[m]; // Scale it E[m][0][2] /= f; E[m][1][2] /= f; E[m][2][0] /= f; E[m][2][1] /= f; E[m][2][2] /= f*f; // Normalize double scale = 0.0; for (int i=0; i<3; i++) for (int j=0; j<3; j++) scale += E[m][i][j]*E[m][i][j]; scale = sqrt(2.0 /scale); for (int i=0; i<3; i++) for (int j=0; j<3; j++) E[m][i][j] *= scale; } }
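// ---------------------------------------------------------------------------
// Added illustrative note (not part of the original 6-point solver): EEeqns
// above encodes the standard essential-matrix identity
//   2 * E * E' * E - trace(E * E') * E = 0   (together with det(E) = 0).
// The small host-only sketch below simply evaluates the residual of that
// identity for a hand-picked essential matrix; the helper name and the test
// matrix are made up for illustration and are not used by the solver itself.
#include <cstdio>

static void ee_residual(const double E[3][3], double R[3][3]) {
    double EEt[3][3] = {{0}};                    // EEt(i,j) = sum_p E(i,p) * E(j,p)
    for (int i = 0; i < 3; i++)
        for (int j = 0; j < 3; j++)
            for (int p = 0; p < 3; p++)
                EEt[i][j] += E[i][p] * E[j][p];
    const double tr = EEt[0][0] + EEt[1][1] + EEt[2][2];
    for (int i = 0; i < 3; i++)
        for (int j = 0; j < 3; j++) {
            double eee = 0.0;                    // (E * E' * E)(i,j)
            for (int q = 0; q < 3; q++)
                eee += EEt[i][q] * E[q][j];
            R[i][j] = 2.0 * eee - tr * E[i][j];
        }
}

int main() {
    // E = [t]_x * R with R = I and t = (0, 0, 1): a proper essential matrix,
    // so the residual should print as all zeros.
    const double E[3][3] = {{0, -1, 0}, {1, 0, 0}, {0, 0, 0}};
    double R[3][3];
    ee_residual(E, R);
    for (int i = 0; i < 3; i++)
        printf("%8.3f %8.3f %8.3f\n", R[i][0], R[i][1], R[i][2]);
    return 0;
}
// ---------------------------------------------------------------------------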
__device__ __inline__ unsigned long long int gclock64() { unsigned long long int rv; asm volatile ( "mov.u64 %0, %%globaltimer;" : "=l"(rv) ); return rv; } template <typename T_weight, typename T_data, int R, int S, int BATCH_UNROLL> __device__ void nv_wavenet_dualBlock_skip(int sample, int num_layers, int batch_offset, int batch_size, T_weight* Wskip, T_data* Bskip, T_data skip_out_sh[BATCH_UNROLL][S], T_data* skip_out, bool dumpActivations, volatile T_data* h, volatile int* hSample) { const int WV = sizeof(T_weight)/sizeof(T_data); T_weight weights[R/WV]; T_data accum[BATCH_UNROLL]; T_data skip_accum_last[BATCH_UNROLL]; __shared__ T_data h_sh[2][BATCH_UNROLL][R]; for (int b=0; b<BATCH_UNROLL; b++) { skip_accum_last[b] = 0.f; } int ping_pong = 0; if (threadIdx.x < 32) { for (int layer=0; layer<num_layers; layer++) { int row = threadIdx.x; if (row < BATCH_UNROLL) while (hSample[layer*batch_size + batch_offset + row] <= sample); __syncthreads(); } __syncthreads(); } else if (threadIdx.x >= 32 && threadIdx.x < 32 + R) { __syncthreads(); for (int layer=0; layer<num_layers; layer++) { int row = threadIdx.x - 32; #pragma unroll for (int b=0; b<BATCH_UNROLL; b++) { h_sh[ping_pong][b][row] = loadVolatile(h,layer*batch_size*R + (batch_offset+b)*R + row); } __syncthreads(); ping_pong = ping_pong ^ 1; } } else if (threadIdx.x >= 32+R && threadIdx.x < S + 32 + R) { __syncthreads(); for (int layer=0; layer<num_layers; layer++) { __syncthreads(); int row = threadIdx.x - (32+R); T_data bias = Bskip[layer*S + row]; loadWeights<S,R>(weights,Wskip,layer,row); GEMM<R,2,BATCH_UNROLL>(weights,h_sh[ping_pong],accum); for (int b=0; b<BATCH_UNROLL; b++) { accum[b] += bias; T_data val = accum[b] + skip_accum_last[b]; skip_accum_last[b] += accum[b]; skip_out_sh[b][row] = val; if (dumpActivations) skip_out[layer*batch_size*S + (batch_offset+b)*S + row] = val; } ping_pong = ping_pong ^ 1; } } else { for (int layer=0; layer<num_layers; layer++) { __syncthreads(); } __syncthreads(); } } template <typename T_weight, typename T_data, int R, int S, int A, int BATCH_UNROLL> __device__ void nv_wavenet_dualBlock_A(nv_wavenet_params<T_weight, T_data> params, int batch_offset) { __shared__ T_data xt_sh[BATCH_UNROLL][R]; __shared__ T_data a_cur_sh[BATCH_UNROLL][2*R]; __shared__ T_data h_sh[BATCH_UNROLL][R]; for (int sample = params.init_sample; sample < params.init_sample + params.num_samples_per_chunk; sample++) { // Pipeline the prev computation with final layers of prior sample nv_wavenet_prev<T_weight, T_data, R, BATCH_UNROLL>(sample, threadIdx.x, params.num_layers, params.maxDilation, batch_offset, params.batch_size, params.Wprev, params.L, params.xt, params.a_prev, params.dumpActivations); uint64_t prev = (threadIdx.x == 0) ? 
gclock64() : 0; // Now wait for the prior sample to be computed if (threadIdx.x < BATCH_UNROLL) { while (params.ySample[batch_offset + threadIdx.x] < sample); } __syncthreads(); // Begin current sample // Embedding if (threadIdx.x < R) { int row = threadIdx.x; int yPrev[BATCH_UNROLL]; int yCur[BATCH_UNROLL]; for (int b=0; b<BATCH_UNROLL; b++) { yPrev[b] = params.yInPrev[batch_offset+b]; yCur[b] = params.yInCur[batch_offset+b]; T_data val = params.embedPrev[yPrev[b]*R + row] + params.embedCur[yCur[b]*R + row]; if (params.tanhEmbed) val = _tanh(val); xt_sh[b][row] = val; T_data* Xt = params.xt + (sample%(params.maxDilation+1))*(params.num_layers+1)*R*params.batch_size; Xt[(batch_offset+b)*R + row] = val; } } __syncthreads(); if (threadIdx.x < 2*R) { int row = threadIdx.x; nv_wavenet_cur<T_weight, T_data, R, BATCH_UNROLL>(sample, row, params.num_layers, batch_offset, params.batch_size, params.Wcur, params.B, params.L, xt_sh, a_cur_sh, params.a_prev); } else if (threadIdx.x < 3*R) { int row = threadIdx.x - 2*R; nv_wavenet_pointwise<T_weight, T_data, R, S, BATCH_UNROLL, true>(sample, row, params.num_layers, batch_offset, params.batch_size, params.xtmd, xt_sh, a_cur_sh, h_sh, params.h, params.hSample); } else if (threadIdx.x < 4*R) { int row = threadIdx.x - 3*R; nv_wavenet_res<T_weight, T_data, R, S, BATCH_UNROLL, true>(sample, row, params.num_layers, params.maxDilation, batch_offset, params.batch_size, params.Wres, params.Bres, h_sh, xt_sh, params.xt, params.xtOut, params.dumpActivations); } else { for (int layer=0; layer<params.num_layers;layer++) { __syncthreads(); } } } } template <typename T_weight, typename T_data, int R, int S, int A, int BATCH_UNROLL> __device__ void nv_wavenet_dualBlock_B(nv_wavenet_params<T_weight, T_data> params, int batch_offset) { for (int sample = params.init_sample; sample < params.init_sample + params.num_samples_per_chunk; sample++) { int row = threadIdx.x; __shared__ T_data skip_out_sh[BATCH_UNROLL][S]; nv_wavenet_dualBlock_skip<T_weight, T_data, R, S, BATCH_UNROLL>(sample, params.num_layers, batch_offset, params.batch_size, params.Wskip, params.Bskip, skip_out_sh, params.skip_out, params.dumpActivations, params.h, params.hSample); __syncthreads(); const int WV = sizeof(T_weight)/sizeof(T_data); T_weight weights[R/WV]; T_data accum[BATCH_UNROLL]; __shared__ T_data out_sh[BATCH_UNROLL][A]; __shared__ T_data skip_out_final_sh[BATCH_UNROLL][A]; const int M = 4*R; T_data zero = 0.f; // relu for (int r = threadIdx.x; r < S; r += blockDim.x) { for (int b=0; b<BATCH_UNROLL; b++) { T_data d = skip_out_sh[b][r]; skip_out_sh[b][r] = d < zero ? zero : d; } } __syncthreads(); if (threadIdx.x < M) { // SkipOut: AxS for (int tile_m = 0; tile_m < A/M; tile_m++) { T_data bias = params.BskipOut[tile_m*M+row]; T_data split_accum[BATCH_UNROLL]; for (int b=0; b<BATCH_UNROLL; b++) { split_accum[b] = 0.f; } for (int tile_k = 0; tile_k < S/R; tile_k++) { loadWeights<M,R>(weights, params.WskipOut + tile_m*M, tile_k, threadIdx.x, A); T_data activations[BATCH_UNROLL][R]; for (int b=0; b<BATCH_UNROLL; b++) { for (int i=0; i<R; i++) { activations[b][i] = skip_out_sh[b][tile_k*R + i]; } } GEMM<R,2,BATCH_UNROLL>(weights,activations,accum); for (int b=0; b<BATCH_UNROLL; b++) { split_accum[b] += accum[b]; } } for (int b=0; b<BATCH_UNROLL; b++) { int finalLayer = S/R - 1; split_accum[b] += bias; skip_out_final_sh[b][tile_m*M + row] = split_accum[b] < zero ? 
zero : split_accum[b]; // relu if (params.dumpActivations) params.skipOutFinal[finalLayer*params.batch_size*A + (batch_offset+b)*A + tile_m*M + row] = split_accum[b]; } } } __syncthreads(); if (threadIdx.x < M) { // Out: AxA for (int tile_m = 0; tile_m < A/M; tile_m++) { T_data bias = params.Bout[tile_m*M+row]; T_data split_accum[BATCH_UNROLL]; for (int b=0; b<BATCH_UNROLL; b++) { split_accum[b] = 0.f; } for (int tile_k = 0; tile_k < A/R; tile_k++) { loadWeights<M,R>(weights, params.Wout + tile_m*M, tile_k, threadIdx.x, A); T_data activations[BATCH_UNROLL][R]; for (int b=0; b<BATCH_UNROLL; b++) { for (int i=0; i<R; i++) { activations[b][i] = skip_out_final_sh[b][tile_k*R + i]; } } GEMM<R,2,BATCH_UNROLL>(weights,activations,accum); for (int b=0; b<BATCH_UNROLL; b++) { split_accum[b] += accum[b]; } } for (int b=0; b<BATCH_UNROLL; b++) { int finalLayer = A/R - 1; split_accum[b] += bias; out_sh[b][tile_m*M + row] = split_accum[b]; if (params.dumpActivations) params.out[finalLayer*params.batch_size*A + (batch_offset+b)*A + tile_m*M + row] = split_accum[b]; } } } __syncthreads(); //__shared__ T_data p_sh[BATCH_UNROLL][A]; T_data (*p_sh)[A] = skip_out_final_sh; __shared__ int yOut_sh[BATCH_UNROLL]; if (threadIdx.x < M) { softmax_select<T_data, 4*R, A,BATCH_UNROLL>(0,BATCH_UNROLL, (T_data*)out_sh, params.dumpActivations ? (T_data*)p_sh : NULL, params.outputSelectors + sample*params.batch_size + batch_offset, yOut_sh, 1, 4*R); } __syncthreads(); if (threadIdx.x < 4*R) { for (int u=0; u<BATCH_UNROLL; u++) { if (params.dumpActivations) { for (int i=threadIdx.x; i<A; i += M) { params.p[(batch_offset+u)*A + i] = p_sh[u][i]; } } } } // Now that we're done, prepare for next sample: yInPrev = yInCur, yIn = yOut if (threadIdx.x < BATCH_UNROLL) { int u = threadIdx.x; params.yOut[(batch_offset+u)*params.num_samples + sample] = yOut_sh[u]; params.yInPrev[batch_offset+u] = params.yInCur[batch_offset+u]; params.yInCur[batch_offset+u] = yOut_sh[u]; } __syncthreads(); __threadfence(); if (row < BATCH_UNROLL) { params.ySample[batch_offset+row] = sample+1; } } } template <typename T_weight, typename T_data, int R, int S, int A, int BATCH_UNROLL> __global__ void nv_wavenet_dualBlock(nv_wavenet_params<T_weight, T_data> params) { int batch_offset = blockIdx.x/2 * BATCH_UNROLL; int is_skip = blockIdx.x & 1; if (is_skip) nv_wavenet_dualBlock_B<T_weight, T_data, R, S, A, BATCH_UNROLL>(params, batch_offset); else nv_wavenet_dualBlock_A<T_weight, T_data, R, S, A, BATCH_UNROLL>(params, batch_offset); } template <typename T_weight, typename T_data, int R, int S, int A, int BATCH_UNROLL> struct launch_dualBlock { bool operator() (nv_wavenet_params<T_weight, T_data> params, cudaStream_t stream) { assert(BATCH_UNROLL <= 32); //Single-thread-per-batch things get parallelized across a warp dim3 grid(2*params.batch_size/BATCH_UNROLL); dim3 block(S + R + 32); if (4*R > block.x) block.x = 4*R; int occ = getOccupancy(0, block.x*block.y*block.z,(void*)nv_wavenet_dualBlock<T_weight, T_data, R, S, A, BATCH_UNROLL>); assert(occ>0); if(!params.init_sample) { gpuErrChk(cudaMemset((void*)params.hSample,0,params.num_layers*params.batch_size*sizeof(int))); gpuErrChk(cudaMemset((void*)params.ySample,0,params.batch_size*sizeof(int))); } // Since the two CTAs are communicating, launch as a cooperative kernel void* p_params = {&params}; cudaError_t code = cudaLaunchCooperativeKernel((void*)nv_wavenet_dualBlock<T_weight,T_data,R,S,A,BATCH_UNROLL>, grid, block, &p_params, 0, stream); gpuAssert(code, __FILE__, __LINE__, false); return code == 
cudaSuccess; } }; template <typename T_weight, typename T_data, int S, int A, int BATCH_UNROLL> struct launch_dualBlock<T_weight,T_data,128,S,A,BATCH_UNROLL> { bool operator() (nv_wavenet_params<T_weight, T_data> params, cudaStream_t stream) { printf("R=128 with dual block not supported\n"); return false; } }; template <typename T_weight, typename T_data, int S, int A, int BATCH_UNROLL> struct launch_dualBlock<T_weight,T_data,256,S,A,BATCH_UNROLL> { bool operator() (nv_wavenet_params<T_weight, T_data> params, cudaStream_t stream) { printf("R=256 with dual block not supported\n"); return false; } };
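// ---------------------------------------------------------------------------
// Added illustrative note (not part of the original nv-wavenet source):
// launch_dualBlock above uses cudaLaunchCooperativeKernel so that the paired
// "A" and "B" blocks for each batch entry are co-resident and can communicate
// through the h / hSample / ySample buffers. The generic sketch below shows
// only that launch pattern with a made-up kernel (coop_demo) and sizes; it is
// unrelated to the WaveNet math. Grid-wide sync assumes compute capability
// >= 6.0 and relocatable device code (e.g. nvcc -rdc=true -arch=sm_70).
#include <cstdio>
#include <cuda_runtime.h>
#include <cooperative_groups.h>
namespace cg = cooperative_groups;

__global__ void coop_demo(int* counter) {
    cg::grid_group grid = cg::this_grid();
    if (threadIdx.x == 0) atomicAdd(counter, 1);   // each block checks in once
    grid.sync();                                   // whole-grid rendezvous
    if (blockIdx.x == 0 && threadIdx.x == 0)
        printf("blocks seen after grid sync: %d\n", *counter);
}

int main() {
    int supported = 0;
    cudaDeviceGetAttribute(&supported, cudaDevAttrCooperativeLaunch, 0);
    if (!supported) { printf("cooperative launch not supported on device 0\n"); return 0; }

    int* d_counter = nullptr;
    cudaMalloc(&d_counter, sizeof(int));
    cudaMemset(d_counter, 0, sizeof(int));

    dim3 grid(4), block(64);
    void* args[] = { &d_counter };
    cudaLaunchCooperativeKernel((void*)coop_demo, grid, block, args, /*sharedMem=*/0, /*stream=*/0);
    cudaDeviceSynchronize();

    cudaFree(d_counter);
    return 0;
}
// ---------------------------------------------------------------------------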
* \file * cub::DeviceRadixSort provides device-wide, parallel operations for computing a radix sort across a sequence of data items residing within device-accessible memory. */ #pragma once #include <stdio.h> #include <iterator> #include "dispatch/dispatch_radix_sort.cuh" #include "../config.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief DeviceRadixSort provides device-wide, parallel operations for computing a radix sort across a sequence of data items residing within device-accessible memory. ![](sorting_logo.png) * \ingroup SingleModule * * \par Overview * The [<em>radix sorting method</em>](http://en.wikipedia.org/wiki/Radix_sort) arranges * items into ascending (or descending) order. The algorithm relies upon a positional representation for * keys, i.e., each key is comprised of an ordered sequence of symbols (e.g., digits, * characters, etc.) specified from least-significant to most-significant. For a * given input sequence of keys and a set of rules specifying a total ordering * of the symbolic alphabet, the radix sorting method produces a lexicographic * ordering of those keys. * * \par Supported Types * DeviceRadixSort can sort all of the built-in C++ numeric primitive types * (`unsigned char`, `int`, `double`, etc.) as well as CUDA's `__half` * and `__nv_bfloat16` 16-bit floating-point types. * * \par Floating-Point Special Cases * * - Positive and negative zeros are considered equivalent, and will be treated * as such in the output. * - No special handling is implemented for NaN values; these are sorted * according to their bit representations after any transformations. * * \par Transformations * Although the direct radix sorting method can only be applied to unsigned * integral types, DeviceRadixSort is able to sort signed and floating-point * types via simple bit-wise transformations that ensure lexicographic key * ordering. Additional transformations occur for descending sorts. These * transformations must be considered when restricting the * `[begin_bit, end_bit)` range, as the bitwise transformations will occur * before the bit-range truncation. * * Any transformations applied to the keys prior to sorting are reversed * while writing to the final output buffer. * * \par Type Specific Bitwise Transformations * To convert the input values into a radix-sortable bitwise representation, * the following transformations take place prior to sorting: * * - For unsigned integral values, the keys are used directly. * - For signed integral values, the sign bit is inverted. * - For positive floating point values, the sign bit is inverted. * - For negative floating point values, the full key is inverted. * * For floating point types, positive and negative zero are a special case and * will be considered equivalent during sorting. * * \par Descending Sort Bitwise Transformations * If descending sort is used, the keys are inverted after performing any * type-specific transformations, and the resulting keys are sorted in ascending * order. * * \par Stability * DeviceRadixSort is stable. For floating-point types, -0.0 and +0.0 are * considered equal and appear in the result in the same order as they appear in * the input. * * \par Usage Considerations * \cdp_class{DeviceRadixSort} * * \par Performance * \linear_performance{radix sort} The following chart illustrates DeviceRadixSort::SortKeys * performance across different CUDA architectures for uniform-random \p uint32 keys. 
* \plots_below * * \image html lsb_radix_sort_int32_keys.png * */ struct DeviceRadixSort { /******************************************************************//** * \name KeyT-value pairs *********************************************************************/ //@{ /** * \brief Sorts key-value pairs into ascending order. (~<em>2N </em>auxiliary storage required) * * \par * - The contents of the input data are not altered by the sorting operation. * - Pointers to contiguous memory must be used; iterators are not currently * supported. * - In-place operations are not supported. There must be no overlap between * any of the provided ranges: * - `[d_keys_in, d_keys_in + num_items)` * - `[d_keys_out, d_keys_out + num_items)` * - `[d_values_in, d_values_in + num_items)` * - `[d_values_out, d_values_out + num_items)` * - An optional bit subrange <tt>[begin_bit, end_bit)</tt> of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. * - \devicestorageNP For sorting using only <em>O</em>(<tt>P</tt>) temporary storage, see the sorting interface using DoubleBuffer wrappers below. * - \devicestorage * * \par Performance * The following charts illustrate saturated sorting performance across different * CUDA architectures for uniform-random <tt>uint32,uint32</tt> and * <tt>uint64,uint64</tt> pairs, respectively. * * \image html lsb_radix_sort_int32_pairs.png * \image html lsb_radix_sort_int64_pairs.png * * \par Snippet * The code snippet below illustrates the sorting of a device vector of \p int keys * with associated vector of \p int values. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for sorting data * int num_items; // e.g., 7 * int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_keys_out; // e.g., [ ... ] * int *d_values_in; // e.g., [0, 1, 2, 3, 4, 5, 6] * int *d_values_out; // e.g., [ ... ] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, * d_keys_in, d_keys_out, d_values_in, d_values_out, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, * d_keys_in, d_keys_out, d_values_in, d_values_out, num_items); * * // d_keys_out <-- [0, 3, 5, 6, 7, 8, 9] * // d_values_out <-- [5, 4, 3, 1, 2, 0, 6] * * \endcode * * \tparam KeyT <b>[inferred]</b> KeyT type * \tparam ValueT <b>[inferred]</b> ValueT type */ template < typename KeyT, typename ValueT> CUB_RUNTIME_FUNCTION static cudaError_t SortPairs( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation const KeyT *d_keys_in, ///< [in] Pointer to the input data of key data to sort KeyT *d_keys_out, ///< [out] Pointer to the sorted output sequence of key data const ValueT *d_values_in, ///< [in] Pointer to the corresponding input sequence of associated value items ValueT *d_values_out, ///< [out] Pointer to the correspondingly-reordered output sequence of associated value items int num_items, ///< [in] Number of items to sort int begin_bit = 0, ///< [in] <b>[optional]</b> The least-significant bit index (inclusive) needed for key comparison int end_bit = sizeof(KeyT) * 8, ///< [in] <b>[optional]</b> The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; // We cast away const-ness, but will *not* write to these arrays. // `DispatchRadixSort::Dispatch` will allocate temporary storage and // create a new double-buffer internally when the `is_overwrite_ok` flag // is not set. constexpr bool is_overwrite_okay = false; DoubleBuffer<KeyT> d_keys(const_cast<KeyT*>(d_keys_in), d_keys_out); DoubleBuffer<ValueT> d_values(const_cast<ValueT*>(d_values_in), d_values_out); return DispatchRadixSort<false, KeyT, ValueT, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, is_overwrite_okay, stream, debug_synchronous); } /** * \brief Sorts key-value pairs into ascending order. (~<em>N </em>auxiliary storage required) * * \par * - The sorting operation is given a pair of key buffers and a corresponding * pair of associated value buffers. Each pair is managed by a DoubleBuffer * structure that indicates which of the two buffers is "current" (and thus * contains the input data to be sorted). * - The contents of both buffers within each pair may be altered by the sorting * operation. * - In-place operations are not supported. There must be no overlap between * any of the provided ranges: * - `[d_keys.Current(), d_keys.Current() + num_items)` * - `[d_keys.Alternate(), d_keys.Alternate() + num_items)` * - `[d_values.Current(), d_values.Current() + num_items)` * - `[d_values.Alternate(), d_values.Alternate() + num_items)` * - Upon completion, the sorting operation will update the "current" indicator * within each DoubleBuffer wrapper to reference which of the two buffers * now contains the sorted output sequence (a function of the number of key bits * specified and the targeted device architecture). * - An optional bit subrange <tt>[begin_bit, end_bit)</tt> of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. * - \devicestorageP * - \devicestorage * * \par Performance * The following charts illustrate saturated sorting performance across different * CUDA architectures for uniform-random <tt>uint32,uint32</tt> and * <tt>uint64,uint64</tt> pairs, respectively. 
* * \image html lsb_radix_sort_int32_pairs.png * \image html lsb_radix_sort_int64_pairs.png * * \par Snippet * The code snippet below illustrates the sorting of a device vector of \p int keys * with associated vector of \p int values. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for sorting data * int num_items; // e.g., 7 * int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_key_alt_buf; // e.g., [ ... ] * int *d_value_buf; // e.g., [0, 1, 2, 3, 4, 5, 6] * int *d_value_alt_buf; // e.g., [ ... ] * ... * * // Create a set of DoubleBuffers to wrap pairs of device pointers * cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf); * cub::DoubleBuffer<int> d_values(d_value_buf, d_value_alt_buf); * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items); * * // d_keys.Current() <-- [0, 3, 5, 6, 7, 8, 9] * // d_values.Current() <-- [5, 4, 3, 1, 2, 0, 6] * * \endcode * * \tparam KeyT <b>[inferred]</b> KeyT type * \tparam ValueT <b>[inferred]</b> ValueT type */ template < typename KeyT, typename ValueT> CUB_RUNTIME_FUNCTION static cudaError_t SortPairs( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation DoubleBuffer<KeyT> &d_keys, ///< [in,out] Reference to the double-buffer of keys whose "current" device-accessible buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys DoubleBuffer<ValueT> &d_values, ///< [in,out] Double-buffer of values whose "current" device-accessible buffer contains the unsorted input values and, upon return, is updated to point to the sorted output values int num_items, ///< [in] Number of items to sort int begin_bit = 0, ///< [in] <b>[optional]</b> The least-significant bit index (inclusive) needed for key comparison int end_bit = sizeof(KeyT) * 8, ///< [in] <b>[optional]</b> The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; constexpr bool is_overwrite_okay = true; return DispatchRadixSort<false, KeyT, ValueT, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, is_overwrite_okay, stream, debug_synchronous); } /** * \brief Sorts key-value pairs into descending order. (~<em>2N</em> auxiliary storage required). * * \par * - The contents of the input data are not altered by the sorting operation. * - Pointers to contiguous memory must be used; iterators are not currently * supported. 
* - In-place operations are not supported. There must be no overlap between * any of the provided ranges: * - `[d_keys_in, d_keys_in + num_items)` * - `[d_keys_out, d_keys_out + num_items)` * - `[d_values_in, d_values_in + num_items)` * - `[d_values_out, d_values_out + num_items)` * - An optional bit subrange <tt>[begin_bit, end_bit)</tt> of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. * - \devicestorageNP For sorting using only <em>O</em>(<tt>P</tt>) temporary storage, see the sorting interface using DoubleBuffer wrappers below. * - \devicestorage * * \par Performance * Performance is similar to DeviceRadixSort::SortPairs. * * \par Snippet * The code snippet below illustrates the sorting of a device vector of \p int keys * with associated vector of \p int values. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for sorting data * int num_items; // e.g., 7 * int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_keys_out; // e.g., [ ... ] * int *d_values_in; // e.g., [0, 1, 2, 3, 4, 5, 6] * int *d_values_out; // e.g., [ ... ] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, * d_keys_in, d_keys_out, d_values_in, d_values_out, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, * d_keys_in, d_keys_out, d_values_in, d_values_out, num_items); * * // d_keys_out <-- [9, 8, 7, 6, 5, 3, 0] * // d_values_out <-- [6, 0, 2, 1, 3, 4, 5] * * \endcode * * \tparam KeyT <b>[inferred]</b> KeyT type * \tparam ValueT <b>[inferred]</b> ValueT type */ template < typename KeyT, typename ValueT> CUB_RUNTIME_FUNCTION static cudaError_t SortPairsDescending( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation const KeyT *d_keys_in, ///< [in] Pointer to the input data of key data to sort KeyT *d_keys_out, ///< [out] Pointer to the sorted output sequence of key data const ValueT *d_values_in, ///< [in] Pointer to the corresponding input sequence of associated value items ValueT *d_values_out, ///< [out] Pointer to the correspondingly-reordered output sequence of associated value items int num_items, ///< [in] Number of items to sort int begin_bit = 0, ///< [in] <b>[optional]</b> The least-significant bit index (inclusive) needed for key comparison int end_bit = sizeof(KeyT) * 8, ///< [in] <b>[optional]</b> The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; // We cast away const-ness, but will *not* write to these arrays. 
// `DispatchRadixSort::Dispatch` will allocate temporary storage and // create a new double-buffer internally when the `is_overwrite_ok` flag // is not set. constexpr bool is_overwrite_okay = false; DoubleBuffer<KeyT> d_keys(const_cast<KeyT*>(d_keys_in), d_keys_out); DoubleBuffer<ValueT> d_values(const_cast<ValueT*>(d_values_in), d_values_out); return DispatchRadixSort<true, KeyT, ValueT, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, is_overwrite_okay, stream, debug_synchronous); } /** * \brief Sorts key-value pairs into descending order. (~<em>N </em>auxiliary storage required). * * \par * - The sorting operation is given a pair of key buffers and a corresponding * pair of associated value buffers. Each pair is managed by a DoubleBuffer * structure that indicates which of the two buffers is "current" (and thus * contains the input data to be sorted). * - The contents of both buffers within each pair may be altered by the sorting * operation. * - In-place operations are not supported. There must be no overlap between * any of the provided ranges: * - `[d_keys.Current(), d_keys.Current() + num_items)` * - `[d_keys.Alternate(), d_keys.Alternate() + num_items)` * - `[d_values.Current(), d_values.Current() + num_items)` * - `[d_values.Alternate(), d_values.Alternate() + num_items)` * - Upon completion, the sorting operation will update the "current" indicator * within each DoubleBuffer wrapper to reference which of the two buffers * now contains the sorted output sequence (a function of the number of key bits * specified and the targeted device architecture). * - An optional bit subrange <tt>[begin_bit, end_bit)</tt> of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. * - \devicestorageP * - \devicestorage * * \par Performance * Performance is similar to DeviceRadixSort::SortPairs. * * \par Snippet * The code snippet below illustrates the sorting of a device vector of \p int keys * with associated vector of \p int values. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for sorting data * int num_items; // e.g., 7 * int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_key_alt_buf; // e.g., [ ... ] * int *d_value_buf; // e.g., [0, 1, 2, 3, 4, 5, 6] * int *d_value_alt_buf; // e.g., [ ... ] * ... * * // Create a set of DoubleBuffers to wrap pairs of device pointers * cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf); * cub::DoubleBuffer<int> d_values(d_value_buf, d_value_alt_buf); * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items); * * // d_keys.Current() <-- [9, 8, 7, 6, 5, 3, 0] * // d_values.Current() <-- [6, 0, 2, 1, 3, 4, 5] * * \endcode * * \tparam KeyT <b>[inferred]</b> KeyT type * \tparam ValueT <b>[inferred]</b> ValueT type */ template < typename KeyT, typename ValueT> CUB_RUNTIME_FUNCTION static cudaError_t SortPairsDescending( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. 
When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation DoubleBuffer<KeyT> &d_keys, ///< [in,out] Reference to the double-buffer of keys whose "current" device-accessible buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys DoubleBuffer<ValueT> &d_values, ///< [in,out] Double-buffer of values whose "current" device-accessible buffer contains the unsorted input values and, upon return, is updated to point to the sorted output values int num_items, ///< [in] Number of items to sort int begin_bit = 0, ///< [in] <b>[optional]</b> The least-significant bit index (inclusive) needed for key comparison int end_bit = sizeof(KeyT) * 8, ///< [in] <b>[optional]</b> The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; constexpr bool is_overwrite_okay = true; return DispatchRadixSort<true, KeyT, ValueT, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, is_overwrite_okay, stream, debug_synchronous); } //@} end member group /******************************************************************//** * \name Keys-only *********************************************************************/ //@{ /** * \brief Sorts keys into ascending order. (~<em>2N </em>auxiliary storage required) * * \par * - The contents of the input data are not altered by the sorting operation. * - Pointers to contiguous memory must be used; iterators are not currently * supported. * - In-place operations are not supported. There must be no overlap between * any of the provided ranges: * - `[d_keys_in, d_keys_in + num_items)` * - `[d_keys_out, d_keys_out + num_items)` * - An optional bit subrange <tt>[begin_bit, end_bit)</tt> of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. * - \devicestorageNP For sorting using only <em>O</em>(<tt>P</tt>) temporary storage, see the sorting interface using DoubleBuffer wrappers below. * - \devicestorage * * \par Performance * The following charts illustrate saturated sorting performance across different * CUDA architectures for uniform-random \p uint32 and \p uint64 keys, respectively. * * \image html lsb_radix_sort_int32_keys.png * \image html lsb_radix_sort_int64_keys.png * * \par Snippet * The code snippet below illustrates the sorting of a device vector of \p int keys. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for sorting data * int num_items; // e.g., 7 * int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_keys_out; // e.g., [ ... ] * ... 
* * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, num_items); * * // d_keys_out <-- [0, 3, 5, 6, 7, 8, 9] * * \endcode * * \tparam KeyT <b>[inferred]</b> KeyT type */ template <typename KeyT> CUB_RUNTIME_FUNCTION static cudaError_t SortKeys( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation const KeyT *d_keys_in, ///< [in] Pointer to the input data of key data to sort KeyT *d_keys_out, ///< [out] Pointer to the sorted output sequence of key data int num_items, ///< [in] Number of items to sort int begin_bit = 0, ///< [in] <b>[optional]</b> The least-significant bit index (inclusive) needed for key comparison int end_bit = sizeof(KeyT) * 8, ///< [in] <b>[optional]</b> The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; // We cast away const-ness, but will *not* write to these arrays. // `DispatchRadixSort::Dispatch` will allocate temporary storage and // create a new double-buffer internally when the `is_overwrite_ok` flag // is not set. constexpr bool is_overwrite_okay = false; DoubleBuffer<KeyT> d_keys(const_cast<KeyT*>(d_keys_in), d_keys_out); // Null value type DoubleBuffer<NullType> d_values; return DispatchRadixSort<false, KeyT, NullType, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, is_overwrite_okay, stream, debug_synchronous); } /** * \brief Sorts keys into ascending order. (~<em>N </em>auxiliary storage required). * * \par * - The sorting operation is given a pair of key buffers managed by a * DoubleBuffer structure that indicates which of the two buffers is * "current" (and thus contains the input data to be sorted). * - The contents of both buffers may be altered by the sorting operation. * - In-place operations are not supported. There must be no overlap between * any of the provided ranges: * - `[d_keys.Current(), d_keys.Current() + num_items)` * - `[d_keys.Alternate(), d_keys.Alternate() + num_items)` * - Upon completion, the sorting operation will update the "current" indicator * within the DoubleBuffer wrapper to reference which of the two buffers * now contains the sorted output sequence (a function of the number of key bits * specified and the targeted device architecture). * - An optional bit subrange <tt>[begin_bit, end_bit)</tt> of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. 
* - \devicestorageP * - \devicestorage * * \par Performance * The following charts illustrate saturated sorting performance across different * CUDA architectures for uniform-random \p uint32 and \p uint64 keys, respectively. * * \image html lsb_radix_sort_int32_keys.png * \image html lsb_radix_sort_int64_keys.png * * \par Snippet * The code snippet below illustrates the sorting of a device vector of \p int keys. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for sorting data * int num_items; // e.g., 7 * int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_key_alt_buf; // e.g., [ ... ] * ... * * // Create a DoubleBuffer to wrap the pair of device pointers * cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf); * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys, num_items); * * // d_keys.Current() <-- [0, 3, 5, 6, 7, 8, 9] * * \endcode * * \tparam KeyT <b>[inferred]</b> KeyT type */ template <typename KeyT> CUB_RUNTIME_FUNCTION static cudaError_t SortKeys( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation DoubleBuffer<KeyT> &d_keys, ///< [in,out] Reference to the double-buffer of keys whose "current" device-accessible buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys int num_items, ///< [in] Number of items to sort int begin_bit = 0, ///< [in] <b>[optional]</b> The least-significant bit index (inclusive) needed for key comparison int end_bit = sizeof(KeyT) * 8, ///< [in] <b>[optional]</b> The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; constexpr bool is_overwrite_okay = true; // Null value type DoubleBuffer<NullType> d_values; return DispatchRadixSort<false, KeyT, NullType, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, is_overwrite_okay, stream, debug_synchronous); } /** * \brief Sorts keys into descending order. (~<em>2N</em> auxiliary storage required). * * \par * - The contents of the input data are not altered by the sorting operation. * - Pointers to contiguous memory must be used; iterators are not currently * supported. * - In-place operations are not supported. There must be no overlap between * any of the provided ranges: * - `[d_keys_in, d_keys_in + num_items)` * - `[d_keys_out, d_keys_out + num_items)` * - An optional bit subrange <tt>[begin_bit, end_bit)</tt> of differentiating key bits can be specified. 
This can reduce overall sorting overhead and yield a corresponding performance improvement. * - \devicestorageNP For sorting using only <em>O</em>(<tt>P</tt>) temporary storage, see the sorting interface using DoubleBuffer wrappers below. * - \devicestorage * * \par Performance * Performance is similar to DeviceRadixSort::SortKeys. * * \par Snippet * The code snippet below illustrates the sorting of a device vector of \p int keys. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for sorting data * int num_items; // e.g., 7 * int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_keys_out; // e.g., [ ... ] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceRadixSort::SortKeysDescending(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceRadixSort::SortKeysDescending(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, num_items); * * // d_keys_out <-- [9, 8, 7, 6, 5, 3, 0] * * \endcode * * \tparam KeyT <b>[inferred]</b> KeyT type */ template <typename KeyT> CUB_RUNTIME_FUNCTION static cudaError_t SortKeysDescending( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation const KeyT *d_keys_in, ///< [in] Pointer to the input data of key data to sort KeyT *d_keys_out, ///< [out] Pointer to the sorted output sequence of key data int num_items, ///< [in] Number of items to sort int begin_bit = 0, ///< [in] <b>[optional]</b> The least-significant bit index (inclusive) needed for key comparison int end_bit = sizeof(KeyT) * 8, ///< [in] <b>[optional]</b> The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; // We cast away const-ness, but will *not* write to these arrays. // `DispatchRadixSort::Dispatch` will allocate temporary storage and // create a new double-buffer internally when the `is_overwrite_okay` flag // is not set. constexpr bool is_overwrite_okay = false; DoubleBuffer<KeyT> d_keys(const_cast<KeyT*>(d_keys_in), d_keys_out); DoubleBuffer<NullType> d_values; return DispatchRadixSort<true, KeyT, NullType, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, is_overwrite_okay, stream, debug_synchronous); } /** * \brief Sorts keys into descending order. (~<em>N </em>auxiliary storage required).
* * \par * - The sorting operation is given a pair of key buffers managed by a * DoubleBuffer structure that indicates which of the two buffers is * "current" (and thus contains the input data to be sorted). * - The contents of both buffers may be altered by the sorting operation. * - In-place operations are not supported. There must be no overlap between * any of the provided ranges: * - `[d_keys.Current(), d_keys.Current() + num_items)` * - `[d_keys.Alternate(), d_keys.Alternate() + num_items)` * - Upon completion, the sorting operation will update the "current" indicator * within the DoubleBuffer wrapper to reference which of the two buffers * now contains the sorted output sequence (a function of the number of key bits * specified and the targeted device architecture). * - An optional bit subrange <tt>[begin_bit, end_bit)</tt> of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. * - \devicestorageP * - \devicestorage * * \par Performance * Performance is similar to DeviceRadixSort::SortKeys. * * \par Snippet * The code snippet below illustrates the sorting of a device vector of \p int keys. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for sorting data * int num_items; // e.g., 7 * int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_key_alt_buf; // e.g., [ ... ] * ... * * // Create a DoubleBuffer to wrap the pair of device pointers * cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf); * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceRadixSort::SortKeysDescending(d_temp_storage, temp_storage_bytes, d_keys, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceRadixSort::SortKeysDescending(d_temp_storage, temp_storage_bytes, d_keys, num_items); * * // d_keys.Current() <-- [9, 8, 7, 6, 5, 3, 0] * * \endcode * * \tparam KeyT <b>[inferred]</b> KeyT type */ template <typename KeyT> CUB_RUNTIME_FUNCTION static cudaError_t SortKeysDescending( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation DoubleBuffer<KeyT> &d_keys, ///< [in,out] Reference to the double-buffer of keys whose "current" device-accessible buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys int num_items, ///< [in] Number of items to sort int begin_bit = 0, ///< [in] <b>[optional]</b> The least-significant bit index (inclusive) needed for key comparison int end_bit = sizeof(KeyT) * 8, ///< [in] <b>[optional]</b> The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
{ // Signed integer type for global offsets typedef int OffsetT; constexpr bool is_overwrite_okay = true; // Null value type DoubleBuffer<NullType> d_values; return DispatchRadixSort<true, KeyT, NullType, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, is_overwrite_okay, stream, debug_synchronous); } //@} end member group }; /** * \example example_device_radix_sort.cu */ } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
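// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the CUB sources above): the documentation
// snippets elide allocation and read-back, so the minimal host program below
// shows the complete two-phase pattern for the DoubleBuffer overload of
// SortKeys -- query the temporary-storage size with a NULL pointer, allocate,
// sort, then read the result from d_keys.Current(). Error checking is omitted
// for brevity.
// ---------------------------------------------------------------------------
#include <cub/cub.cuh>
#include <cstdio>

int main()
{
    const int num_items = 7;
    int h_keys[num_items] = {8, 6, 7, 5, 3, 0, 9};

    // Device buffers: the "current" buffer holds the input, the alternate
    // buffer is scratch that the sort may ping-pong into.
    int *d_key_buf = NULL, *d_key_alt_buf = NULL;
    cudaMalloc(&d_key_buf,     num_items * sizeof(int));
    cudaMalloc(&d_key_alt_buf, num_items * sizeof(int));
    cudaMemcpy(d_key_buf, h_keys, num_items * sizeof(int), cudaMemcpyHostToDevice);

    cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf);

    // Phase 1: query the required temporary storage size.
    void  *d_temp_storage = NULL;
    size_t temp_storage_bytes = 0;
    cub::DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys, num_items);

    // Phase 2: allocate and run the sort.
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    cub::DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys, num_items);

    // The sorted sequence lives in whichever buffer is now "current".
    cudaMemcpy(h_keys, d_keys.Current(), num_items * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < num_items; ++i)
        printf("%d ", h_keys[i]);   // expected: 0 3 5 6 7 8 9
    printf("\n");

    cudaFree(d_temp_storage);
    cudaFree(d_key_buf);
    cudaFree(d_key_alt_buf);
    return 0;
}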
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) #ifdef USE_MAGMA static void THCTensor_(copyArray1d)(THCState *state, THCTensor *self, real *src, int k) { long size[1] = { k }; long stride[1] = { 1 }; THCTensor_(resizeNd)(state, self, 1, size, stride); size_t len = k * sizeof(real); THCudaCheck(cudaMemcpy(self->storage->data + self->storageOffset, src, len, cudaMemcpyHostToDevice)); } static void THCTensor_(copyArray2d)(THCState *state, THCTensor *self, real *src, int m, int n) { long size[2] = { m, n }; long stride[2] = { 1, m }; THCTensor_(resizeNd)(state, self, 2, size, stride); size_t len = m * n * sizeof(real); THCudaCheck(cudaMemcpy(self->storage->data + self->storageOffset, src, len, cudaMemcpyHostToDevice)); } static void THCTensor_(copyTensor2d)(THCState *state, real *dst, THCTensor *self) { THAssert(self->nDimension == 2); size_t len = THCTensor_(nElement)(state, self)*sizeof(real); THCTensor *temp = THCTensor_(newTranspose)(state, self, 0, 1); THCTensor *selfc = THCTensor_(newContiguous)(state, temp); THCudaCheck(cudaMemcpy(dst, selfc->storage->data + selfc->storageOffset, len, cudaMemcpyDeviceToHost)); THCTensor_(free)(state, temp); THCTensor_(free)(state, selfc); } #endif // USE_MAGMA static THCTensor* THCTensor_(newColumnMajor)(THCState *state, THCTensor *self, THCTensor *src) { THAssert(src->nDimension == 2); if (self == src && self->stride[0] == 1 && self->stride[1] == self->size[0]) { THCTensor_(retain)(state, self); return self; } if (self == src) self = THCTensor_(new)(state); else THCTensor_(retain)(state, self); long size[2] = { src->size[0], src->size[1] }; long stride[2] = { 1, src->size[0] }; THCTensor_(resizeNd)(state, self, 2, size, stride); THCTensor_(copy)(state, self, src); return self; } THC_API void THCTensor_(gesv)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_) { #ifdef USE_MAGMA THArgCheck(a_->nDimension == 2, 1, "A should be 2 dimensional"); THArgCheck(b_->nDimension == 2, 2, "b should be 2 dimensional"); THArgCheck(a_->size[0] == a_->size[1], 1, "A should be square"); THArgCheck(b_->size[0] == a_->size[0], 2, "A,b size incompatible"); int n = a_->size[0]; int nrhs = b_->size[1]; THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_); THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_); real *a_data = THCTensor_(data)(state, a); real *b_data = THCTensor_(data)(state, b); int *ipiv = th_magma_malloc_pinned<int>(n); int info; #if defined(THC_REAL_IS_FLOAT) magma_sgesv_gpu(n, nrhs, a_data, n, ipiv, b_data, n, &info); #else magma_dgesv_gpu(n, nrhs, a_data, n, ipiv, b_data, n, &info); #endif if (info < 0) THError("MAGMA gesv : Argument %d : illegal value", -info); else if (info > 0) THError("MAGMA gesv : U(%d,%d) is zero, singular U.", info, info); magma_free_pinned(ipiv); THCTensor_(freeCopyTo)(state, a, ra_); THCTensor_(freeCopyTo)(state, b, rb_); #else THError(NoMagma(gesv)); #endif } THC_API void THCTensor_(gels)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_) { #ifdef USE_MAGMA THArgCheck(a_->nDimension == 2, 1, "A should be 2 dimensional"); THArgCheck(b_->nDimension == 2, 1, "b should be 2 dimensional"); THArgCheck(a_->size[0] == b_->size[0], 2, "size incompatible A,b"); THArgCheck(a_->size[0] >= a_->size[1], 2, "A should have m >= n"); THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_); THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_); real *a_data = THCTensor_(data)(state, a); real *b_data = THCTensor_(data)(state, b); int m = a->size[0]; int n = 
a->size[1]; int nrhs = b->size[1]; real wkopt; int info; #if defined(THC_REAL_IS_FLOAT) magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info); #else magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info); #endif real *hwork = th_magma_malloc_pinned<real>((size_t)wkopt); #if defined(THC_REAL_IS_FLOAT) magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info); #else magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info); #endif magma_free_pinned(hwork); if (info != 0) THError("MAGMA gels : Argument %d : illegal value", -info); THCTensor_(freeCopyTo)(state, a, ra_); THCTensor_(freeCopyTo)(state, b, rb_); #else THError(NoMagma(gels)); #endif } THC_API void THCTensor_(syev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a, const char *jobzs, const char *uplos) { #ifdef USE_MAGMA int n = a->size[0]; int lda = n; magma_uplo_t uplo = uplos[0] == 'U' ? MagmaUpper : MagmaLower; magma_vec_t jobz = jobzs[0] == 'N' ? MagmaNoVec : MagmaVec; THCTensor *input = THCTensor_(newColumnMajor)(state, rv_, a); real *input_data = THCTensor_(data)(state, input); // eigen values and workspace real *w = th_magma_malloc_pinned<real>(n); real *wA = th_magma_malloc_pinned<real>(lda); // compute optimal size of work array int info; real lwork; int liwork; #if defined(THC_REAL_IS_FLOAT) magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info); #else magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info); #endif real *work = th_magma_malloc_pinned<real>((size_t)lwork); int *iwork = th_magma_malloc_pinned<int>(liwork); // compute eigenvalues and, optionally, eigenvectors #if defined(THC_REAL_IS_FLOAT) magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info); #else magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info); #endif // copy eigen values from w to re_ if (info == 0) THCTensor_(copyArray1d)(state, re_, w, n); magma_free_pinned(iwork); magma_free_pinned(work); magma_free_pinned(wA); magma_free_pinned(w); // check error value if (info > 0) THError("MAGMA syev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info); else if (info < 0) THError("MAGMA syev : Argument %d : illegal value", -info); THCTensor_(freeCopyTo)(state, input, rv_); #else THError(NoMagma(syev)); #endif } THC_API void THCTensor_(geev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a_, const char *jobvrs) { #ifdef USE_MAGMA THArgCheck(a_->nDimension == 2, 3, "A should be 2 dimensional"); THArgCheck(a_->size[0] == a_->size[1], 3, "A should be square"); magma_vec_t jobvr = jobvrs[0] == 'N' ? 
MagmaNoVec : MagmaVec; int n = a_->size[0]; real *a_data = th_magma_malloc_pinned<real>(n * n); THCTensor_(copyTensor2d)(state, a_data, a_); real *wr = th_magma_malloc_pinned<real>(n); real *wi = th_magma_malloc_pinned<real>(n); real *vr_data = NULL; int ldvr = 1; if (jobvr == MagmaVec) { vr_data = th_magma_malloc_pinned<real>(n * n); ldvr = n; } real wkopt; int info; #if defined(THC_REAL_IS_FLOAT) magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info); #else magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info); #endif int lwork = (int) wkopt; real *work_data = th_magma_malloc_pinned<real>(lwork); #if defined(THC_REAL_IS_FLOAT) magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info); #else magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info); #endif if (info > 0) THError("MAGMA geev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info); else if (info < 0) THError("MAGMA geev : Argument %d : illegal value", -info); { THCTensor_(resize2d)(state, re_, 2, n); THCTensor *re = THCTensor_(newContiguous)(state, re_); THCudaCheck(cudaMemcpy(re->storage->data + re->storageOffset, wr, n*sizeof(real), cudaMemcpyHostToDevice)); THCudaCheck(cudaMemcpy(re->storage->data + re->storageOffset + n, wi, n*sizeof(real), cudaMemcpyHostToDevice)); THCTensor_(freeCopyTo)(state, re, re_); THCTensor_(transpose)(state, re_, NULL, 0, 1); } if (jobvr == MagmaVec) THCTensor_(copyArray2d)(state, rv_, vr_data, n, n); magma_free_pinned(work_data); magma_free_pinned(vr_data); magma_free_pinned(wi); magma_free_pinned(wr); magma_free_pinned(a_data); #else THError(NoMagma(geev)); #endif } THC_API void THCTensor_(gesvd)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *a, const char *jobu) { #ifdef USE_MAGMA THCTensor *ra_ = THCTensor_(new)(state); THCTensor_(gesvd2)(state, ru_, rs_, rv_, ra_, a, jobu); THCTensor_(free)(state, ra_); #else THError(NoMagma(gesvd)); #endif } THC_API void THCTensor_(gesvd2)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *ra_, THCTensor *a, const char *jobus) { #ifdef USE_MAGMA THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional"); magma_vec_t jobz = jobus[0] == 'A' ? MagmaAllVec : jobus[0] == 'S' ? MagmaSomeVec : jobus[0] == 'O' ? MagmaOverwriteVec : MagmaNoVec; int iunused[1]; int m = a->size[0]; int n = a->size[1]; int k = m < n ? m : n; int j = (jobz == MagmaAllVec) ? m : k; int jv = (jobz == MagmaAllVec) ? 
n : k; real *a_data = th_magma_malloc_pinned<real>(m * n); THCTensor_(copyTensor2d)(state, a_data, a); real *rs_data = th_magma_malloc_pinned<real>(k); real *ru_data = th_magma_malloc_pinned<real>(m * j); real *rv_data = th_magma_malloc_pinned<real>(n * n); real wkopt; int info; #if defined(THC_REAL_IS_FLOAT) magma_sgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, iunused, &info); #else magma_dgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, iunused, &info); #endif int lwork = (int) wkopt; real *work_data = th_magma_malloc_pinned<real>(lwork); int *iwork = th_magma_malloc_pinned<int>(8 * k); #if defined(THC_REAL_IS_FLOAT) magma_sgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, iwork, &info); #else magma_dgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, iwork, &info); #endif if (info > 0) THError("MAGMA gesdd : the updating process of SBDSDC did not converge (error: %d)", info); else if (info < 0) THError("MAGMA gesdd : Argument %d : illegal value", -info); THCTensor_(copyArray2d)(state, rv_, rv_data, n, n); THCTensor_(transpose)(state, rv_, NULL, 0, 1); if (jobz != MagmaAllVec) THCTensor_(narrow)(state, rv_, rv_, 1, 0, jv); THCTensor_(copyArray2d)(state, ru_, ru_data, m, j); THCTensor_(copyArray1d)(state, rs_, rs_data, k); THCTensor_(copyArray2d)(state, ra_, a_data, m, n); magma_free_pinned(work_data); magma_free_pinned(iwork); magma_free_pinned(rv_data); magma_free_pinned(ru_data); magma_free_pinned(rs_data); magma_free_pinned(a_data); #else THError(NoMagma(gesvd2)); #endif } THC_API void THCTensor_(getri)(THCState *state, THCTensor *ra_, THCTensor *a) { THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional"); THArgCheck(a->size[0] == a->size[1], 2, "A should be square"); #ifdef USE_MAGMA int info; int n = a->size[0]; int lwork = n * magma_get_sgetri_nb(n); THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a); real *input_data = THCTensor_(data)(state, input); int *ipiv = th_magma_malloc_pinned<int>(n); THCTensor *work = THCTensor_(newWithSize1d)(state, lwork); real *work_data = THCTensor_(data)(state, work); // Run LU #if defined(THC_REAL_IS_FLOAT) magma_sgetrf_gpu(n, n, input_data, n, ipiv, &info); #else magma_dgetrf_gpu(n, n, input_data, n, ipiv, &info); #endif if (info > 0) THError("MAGMA getrf : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("MAGMA getrf : Argument %d : illegal value", -info); // Inverse #if defined(THC_REAL_IS_FLOAT) magma_sgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info); #else magma_dgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info); #endif if (info > 0) THError("MAGMA getri : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("MAGMA getri : Argument %d : illegal value", -info); THCTensor_(free)(state, work); magma_free_pinned(ipiv); THCTensor_(freeCopyTo)(state, input, ra_); #else int n = a->size[0]; // input THCTensor *input = THCTensor_(newColumnMajor)(state, a, a); THCTensor_(resizeNd)(state, ra_, 2, input->size, input->stride); real *matrices1[1] = { THCTensor_(data)(state, input) }; real *matrices2[1] = { THCTensor_(data)(state, ra_) }; // Copy pointers to device. 
real **d_matrices1, **d_matrices2; THCudaCheck(THCudaMalloc(state, (void**)&d_matrices1, sizeof(real*))); THCudaCheck(THCudaMalloc(state, (void**)&d_matrices2, sizeof(real*))); THCudaCheck(cudaMemcpyAsync(d_matrices1, matrices1, sizeof(real*), cudaMemcpyHostToDevice, THCState_getCurrentStream(state))); THCudaCheck(cudaMemcpyAsync(d_matrices2, matrices2, sizeof(real*), cudaMemcpyHostToDevice, THCState_getCurrentStream(state))); int info; int *info_gpu; THCudaCheck(THCudaMalloc(state, (void**)&info_gpu, sizeof(int))); int *ipiv_gpu; THCudaCheck(THCudaMalloc(state, (void**)&ipiv_gpu, n * sizeof(int))); // Run LU #if defined(THC_REAL_IS_FLOAT) THCudaBlas_Sgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1); #else THCudaBlas_Dgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1); #endif THCudaCheck(cudaMemcpy(&info, info_gpu, sizeof(int), cudaMemcpyDeviceToHost)); if (info > 0) THError("CUBLAS getrf : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("CUBLAS getrf : Argument %d : illegal value", -info); // Inverse #if defined(THC_REAL_IS_FLOAT) THCudaBlas_Sgetri(state, n, (const real**)d_matrices1, n, ipiv_gpu, d_matrices2, n, info_gpu, 1); #else THCudaBlas_Dgetri(state, n, (const real**)d_matrices1, n, ipiv_gpu, d_matrices2, n, info_gpu, 1); #endif THCudaCheck(cudaMemcpy(&info, info_gpu, sizeof(int), cudaMemcpyDeviceToHost)); if (info > 0) THError("CUBLAS getri : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("CUBLAS getri : Argument %d : illegal value", -info); THCudaCheck(THCudaFree(state, ipiv_gpu)); THCudaCheck(THCudaFree(state, info_gpu)); THCudaCheck(THCudaFree(state, d_matrices1)); THCudaCheck(THCudaFree(state, d_matrices2)); THCTensor_(free)(state, input); #endif } __global__ void THCTensor_(copyUpperSymmetric)(real *input, int n, int len) { for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) { const int r = idx % n; const int c = idx / n; if (r > c) { input[idx] = input[r*n + c]; } } } __global__ void THCTensor_(copyLowerSymmetric)(real *input, int n, int len) { for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) { const int r = idx % n; const int c = idx / n; if (r < c) { input[idx] = input[r*n + c]; } } } THC_API void THCTensor_(potri)(THCState *state, THCTensor *ra_, THCTensor *a, const char *uplo) { #ifdef USE_MAGMA THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional"); THArgCheck(a->size[0] == a->size[1], 2, "A should be square"); int n = a->size[0]; magma_uplo_t ul = uplo[0] == 'U' ? 
MagmaUpper : MagmaLower; THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a); real *input_data = THCTensor_(data)(state, input); int info; #if defined(THC_REAL_IS_FLOAT) magma_spotri_gpu(ul, n, input_data, n, &info); #else magma_dpotri_gpu(ul, n, input_data, n, &info); #endif if (info > 0) THError("MAGMA potri : A(%d,%d) is 0, A cannot be factorized", info, info); else if (info < 0) THError("MAGMA potri : Argument %d : illegal value", -info); cudaStream_t stream = THCState_getCurrentStream(state); const int len = n*n; dim3 blocks(std::min(DIVUP(len, 128), 65535)); dim3 threads(128); if (uplo[0] == 'U') { THCTensor_(copyUpperSymmetric)<<<blocks, threads, 0, stream>>>(input_data, n, len); } else { THCTensor_(copyLowerSymmetric)<<<blocks, threads, 0, stream>>>(input_data, n, len); } THCTensor_(freeCopyTo)(state, input, ra_); #else THError(NoMagma(potri)); #endif } THC_API void THCTensor_(potrf)(THCState *state, THCTensor *ra_, THCTensor *a, const char *uplo) { #ifdef USE_MAGMA THArgCheck(a->nDimension == 2, 2, "A should be 2 dimensional"); THArgCheck(a->size[0] == a->size[1], 2, "A should be square"); int n = a->size[0]; magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower; THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a); real *input_data = THCTensor_(data)(state, input); int info; #if defined(THC_REAL_IS_FLOAT) magma_spotrf_gpu(ul, n, input_data, n, &info); #else magma_dpotrf_gpu(ul, n, input_data, n, &info); #endif // check error value if (info > 0) THError("MAGMA potrf : A(%d,%d) is 0, A cannot be factorized", info, info); else if (info < 0) THError("MAGMA potrf : Argument %d : illegal value", -info); if (uplo[0] == 'U') { THCTensor_(triu)(state, ra_, input, 0); } else { THCTensor_(tril)(state, ra_, input, 0); } THCTensor_(free)(state, input); #else THError(NoMagma(potrf)); #endif } THC_API void THCTensor_(potrs)(THCState *state, THCTensor *rb_, THCTensor *b, THCTensor *a, const char *uplo) { #ifdef USE_MAGMA THArgCheck(a->size[0] == a->size[1], 2, "A should be square"); int n = a->size[0]; int nrhs = b->size[1]; magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower; THCTensor *b_ = THCTensor_(newColumnMajor)(state, rb_, b); real *b_data = THCTensor_(data)(state, b_); THCTensor *a_ = THCTensor_(newColumnMajor)(state, a, a); real *a_data = THCTensor_(data)(state, a_); int info; #if defined(THC_REAL_IS_FLOAT) magma_spotrs_gpu(ul, n, nrhs, a_data, n, b_data, n, &info); #else magma_dpotrs_gpu(ul, n, nrhs, a_data, n, b_data, n, &info); #endif // check error value if (info < 0) THError("MAGMA potrs : Argument %d : illegal value", -info); THCTensor_(freeCopyTo)(state, b_, rb_); THCTensor_(free)(state, a_); #else THError(NoMagma(potrs)); #endif } THC_API void THCTensor_(qr)(THCState *state, THCTensor *rq_, THCTensor *rr_, THCTensor *a_) { #ifdef USE_MAGMA THArgCheck(a_->nDimension == 2, 2, "A should be 2 dimensional"); THCTensor *a = THCTensor_(newColumnMajor)(state, rr_, a_); int m = a->size[0]; int n = a->size[1]; int k = (m < n ? 
m : n); #ifdef MAGMA_V2 #if defined(THC_REAL_IS_FLOAT) int nb = magma_get_sgeqrf_nb(m, n); #else int nb = magma_get_dgeqrf_nb(m, n); #endif #else #if defined(THC_REAL_IS_FLOAT) int nb = magma_get_sgeqrf_nb(m); #else int nb = magma_get_dgeqrf_nb(m); #endif #endif real *a_data = THCTensor_(data)(state, a); real *tau_data = th_magma_malloc_pinned<real>(k); THCTensor *work = THCTensor_(newWithSize1d)(state, (2*k + magma_roundup(n, 32))*nb); real *work_data = THCTensor_(data)(state, work); int info; #if defined(THC_REAL_IS_FLOAT) magma_sgeqrf2_gpu(m, n, a_data, m, tau_data, &info); #else magma_dgeqrf2_gpu(m, n, a_data, m, tau_data, &info); #endif if (info != 0) THError("MAGMA geqrf2 : Argument %d : illegal value.", -info); THCTensor_(narrow)(state, a, a, 0, 0, k); THCTensor_(triu)(state, rr_, a, 0); THCTensor_(free)(state, a); a = THCTensor_(newColumnMajor)(state, rq_, a_); a_data = THCTensor_(data)(state, a); #if defined(THC_REAL_IS_FLOAT) magma_sgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info); #else magma_dgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info); #endif if (info != 0) THError("MAGMA geqrf : Argument %d : illegal value.", -info); THCTensor *q = THCTensor_(newColumnMajor)(state, rq_, a); real *q_data = THCTensor_(data)(state, q); #if defined(THC_REAL_IS_FLOAT) magma_sorgqr_gpu(m, k, k, q_data, m, tau_data, work_data, nb, &info); #else magma_dorgqr_gpu(m, k, k, q_data, m, tau_data, work_data, nb, &info); #endif if (info != 0) THError("MAGMA orgqr : Argument %d : illegal value.", -info); THCTensor_(free)(state, work); magma_free_pinned(tau_data); THCTensor_(narrow)(state, q, q, 1, 0, k); THCTensor_(freeCopyTo)(state, q, rq_); #else THError(NoMagma(qr)); #endif } #endif #endif
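// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the file above): gels, syev, geev and
// gesvd2 all use the same LAPACK-style workspace-query idiom -- call the MAGMA
// routine once with lwork = -1 so it only reports the optimal workspace size,
// allocate pinned host memory of that size, then call it again to do the real
// work. The helper below condenses that idiom for the single-precision gels
// path, reusing the calls and helpers (th_magma_malloc_pinned,
// magma_free_pinned, THError) already used in this file; the function name is
// hypothetical and the sketch assumes USE_MAGMA and float precision.
// ---------------------------------------------------------------------------
#ifdef USE_MAGMA
static void example_gels_workspace_query(float *a_data, float *b_data,
                                         int m, int n, int nrhs)
{
  int info = 0;
  float wkopt = 0.f;

  // Phase 1: lwork = -1 -> MAGMA writes the optimal workspace size to wkopt.
  magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m,
                  &wkopt, -1, &info);

  // Phase 2: allocate the reported amount of pinned workspace and solve.
  float *hwork = th_magma_malloc_pinned<float>((size_t)wkopt);
  magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m,
                  hwork, (int)wkopt, &info);
  magma_free_pinned(hwork);

  if (info != 0)
    THError("MAGMA gels : Argument %d : illegal value", -info);
}
#endif // USE_MAGMA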
#if defined(THC_REAL_IS_HALF) #define _REAL(val) THC_float2half(val) #else #define _REAL(val) (val) #endif static int nn_(from_samples_to_structured)(lua_State *L) { THCState *state = getCudaState(L); // processes inputs if (lua_gettop(L) != 3) return LUA_HANDLE_ERROR_STR(L, "expected 3 arguments: samples, output, mask"); const int samples_index = 1; const int output_index = 2; const int mask_index = 3; if (!lua_istable(L, samples_index)) return LUA_HANDLE_ERROR_STR(L, "expected table for first argument"); THCTensor *output = (THCTensor *)luaT_checkudata(L, output_index, torch_Tensor); if (!THCTensor_(isContiguous)(state, output)) return LUA_HANDLE_ERROR_STR(L, "tensor should be contiguous"); THCudaByteTensor *mask = (THCudaByteTensor *)luaT_checkudata(L, mask_index, "torch.CudaByteTensor"); if (!THCudaByteTensor_isContiguous(state, mask)) return LUA_HANDLE_ERROR_STR(L, "tensor should be contiguous"); // loads all samples from the table long n_samples = lua_objlen(L, samples_index); THCTensor *tensors[n_samples]; lua_pushnil(L); while (lua_next(L, samples_index) != 0) { long index = lua_tointeger(L, -2); THCTensor *tensor = (THCTensor *)luaT_checkudata(L, -1, torch_Tensor); tensors[index-1] = tensor; lua_pop(L, 1); } // processes the samples to get some meta-info that will be used to determine the positioning in // the dense tensor created in the output Sample samples_info[n_samples]; THCTensor* step = THCTensor_(new)(state); // a tensor that contains first step of first tensor THCTensor* _step = THCTensor_(new)(state); // contains first step of other tensors (sizes much match) for (long i = 0; i < n_samples; i++) { THCTensor_(narrow)(state, _step, tensors[i], 0, 0, 1); // 1 [x ...] if (i == 0) THCTensor_(narrow)(state, step, tensors[i], 0, 0, 1); else if (!THCTensor_(isSameSizeAs)(state, step, _step)) return LUA_HANDLE_ERROR_STR(L, "got tensors of different sizes"); samples_info[i].length = THCTensor_(size)(state, tensors[i], 0); samples_info[i].index = i; samples_info[i].assigned_row = -1; } // sorts samples in order of length qsort(samples_info, n_samples, sizeof(Sample), sample_compare); long max_length = samples_info[n_samples-1].length; // creates the two tables with meta-info that will be output lua_newtable(L); const int indexes_index = lua_gettop(L); int local_indexes_index = 0; lua_newtable(L); const int mapped_lengths_index = lua_gettop(L); int local_mapped_lengths_index = 0; long row_index = 0; long length_available = max_length; long count = 0, row_count = 0; long start_index = 0; // while there are unprocessed samples... 
while (count < n_samples) { // flag of whether a sample was added in this iteration int added_sample = 0; // for each sample provided for (long i = n_samples-1; i >= 0; i--) { // checks if the current sample hasn't been assigned yet and fits the space left in the line if (samples_info[i].assigned_row == -1 && samples_info[i].length <= length_available) { long sample_index = samples_info[i].index; // if first sample in the row, creates sub-tables with meta-info for each row if (row_count == 0) { lua_newtable(L); local_indexes_index = lua_gettop(L); lua_newtable(L); local_mapped_lengths_index = lua_gettop(L); } // places the meta-info about the sample (index and length) into the tables row_count++; lua_pushinteger(L, sample_index+1); lua_rawseti(L, local_indexes_index, row_count); lua_pushinteger(L, samples_info[i].length); lua_rawseti(L, local_mapped_lengths_index, row_count); // assigns the sample to this row and updates the row and sample info samples_info[i].assigned_row = row_index; length_available -= samples_info[i].length + 1; start_index += samples_info[i].length + 1; count++; added_sample = 1; } } // if no sample was added, it means no sample available can fit in the space left, so we have to // add another table if (!added_sample) { // saves the current row-based meta-info lua_rawseti(L, mapped_lengths_index, row_index+1); lua_rawseti(L, indexes_index, row_index+1); // and advances rows row_index++; length_available = max_length; start_index = 0; row_count = 0; } } // saves the last row's meta-info lua_rawseti(L, mapped_lengths_index, row_index+1); lua_rawseti(L, indexes_index, row_index+1); // with the info available, resizes the output and mask long n_rows = lua_objlen(L, indexes_index); // output will have size: maxlen x nrows [x ...] long output_dim = THCTensor_(nDimension)(state, step) + 1; THLongStorage* output_size = THLongStorage_newWithSize(output_dim); output_size->data[0] = max_length; output_size->data[1] = n_rows; for (long i=2; i < output_dim; i++) { output_size->data[i] = THCTensor_(size)(state, step, i-1); } THCTensor_(resize)(state, output, output_size, NULL); THCudaByteTensor_resize2d(state, mask, max_length, n_rows); // mask starts filled with ones indicating it's empty THCudaByteTensor_fill(state, mask, 1); THCTensor *row = THCTensor_(new)(state), *section = THCTensor_(new)(state); THCudaByteTensor *mrow = THCudaByteTensor_new(state), *msection = THCudaByteTensor_new(state); // for each row in the output for (long i = 0; i < n_rows; i++) { THCTensor_(select)(state, row, output, 1, i); THCudaByteTensor_select(state, mrow, mask, 1, i); lua_rawgeti(L, indexes_index, i+1); const int local_indexes_index = lua_gettop(L); lua_rawgeti(L, mapped_lengths_index, i+1); const int local_mapped_lengths_index = lua_gettop(L); long n_entries_in_row = lua_objlen(L, -1); long start = 0; // for each sample placed in that row for (long j = 0; j < n_entries_in_row; j++) { lua_rawgeti(L, local_indexes_index, j+1); lua_rawgeti(L, local_mapped_lengths_index, j+1); long index = lua_tointeger(L, -2); long length = lua_tointeger(L, -1); lua_pop(L, 2); // copies the data from the input and fills the mask THCTensor_(narrow)(state, section, row, 0, start, length); THCudaByteTensor_narrow(state, msection, mrow, 0, start, length); THCTensor_(copy)(state, section, tensors[index-1]); THCudaByteTensor_fill(state, msection, 0); start += length + 1; } lua_pop(L, 2); } THCTensor_(free)(state, row); THCTensor_(free)(state, section); THCTensor_(free)(state, step); THCTensor_(free)(state, _step); 
THLongStorage_free(output_size); THCudaByteTensor_free(state, mrow); THCudaByteTensor_free(state, msection); return 2; } // converts the dense tensor `input` into a list of samples `output`, each with its correct length. static int nn_(from_structured_to_samples)(lua_State *L) { THCState *state = getCudaState(L); // processes inputs if (lua_gettop(L) != 3) return LUA_HANDLE_ERROR_STR(L, "expected 3 arguments: indexing, lengths, input"); const int indexes_index = 1; const int mapped_lengths_index = 2; const int input_index = 3; if (!lua_istable(L, indexes_index)) return LUA_HANDLE_ERROR_STR(L, "expected table for first argument"); if (!lua_istable(L, mapped_lengths_index)) return LUA_HANDLE_ERROR_STR(L, "expected table for second argument"); THCTensor *input = (THCTensor *)luaT_checkudata(L, input_index, torch_Tensor); if (!THCTensor_(isContiguous)(state, input)) return LUA_HANDLE_ERROR_STR(L, "tensor should be contiguous"); lua_newtable(L); const int output_index = lua_gettop(L); long n_rows = lua_objlen(L, indexes_index); THCTensor *row = THCTensor_(new)(state); // for each row in the input for (long i = 0; i < n_rows; i++) { THCTensor_(select)(state, row, input, 1, i); lua_rawgeti(L, indexes_index, i+1); const int local_indexes_index = lua_gettop(L); lua_rawgeti(L, mapped_lengths_index, i+1); const int local_mapped_lengths_index = lua_gettop(L); long n_entries_in_row = lua_objlen(L, -1); long start = 0; // for each sample placed in that row for (long j = 0; j < n_entries_in_row; j++) { lua_rawgeti(L, local_indexes_index, j+1); lua_rawgeti(L, local_mapped_lengths_index, j+1); long index = lua_tointeger(L, -2); long length = lua_tointeger(L, -1); lua_pop(L, 2); // gets the sub-tensor of the row that corresponds to the sample and places in the table THCTensor *dest = THCTensor_(new)(state); THCTensor_(narrow)(state, dest, row, 0, start, length); start += length + 1; luaT_pushudata(L, dest, torch_Tensor); lua_rawseti(L, output_index, index); } lua_pop(L, 2); } THCTensor_(free)(state, row); return 1; } static int nn_(from_structured_to_final)(lua_State *L) { THCState *state = getCudaState(L); // processes inputs if (lua_gettop(L) != 4) return LUA_HANDLE_ERROR_STR(L, "expected 4 arguments: indexing, lengths, input, output"); const int indexes_index = 1; const int mapped_lengths_index = 2; const int input_index = 3; const int output_index = 4; if (!lua_istable(L, indexes_index)) return LUA_HANDLE_ERROR_STR(L, "expected table for first argument"); if (!lua_istable(L, mapped_lengths_index)) return LUA_HANDLE_ERROR_STR(L, "expected table for second argument"); THCTensor *input = (THCTensor *)luaT_checkudata(L, input_index, torch_Tensor); if (!THCTensor_(isContiguous)(state, input)) return LUA_HANDLE_ERROR_STR(L, "tensor should be contiguous"); THCTensor *output = (THCTensor *)luaT_checkudata(L, output_index, torch_Tensor); if (!THCTensor_(isContiguous)(state, output)) return LUA_HANDLE_ERROR_STR(L, "tensor should be contiguous"); long n_samples = get_n_samples(L, mapped_lengths_index); long output_dim = THCTensor_(nDimension)(state, input) - 1; THLongStorage* output_size = THLongStorage_newWithSize(output_dim); // n_samples [x ...] 
output_size->data[0] = n_samples; for (long i=1;i < output_dim; i++){ output_size->data[i] = THCTensor_(size)(state, input, i+1); } THCTensor_(resize)(state, output, output_size, NULL); long n_rows = lua_objlen(L, indexes_index); THCTensor *row = THCTensor_(new)(state), *section = THCTensor_(new)(state); THCTensor *output_section = THCTensor_(new)(state); // for each row in the output for (long i = 0; i < n_rows; i++) { THCTensor_(select)(state, row, input, 1, i); lua_rawgeti(L, indexes_index, i+1); const int local_indexes_index = lua_gettop(L); lua_rawgeti(L, mapped_lengths_index, i+1); const int local_mapped_lengths_index = lua_gettop(L); long n_entries_in_row = lua_objlen(L, -1); long start = 0; // for each sample placed in that row for (long j = 0; j < n_entries_in_row; j++) { lua_rawgeti(L, local_indexes_index, j+1); lua_rawgeti(L, local_mapped_lengths_index, j+1); long index = lua_tointeger(L, -2); long length = lua_tointeger(L, -1); lua_pop(L, 2); // gets the sub-tensor of the row that corresponds to the sample and places in the table THCTensor_(select)(state, section, row, 0, start + length-1); THCTensor_(select)(state, output_section, output, 0, index-1); THCTensor_(copy)(state, output_section, section); start += length + 1; } lua_pop(L, 2); } THCTensor_(free)(state, row); THCTensor_(free)(state, section); THCTensor_(free)(state, output_section); THLongStorage_free(output_size); return 0; } static int nn_(from_final_to_structured)(lua_State *L) { THCState *state = getCudaState(L); if (lua_gettop(L) != 4) return LUA_HANDLE_ERROR_STR(L, "expected 4 arguments: indexing, lengths, input, output"); const int indexes_index = 1; const int mapped_lengths_index = 2; const int input_index = 3; const int output_index = 4; if (!lua_istable(L, indexes_index)) return LUA_HANDLE_ERROR_STR(L, "expected table for first argument"); if (!lua_istable(L, mapped_lengths_index)) return LUA_HANDLE_ERROR_STR(L, "expected table for second argument"); THCTensor *input = (THCTensor *)luaT_checkudata(L, input_index, torch_Tensor); if (!THCTensor_(isContiguous)(state, input)) return LUA_HANDLE_ERROR_STR(L, "tensor should be contiguous"); THCTensor *output = (THCTensor *)luaT_checkudata(L, output_index, torch_Tensor); if (!THCTensor_(isContiguous)(state, output)) return LUA_HANDLE_ERROR_STR(L, "tensor should be contiguous"); long max_length = get_max_length(L, mapped_lengths_index); long n_rows = lua_objlen(L, mapped_lengths_index); long output_dim = THCTensor_(nDimension)(state, input) + 1; THLongStorage* output_size = THLongStorage_newWithSize(output_dim); // max_length x n_rows [x ...] 
output_size->data[0] = max_length; output_size->data[1] = n_rows; for (long i=2;i < output_dim; i++){ output_size->data[i] = THCTensor_(size)(state, input, i-1); } THCTensor_(resize)(state, output, output_size, NULL); THCTensor_(fill)(state, output, _REAL(0)); THCTensor *row = THCTensor_(new)(state), *section = THCTensor_(new)(state); THCTensor *input_section = THCTensor_(new)(state); // for each row in the input for (long i = 0; i < n_rows; i++) { THCTensor_(select)(state, row, output, 1, i); lua_rawgeti(L, indexes_index, i+1); const int local_indexes_index = lua_gettop(L); lua_rawgeti(L, mapped_lengths_index, i+1); const int local_mapped_lengths_index = lua_gettop(L); long n_entries_in_row = lua_objlen(L, -1); long start = 0; // for each sample placed in that row for (long j = 0; j < n_entries_in_row; j++) { lua_rawgeti(L, local_indexes_index, j+1); lua_rawgeti(L, local_mapped_lengths_index, j+1); long index = lua_tointeger(L, -2); long length = lua_tointeger(L, -1); lua_pop(L, 2); // copies the data from the input THCTensor_(select)(state, section, row, 0, start + length-1); THCTensor_(select)(state, input_section, input, 0, index-1); THCTensor_(copy)(state, section, input_section); start += length + 1; } lua_pop(L, 2); } THCTensor_(free)(state, row); THCTensor_(free)(state, section); THCTensor_(free)(state, input_section); THLongStorage_free(output_size); return 0; } static const struct luaL_Reg nn_(VariableLength__) [] = { {"VariableLength_FromSamples", nn_(from_samples_to_structured)}, {"VariableLength_ToSamples", nn_(from_structured_to_samples)}, {"VariableLength_ToFinal", nn_(from_structured_to_final)}, {"VariableLength_FromFinal", nn_(from_final_to_structured)}, {NULL, NULL} }; static void nn_(VariableLength_init)(lua_State *L) { luaT_pushmetatable(L, torch_Tensor); luaT_registeratname(L, nn_(VariableLength__), "nn"); lua_pop(L,1); } #undef _REAL #endif
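// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the bindings above): the heavy lifting in
// from_samples_to_structured is a greedy packing heuristic -- sort samples by
// length, then repeatedly walk them from longest to shortest, placing every
// still-unassigned sample that fits into the space left in the current row,
// with a one-step gap after each sample (hence the "+ 1" in the offset
// updates). The standalone program below shows just that heuristic on plain
// lengths, independent of the Lua/THC plumbing; all names are illustrative
// and it assumes at least one sample.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cstdio>
#include <vector>

struct Placement { long sample; long row; long offset; };

static std::vector<Placement> pack_by_length(const std::vector<long> & lengths)
{
  // Sort sample indices by length, shortest first (mirrors qsort + sample_compare).
  std::vector<long> order(lengths.size());
  for (long i = 0; i < (long)order.size(); ++i) order[i] = i;
  std::sort(order.begin(), order.end(),
            [&](long a, long b) { return lengths[a] < lengths[b]; });

  const long max_length = lengths[order.back()];
  std::vector<bool> assigned(lengths.size(), false);
  std::vector<Placement> out;

  long row = 0, available = max_length, offset = 0, placed = 0;
  while (placed < (long)lengths.size()) {
    bool added = false;
    for (long k = (long)order.size() - 1; k >= 0; --k) {   // longest first
      const long s = order[k];
      if (!assigned[s] && lengths[s] <= available) {
        out.push_back({s, row, offset});
        assigned[s] = true;
        available -= lengths[s] + 1;   // one-step gap after each sample
        offset    += lengths[s] + 1;
        ++placed;
        added = true;
      }
    }
    if (!added) { ++row; available = max_length; offset = 0; }   // open a new row
  }
  return out;
}

int main()
{
  std::vector<long> lengths = {5, 2, 7, 3, 1};
  for (const Placement & p : pack_by_length(lengths))
    printf("sample %ld -> row %ld, offset %ld\n", p.sample, p.row, p.offset);
  return 0;
}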
* \test Tests the correct handling of self-assignments. **/ // // *** System // #include <iostream> // // *** ViennaCL // //#define VIENNACL_DEBUG_ALL #include "viennacl/scalar.hpp" #include "viennacl/vector.hpp" #include "viennacl/vector_proxy.hpp" #include "viennacl/matrix.hpp" #include "viennacl/matrix_proxy.hpp" #include "viennacl/compressed_matrix.hpp" #include "viennacl/compressed_compressed_matrix.hpp" #include "viennacl/coordinate_matrix.hpp" #include "viennacl/ell_matrix.hpp" #include "viennacl/sliced_ell_matrix.hpp" #include "viennacl/hyb_matrix.hpp" #include "viennacl/linalg/prod.hpp" #include "viennacl/linalg/norm_2.hpp" #include "viennacl/linalg/ilu.hpp" #include "viennacl/linalg/detail/ilu/common.hpp" #include "viennacl/io/matrix_market.hpp" #include "viennacl/tools/random.hpp" // // ------------------------------------------------------------- // template<typename NumericT> NumericT diff(NumericT const & s1, viennacl::scalar<NumericT> const & s2) { if (std::fabs(s1 - s2) > 0) return (s1 - s2) / std::max(std::fabs(s1), std::fabs(s2)); return 0; } template<typename NumericT> NumericT diff(std::vector<NumericT> const & v1, viennacl::vector<NumericT> const & v2) { std::vector<NumericT> v2_cpu(v2.size()); viennacl::backend::finish(); viennacl::copy(v2.begin(), v2.end(), v2_cpu.begin()); for (std::size_t i=0;i<v1.size(); ++i) { if ( std::max( std::fabs(v2_cpu[i]), std::fabs(v1[i]) ) > 0 ) v2_cpu[i] = std::fabs(v2_cpu[i] - v1[i]) / std::max( std::fabs(v2_cpu[i]), std::fabs(v1[i]) ); else v2_cpu[i] = 0.0; if (v2_cpu[i] > 0.0001) { //std::cout << "Neighbor: " << i-1 << ": " << v1[i-1] << " vs. " << v2_cpu[i-1] << std::endl; std::cout << "Error at entry " << i << ": " << v1[i] << " vs. " << v2[i] << std::endl; //std::cout << "Neighbor: " << i+1 << ": " << v1[i+1] << " vs. 
" << v2_cpu[i+1] << std::endl; exit(EXIT_FAILURE); } } NumericT inf_norm = 0; for (std::size_t i=0;i<v2_cpu.size(); ++i) inf_norm = std::max<NumericT>(inf_norm, std::fabs(v2_cpu[i])); return inf_norm; } template<typename NumericT> NumericT diff(std::vector<std::vector<NumericT> > const & A1, viennacl::matrix<NumericT> const & A2) { std::vector<NumericT> host_values(A2.internal_size()); for (std::size_t i=0; i<A2.size1(); ++i) for (std::size_t j=0; j<A2.size2(); ++j) host_values[i*A2.internal_size2() + j] = A1[i][j]; std::vector<NumericT> device_values(A2.internal_size()); viennacl::fast_copy(A2, &device_values[0]); viennacl::vector<NumericT> vcl_device_values(A2.internal_size()); // workaround to avoid code duplication viennacl::copy(device_values, vcl_device_values); return diff(host_values, vcl_device_values); } template<typename HostContainerT, typename DeviceContainerT, typename NumericT> void check(HostContainerT const & host_container, DeviceContainerT const & device_container, std::string current_stage, NumericT epsilon) { current_stage.resize(25, ' '); std::cout << "Testing operation: " << current_stage; NumericT rel_error = std::fabs(diff(host_container, device_container)); if (rel_error > epsilon) { std::cout << std::endl; std::cout << "# Error at operation: " << current_stage << std::endl; std::cout << " diff: " << rel_error << std::endl; exit(EXIT_FAILURE); } std::cout << "PASS" << std::endl; } struct op_assign { template<typename LHS, typename RHS> static void apply(LHS & lhs, RHS const & rhs) { lhs = rhs; } static std::string str() { return "="; } }; struct op_plus_assign { template<typename LHS, typename RHS> static void apply(LHS & lhs, RHS const & rhs) { lhs += rhs; } static std::string str() { return "+="; } }; struct op_minus_assign { template<typename LHS, typename RHS> static void apply(LHS & lhs, RHS const & rhs) { lhs -= rhs; } static std::string str() { return "-="; } }; // compute C = A * B on host and device and compare results. // Note that the reference uses three distinct matrices A, B, C, // whereas C on the device is the same as either A, B, or both. template<typename OpT, typename NumericT, typename HostMatrixT, typename DeviceMatrixT> void test_gemm(NumericT epsilon, HostMatrixT & host_A, HostMatrixT & host_B, HostMatrixT & host_C, DeviceMatrixT & device_A, std::string name_A, DeviceMatrixT & device_B, std::string name_B, DeviceMatrixT & device_C, bool copy_from_A, bool trans_first, bool trans_second) { viennacl::tools::uniform_random_numbers<NumericT> randomNumber; for (std::size_t i = 0; i<host_A.size(); ++i) for (std::size_t j = 0; j<host_A[i].size(); ++j) { host_A[i][j] = randomNumber(); host_B[i][j] = randomNumber(); } viennacl::copy(host_A, device_A); viennacl::copy(host_B, device_B); if (copy_from_A) host_C = host_A; else host_C = host_B; for (std::size_t i = 0; i<host_A.size(); ++i) for (std::size_t j = 0; j<host_A[i].size(); ++j) { NumericT tmp = 0; for (std::size_t k = 0; k<host_A[i].size(); ++k) tmp += (trans_first ? host_A[k][i] : host_A[i][k]) * (trans_second ? 
host_B[j][k] : host_B[k][j]); OpT::apply(host_C[i][j], tmp); } if (trans_first && trans_second) { OpT::apply(device_C, viennacl::linalg::prod(trans(device_A), trans(device_B))); check(host_C, device_C, std::string("A ") + OpT::str() + std::string(" ") + name_A + std::string("^T*") + name_B + std::string("^T"), epsilon); } else if (trans_first && !trans_second) { OpT::apply(device_C, viennacl::linalg::prod(trans(device_A), device_B)); check(host_C, device_C, std::string("A ") + OpT::str() + std::string(" ") + name_A + std::string("^T*") + name_B + std::string(""), epsilon); } else if (!trans_first && trans_second) { OpT::apply(device_C, viennacl::linalg::prod(device_A, trans(device_B))); check(host_C, device_C, std::string("A ") + OpT::str() + std::string(" ") + name_A + std::string("*") + name_B + std::string("^T"), epsilon); } else { OpT::apply(device_C, viennacl::linalg::prod(device_A, device_B)); check(host_C, device_C, std::string("A ") + OpT::str() + std::string(" ") + name_A + std::string("*") + name_B + std::string(""), epsilon); } } // dispatch routine for all combinations of transpositions: // C = A * B, C = A * B^T, C = A^T * B, C = A^T * B^T template<typename OpT, typename NumericT, typename HostMatrixT, typename DeviceMatrixT> void test_gemm(NumericT epsilon, HostMatrixT & host_A, HostMatrixT & host_B, HostMatrixT & host_C, DeviceMatrixT & device_A, std::string name_A, DeviceMatrixT & device_B, std::string name_B, DeviceMatrixT & device_C, bool copy_from_A) { test_gemm<OpT>(epsilon, host_A, host_B, host_C, device_A, name_A, device_B, name_B, device_C, copy_from_A, false, false); test_gemm<OpT>(epsilon, host_A, host_B, host_C, device_A, name_A, device_B, name_B, device_C, copy_from_A, false, true); test_gemm<OpT>(epsilon, host_A, host_B, host_C, device_A, name_A, device_B, name_B, device_C, copy_from_A, true, false); test_gemm<OpT>(epsilon, host_A, host_B, host_C, device_A, name_A, device_B, name_B, device_C, copy_from_A, true, true); } // The actual testing routine. // Sets of vectors and matrices using STL types and uses these for reference calculations. // ViennaCL operations are carried out as usual and then compared against the reference. 
template<typename NumericT> int test(NumericT epsilon) { std::size_t N = 142; // should be larger than 128 in order to avoid false negatives due to blocking viennacl::tools::uniform_random_numbers<NumericT> randomNumber; // // Vector setup and test: // std::vector<NumericT> std_x(N); std::vector<NumericT> std_y(N); std::vector<NumericT> std_z(N); for (std::size_t i=0; i<std_x.size(); ++i) std_x[i] = NumericT(i + 1); for (std::size_t i=0; i<std_y.size(); ++i) std_y[i] = NumericT(i*i + 1); for (std::size_t i=0; i<std_z.size(); ++i) std_z[i] = NumericT(2 * i + 1); viennacl::vector<NumericT> vcl_x; viennacl::vector<NumericT> vcl_y; viennacl::vector<NumericT> vcl_z; viennacl::copy(std_x, vcl_x); viennacl::copy(std_y, vcl_y); viennacl::copy(std_z, vcl_z); // This shouldn't do anything bad: vcl_x = vcl_x; check(std_x, vcl_x, "x = x", epsilon); // This should work, even though we are dealing with the same buffer: std_x[0] = std_x[2]; std_x[1] = std_x[3]; viennacl::project(vcl_x, viennacl::range(0, 2)) = viennacl::project(vcl_x, viennacl::range(2, 4)); check(std_x, vcl_x, "x = x (range)", epsilon); // // Matrix-Vector // std::vector<std::vector<NumericT> > std_A(N, std::vector<NumericT>(N, NumericT(1))); std::vector<std::vector<NumericT> > std_B(N, std::vector<NumericT>(N, NumericT(2))); std::vector<std::vector<NumericT> > std_C(N, std::vector<NumericT>(N, NumericT(3))); viennacl::matrix<NumericT> vcl_A; viennacl::matrix<NumericT> vcl_B; viennacl::matrix<NumericT> vcl_C; viennacl::copy(std_A, vcl_A); viennacl::copy(std_B, vcl_B); viennacl::copy(std_C, vcl_C); // This shouldn't do anything bad: vcl_A = vcl_A; check(std_A, vcl_A, "A = A", epsilon); // This should work, even though we are dealing with the same buffer: std_A[0][0] = std_A[0][2]; std_A[0][1] = std_A[0][3]; viennacl::project(vcl_A, viennacl::range(0, 1), viennacl::range(0, 2)) = viennacl::project(vcl_A, viennacl::range(0, 1), viennacl::range(2, 4)); check(std_A, vcl_A, "A = A (range)", epsilon); // check x <- A * x; for (std::size_t i = 0; i<std_y.size(); ++i) { NumericT val = 0; for (std::size_t j = 0; j<std_x.size(); ++j) val += std_A[i][j] * std_x[j]; std_y[i] = val; } vcl_x = viennacl::linalg::prod(vcl_A, vcl_x); check(std_y, vcl_x, "x = A*x", epsilon); typedef unsigned int KeyType; std::vector< std::map<KeyType, NumericT> > std_Asparse(N); for (std::size_t i=0; i<std_Asparse.size(); ++i) { if (i > 0) std_Asparse[i][KeyType(i-1)] = randomNumber(); std_Asparse[i][KeyType(i)] = NumericT(1) + randomNumber(); if (i < std_Asparse.size() - 1) std_Asparse[i][KeyType(i+1)] = randomNumber(); } // Sparse viennacl::compressed_matrix<NumericT> vcl_A_csr; viennacl::coordinate_matrix<NumericT> vcl_A_coo; viennacl::ell_matrix<NumericT> vcl_A_ell; viennacl::sliced_ell_matrix<NumericT> vcl_A_sell; viennacl::hyb_matrix<NumericT> vcl_A_hyb; viennacl::copy(std_Asparse, vcl_A_csr); viennacl::copy(std_Asparse, vcl_A_coo); viennacl::copy(std_Asparse, vcl_A_ell); viennacl::copy(std_Asparse, vcl_A_sell); viennacl::copy(std_Asparse, vcl_A_hyb); for (std::size_t i=0; i<std_Asparse.size(); ++i) { NumericT val = 0; for (typename std::map<unsigned int, NumericT>::const_iterator it = std_Asparse[i].begin(); it != std_Asparse[i].end(); ++it) val += it->second * std_x[it->first]; std_y[i] = val; } viennacl::copy(std_x, vcl_x); vcl_x = viennacl::linalg::prod(vcl_A_csr, vcl_x); check(std_y, vcl_x, "x = A*x (sparse, csr)", epsilon); viennacl::copy(std_x, vcl_x); vcl_x = viennacl::linalg::prod(vcl_A_coo, vcl_x); check(std_y, vcl_x, "x = A*x (sparse, coo)", epsilon); 
viennacl::copy(std_x, vcl_x); vcl_x = viennacl::linalg::prod(vcl_A_ell, vcl_x); check(std_y, vcl_x, "x = A*x (sparse, ell)", epsilon); viennacl::copy(std_x, vcl_x); vcl_x = viennacl::linalg::prod(vcl_A_sell, vcl_x); check(std_y, vcl_x, "x = A*x (sparse, sell)", epsilon); viennacl::copy(std_x, vcl_x); vcl_x = viennacl::linalg::prod(vcl_A_hyb, vcl_x); check(std_y, vcl_x, "x = A*x (sparse, hyb)", epsilon); std::cout << std::endl; // // Matrix-Matrix (dense times dense): // test_gemm<op_assign>(epsilon, std_A, std_B, std_C, vcl_A, "A", vcl_B, "B", vcl_A, true); test_gemm<op_assign>(epsilon, std_B, std_A, std_C, vcl_B, "B", vcl_A, "A", vcl_A, false); test_gemm<op_assign>(epsilon, std_A, std_A, std_C, vcl_A, "A", vcl_A, "A", vcl_A, true); std::cout << std::endl; test_gemm<op_plus_assign>(epsilon, std_A, std_B, std_C, vcl_A, "A", vcl_B, "B", vcl_A, true); test_gemm<op_plus_assign>(epsilon, std_B, std_A, std_C, vcl_B, "B", vcl_A, "A", vcl_A, false); test_gemm<op_plus_assign>(epsilon, std_A, std_A, std_C, vcl_A, "A", vcl_A, "A", vcl_A, true); std::cout << std::endl; test_gemm<op_minus_assign>(epsilon, std_A, std_B, std_C, vcl_A, "A", vcl_B, "B", vcl_A, true); test_gemm<op_minus_assign>(epsilon, std_B, std_A, std_C, vcl_B, "B", vcl_A, "A", vcl_A, false); test_gemm<op_minus_assign>(epsilon, std_A, std_A, std_C, vcl_A, "A", vcl_A, "A", vcl_A, true); std::cout << std::endl; // // Matrix-Matrix (sparse times dense) // // A = sparse * A viennacl::copy(std_A, vcl_A); for (std::size_t i = 0; i<std_A.size(); ++i) for (std::size_t j = 0; j<std_A[i].size(); ++j) { NumericT tmp = 0; for (std::size_t k = 0; k<std_A[i].size(); ++k) tmp += std_Asparse[i][KeyType(k)] * std_A[k][j]; std_C[i][j] = tmp; } viennacl::copy(std_A, vcl_A); vcl_A = viennacl::linalg::prod(vcl_A_csr, vcl_A); check(std_C, vcl_A, "A = csr*A", epsilon); viennacl::copy(std_A, vcl_A); vcl_A = viennacl::linalg::prod(vcl_A_coo, vcl_A); check(std_C, vcl_A, "A = coo*A", epsilon); viennacl::copy(std_A, vcl_A); vcl_A = viennacl::linalg::prod(vcl_A_ell, vcl_A); check(std_C, vcl_A, "A = ell*A", epsilon); viennacl::copy(std_A, vcl_A); //vcl_A = viennacl::linalg::prod(vcl_A_sell, vcl_A); //check(std_C, vcl_A, "A = sell*A", epsilon); viennacl::copy(std_A, vcl_A); vcl_A = viennacl::linalg::prod(vcl_A_hyb, vcl_A); check(std_C, vcl_A, "A = hyb*A", epsilon); // A = sparse * A^T viennacl::copy(std_A, vcl_A); for (std::size_t i = 0; i<std_A.size(); ++i) for (std::size_t j = 0; j<std_A[i].size(); ++j) { NumericT tmp = 0; for (std::size_t k = 0; k<std_A[i].size(); ++k) tmp += std_Asparse[i][KeyType(k)] * std_A[j][k]; std_C[i][j] = tmp; } viennacl::copy(std_A, vcl_A); vcl_A = viennacl::linalg::prod(vcl_A_csr, trans(vcl_A)); check(std_C, vcl_A, "A = csr*A^T", epsilon); viennacl::copy(std_A, vcl_A); vcl_A = viennacl::linalg::prod(vcl_A_coo, trans(vcl_A)); check(std_C, vcl_A, "A = coo*A^T", epsilon); viennacl::copy(std_A, vcl_A); vcl_A = viennacl::linalg::prod(vcl_A_ell, trans(vcl_A)); check(std_C, vcl_A, "A = ell*A^T", epsilon); viennacl::copy(std_A, vcl_A); //vcl_A = viennacl::linalg::prod(vcl_A_sell, trans(vcl_A)); //check(std_C, vcl_A, "A = sell*A^T", epsilon); viennacl::copy(std_A, vcl_A); vcl_A = viennacl::linalg::prod(vcl_A_hyb, trans(vcl_A)); check(std_C, vcl_A, "A = hyb*A^T", epsilon); return EXIT_SUCCESS; } // // ------------------------------------------------------------- // int main() { std::cout << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << "----------------------------------------------" << 
std::endl; std::cout << "## Test :: Self-Assignment" << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << std::endl; int retval = EXIT_SUCCESS; std::cout << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << std::endl; { typedef float NumericT; NumericT epsilon = static_cast<NumericT>(1E-4); std::cout << "# Testing setup:" << std::endl; std::cout << " eps: " << epsilon << std::endl; std::cout << " numeric: float" << std::endl; retval = test<NumericT>(epsilon); if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl; else return retval; } std::cout << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << std::endl; // Note: No need for double precision check, self-assignments are handled in a numeric-type agnostic manner. std::cout << std::endl; std::cout << "------- Test completed --------" << std::endl; std::cout << std::endl; return retval; }
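// Illustrative host-side sketch (not part of the ViennaCL test above): the
// "x = x (range)" case assigns an overlapping sub-range of a vector to itself,
// which is why the reference update is done element-wise first. The helper
// below is a hypothetical plain-C++ analogue that stays safe for any overlap
// by copying through a temporary.
#include <vector>
#include <algorithm>
#include <cstddef>
#include <cassert>

// Copy [src_begin, src_begin + len) onto [dst_begin, dst_begin + len) within
// the same vector; the temporary makes the copy independent of overlap.
template <typename T>
void self_assign_range(std::vector<T> &v,
                       std::size_t dst_begin, std::size_t src_begin, std::size_t len)
{
  std::vector<T> tmp(v.begin() + src_begin, v.begin() + src_begin + len);
  std::copy(tmp.begin(), tmp.end(), v.begin() + dst_begin);
}

inline int self_assign_range_demo()
{
  std::vector<double> x = {1, 2, 3, 4};
  self_assign_range(x, 0, 2, 2);   // mirrors project(x, range(0,2)) = project(x, range(2,4))
  assert(x[0] == 3 && x[1] == 4);
  return 0;
}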
See COPYRIGHT.TXT and LICENSE.TXT for copyright and license information ----------------------------------------------------------------------- */ /***************** * rtk #includes * *****************/ #include "rtkCudaUtilities.hcu" #include "rtkConfiguration.h" #include "rtkCudaWarpBackProjectionImageFilter.hcu" /***************** * C #includes * *****************/ #include <cstdio> #include <cstdlib> #include <cstring> #include <cmath> /***************** * CUDA #includes * *****************/ #include <cuda.h> #include <cublas_v2.h> #include <cuda_runtime.h> // T E X T U R E S //////////////////////////////////////////////////////// texture<float, cudaTextureType2DLayered> tex_proj; texture<float, 3, cudaReadModeElementType> tex_proj_3D; texture<float, 3, cudaReadModeElementType> tex_xdvf; texture<float, 3, cudaReadModeElementType> tex_ydvf; texture<float, 3, cudaReadModeElementType> tex_zdvf; /////////////////////////////////////////////////////////////////////////// // CONSTANTS ////////////////////////////////////////////////////////////// __constant__ float c_matrices[SLAB_SIZE * 12]; // Can process stacks of at most SLAB_SIZE projections __constant__ float c_volIndexToProjPP[SLAB_SIZE * 12]; __constant__ float c_projPPToProjIndex[9]; __constant__ int3 c_projSize; __constant__ int3 c_volSize; __constant__ float c_IndexInputToIndexDVFMatrix[12]; __constant__ float c_PPInputToIndexInputMatrix[12]; __constant__ float c_IndexInputToPPInputMatrix[12]; //////////////////////////////////////////////////////////////////////////// //_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_ // K E R N E L S -_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_ //_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_( S T A R T )_ //_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_ __global__ void kernel_warp_back_project(float * dev_vol_in, float * dev_vol_out, unsigned int Blocks_Y) { // CUDA 2.0 does not allow for a 3D grid, which severely // limits the manipulation of large 3D arrays of data. The // following code is a hack to bypass this implementation // limitation. 
unsigned int blockIdx_z = blockIdx.y / Blocks_Y; unsigned int blockIdx_y = blockIdx.y - __umul24(blockIdx_z, Blocks_Y); unsigned int i = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; unsigned int j = __umul24(blockIdx_y, blockDim.y) + threadIdx.y; unsigned int k = __umul24(blockIdx_z, blockDim.z) + threadIdx.z; if (i >= c_volSize.x || j >= c_volSize.y || k >= c_volSize.z) { return; } // Index row major into the volume long int vol_idx = i + (j + k * c_volSize.y) * (c_volSize.x); float3 IndexInDVF, Displacement, PP, IndexInInput, ip; float voxel_data = 0; for (unsigned int proj = 0; proj < c_projSize.z; proj++) { // Compute the index in the DVF IndexInDVF = matrix_multiply(make_float3(i, j, k), c_IndexInputToIndexDVFMatrix); // Get each component of the displacement vector by interpolation in the DVF Displacement.x = tex3D(tex_xdvf, IndexInDVF.x + 0.5f, IndexInDVF.y + 0.5f, IndexInDVF.z + 0.5f); Displacement.y = tex3D(tex_ydvf, IndexInDVF.x + 0.5f, IndexInDVF.y + 0.5f, IndexInDVF.z + 0.5f); Displacement.z = tex3D(tex_zdvf, IndexInDVF.x + 0.5f, IndexInDVF.y + 0.5f, IndexInDVF.z + 0.5f); // Compute the physical point in input + the displacement vector PP = matrix_multiply(make_float3(i, j, k), c_IndexInputToPPInputMatrix) + Displacement; // Convert it to a continuous index IndexInInput = matrix_multiply(PP, c_PPInputToIndexInputMatrix); // Project the voxel onto the detector to find out which value to add to it ip = matrix_multiply(IndexInInput, &(c_matrices[12 * proj])); ; // Change coordinate systems ip.z = 1 / ip.z; ip.x = ip.x * ip.z; ip.y = ip.y * ip.z; // Get texture point, clip left to GPU voxel_data += tex3D(tex_proj_3D, ip.x, ip.y, proj + 0.5); } // Place it into the volume dev_vol_out[vol_idx] = dev_vol_in[vol_idx] + voxel_data; } __global__ void kernel_warp_back_project_cylindrical_detector(float * dev_vol_in, float * dev_vol_out, unsigned int Blocks_Y, float radius) { // CUDA 2.0 does not allow for a 3D grid, which severely // limits the manipulation of large 3D arrays of data. The // following code is a hack to bypass this implementation // limitation. 
unsigned int blockIdx_z = blockIdx.y / Blocks_Y; unsigned int blockIdx_y = blockIdx.y - __umul24(blockIdx_z, Blocks_Y); unsigned int i = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; unsigned int j = __umul24(blockIdx_y, blockDim.y) + threadIdx.y; unsigned int k = __umul24(blockIdx_z, blockDim.z) + threadIdx.z; if (i >= c_volSize.x || j >= c_volSize.y || k >= c_volSize.z) { return; } // Index row major into the volume long int vol_idx = i + (j + k * c_volSize.y) * (c_volSize.x); float3 IndexInDVF, Displacement, PP, IndexInInput, ip, pp; float voxel_data = 0; for (unsigned int proj = 0; proj < c_projSize.z; proj++) { // Compute the index in the DVF IndexInDVF = matrix_multiply(make_float3(i, j, k), c_IndexInputToIndexDVFMatrix); // Get each component of the displacement vector by interpolation in the DVF Displacement.x = tex3D(tex_xdvf, IndexInDVF.x + 0.5f, IndexInDVF.y + 0.5f, IndexInDVF.z + 0.5f); Displacement.y = tex3D(tex_ydvf, IndexInDVF.x + 0.5f, IndexInDVF.y + 0.5f, IndexInDVF.z + 0.5f); Displacement.z = tex3D(tex_zdvf, IndexInDVF.x + 0.5f, IndexInDVF.y + 0.5f, IndexInDVF.z + 0.5f); // Compute the physical point in input + the displacement vector PP = matrix_multiply(make_float3(i, j, k), c_IndexInputToPPInputMatrix) + Displacement; // Convert it to a continuous index IndexInInput = matrix_multiply(PP, c_PPInputToIndexInputMatrix); // Project the voxel onto the detector to find out which value to add to it pp = matrix_multiply(IndexInInput, &(c_volIndexToProjPP[12 * proj])); // Change coordinate systems pp.z = 1 / pp.z; pp.x = pp.x * pp.z; pp.y = pp.y * pp.z; // Apply correction for cylindrical detector const float u = pp.x; pp.x = radius * atan2(u, radius); pp.y = pp.y * radius / sqrt(radius * radius + u * u); // Get projection index ip.x = c_projPPToProjIndex[0] * pp.x + c_projPPToProjIndex[1] * pp.y + c_projPPToProjIndex[2]; ip.y = c_projPPToProjIndex[3] * pp.x + c_projPPToProjIndex[4] * pp.y + c_projPPToProjIndex[5]; // Get texture point, clip left to GPU voxel_data += tex3D(tex_proj_3D, ip.x, ip.y, proj + 0.5); } // Place it into the volume dev_vol_out[vol_idx] = dev_vol_in[vol_idx] + voxel_data; } __global__ void kernel_warp_back_project_3Dgrid(float * dev_vol_in, float * dev_vol_out) { unsigned int i = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; unsigned int j = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; unsigned int k = __umul24(blockIdx.z, blockDim.z) + threadIdx.z; if (i >= c_volSize.x || j >= c_volSize.y || k >= c_volSize.z) { return; } // Index row major into the volume long int vol_idx = i + (j + k * c_volSize.y) * (c_volSize.x); float3 IndexInDVF, Displacement, PP, IndexInInput, ip; float voxel_data = 0; for (unsigned int proj = 0; proj < c_projSize.z; proj++) { // Compute the index in the DVF IndexInDVF = matrix_multiply(make_float3(i, j, k), c_IndexInputToIndexDVFMatrix); // Get each component of the displacement vector by interpolation in the DVF Displacement.x = tex3D(tex_xdvf, IndexInDVF.x + 0.5f, IndexInDVF.y + 0.5f, IndexInDVF.z + 0.5f); Displacement.y = tex3D(tex_ydvf, IndexInDVF.x + 0.5f, IndexInDVF.y + 0.5f, IndexInDVF.z + 0.5f); Displacement.z = tex3D(tex_zdvf, IndexInDVF.x + 0.5f, IndexInDVF.y + 0.5f, IndexInDVF.z + 0.5f); // Compute the physical point in input + the displacement vector PP = matrix_multiply(make_float3(i, j, k), c_IndexInputToPPInputMatrix) + Displacement; // Convert it to a continuous index IndexInInput = matrix_multiply(PP, c_PPInputToIndexInputMatrix); // Project the voxel onto the detector to find out which value to add to it 
ip = matrix_multiply(IndexInInput, &(c_matrices[12 * proj])); ; // Change coordinate systems ip.z = 1 / ip.z; ip.x = ip.x * ip.z; ip.y = ip.y * ip.z; // Get texture point, clip left to GPU voxel_data += tex2DLayered(tex_proj, ip.x, ip.y, proj); } // Place it into the volume dev_vol_out[vol_idx] = dev_vol_in[vol_idx] + voxel_data; } __global__ void kernel_warp_back_project_3Dgrid_cylindrical_detector(float * dev_vol_in, float * dev_vol_out, float radius) { unsigned int i = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; unsigned int j = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; unsigned int k = __umul24(blockIdx.z, blockDim.z) + threadIdx.z; if (i >= c_volSize.x || j >= c_volSize.y || k >= c_volSize.z) { return; } // Index row major into the volume long int vol_idx = i + (j + k * c_volSize.y) * (c_volSize.x); float3 IndexInDVF, Displacement, PP, IndexInInput, ip, pp; float voxel_data = 0; for (unsigned int proj = 0; proj < c_projSize.z; proj++) { // Compute the index in the DVF IndexInDVF = matrix_multiply(make_float3(i, j, k), c_IndexInputToIndexDVFMatrix); // Get each component of the displacement vector by interpolation in the DVF Displacement.x = tex3D(tex_xdvf, IndexInDVF.x + 0.5f, IndexInDVF.y + 0.5f, IndexInDVF.z + 0.5f); Displacement.y = tex3D(tex_ydvf, IndexInDVF.x + 0.5f, IndexInDVF.y + 0.5f, IndexInDVF.z + 0.5f); Displacement.z = tex3D(tex_zdvf, IndexInDVF.x + 0.5f, IndexInDVF.y + 0.5f, IndexInDVF.z + 0.5f); // Compute the physical point in input + the displacement vector PP = matrix_multiply(make_float3(i, j, k), c_IndexInputToPPInputMatrix) + Displacement; // Convert it to a continuous index IndexInInput = matrix_multiply(PP, c_PPInputToIndexInputMatrix); // Project the voxel onto the detector to find out which value to add to it pp = matrix_multiply(IndexInInput, &(c_volIndexToProjPP[12 * proj])); // Change coordinate systems pp.z = 1 / pp.z; pp.x = pp.x * pp.z; pp.y = pp.y * pp.z; // Apply correction for cylindrical detector const float u = pp.x; pp.x = radius * atan2(u, radius); pp.y = pp.y * radius / sqrt(radius * radius + u * u); // Get projection index ip.x = c_projPPToProjIndex[0] * pp.x + c_projPPToProjIndex[1] * pp.y + c_projPPToProjIndex[2]; ip.y = c_projPPToProjIndex[3] * pp.x + c_projPPToProjIndex[4] * pp.y + c_projPPToProjIndex[5]; // Get texture point, clip left to GPU voxel_data += tex2DLayered(tex_proj, ip.x, ip.y, proj); } // Place it into the volume dev_vol_out[vol_idx] = dev_vol_in[vol_idx] + voxel_data; } //_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_ // K E R N E L S -_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_ //_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-( E N D )-_-_ //_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_ /////////////////////////////////////////////////////////////////////////// // FUNCTION: CUDA_back_project ///////////////////////////// void CUDA_warp_back_project(int projSize[3], int volSize[3], int dvf_size[3], float * matrices, float * volIndexToProjPPs, float * projPPToProjIndex, float * dev_vol_in, float * dev_vol_out, float * dev_proj, float * dev_input_dvf, float IndexInputToIndexDVFMatrix[12], float PPInputToIndexInputMatrix[12], float IndexInputToPPInputMatrix[12], double radiusCylindricalDetector) { // Create CUBLAS context cublasHandle_t handle; cublasCreate(&handle); int device; cudaGetDevice(&device); // Copy the size of inputs into constant memory cudaMemcpyToSymbol(c_projSize, projSize, sizeof(int3)); cudaMemcpyToSymbol(c_volSize, 
volSize, sizeof(int3)); // Copy the projection matrices into constant memory cudaMemcpyToSymbol(c_matrices, &(matrices[0]), 12 * sizeof(float) * projSize[2]); cudaMemcpyToSymbol(c_volIndexToProjPP, &(volIndexToProjPPs[0]), 12 * sizeof(float) * projSize[2]); cudaMemcpyToSymbol(c_projPPToProjIndex, &(projPPToProjIndex[0]), 9 * sizeof(float)); // set texture parameters tex_proj.addressMode[0] = cudaAddressModeBorder; tex_proj.addressMode[1] = cudaAddressModeBorder; tex_proj.addressMode[2] = cudaAddressModeBorder; tex_proj.filterMode = cudaFilterModeLinear; tex_proj.normalized = false; // don't access with normalized texture coords tex_proj_3D.addressMode[0] = cudaAddressModeBorder; tex_proj_3D.addressMode[1] = cudaAddressModeBorder; tex_proj_3D.addressMode[2] = cudaAddressModeBorder; tex_proj_3D.filterMode = cudaFilterModeLinear; tex_proj_3D.normalized = false; // don't access with normalized texture coords // Copy projection data to array, bind the array to the texture cudaExtent projExtent = make_cudaExtent(projSize[0], projSize[1], projSize[2]); cudaArray * array_proj; static cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); CUDA_CHECK_ERROR; // Allocate array for input projections, in order to bind them to // either a 2D layered texture (requires GetCudaComputeCapability >= 2.0) or // a 3D texture if (GetCudaComputeCapability(device).first <= 1) cudaMalloc3DArray((cudaArray **)&array_proj, &channelDesc, projExtent); else cudaMalloc3DArray((cudaArray **)&array_proj, &channelDesc, projExtent, cudaArrayLayered); CUDA_CHECK_ERROR; // Copy data to 3D array cudaMemcpy3DParms copyParams = cudaMemcpy3DParms(); copyParams.srcPtr = make_cudaPitchedPtr(dev_proj, projSize[0] * sizeof(float), projSize[0], projSize[1]); copyParams.dstArray = (cudaArray *)array_proj; copyParams.extent = projExtent; copyParams.kind = cudaMemcpyDeviceToDevice; cudaMemcpy3D(&copyParams); CUDA_CHECK_ERROR; // Extent stuff, will be used for each component extraction cudaExtent dvfExtent = make_cudaExtent(dvf_size[0], dvf_size[1], dvf_size[2]); // Set texture parameters tex_xdvf.addressMode[0] = cudaAddressModeBorder; tex_xdvf.addressMode[1] = cudaAddressModeBorder; tex_xdvf.addressMode[2] = cudaAddressModeBorder; tex_xdvf.filterMode = cudaFilterModeLinear; tex_xdvf.normalized = false; // don't access with normalized texture coords tex_ydvf.addressMode[0] = cudaAddressModeBorder; tex_ydvf.addressMode[1] = cudaAddressModeBorder; tex_ydvf.addressMode[2] = cudaAddressModeBorder; tex_ydvf.filterMode = cudaFilterModeLinear; tex_ydvf.normalized = false; tex_zdvf.addressMode[0] = cudaAddressModeBorder; tex_zdvf.addressMode[1] = cudaAddressModeBorder; tex_zdvf.addressMode[2] = cudaAddressModeBorder; tex_zdvf.filterMode = cudaFilterModeLinear; tex_zdvf.normalized = false; // Allocate an intermediate memory space to extract x, y and z components of the DVF float * DVFcomponent; int numel = dvf_size[0] * dvf_size[1] * dvf_size[2]; cudaMalloc(&DVFcomponent, numel * sizeof(float)); float one = 1.0; // Allocate the arrays used for textures cudaArray ** DVFcomponentArrays = new cudaArray *[3]; CUDA_CHECK_ERROR; // Copy image data to arrays. The tricky part is the make_cudaPitchedPtr. 
// The best way to understand it is to read // http://stackoverflow.com/questions/16119943/how-and-when-should-i-use-pitched-pointer-with-the-cuda-api for (unsigned int component = 0; component < 3; component++) { // Reset the intermediate memory cudaMemset((void *)DVFcomponent, 0, numel * sizeof(float)); // Fill it with the current component float * pComponent = dev_input_dvf + component; cublasSaxpy(handle, numel, &one, pComponent, 3, DVFcomponent, 1); // Allocate the cudaArray and fill it with the current DVFcomponent cudaMalloc3DArray((cudaArray **)&DVFcomponentArrays[component], &channelDesc, dvfExtent); cudaMemcpy3DParms CopyParams = cudaMemcpy3DParms(); CopyParams.srcPtr = make_cudaPitchedPtr(DVFcomponent, dvf_size[0] * sizeof(float), dvf_size[0], dvf_size[1]); CopyParams.dstArray = (cudaArray *)DVFcomponentArrays[component]; CopyParams.extent = dvfExtent; CopyParams.kind = cudaMemcpyDeviceToDevice; cudaMemcpy3D(&CopyParams); CUDA_CHECK_ERROR; } // Intermediate memory is no longer needed cudaFree(DVFcomponent); // Bind 3D arrays to 3D textures cudaBindTextureToArray(tex_xdvf, (cudaArray *)DVFcomponentArrays[0], channelDesc); cudaBindTextureToArray(tex_ydvf, (cudaArray *)DVFcomponentArrays[1], channelDesc); cudaBindTextureToArray(tex_zdvf, (cudaArray *)DVFcomponentArrays[2], channelDesc); CUDA_CHECK_ERROR; // Copy matrices into constant memory cudaMemcpyToSymbol( c_IndexInputToIndexDVFMatrix, IndexInputToIndexDVFMatrix, 12 * sizeof(float), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol( c_PPInputToIndexInputMatrix, PPInputToIndexInputMatrix, 12 * sizeof(float), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol( c_IndexInputToPPInputMatrix, IndexInputToPPInputMatrix, 12 * sizeof(float), 0, cudaMemcpyHostToDevice); // Thread Block Dimensions constexpr int tBlock_x = 16; constexpr int tBlock_y = 4; constexpr int tBlock_z = 4; // Each element in the volume (each voxel) gets 1 thread unsigned int blocksInX = (volSize[0] - 1) / tBlock_x + 1; unsigned int blocksInY = (volSize[1] - 1) / tBlock_y + 1; unsigned int blocksInZ = (volSize[2] - 1) / tBlock_z + 1; // Run kernels. 
Note: Projection data is passed via texture memory, // transform matrix is passed via constant memory if (GetCudaComputeCapability(device).first <= 1) { // Compute block and grid sizes dim3 dimGrid = dim3(blocksInX, blocksInY * blocksInZ); dim3 dimBlock = dim3(tBlock_x, tBlock_y, tBlock_z); // Bind the array of projections to a 3D texture cudaBindTextureToArray(tex_proj_3D, (cudaArray *)array_proj, channelDesc); CUDA_CHECK_ERROR; if (radiusCylindricalDetector == 0) kernel_warp_back_project<<<dimGrid, dimBlock>>>(dev_vol_in, dev_vol_out, blocksInY); else kernel_warp_back_project_cylindrical_detector<<<dimGrid, dimBlock>>>( dev_vol_in, dev_vol_out, blocksInY, (float)radiusCylindricalDetector); // Unbind the image and projection matrix textures cudaUnbindTexture(tex_proj_3D); CUDA_CHECK_ERROR; } else { // Compute block and grid sizes dim3 dimGrid = dim3(blocksInX, blocksInY, blocksInZ); dim3 dimBlock = dim3(tBlock_x, tBlock_y, tBlock_z); CUDA_CHECK_ERROR; // Bind the array of projections to a 2D layered texture cudaBindTextureToArray(tex_proj, (cudaArray *)array_proj, channelDesc); CUDA_CHECK_ERROR; // Note: cbi->img is passed via texture memory // Matrices are passed via constant memory //------------------------------------- if (radiusCylindricalDetector == 0) kernel_warp_back_project_3Dgrid<<<dimGrid, dimBlock>>>(dev_vol_in, dev_vol_out); else kernel_warp_back_project_3Dgrid_cylindrical_detector<<<dimGrid, dimBlock>>>( dev_vol_in, dev_vol_out, (float)radiusCylindricalDetector); // Unbind the image and projection matrix textures cudaUnbindTexture(tex_proj); CUDA_CHECK_ERROR; } // Unbind the image and projection matrix textures cudaUnbindTexture(tex_xdvf); cudaUnbindTexture(tex_ydvf); cudaUnbindTexture(tex_zdvf); // Cleanup cudaFreeArray((cudaArray *)DVFcomponentArrays[0]); cudaFreeArray((cudaArray *)DVFcomponentArrays[1]); cudaFreeArray((cudaArray *)DVFcomponentArrays[2]); delete[] DVFcomponentArrays; cudaFreeArray((cudaArray *)array_proj); CUDA_CHECK_ERROR; // Destroy CUBLAS context cublasDestroy(handle); }
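// Illustrative host-side sketch (not part of the RTK filter above): the two
// cylindrical-detector kernels map a flat-panel physical point (u, v) onto a
// cylinder of the given radius via an arc-length reparameterization in u and a
// matching rescaling of v. The standalone function and its names are
// assumptions used only to spell out that correction.
#include <cmath>
#include <cstdio>

struct DetectorPoint { double u, v; };

// Same correction as in kernel_warp_back_project_cylindrical_detector:
//   u' = radius * atan2(u, radius)
//   v' = v * radius / sqrt(radius^2 + u^2)
inline DetectorPoint flat_to_cylindrical(DetectorPoint p, double radius)
{
  DetectorPoint out;
  out.u = radius * std::atan2(p.u, radius);
  out.v = p.v * radius / std::sqrt(radius * radius + p.u * p.u);
  return out;
}

inline int flat_to_cylindrical_demo()
{
  const DetectorPoint q = flat_to_cylindrical(DetectorPoint{100.0, 50.0}, 500.0);
  std::printf("corrected detector point: u = %f, v = %f\n", q.u, q.v);
  return 0;
}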
#include <cstdlib> #include <ctime> #include <cstdio> using namespace akg_reduce; using namespace std; // file to test multi-aggerated values reduce in single thread. // including single-block reduce/multi-block reduce by x/y directions. template <typename T> void CompareResults(T *arr1, T *arr2, int len) { double total_err = 0.0; bool flag = true; for (auto i = 0; i < len; i++) { if (std::abs(TypeTransform<double, T>(arr1[i]) - TypeTransform<double, T>(arr2[i])) > 1e-03) { flag = false; } total_err += std::abs(TypeTransform<double, T>(arr1[i]) - TypeTransform<double, T>(arr2[i])); } if (flag) { printf("[CORRECT] Output is equal to Expected.\n"); } else { printf("[INCORRECT] Output is not equal to Expected\n"); printf("Ouput (show few results):\n"); for (auto i = 0; i < std::min(10, len); i++) { printf("%f ", TypeTransform<double, T>(arr1[i])); } printf("\n"); printf("Expected:\n"); for (auto i = 0; i < std::min(10, len); i++) { printf("%f ", TypeTransform<double, T>(arr2[i])); } printf("\n"); } printf("AVERAGE_ERROR = %f\n", total_err / (double)len); } // Kahan summation for single thread Sum implement. // More info in 'test_kahan.cc' template <typename T> __global__ void ComputeResultAlongXSingleThread(int x_len, int y_len, T *arr, T *output) { for (auto j = 0; j < y_len; j++) { T sum = 0.0; T low_bits = 0.0; T lower_val, cropped_sum; for (auto i = 0; i < x_len; i++) { lower_val = arr[i + j * x_len] - low_bits; cropped_sum = sum + lower_val; low_bits = (cropped_sum - sum) - lower_val; sum = cropped_sum; } output[j] = sum; } } template <typename T> __global__ void ComputeResultAlongYSingleThread(int x_len, int y_len, T *arr, T *output) { for (auto i = 0; i < x_len; i++) { T sum = 0.0; T low_bits = 0.0; T lower_val, cropped_sum; for (auto j = 0; j < y_len; j++) { lower_val = arr[i + j * x_len] - low_bits; cropped_sum = sum + lower_val; low_bits = (cropped_sum - sum) - lower_val; sum = cropped_sum; } output[i] = sum; } } template <typename T, typename ReduceOp> __global__ void ComputeResultAlongXGPUMultiBlock(int x_len, int y_len, T *arr, T *output, int item_per_thread_x, int item_per_thread_y, ReduceOp op) { T T_red_rf[4]; // must be explict 16384 = 4096 * 4 * 1 __shared__ T red_buf[4][1024]; __shared__ T temp_output[4]; // temp storage for output for (int i = 0; i < 4; ++i) { temp_output[i] = (T)0.0; } for (int i = 0; i < item_per_thread_y; ++i) { T_red_rf[i] = 0.0; for (int k = 0; k < item_per_thread_x; ++k) { if (threadIdx.x + k * blockDim.x + blockIdx.x * blockDim.x * item_per_thread_x < x_len && threadIdx.y + i * blockDim.y + blockIdx.y * blockDim.y * item_per_thread_y < y_len) { T_red_rf[i] += arr[threadIdx.x + k * blockDim.x + blockIdx.x * blockDim.x * item_per_thread_x + (threadIdx.y + i * blockDim.y + blockIdx.y * blockDim.y * item_per_thread_y) * x_len]; } } } __syncthreads(); for (int i = 0; i < item_per_thread_y; ++i) { AkgReduce<T, ReduceOp, 1024, REDUCE2D_X>(op, &temp_output[i * blockDim.y + 0], &red_buf[i][0], T_red_rf[i]); } __syncthreads(); if (threadIdx.x == 0) { for (int i = 0; i < item_per_thread_y; ++i) { AkgAtomicReturn<T, ReduceOp>( temp_output[i], &output[blockIdx.y * blockDim.y * item_per_thread_y + i * blockDim.y + threadIdx.y], op); } } } template <typename T, typename ReduceOp> __global__ void ComputeResultAlongYGPUMultiBlock(int x_len, int y_len, T *arr, T *output, int item_per_thread_x, int item_per_thread_y, ReduceOp op, int sharedmem_x) { T T_red_rf[4]; __shared__ T red_buf[4 * 1024]; __shared__ T temp_output[32 * 4]; for (int i = 0; i < 32 * 4; ++i) { 
temp_output[i] = (T)0.0; } for (int i = 0; i < item_per_thread_x; ++i) { // x is non-reduce-axis T_red_rf[i] = 0.0; for (int k = 0; k < item_per_thread_y; ++k) { // here y is reduce-axis if (threadIdx.x + blockDim.x * i + blockIdx.x * blockDim.x * item_per_thread_x < x_len && threadIdx.y + blockDim.y * k + blockIdx.y * blockDim.y * item_per_thread_y < y_len) { T_red_rf[i] += arr[threadIdx.x + blockDim.x * i + blockIdx.x * blockDim.x * item_per_thread_x + (threadIdx.y + blockDim.y * k + blockIdx.y * blockDim.y * item_per_thread_y) * y_len]; } } } __syncthreads(); for (int i = 0; i < item_per_thread_x; ++i) { AkgReduce<T, ReduceOp, 32, REDUCE2D_Y>(op, &temp_output[i * blockDim.x + threadIdx.x], &red_buf[i * 1024], T_red_rf[i], sharedmem_x); } __syncthreads(); if (threadIdx.y == 0) { for (int i = 0; i < item_per_thread_x; ++i) { AkgAtomicReturn<T, ReduceOp>(temp_output[i * blockDim.x + threadIdx.x], &output[blockIdx.x * blockDim.x * item_per_thread_x + blockDim.x * i + threadIdx.x], op); } } } template <typename T> void TestReduce2DAlongX(int x_len, int y_len, string type_name, bool single_block = true, bool verbose = false) { printf("--- TEST CASE Reduce2DAlongX ---\n X = %d, Y = %d, TYPE = %s\n", x_len, y_len, type_name.c_str()); int input_bytes = x_len * y_len * sizeof(T); int output_bytes = y_len * sizeof(T); T *h_I, *d_I, *h_O, *d_O, *expected_h_O, *expected_d_O; h_I = (T *)malloc(input_bytes); h_O = (T *)malloc(output_bytes); expected_h_O = (T *)malloc(output_bytes); // random initialize srand(time(0)); for (auto i = 0; i < x_len * y_len; i++) { h_I[i] = TypeTransform<T, double>((rand() % 10000000) / 10000000.0); } if (verbose) { printf("[VERBOSE] random Input data:\n"); for (auto j = 0; j < y_len; j++) { for (auto i = 0; i < x_len; i++) { printf("%f ", TypeTransform<double, T>(h_I[i + j * x_len])); } printf("\n"); } } for (auto i = 0; i < y_len; i++) { h_O[i] = TypeTransform<T, double>(0.0); expected_h_O[i] = TypeTransform<T, double>(0.0); } // host to device GetGpuErr(cudaMalloc((void **)&d_I, input_bytes)); GetGpuErr(cudaMemcpy((void *)d_I, (void *)h_I, input_bytes, cudaMemcpyHostToDevice)); GetGpuErr(cudaMalloc((void **)&d_O, output_bytes)); GetGpuErr(cudaMemcpy((void *)d_O, (void *)h_O, output_bytes, cudaMemcpyHostToDevice)); GetGpuErr(cudaMalloc((void **)&expected_d_O, output_bytes)); GetGpuErr(cudaMemcpy((void *)expected_d_O, (void *)expected_h_O, output_bytes, cudaMemcpyHostToDevice)); // compute single thread resutls ComputeResultAlongXSingleThread<T><<<1, 1>>>(x_len, y_len, d_I, expected_d_O); GetGpuErr(cudaMemcpy((void *)expected_h_O, (void *)expected_d_O, output_bytes, cudaMemcpyDeviceToHost)); dim3 gridSize(8, 4096); dim3 blockSize(1024, 1); int item_per_block_x = (x_len - 1) / gridSize.x + 1; int item_per_thread_x = (item_per_block_x - 1) / blockSize.x + 1; int item_per_block_y = (y_len - 1) / gridSize.y + 1; int item_per_thread_y = (item_per_block_y - 1) / blockSize.y + 1; ComputeResultAlongXGPUMultiBlock<T, akg_reduce::SumOp> <<<gridSize, blockSize>>>(x_len, y_len, d_I, d_O, item_per_thread_x, item_per_thread_y, akg_reduce::SumOp()); GetGpuErr(cudaMemcpy((void *)h_O, (void *)d_O, output_bytes, cudaMemcpyDeviceToHost)); // compare GPU with CPU CompareResults<T>(h_O, expected_h_O, y_len); GetGpuErr(cudaFree(expected_d_O)); GetGpuErr(cudaFree(d_O)); GetGpuErr(cudaFree(d_I)); free(expected_h_O); free(h_O); free(h_I); printf("--- CASE END ---\n\n"); } template <typename T> void TestReduce2DAlongY(int x_len, int y_len, string type_name, bool single_block = true, bool verbose = 
false) { printf("--- TEST CASE Reduce2DAlongY ---\n X = %d, Y = %d, TYPE = %s\n", x_len, y_len, type_name.c_str()); int input_bytes = x_len * y_len * sizeof(T); int output_bytes = x_len * sizeof(T); T *h_I, *d_I, *h_O, *d_O, *expected_h_O, *expected_d_O; h_I = (T *)malloc(input_bytes); h_O = (T *)malloc(output_bytes); expected_h_O = (T *)malloc(output_bytes); // random initialize srand(time(0)); for (auto i = 0; i < x_len * y_len; i++) { h_I[i] = TypeTransform<T, double>((rand() % 10000000) / 10000000.0); } if (verbose) { printf("[VERBOSE] random Input data:\n"); for (auto j = 0; j < y_len; j++) { for (auto i = 0; i < x_len; i++) { printf("%f ", TypeTransform<double, T>(h_I[i + j * x_len])); } printf("\n"); } } for (auto i = 0; i < x_len; i++) { h_O[i] = TypeTransform<T, double>(0.0); expected_h_O[i] = TypeTransform<T, double>(0.0); } // host to device GetGpuErr(cudaMalloc((void **)&d_I, input_bytes)); GetGpuErr(cudaMemcpy((void *)d_I, (void *)h_I, input_bytes, cudaMemcpyHostToDevice)); GetGpuErr(cudaMalloc((void **)&d_O, output_bytes)); GetGpuErr(cudaMemcpy((void *)d_O, (void *)h_O, output_bytes, cudaMemcpyHostToDevice)); GetGpuErr(cudaMalloc((void **)&expected_d_O, output_bytes)); GetGpuErr(cudaMemcpy((void *)expected_d_O, (void *)expected_h_O, output_bytes, cudaMemcpyHostToDevice)); // compute single thread results ComputeResultAlongYSingleThread<T><<<1, 1>>>(x_len, y_len, d_I, expected_d_O); GetGpuErr(cudaMemcpy((void *)expected_h_O, (void *)expected_d_O, output_bytes, cudaMemcpyDeviceToHost)); dim3 gridSize(128, 128); dim3 blockSize(32, 32); int item_per_block_x = (x_len - 1) / gridSize.x + 1; int item_per_thread_x = (item_per_block_x - 1) / blockSize.x + 1; int item_per_block_y = (y_len - 1) / gridSize.y + 1; int item_per_thread_y = (item_per_block_y - 1) / blockSize.y + 1; int sharedmem_x = 32; ComputeResultAlongYGPUMultiBlock<T, akg_reduce::SumOp><<<gridSize, blockSize>>>( x_len, y_len, d_I, d_O, item_per_thread_x, item_per_thread_y, akg_reduce::SumOp(), sharedmem_x); GetGpuErr(cudaMemcpy((void *)h_O, (void *)d_O, output_bytes, cudaMemcpyDeviceToHost)); // compare GPU with CPU CompareResults<T>(h_O, expected_h_O, x_len); GetGpuErr(cudaFree(expected_d_O)); GetGpuErr(cudaFree(d_O)); GetGpuErr(cudaFree(d_I)); free(expected_h_O); free(h_O); free(h_I); printf("--- CASE END ---\n\n"); } int main() { // TestReduce2DAlongX<int>(128, 8, "int", true); // TestReduce2DAlongX<half>(128, 8, "half", true); // TestReduce2DAlongX<float>(128, 8, "float", true); // TestReduce2DAlongX<double>(128, 8, "double", true); // TestReduce2DAlongX<int>(128, 8, "int", false); TestReduce2DAlongX<float>(16384, 16384, "float", false); // TestReduce2DAlongX<double>(128, 8, "double", false); // TestReduce2DAlongY<int>(8, 128, "int", true); // TestReduce2DAlongY<half>(8, 128, "half", true); // TestReduce2DAlongY<float>(8, 128, "float", true); // TestReduce2DAlongY<double>(8, 128, "double", true); // TestReduce2DAlongY<int>(8, 128, "int", false); // TestReduce2DAlongY<half>(8, 128, "half", false); TestReduce2DAlongY<float>(16384, 16384, "float", false); // TestReduce2DAlongY<double>(8, 128, "double", false); return 0; }
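// Illustrative host-side sketch (not part of the akg_reduce test above): the
// single-thread reference kernels accumulate with Kahan (compensated)
// summation so that the CPU/GPU comparison is not dominated by rounding error
// on 16384-element rows. The function below mirrors that scheme in plain C++;
// its name is an assumption.
#include <vector>
#include <cstdio>

inline double kahan_sum(const std::vector<double> &values)
{
  double sum = 0.0;
  double low_bits = 0.0;                          // compensation for lost low-order bits
  for (double v : values) {
    const double lower_val   = v - low_bits;
    const double cropped_sum = sum + lower_val;
    low_bits = (cropped_sum - sum) - lower_val;   // what was lost in this addition
    sum = cropped_sum;
  }
  return sum;
}

inline int kahan_sum_demo()
{
  const std::vector<double> vals(1 << 20, 1.0 / 3.0);
  std::printf("kahan sum = %.10f\n", kahan_sum(vals));  // close to (1 << 20) / 3
  return 0;
}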
* This is a simple test program to measure the memcopy bandwidth of the GPU. * It can measure device to device copy bandwidth, host to device copy bandwidth * for pageable and pinned memory, and device to host copy bandwidth for pageable * and pinned memory. * * Usage: * ./bandwidthTest [option]... */ // includes #include <shrUtils.h> #include <sdkHelper.h> // helper for shared functions common to CUDA SDK samples #include <shrQATest.h> // This is for automated testing output (--qatest) #include <cuda.h> #include <memory> #include <iostream> #include <cassert> // defines, project #define MEMCOPY_ITERATIONS 10 #define DEFAULT_SIZE ( 32 * ( 1 << 20 ) ) //32 M #define DEFAULT_INCREMENT (1 << 22) //4 M #define CACHE_CLEAR_SIZE (1 << 24) //16 M //shmoo mode defines #define SHMOO_MEMSIZE_MAX (1 << 26) //64 M #define SHMOO_MEMSIZE_START (1 << 10) //1 KB #define SHMOO_INCREMENT_1KB (1 << 10) //1 KB #define SHMOO_INCREMENT_2KB (1 << 11) //2 KB #define SHMOO_INCREMENT_10KB (10 * (1 << 10)) //10KB #define SHMOO_INCREMENT_100KB (100 * (1 << 10)) //100 KB #define SHMOO_INCREMENT_1MB (1 << 20) //1 MB #define SHMOO_INCREMENT_2MB (1 << 21) //2 MB #define SHMOO_INCREMENT_4MB (1 << 22) //4 MB #define SHMOO_LIMIT_20KB (20 * (1 << 10)) //20 KB #define SHMOO_LIMIT_50KB (50 * (1 << 10)) //50 KB #define SHMOO_LIMIT_100KB (100 * (1 << 10)) //100 KB #define SHMOO_LIMIT_1MB (1 << 20) //1 MB #define SHMOO_LIMIT_16MB (1 << 24) //16 MB #define SHMOO_LIMIT_32MB (1 << 25) //32 MB //enums, project enum testMode { QUICK_MODE, RANGE_MODE, SHMOO_MODE }; enum memcpyKind { DEVICE_TO_HOST, HOST_TO_DEVICE, DEVICE_TO_DEVICE }; enum printMode { USER_READABLE, CSV }; enum memoryMode { PINNED, PAGEABLE }; // if true, use CPU based timing for everything static bool bDontUseGPUTiming; int *pArgc = NULL; char **pArgv = NULL; //////////////////////////////////////////////////////////////////////////////// // declaration, forward int runTest(const int argc, const char **argv); void testBandwidth( unsigned int start, unsigned int end, unsigned int increment, testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc); void testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc); void testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc); void testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc); float testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc); float testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc); float testDeviceToDeviceTransfer(unsigned int memSize); void printResultsReadable(unsigned int *memSizes, double* bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc); void printResultsCSV(unsigned int *memSizes, double* bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc); void printHelp(void); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors( cudaError err, const char *file, const int line ) { if( cudaSuccess != err) { 
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError( const char *errorMessage, const char *file, const int line ) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n", file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // General GPU Device CUDA Initialization int gpuDeviceInit(int devID) { int deviceCount; checkCudaErrors(cudaGetDeviceCount(&deviceCount)); if (deviceCount == 0) { fprintf(stderr, "gpuDeviceInit() CUDA error: no devices supporting CUDA.\n"); exit(-1); } if (devID < 0) devID = 0; if (devID > deviceCount-1) { fprintf(stderr, "\n"); fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", deviceCount); fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid GPU device. <<\n", devID); fprintf(stderr, "\n"); return -devID; } cudaDeviceProp deviceProp; checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) ); if (deviceProp.major < 1) { fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n"); exit(-1); \ } checkCudaErrors( cudaSetDevice(devID) ); printf("> gpuDeviceInit() CUDA device [%d]: %s\n", devID, deviceProp.name); return devID; } // This function returns the best GPU (with maximum GFLOPS) int gpuGetMaxGflopsDeviceId() { int current_device = 0, sm_per_multiproc = 0; int max_compute_perf = 0, max_perf_device = 0; int device_count = 0, best_SM_arch = 0; cudaDeviceProp deviceProp; cudaGetDeviceCount( &device_count ); // Find the best major SM Architecture GPU device while ( current_device < device_count ) { cudaGetDeviceProperties( &deviceProp, current_device ); if (deviceProp.major > 0 && deviceProp.major < 9999) { best_SM_arch = MAX(best_SM_arch, deviceProp.major); } current_device++; } // Find the best CUDA capable GPU device current_device = 0; while( current_device < device_count ) { cudaGetDeviceProperties( &deviceProp, current_device ); if (deviceProp.major == 9999 && deviceProp.minor == 9999) { sm_per_multiproc = 1; } else { sm_per_multiproc = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor); } int compute_perf = deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate; if( compute_perf > max_compute_perf ) { // If we find GPU with SM major > 2, search only these if ( best_SM_arch > 2 ) { // If our device==dest_SM_arch, choose this, or else pass if (deviceProp.major == best_SM_arch) { max_compute_perf = compute_perf; max_perf_device = current_device; } } else { max_compute_perf = compute_perf; max_perf_device = current_device; } } ++current_device; } return max_perf_device; } // Initialization code to find the best CUDA Device int findCudaDevice(int argc, const char **argv) { cudaDeviceProp deviceProp; int devID = 0; // If the command-line has a device number specified, use it if (checkCmdLineFlag(argc, argv, "device")) { devID = getCmdLineArgumentInt(argc, argv, "device="); if (devID < 0) { printf("Invalid command line parameters\n"); exit(-1); } else { devID = gpuDeviceInit(devID); if (devID < 0) { printf("exiting...\n"); shrQAFinishExit(argc, (const char **)argv, QA_FAILED); exit(-1); } } } else { // Otherwise pick the device with highest Gflops/s devID = gpuGetMaxGflopsDeviceId(); checkCudaErrors( cudaSetDevice( devID ) ); checkCudaErrors( 
cudaGetDeviceProperties(&deviceProp, devID) ); printf("> Using CUDA device [%d]: %s\n", devID, deviceProp.name); } return devID; } // end of CUDA Helper Functions //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { pArgc = &argc; pArgv = argv; shrQAStart(argc, argv); // set logfile name and start logs shrSetLogFileName ("bandwidthTest.txt"); shrLog("%s Starting...\n\n", argv[0]); int iRetVal = runTest(argc, (const char**)argv); // finish shrQAFinishExit(argc, (const char **)argv, (iRetVal==0 ? QA_PASSED : QA_FAILED)); } /////////////////////////////////////////////////////////////////////////////// //Parse args, run the appropriate tests /////////////////////////////////////////////////////////////////////////////// int runTest(const int argc, const char **argv) { int start = DEFAULT_SIZE; int end = DEFAULT_SIZE; int startDevice = 0; int endDevice = 0; int increment = DEFAULT_INCREMENT; testMode mode = QUICK_MODE; bool htod = false; bool dtoh = false; bool dtod = false; bool wc = false; char *modeStr; char *device = NULL; printMode printmode = USER_READABLE; char *memModeStr = NULL; memoryMode memMode = PAGEABLE; //process command line args if(checkCmdLineFlag( argc, argv, "help")) { printHelp(); return 0; } if(checkCmdLineFlag( argc, argv, "csv")) { printmode = CSV; } if( getCmdLineArgumentString(argc, argv, "memory", &memModeStr) ) { if( strcmp(memModeStr, "pageable") == 0 ) { memMode = PAGEABLE; } else if( strcmp(memModeStr, "pinned") == 0) { memMode = PINNED; } else { shrLog("Invalid memory mode - valid modes are pageable or pinned\n"); shrLog("See --help for more information\n"); return -1000; } } else { //default - pageable memory memMode = PAGEABLE; } if( shrGetCmdLineArgumentstr(argc, argv, "device", &device) ) { int deviceCount; cudaError_t error_id = cudaGetDeviceCount(&deviceCount); if (error_id != cudaSuccess) { shrLog( "cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id) ); shrQAFinishExit(*pArgc, (const char **)pArgv, QA_FAILED); } if( deviceCount == 0 ) { shrLog("!!!!!No devices found!!!!!\n"); return -2000; } if( strcmp (device, "all") == 0 ) { printf ("\n!!!!!Cumulative Bandwidth to be computed from all the devices !!!!!!\n\n"); startDevice = 0; endDevice = deviceCount-1; } else { startDevice = endDevice = atoi(device); if( startDevice >= deviceCount || startDevice < 0) { shrLog("\n!!!!!Invalid GPU number %d given hence default gpu %d will be used !!!!!\n", startDevice,0); startDevice = endDevice = 0; } } } shrLog("Running on...\n\n"); for( int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++) { cudaDeviceProp deviceProp; cudaError_t error_id = cudaGetDeviceProperties(&deviceProp, currentDevice); if (error_id == cudaSuccess) { shrLog(" Device %d: %s\n", currentDevice, deviceProp.name); } else { shrLog( "cudaGetDeviceProperties returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id) ); shrQAFinishExit(*pArgc, (const char **)pArgv, QA_FAILED); } } if( shrGetCmdLineArgumentstr(argc, argv, "mode", &modeStr) ) { //figure out the mode if( strcmp(modeStr, "quick") == 0 ) { shrLog(" Quick Mode\n\n"); mode = QUICK_MODE; } else if( strcmp(modeStr, "shmoo") == 0 ) { shrLog(" Shmoo Mode\n\n"); mode = SHMOO_MODE; } else if( strcmp(modeStr, "range") == 0 ) { shrLog(" Range Mode\n\n"); mode = RANGE_MODE; } else { shrLog("Invalid mode - valid modes are quick, 
range, or shmoo\n"); shrLog("See --help for more information\n"); return -3000; } } else { //default mode - quick shrLog(" Quick Mode\n\n"); mode = QUICK_MODE; } if(checkCmdLineFlag( argc, argv, "htod")) htod = true; if(checkCmdLineFlag( argc, argv, "dtoh")) dtoh = true; if(checkCmdLineFlag( argc, argv, "dtod")) dtod = true; #if CUDART_VERSION >= 2020 if(checkCmdLineFlag( argc, argv, "wc")) wc = true; #endif if(checkCmdLineFlag( argc, argv, "cputiming")) bDontUseGPUTiming = true; if( !htod && !dtoh && !dtod ) { //default: All htod = true; dtoh = true; dtod = true; } if( RANGE_MODE == mode ) { if( shrGetCmdLineArgumenti( argc, argv, "start", &start) ) { if( start <= 0 ) { shrLog("Illegal argument - start must be greater than zero\n"); return -4000; } } else { shrLog("Must specify a starting size in range mode\n"); shrLog("See --help for more information\n"); return -5000; } if( shrGetCmdLineArgumenti( argc, argv, "end", &end) ) { if( end <= 0 ) { shrLog("Illegal argument - end must be greater than zero\n"); return -6000; } if( start > end ) { shrLog("Illegal argument - start is greater than end\n"); return -7000; } } else { shrLog("Must specify an end size in range mode.\n"); shrLog("See --help for more information\n"); return -8000; } if( shrGetCmdLineArgumenti( argc, argv, "increment", &increment) ) { if( increment <= 0 ) { shrLog("Illegal argument - increment must be greater than zero\n"); return -9000; } } else { shrLog("Must specify an increment in user mode\n"); shrLog("See --help for more information\n"); return -10000; } } if( htod ) { testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment, mode, HOST_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc); } if( dtoh ) { testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment, mode, DEVICE_TO_HOST, printmode, memMode, startDevice, endDevice, wc); } if( dtod ) { testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment, mode, DEVICE_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc); } // Ensure that we reset all CUDA Devices in question for (int nDevice = startDevice; nDevice < endDevice; nDevice++) { cudaSetDevice(nDevice); cudaDeviceReset(); } return 0; } /////////////////////////////////////////////////////////////////////////////// // Run a bandwidth test /////////////////////////////////////////////////////////////////////////////// void testBandwidth(unsigned int start, unsigned int end, unsigned int increment, testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc) { switch( mode ) { case QUICK_MODE: testBandwidthQuick( DEFAULT_SIZE, kind, printmode, memMode, startDevice, endDevice, wc ); break; case RANGE_MODE: testBandwidthRange(start, end, increment, kind, printmode, memMode, startDevice, endDevice, wc); break; case SHMOO_MODE: testBandwidthShmoo(kind, printmode, memMode, startDevice, endDevice, wc); break; default: break; } } ////////////////////////////////////////////////////////////////////// // Run a quick mode bandwidth test ////////////////////////////////////////////////////////////////////// void testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc) { testBandwidthRange(size, size, DEFAULT_INCREMENT, kind, printmode, memMode, startDevice, endDevice, wc); } /////////////////////////////////////////////////////////////////////// // Run a range mode bandwidth test 
////////////////////////////////////////////////////////////////////// void testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc) { //count the number of copies we're going to run unsigned int count = 1 + ((end - start) / increment); unsigned int *memSizes = ( unsigned int * )malloc( count * sizeof( unsigned int ) ); double *bandwidths = ( double * ) malloc( count * sizeof(double) ); // Before calculating the cumulative bandwidth, initialize bandwidths array to NULL for (unsigned int i = 0; i < count; i++) bandwidths[i] = 0.0; // Use the device asked by the user for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++) { cudaSetDevice(currentDevice); //run each of the copies for(unsigned int i = 0; i < count; i++) { memSizes[i] = start + i * increment; switch(kind) { case DEVICE_TO_HOST: bandwidths[i] += testDeviceToHostTransfer( memSizes[i], memMode, wc); break; case HOST_TO_DEVICE: bandwidths[i] += testHostToDeviceTransfer( memSizes[i], memMode, wc); break; case DEVICE_TO_DEVICE: bandwidths[i] += testDeviceToDeviceTransfer( memSizes[i] ); break; } } } // Complete the bandwidth computation on all the devices //print results if(printmode == CSV) { printResultsCSV(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc); } else { printResultsReadable(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc); } //clean up free(memSizes); free(bandwidths); } ////////////////////////////////////////////////////////////////////////////// // Intense shmoo mode - covers a large range of values with varying increments ////////////////////////////////////////////////////////////////////////////// void testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc) { //count the number of copies to make unsigned int count = 1 + (SHMOO_LIMIT_20KB / SHMOO_INCREMENT_1KB) + ((SHMOO_LIMIT_50KB - SHMOO_LIMIT_20KB) / SHMOO_INCREMENT_2KB) + ((SHMOO_LIMIT_100KB - SHMOO_LIMIT_50KB) / SHMOO_INCREMENT_10KB) + ((SHMOO_LIMIT_1MB - SHMOO_LIMIT_100KB) / SHMOO_INCREMENT_100KB) + ((SHMOO_LIMIT_16MB - SHMOO_LIMIT_1MB) / SHMOO_INCREMENT_1MB) + ((SHMOO_LIMIT_32MB - SHMOO_LIMIT_16MB) / SHMOO_INCREMENT_2MB) + ((SHMOO_MEMSIZE_MAX - SHMOO_LIMIT_32MB) / SHMOO_INCREMENT_4MB); unsigned int *memSizes = ( unsigned int * )malloc( count * sizeof( unsigned int ) ); double *bandwidths = ( double * ) malloc( count * sizeof(double) ); // Before calculating the cumulative bandwidth, initialize bandwidths array to NULL for (unsigned int i = 0; i < count; i++) bandwidths[i] = 0.0; // Use the device asked by the user for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++) { cudaSetDevice(currentDevice); //Run the shmoo int iteration = 0; unsigned int memSize = 0; while( memSize <= SHMOO_MEMSIZE_MAX ) { if( memSize < SHMOO_LIMIT_20KB ) { memSize += SHMOO_INCREMENT_1KB; } else if( memSize < SHMOO_LIMIT_50KB ) { memSize += SHMOO_INCREMENT_2KB; }else if( memSize < SHMOO_LIMIT_100KB ) { memSize += SHMOO_INCREMENT_10KB; }else if( memSize < SHMOO_LIMIT_1MB ) { memSize += SHMOO_INCREMENT_100KB; }else if( memSize < SHMOO_LIMIT_16MB ) { memSize += SHMOO_INCREMENT_1MB; }else if( memSize < SHMOO_LIMIT_32MB ) { memSize += SHMOO_INCREMENT_2MB; }else { memSize += SHMOO_INCREMENT_4MB; } memSizes[iteration] = memSize; switch(kind) { case DEVICE_TO_HOST: bandwidths[iteration] += 
testDeviceToHostTransfer( memSizes[iteration], memMode, wc ); break; case HOST_TO_DEVICE: bandwidths[iteration] += testHostToDeviceTransfer( memSizes[iteration], memMode, wc ); break; case DEVICE_TO_DEVICE: bandwidths[iteration] += testDeviceToDeviceTransfer( memSizes[iteration] ); break; } iteration++; shrLog("."); } } // Complete the bandwidth computation on all the devices //print results shrLog("\n"); if( CSV == printmode) { printResultsCSV(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc); } else { printResultsReadable(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc); } //clean up free(memSizes); free(bandwidths); } /////////////////////////////////////////////////////////////////////////////// // test the bandwidth of a device to host memcopy of a specific size /////////////////////////////////////////////////////////////////////////////// float testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc) { StopWatchInterface *timer = NULL; float elapsedTimeInMs = 0.0f; float bandwidthInMBs = 0.0f; unsigned char *h_idata = NULL; unsigned char *h_odata = NULL; cudaEvent_t start, stop; sdkCreateTimer( &timer ); checkCudaErrors( cudaEventCreate( &start ) ); checkCudaErrors( cudaEventCreate( &stop ) ); //allocate host memory if( PINNED == memMode ) { //pinned memory mode - use special function to get OS-pinned memory #if CUDART_VERSION >= 2020 checkCudaErrors( cudaHostAlloc( (void**)&h_idata, memSize, (wc) ? cudaHostAllocWriteCombined : 0 ) ); checkCudaErrors( cudaHostAlloc( (void**)&h_odata, memSize, (wc) ? cudaHostAllocWriteCombined : 0 ) ); #else checkCudaErrors( cudaMallocHost( (void**)&h_idata, memSize ) ); checkCudaErrors( cudaMallocHost( (void**)&h_odata, memSize ) ); #endif } else { //pageable memory mode - use malloc h_idata = (unsigned char *)malloc( memSize ); h_odata = (unsigned char *)malloc( memSize ); } //initialize the memory for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++) { h_idata[i] = (unsigned char) (i & 0xff); } // allocate device memory unsigned char* d_idata; checkCudaErrors( cudaMalloc( (void**) &d_idata, memSize)); //initialize the device memory checkCudaErrors( cudaMemcpy( d_idata, h_idata, memSize, cudaMemcpyHostToDevice) ); //copy data from GPU to Host sdkStartTimer( &timer ); checkCudaErrors( cudaEventRecord( start, 0 ) ); if( PINNED == memMode ) { for( unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++ ) { checkCudaErrors( cudaMemcpyAsync( h_odata, d_idata, memSize, cudaMemcpyDeviceToHost, 0) ); } } else { for( unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++ ) { checkCudaErrors( cudaMemcpy( h_odata, d_idata, memSize, cudaMemcpyDeviceToHost) ); } } checkCudaErrors( cudaEventRecord( stop, 0 ) ); // make sure GPU has finished copying checkCudaErrors( cudaDeviceSynchronize() ); //get the the total elapsed time in ms sdkStopTimer( &timer ); checkCudaErrors( cudaEventElapsedTime( &elapsedTimeInMs, start, stop ) ); if( PINNED != memMode || bDontUseGPUTiming ) { elapsedTimeInMs = sdkGetTimerValue( &timer ); } //calculate bandwidth in MB/s bandwidthInMBs = (1e3f * memSize * (float)MEMCOPY_ITERATIONS) / (elapsedTimeInMs * (float)(1 << 20)); //clean up memory checkCudaErrors( cudaEventDestroy(stop) ); checkCudaErrors( cudaEventDestroy(start) ); sdkDeleteTimer( &timer ); if( PINNED == memMode ) { checkCudaErrors( cudaFreeHost(h_idata) ); checkCudaErrors( cudaFreeHost(h_odata) ); } else { free(h_idata); free(h_odata); } checkCudaErrors(cudaFree(d_idata)); return bandwidthInMBs; } 
/////////////////////////////////////////////////////////////////////////////// //! test the bandwidth of a host to device memcopy of a specific size /////////////////////////////////////////////////////////////////////////////// float testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc) { StopWatchInterface *timer = NULL; float elapsedTimeInMs = 0.0f; float bandwidthInMBs = 0.0f; cudaEvent_t start, stop; sdkCreateTimer( &timer ); checkCudaErrors( cudaEventCreate( &start ) ); checkCudaErrors( cudaEventCreate( &stop ) ); //allocate host memory unsigned char *h_odata = NULL; if( PINNED == memMode ) { #if CUDART_VERSION >= 2020 //pinned memory mode - use special function to get OS-pinned memory checkCudaErrors( cudaHostAlloc( (void**)&h_odata, memSize, (wc) ? cudaHostAllocWriteCombined : 0 ) ); #else //pinned memory mode - use special function to get OS-pinned memory checkCudaErrors( cudaMallocHost( (void**)&h_odata, memSize ) ); #endif } else { //pageable memory mode - use malloc h_odata = (unsigned char *)malloc( memSize ); } unsigned char *h_cacheClear1 = (unsigned char *)malloc( CACHE_CLEAR_SIZE ); unsigned char *h_cacheClear2 = (unsigned char *)malloc( CACHE_CLEAR_SIZE ); //initialize the memory for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++) { h_odata[i] = (unsigned char) (i & 0xff); } for(unsigned int i = 0; i < CACHE_CLEAR_SIZE / sizeof(unsigned char); i++) { h_cacheClear1[i] = (unsigned char) (i & 0xff); h_cacheClear2[i] = (unsigned char) (0xff - (i & 0xff)); } //allocate device memory unsigned char* d_idata; checkCudaErrors( cudaMalloc( (void**) &d_idata, memSize)); sdkStartTimer( &timer ); checkCudaErrors( cudaEventRecord( start, 0 ) ); //copy host memory to device memory if( PINNED == memMode ) { for(unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) { checkCudaErrors( cudaMemcpyAsync( d_idata, h_odata, memSize, cudaMemcpyHostToDevice, 0) ); } } else { for(unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) { checkCudaErrors( cudaMemcpy( d_idata, h_odata, memSize, cudaMemcpyHostToDevice) ); } } checkCudaErrors( cudaEventRecord( stop, 0 ) ); checkCudaErrors( cudaDeviceSynchronize() ); //total elapsed time in ms sdkStopTimer( &timer ); checkCudaErrors( cudaEventElapsedTime( &elapsedTimeInMs, start, stop ) ); if ( PINNED != memMode || bDontUseGPUTiming ) { elapsedTimeInMs = sdkGetTimerValue( &timer ); } sdkResetTimer( &timer ); //calculate bandwidth in MB/s bandwidthInMBs = (1e3f * memSize * (float)MEMCOPY_ITERATIONS) / (elapsedTimeInMs * (float)(1 << 20)); //clean up memory checkCudaErrors( cudaEventDestroy(stop) ); checkCudaErrors( cudaEventDestroy(start) ); sdkDeleteTimer( &timer ); if( PINNED == memMode ) { checkCudaErrors( cudaFreeHost(h_odata) ); } else { free(h_odata); } free(h_cacheClear1); free(h_cacheClear2); checkCudaErrors(cudaFree(d_idata)); return bandwidthInMBs; } /////////////////////////////////////////////////////////////////////////////// //! 
test the bandwidth of a device to device memcopy of a specific size /////////////////////////////////////////////////////////////////////////////// float testDeviceToDeviceTransfer(unsigned int memSize) { StopWatchInterface *timer = NULL; float elapsedTimeInMs = 0.0f; float bandwidthInMBs = 0.0f; cudaEvent_t start, stop; sdkCreateTimer( &timer ); checkCudaErrors( cudaEventCreate( &start ) ); checkCudaErrors( cudaEventCreate( &stop ) ); //allocate host memory unsigned char *h_idata = (unsigned char *)malloc( memSize ); //initialize the host memory for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++) { h_idata[i] = (unsigned char) (i & 0xff); } //allocate device memory unsigned char *d_idata; checkCudaErrors( cudaMalloc( (void**) &d_idata, memSize)); unsigned char *d_odata; checkCudaErrors( cudaMalloc( (void**) &d_odata, memSize)); //initialize memory checkCudaErrors( cudaMemcpy( d_idata, h_idata, memSize, cudaMemcpyHostToDevice) ); //run the memcopy sdkStartTimer( &timer ); checkCudaErrors( cudaEventRecord( start, 0 ) ); for( unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++ ) { checkCudaErrors( cudaMemcpy( d_odata, d_idata, memSize, cudaMemcpyDeviceToDevice) ); } checkCudaErrors( cudaEventRecord( stop, 0 ) ); //Since device to device memory copies are non-blocking, //cudaDeviceSynchronize() is required in order to get //proper timing. checkCudaErrors( cudaDeviceSynchronize() ); //get the the total elapsed time in ms sdkStopTimer( &timer ); checkCudaErrors( cudaEventElapsedTime( &elapsedTimeInMs, start, stop ) ); if ( bDontUseGPUTiming ) { elapsedTimeInMs = sdkGetTimerValue( &timer ); } //calculate bandwidth in MB/s bandwidthInMBs = 2.0f * (1e3f * memSize * (float)MEMCOPY_ITERATIONS) / (elapsedTimeInMs * (float)(1 << 20)); //clean up memory sdkDeleteTimer( &timer ); free(h_idata); checkCudaErrors(cudaEventDestroy(stop)); checkCudaErrors(cudaEventDestroy(start)); checkCudaErrors(cudaFree(d_idata)); checkCudaErrors(cudaFree(d_odata)); return bandwidthInMBs; } ///////////////////////////////////////////////////////// //print results in an easily read format //////////////////////////////////////////////////////// void printResultsReadable(unsigned int *memSizes, double* bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc) { // log config information if (kind == DEVICE_TO_DEVICE) { shrLog(" Device to Device Bandwidth, %i Device(s)\n", iNumDevs); } else { if (kind == DEVICE_TO_HOST) { shrLog(" Device to Host Bandwidth, %i Device(s), ", iNumDevs); } else if (kind == HOST_TO_DEVICE) { shrLog(" Host to Device Bandwidth, %i Device(s), ", iNumDevs); } if(memMode == PAGEABLE) { shrLog("Paged memory\n"); } else if (memMode == PINNED) { shrLog("Pinned memory"); if (wc) { shrLog(", Write-Combined Memory Enabled"); } shrLog("\n"); } } shrLog(" Transfer Size (Bytes)\tBandwidth(MB/s)\n"); unsigned int i; for(i = 0; i < (count - 1); i++) { shrLog(" %u\t\t\t%s%.1f\n", memSizes[i], (memSizes[i] < 10000)? "\t" : "", bandwidths[i]); } shrLog(" %u\t\t\t%s%.1f\n\n", memSizes[i], (memSizes[i] < 10000)? 
"\t" : "", bandwidths[i]); } /////////////////////////////////////////////////////////////////////////// //print results in a database format /////////////////////////////////////////////////////////////////////////// void printResultsCSV(unsigned int *memSizes, double* bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc) { std::string sConfig; // log config information if (kind == DEVICE_TO_DEVICE) { sConfig += "D2D"; } else { if (kind == DEVICE_TO_HOST) { sConfig += "D2H"; } else if (kind == HOST_TO_DEVICE) { sConfig += "H2D"; } if(memMode == PAGEABLE) { sConfig += "-Paged"; } else if (memMode == PINNED) { sConfig += "-Pinned"; if (wc) { sConfig += "-WriteCombined"; } } } unsigned int i; double dSeconds = 0.0; for(i = 0; i < count; i++) { dSeconds = (double)memSizes[i] / (bandwidths[i] * (double)(1<<20)); shrLogEx(LOGBOTH | MASTER, 0, "bandwidthTest-%s, Bandwidth = %.1f MB/s, Time = %.5f s, Size = %u bytes, NumDevsUsed = %d\n", sConfig.c_str(), bandwidths[i], dSeconds, memSizes[i], iNumDevs); } } /////////////////////////////////////////////////////////////////////////// //Print help screen /////////////////////////////////////////////////////////////////////////// void printHelp(void) { shrLog("Usage: bandwidthTest [OPTION]...\n"); shrLog("Test the bandwidth for device to host, host to device, and device to device transfers\n"); shrLog("\n"); shrLog("Example: measure the bandwidth of device to host pinned memory copies in the range 1024 Bytes to 102400 Bytes in 1024 Byte increments\n"); shrLog("./bandwidthTest --memory=pinned --mode=range --start=1024 --end=102400 --increment=1024 --dtoh\n"); shrLog("\n"); shrLog("Options:\n"); shrLog("--help\tDisplay this help menu\n"); shrLog("--csv\tPrint results as a CSV\n"); shrLog("--device=[deviceno]\tSpecify the device device to be used\n"); shrLog(" all - compute cumulative bandwidth on all the devices\n"); shrLog(" 0,1,2,...,n - Specify any particular device to be used\n"); shrLog("--memory=[MEMMODE]\tSpecify which memory mode to use\n"); shrLog(" pageable - pageable memory\n"); shrLog(" pinned - non-pageable system memory\n"); shrLog("--mode=[MODE]\tSpecify the mode to use\n"); shrLog(" quick - performs a quick measurement\n"); shrLog(" range - measures a user-specified range of values\n"); shrLog(" shmoo - performs an intense shmoo of a large range of values\n"); shrLog("--htod\tMeasure host to device transfers\n"); shrLog("--dtoh\tMeasure device to host transfers\n"); shrLog("--dtod\tMeasure device to device transfers\n"); #if CUDART_VERSION >= 2020 shrLog("--wc\tAllocate pinned memory as write-combined\n"); #endif shrLog("--cputiming\tForce CPU-based timing always\n"); shrLog("Range mode options\n"); shrLog("--start=[SIZE]\tStarting transfer size in bytes\n"); shrLog("--end=[SIZE]\tEnding transfer size in bytes\n"); shrLog("--increment=[SIZE]\tIncrement size in bytes\n"); }
namespace amgx { namespace aggregation { /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __host__ __device__ int hash_function(int a, int seed = 17) { a ^= seed; a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) + (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a ^ 0xd3a2646c) + (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) + (a >> 16); return a; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Value_type, int NUM_THREADS_PER_ROW, int CTA_SIZE, int WARP_SIZE > __global__ void compute_edge_weights_diag( const int A_num_rows, const int block_size, const int num_owned, const int *__restrict A_rows, const int *__restrict A_cols, const int *__restrict A_diag, const Value_type *__restrict A_vals, Value_type *__restrict A_edge_weights ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Shared memory to broadcast columns. __shared__ volatile int s_bcast_col[CTA_SIZE]; // Shared memory to store columns ji. __shared__ volatile Value_type s_bcast_ji[CTA_SIZE]; // Constants. // const int lane_id_div_num_threads = lane_id / NUM_THREADS_PER_ROW; // const int lane_id_mod_num_threads = lane_id % NUM_THREADS_PER_ROW; // First threads load the row IDs of A needed by the CTA... int a_row_id = blockIdx.x * NUM_WARPS_PER_CTA + warp_id; // Loop over rows of A. for ( ; a_row_id < A_num_rows ; a_row_id += gridDim.x * NUM_WARPS_PER_CTA ) { // The diagonal of (i,i). Value_type row_diag = A_vals[block_size * A_diag[a_row_id]]; // Load the range of the row. int a_col_begin = A_rows[a_row_id + 0]; int a_col_end = A_rows[a_row_id + 1]; // Iterate over the columns of A. for ( ; a_col_begin < a_col_end ; a_col_begin += WARP_SIZE ) { // Each lane works on a different column. const int a_col_it = a_col_begin + lane_id; // Is it an active thread. int is_active = a_col_it < a_col_end; // Active nodes load column ids. int a_col_id = -1; if ( is_active ) { a_col_id = A_cols[a_col_it]; } s_bcast_col[threadIdx.x] = a_col_id; // Reset ji coefficients. s_bcast_ji[threadIdx.x] = Value_type(0); // Iterate over columns to find ji coefficients if they exist. for ( int k = 0, num_cols = __popc(utils::ballot(is_active)) ; k < num_cols ; ++k ) { const int uniform_col_id = s_bcast_col[warp_id * WARP_SIZE + k]; // Skip columns whose id is greater than num_owned. if ( uniform_col_id == -1 || uniform_col_id == a_row_id || uniform_col_id >= num_owned ) { continue; } // The bounds of the row. int b_row_begin = A_rows[uniform_col_id + 0]; int b_row_end = A_rows[uniform_col_id + 1]; // Iterate over the row. for ( int not_found = 1 ; not_found && b_row_begin < b_row_end ; b_row_begin += WARP_SIZE ) { const int b_row_it = b_row_begin + lane_id; // Load the column id. int b_col_id = -1; if ( b_row_it < b_row_end ) { b_col_id = A_cols[b_row_it]; } // Has anyone found the column. not_found = utils::all( b_col_id != a_row_id ); // If someone found the column, ask it to load the value. if ( b_col_id == a_row_id ) { s_bcast_ji[warp_id * WARP_SIZE + k] = A_vals[block_size * b_row_it]; } } } // Deactivate "invalid" threads. is_active = is_active && a_col_id < num_owned; // The diagonal value associated with the column. 
Value_type col_diag(0); if ( is_active ) { col_diag = A_vals[block_size * A_diag[a_col_id]]; } // The value of the column. Value_type col_val(0); if ( is_active ) { col_val = A_vals[block_size * a_col_it]; } // Compute the denominator. Value_type den = max( abs(row_diag), abs(col_diag) ); // Compute the weight of the edge. Value_type weight(0); if ( den != Value_type(0) ) { weight = Value_type(0.5) * (abs(col_val) + abs(s_bcast_ji[threadIdx.x])) / den; } if ( is_active ) { A_edge_weights[a_col_it] = weight; } } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< int CTA_SIZE, int WARP_SIZE > __global__ void compute_ring_leader( const int A_num_rows, const int num_owned, const int *__restrict A_rows, const int *__restrict A_cols, const int *__restrict is_aggregated, const int *__restrict in_leader_id, const int *__restrict in_leader_hash, int *__restrict out_leader_id, int *__restrict out_leader_hash ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // First threads load the row IDs of A needed by the CTA... int a_row_id = blockIdx.x * NUM_WARPS_PER_CTA + warp_id; // Loop over rows of A. for ( ; a_row_id < A_num_rows ; a_row_id += gridDim.x * NUM_WARPS_PER_CTA ) { // Skip already aggregated vertices. if ( is_aggregated[a_row_id] ) { continue; } // Load the range of the row. int a_col_begin = A_rows[a_row_id + 0]; int a_col_end = A_rows[a_row_id + 1]; // The max hash in my ith ring. int my_min_id = A_num_rows; int my_max_hash = INT_MIN; if ( in_leader_id ) { my_min_id = in_leader_id [a_row_id]; my_max_hash = in_leader_hash[a_row_id]; } else { my_min_id = a_row_id; my_max_hash = hash_function(a_row_id); } // Iterate over the columns of A. for ( ; a_col_begin < a_col_end ; a_col_begin += WARP_SIZE ) { // Each lane works on a different column. const int a_col_it = a_col_begin + lane_id; // Is it an active thread. int is_active = a_col_it < a_col_end; // Active nodes load column ids. int a_col_id = -1; if ( is_active ) { a_col_id = A_cols[a_col_it]; } // Compute the hash value if needed. int col_min_id = A_num_rows; int col_max_hash = INT_MIN; if ( in_leader_id ) { if ( is_active && !is_aggregated[a_col_id] ) { col_min_id = in_leader_id [a_col_id]; col_max_hash = in_leader_hash[a_col_id]; } } else { if ( is_active && !is_aggregated[a_col_id] ) { col_min_id = a_col_id; col_max_hash = hash_function(a_col_id); } } // Update the max_hash if needed. if ( is_active && (my_max_hash < col_max_hash || (my_max_hash == col_max_hash && my_min_id >= col_min_id)) ) { my_min_id = col_min_id; my_max_hash = col_max_hash; } } // Reduce the max hash. #pragma unroll for ( int mask = WARP_SIZE / 2 ; mask > 0 ; mask >>= 1 ) { int other_min_id = utils::shfl_xor(my_min_id, mask); int other_max_hash = utils::shfl_xor(my_max_hash, mask); if ( other_max_hash > my_max_hash || (other_max_hash == my_max_hash && other_min_id < my_min_id) ) { my_min_id = other_min_id; my_max_hash = other_max_hash; } } // The 1st thread stores the result. 
if ( lane_id == 0 ) { out_leader_id [a_row_id] = my_min_id; out_leader_hash[a_row_id] = my_max_hash; } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Value_type, int CTA_SIZE, int WARP_SIZE > __global__ void build_aggregates( const int num_rings, const int A_num_rows, const int num_owned, const int *__restrict A_rows, const int *__restrict A_cols, int *__restrict is_aggregated, const int *__restrict in_leader_id, const int *__restrict in_leader_hash, const Value_type *__restrict edge_weights, int *__restrict num_aggregates, int *__restrict aggregates, int *__restrict num_unaggregated) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // First threads load the row IDs of A needed by the CTA... int a_row_id = blockIdx.x * NUM_WARPS_PER_CTA + warp_id; // Loop over rows of A. for ( ; a_row_id < A_num_rows ; a_row_id += gridDim.x * NUM_WARPS_PER_CTA ) { // Skip already aggregated vertices. if ( is_aggregated[a_row_id] ) { continue; } // The max hash in my ith ring. int my_hash = hash_function(a_row_id), my_max_hash = in_leader_hash[a_row_id]; // Skip if I'm not a local king. if ( my_hash < my_max_hash || (my_hash == my_max_hash && a_row_id > in_leader_id[a_row_id]) ) { continue; } // We start at the row. int curr_row = a_row_id; // Iterate to form the aggregate. for ( int aggregate_size = 1, aggregate_id = -1 ; aggregate_size <= num_rings ; ++aggregate_size ) { // Load the range of the row. int a_col_begin = A_rows[curr_row + 0]; int a_col_end = A_rows[curr_row + 1]; // The max weight. int max_id = -1; float max_weight = -1.0f; // The max aggregated weight. int max_aggregated_id = -1; float max_aggregated_weight = -1.0f; // Iterate over the columns of A. for ( ; a_col_begin < a_col_end ; a_col_begin += WARP_SIZE ) { // Each lane works on a different column. const int a_col_it = a_col_begin + lane_id; // Is it an active thread. int is_active = a_col_it < a_col_end; // Active nodes load column ids. int a_col_id = -1; float a_col_weight = -1.0f; if ( is_active ) { a_col_id = A_cols[a_col_it]; a_col_weight = static_cast<float>(edge_weights[a_col_it]); } // Is the column aggregated? int is_col_aggregated = is_active && is_aggregated[a_col_id]; // Select the column if it's not aggregated and its weight is better. if ( is_active && !is_col_aggregated && a_col_weight > max_weight ) { max_id = a_col_id; max_weight = a_col_weight; } // Update max aggregated weight if needed. if ( is_active && is_col_aggregated && a_col_weight > max_aggregated_weight ) { max_aggregated_id = a_col_id; max_aggregated_weight = a_col_weight; } } // Is there a valid max_id. int valid_max_id = utils::any(max_id != -1, utils::activemask()); // No valid max id? if ( !valid_max_id && aggregate_size > 1 ) { break; } // Find the max id. if ( !valid_max_id ) { max_id = max_aggregated_id; max_weight = max_aggregated_weight; } #pragma unroll for ( int mask = WARP_SIZE / 2 ; mask > 0 ; mask >>= 1 ) { int other_max_id = utils::shfl_xor(max_id, mask); float other_max_weight = utils::shfl_xor(max_weight, mask); if ( other_max_weight > max_weight ) { max_id = other_max_id; max_weight = other_max_weight; } } // We know it's a singleton so merge with an existing aggregate. 
if ( !valid_max_id ) { if ( lane_id == 0 && max_id != -1 ) { is_aggregated[curr_row] = 1; aggregates[curr_row] = aggregates[max_id]; atomicAdd(num_unaggregated, -1); } break; } // It's not a singleton but the 1st vertex in the aggregate. if ( lane_id == 0 && aggregate_size == 1 ) { aggregate_id = atomicAdd(num_aggregates, 1); is_aggregated[a_row_id] = 1; aggregates[a_row_id] = aggregate_id; atomicAdd(num_unaggregated, -1); } // Set the aggregate of the winner. if ( lane_id == 0 ) { aggregates[max_id] = aggregate_id; is_aggregated[max_id] = 1; atomicAdd(num_unaggregated, -1); } // Set the next row to consider. curr_row = max_id; } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// enum { WARP_SIZE = 32 }; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void ParallelGreedySelector<TemplateConfig<AMGX_device, V, M, I> >::setAggregates_1x1( const MatrixType &A, IVector &aggregates, IVector &aggregates_global, int &num_aggregates ) {} template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void ParallelGreedySelector<TemplateConfig<AMGX_device, V, M, I> >::setAggregates_common_sqblocks( const MatrixType &A, IVector &aggregates, IVector &aggregates_global, int &num_aggregates ) { const int num_rings = 4; // The number of rows of A. const int num_rows = A.get_num_rows(); // The size of the block. const int block_size = A.get_block_dimx() * A.get_block_dimy(); // The number of threads per CTA. const int CTA_SIZE = 128; // The number of warps per CTA. const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // The number of CTAs in a grid where each thread is independent. const int THREAD_GRID_SIZE = (A.get_num_rows() + CTA_SIZE - 1) / CTA_SIZE; // The number of CTAs in a grid where each warp proceeds a row. const int WARP_GRID_SIZE = (A.get_num_rows() + NUM_WARPS_PER_CTA - 1) / NUM_WARPS_PER_CTA; // Edge weights. device_vector_alloc<ValueType> edge_weights(A.get_num_nz()); // Compute edge weights. compute_edge_weights_diag<ValueType, 32, CTA_SIZE, WARP_SIZE> <<< WARP_GRID_SIZE, CTA_SIZE>>>( num_rows, block_size, num_rows, // It should be num_owned!!! A.row_offsets.raw(), A.col_indices.raw(), A.diag.raw(), A.values.raw(), thrust::raw_pointer_cast( &edge_weights.front() ) ); cudaCheckError(); // Make sure there's enough room to store aggregates. aggregates.resize(num_rows); // The number of aggregates. device_vector_alloc<int> dev_num_aggregates(1, 0), dev_num_unaggregated(1, num_rows); // Is a vertex already aggregated. device_vector_alloc<int> is_aggregated(num_rows, 0); // Hash values. device_vector_alloc<int> ring_leader_id0_array(num_rows); device_vector_alloc<int> ring_leader_id1_array(num_rows); device_vector_alloc<int> ring_leader_hash0_array(num_rows); device_vector_alloc<int> ring_leader_hash1_array(num_rows); // Is there any unaggregated vertex? int num_unaggregated = num_rows; // Iterate until all vertices are aggregated. while ( num_unaggregated > 0 ) { int *ring_leader_id0 = thrust::raw_pointer_cast(&ring_leader_id0_array.front()); int *ring_leader_id1 = thrust::raw_pointer_cast(&ring_leader_id1_array.front()); int *ring_leader_hash0 = thrust::raw_pointer_cast(&ring_leader_hash0_array.front()); int *ring_leader_hash1 = thrust::raw_pointer_cast(&ring_leader_hash1_array.front()); // Count N-ring of vertices. 
for ( int i = 0 ; i < 2 * num_rings ; ++i ) { compute_ring_leader<CTA_SIZE, WARP_SIZE> <<< WARP_GRID_SIZE, CTA_SIZE>>>( num_rows, num_rows, // It should be num_owned!!! A.row_offsets.raw(), A.col_indices.raw(), thrust::raw_pointer_cast(&is_aggregated.front()), i == 0 ? NULL : ring_leader_id0, i == 0 ? NULL : ring_leader_hash0, ring_leader_id1, ring_leader_hash1); cudaCheckError(); std::swap(ring_leader_id0, ring_leader_id1); std::swap(ring_leader_hash0, ring_leader_hash1); } // Perform the assignments to aggregates. build_aggregates<ValueType, CTA_SIZE, WARP_SIZE> <<< WARP_GRID_SIZE, CTA_SIZE>>>( num_rings, num_rows, num_rows, // It should be num_owned!!! A.row_offsets.raw(), A.col_indices.raw(), thrust::raw_pointer_cast(&is_aggregated.front()), ring_leader_id0, ring_leader_hash0, thrust::raw_pointer_cast(&edge_weights.front()), thrust::raw_pointer_cast(&dev_num_aggregates.front()), aggregates.raw(), thrust::raw_pointer_cast(&dev_num_unaggregated.front())); cudaCheckError(); // Number of aggregated vertices. num_unaggregated = dev_num_unaggregated[0]; } // The number of aggregates. num_aggregates = dev_num_aggregates[0]; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void ParallelGreedySelector<TemplateConfig<AMGX_host, V, M, I> >::setAggregates_1x1( const MatrixType &A, IVector &aggregates, IVector &aggregates_global, int &num_aggregates ) {} template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void ParallelGreedySelector<TemplateConfig<AMGX_host, V, M, I> >::setAggregates_common_sqblocks( const MatrixType &A, IVector &aggregates, IVector &aggregates_global, int &num_aggregates ) {} /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< class T_Config > ParallelGreedySelector_Base<T_Config>::ParallelGreedySelector_Base( AMG_Config &cfg, const std::string &cfg_scope ) : Selector<T_Config>() { } template< class T_Config > void ParallelGreedySelector_Base<T_Config>::setAggregates( MatrixType &A, IVector &aggregates, IVector &aggregates_global, int &num_aggregates ) { setAggregates_common_sqblocks( A, aggregates, aggregates_global, num_aggregates ); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #define AMGX_CASE_LINE(CASE) template class ParallelGreedySelector_Base<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class ParallelGreedySelector<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace aggregation } // namespace amgx
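///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// compute_ring_leader elects a leader per vertex by comparing (hash, id) pairs:
// a candidate wins if its hash is larger, or if the hashes tie and its id is
// smaller; repeating the pass propagates leaders ring by ring. The host-side
// sketch below replays one such pass on a small CSR graph using the same hash
// as the kernels. The 4-vertex chain graph and the serial loop are illustrative
// only and are not part of the AMGX selector.
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#include <cstdio>
#include <vector>

static int ring_leader_hash_function(int a, int seed = 17)
{
    // Identical to the device hash_function defined above.
    a ^= seed;
    a = (a + 0x7ed55d16) + (a << 12);
    a = (a ^ 0xc761c23c) + (a >> 19);
    a = (a + 0x165667b1) + (a << 5);
    a = (a ^ 0xd3a2646c) + (a << 9);
    a = (a + 0xfd7046c5) + (a << 3);
    a = (a ^ 0xb55a4f09) + (a >> 16);
    return a;
}

int main()
{
    // Undirected chain 0-1-2-3 stored in CSR form.
    const int n = 4;
    std::vector<int> row_offsets = { 0, 1, 3, 5, 6 };
    std::vector<int> col_indices = { 1, 0, 2, 1, 3, 2 };

    // One ring of the election: every vertex keeps the best (hash, id) pair
    // among itself and its direct neighbours.
    for (int v = 0; v < n; ++v)
    {
        int best_id   = v;
        int best_hash = ring_leader_hash_function(v);

        for (int k = row_offsets[v]; k < row_offsets[v + 1]; ++k)
        {
            int u = col_indices[k];
            int h = ring_leader_hash_function(u);

            if (h > best_hash || (h == best_hash && u < best_id))
            {
                best_id   = u;
                best_hash = h;
            }
        }

        printf("vertex %d -> 1-ring leader %d (hash %d)\n", v, best_id, best_hash);
    }

    return 0;
}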
#include <prnn/detail/matrix/matrix_operations.h> #include <prnn/detail/matrix/matrix_transforms.h> #include <prnn/detail/matrix/copy_operations.h> #include <prnn/detail/matrix/matrix.h> #include <prnn/detail/matrix/matrix_view.h> #include <prnn/detail/matrix/operation.h> #include <prnn/detail/parallel/multi_bulk_synchronous_parallel.h> #include <prnn/detail/parallel/shared_memory_allocator.h> #include <prnn/detail/util/metaprogramming.h> // Standard Library Includes #include <tuple> namespace prnn { namespace matrix { namespace detail { template<typename NativeType, typename OperationType> class BinaryApplyLambda { public: CUDA_DECORATOR void operator()(parallel::ThreadGroup threadGroup) const { for(size_t i = threadGroup.id(), step = threadGroup.size(); i < elements; i += step) { resultBase[i] = nativeOperation(leftBase[i], rightBase[i]); } } public: NativeType* resultBase; const NativeType* leftBase; const NativeType* rightBase; public: OperationType nativeOperation; public: size_t elements; }; template<typename NativeType, typename OperationType> class BinaryNoncontiguousApplyLambda { public: CUDA_DECORATOR void operator()(parallel::ThreadGroup threadGroup) const { for(size_t i = threadGroup.id(), step = threadGroup.size(); i < elements; i += step) { auto fullDimension = linearToDimension(i, resultView.size()); resultView(fullDimension) = nativeOperation(leftView(fullDimension), rightView(fullDimension)); } } public: MatrixView<NativeType> resultView; ConstMatrixView<NativeType> leftView; ConstMatrixView<NativeType> rightView; public: OperationType nativeOperation; public: size_t elements; }; template<typename OperationType, typename T> void applyOverPrecisions(const DynamicView& result, const ConstDynamicView& left, const ConstDynamicView& right, const Operation& op, const Precision& precision, std::tuple<T> precisions) { typedef T PrecisionPrimitive; typedef typename PrecisionPrimitive::type NativeType; assert(precision == PrecisionPrimitive()); auto nativeOperation = static_cast<const OperationType&>(op); size_t elements = result.elements(); if(result.isContiguous() && left.isContiguous() && right.isContiguous()) { auto resultBase = result.data<NativeType>(); auto leftBase = left.data<NativeType>(); auto rightBase = right.data<NativeType>(); auto lambda = BinaryApplyLambda<NativeType, OperationType> {resultBase, leftBase, rightBase, nativeOperation, elements}; parallel::multiBulkSynchronousParallel(lambda); } else { MatrixView<NativeType> resultView(result); ConstMatrixView<NativeType> leftView(left); ConstMatrixView<NativeType> rightView(right); auto lambda = BinaryNoncontiguousApplyLambda<NativeType, OperationType> {resultView, leftView, rightView, nativeOperation, elements}; parallel::multiBulkSynchronousParallel(lambda); } } template<typename OperationType, typename PossiblePrecisions> void applyOverPrecisions(const DynamicView& result, const ConstDynamicView& left, const ConstDynamicView& right, const Operation& op, const Precision& precision, PossiblePrecisions precisions) { typedef typename std::tuple_element<0, PossiblePrecisions>::type PossiblePrecisionType; if(precision == PossiblePrecisionType()) { applyOverPrecisions<OperationType>(result, left, right, op, precision, std::tuple<PossiblePrecisionType>()); } else { typedef typename util::RemoveFirstType<PossiblePrecisions>::type RemainingPrecisions; applyOverPrecisions<OperationType>(result, left, right, op, precision, RemainingPrecisions()); } } template<typename T> void applyOverOperations(const DynamicView& result, 
const ConstDynamicView& left, const ConstDynamicView& right, const Operation& op, const Precision& precision, const std::tuple<T>& operations) { typedef T PossibleOperationType; assert(op == PossibleOperationType()); applyOverPrecisions<PossibleOperationType, AllPrecisions>(result, left, right, op, precision, AllPrecisions()); } template<typename PossibleOperations> void applyOverOperations(const DynamicView& result, const ConstDynamicView& left, const ConstDynamicView& right, const Operation& op, const Precision& precision, const PossibleOperations& operations) { typedef typename std::tuple_element<0, PossibleOperations>::type PossibleOperationType; if(op == PossibleOperationType()) { applyOverOperations(result, left, right, op, precision, std::tuple<PossibleOperationType>()); } else { typedef typename util::RemoveFirstType<PossibleOperations>::type RemainingOperations; applyOverOperations(result, left, right, op, precision, RemainingOperations()); } } void applyOverOperations(const DynamicView& result, const ConstDynamicView& left, const ConstDynamicView& right, const Operation& op, const Precision& precision) { applyOverOperations<AllBinaryOperations>(result, left, right, op, precision, AllBinaryOperations()); } } void apply(const DynamicView& result, const ConstDynamicView& left, const ConstDynamicView& right, const Operation& op) { auto precision = left.precision(); assert(left.size() == right.size()); assert(result.size() == right.size()); assert(left.precision() == right.precision()); assert(result.precision() == right.precision()); detail::applyOverOperations(result, left, right, op, precision); } void apply(Matrix& result, const Matrix& left, const Matrix& right, const Operation& op) { apply(DynamicView(result), ConstDynamicView(left), ConstDynamicView(right), op); } Matrix apply(const Matrix& left, const Matrix& right, const Operation& op) { assert(left.size() == right.size()); assert(left.precision() == right.precision()); Matrix temp(left.size(), left.precision()); apply(temp, left, right, op); return temp; } namespace detail { template<typename NativeType, typename OperationType> class UnaryApplyLambda { public: CUDA_DECORATOR void operator()(parallel::ThreadGroup threadGroup) { for(size_t i = threadGroup.id(), step = threadGroup.size(); i < elements; i += step) { resultBase[i] = nativeOperation(inputBase[i]); } } public: NativeType* resultBase; const NativeType* inputBase; public: size_t elements; public: OperationType nativeOperation; }; template<typename NativeType, typename OperationType> class UnaryNoncontiguousApplyLambda { public: CUDA_DECORATOR void operator()(parallel::ThreadGroup threadGroup) { for(size_t i = threadGroup.id(), step = threadGroup.size(); i < elements; i += step) { auto dimension = linearToDimension(i, resultView.size()); resultView(dimension) = nativeOperation(inputView(dimension)); } } public: MatrixView<NativeType> resultView; ConstMatrixView<NativeType> inputView; public: size_t elements; public: OperationType nativeOperation; }; template<typename OperationType, typename T> void applyOverPrecisions(const DynamicView& result, const ConstDynamicView& input, const Operation& op, const Precision& precision, std::tuple<T> precisions) { typedef T PrecisionPrimitive; typedef typename PrecisionPrimitive::type NativeType; assert(precision == PrecisionPrimitive()); auto nativeOperation = static_cast<const OperationType&>(op); size_t elements = result.elements(); if(input.isContiguous() && result.isContiguous()) { auto resultBase = result.data<NativeType>(); 
auto inputBase = input.data<NativeType>(); auto lambda = UnaryApplyLambda<NativeType, OperationType> {resultBase, inputBase, elements, nativeOperation}; parallel::multiBulkSynchronousParallel(lambda); } else { MatrixView<NativeType> resultView(result); ConstMatrixView<NativeType> inputView(input); auto lambda = UnaryNoncontiguousApplyLambda<NativeType, OperationType> {resultView, inputView, elements, nativeOperation}; parallel::multiBulkSynchronousParallel(lambda); } } template<typename OperationType, typename PossiblePrecisions> void applyOverPrecisions(const DynamicView& result, const ConstDynamicView& input, const Operation& op, const Precision& precision, PossiblePrecisions precisions) { typedef typename std::tuple_element<0, PossiblePrecisions>::type PossiblePrecisionType; if(precision == PossiblePrecisionType()) { applyOverPrecisions<OperationType>(result, input, op, precision, std::tuple<PossiblePrecisionType>()); } else { typedef typename util::RemoveFirstType<PossiblePrecisions>::type RemainingPrecisions; applyOverPrecisions<OperationType>(result, input, op, precision, RemainingPrecisions()); } } template<typename T> void applyOverOperations(const DynamicView& result, const ConstDynamicView& input, const Operation& op, const Precision& precision, const std::tuple<T>& operations) { typedef T PossibleOperationType; assert(op == PossibleOperationType()); applyOverPrecisions<PossibleOperationType, AllPrecisions>(result, input, op, precision, AllPrecisions()); } template<typename PossibleOperations> void applyOverOperations(const DynamicView& result, const ConstDynamicView& input, const Operation& op, const Precision& precision, const PossibleOperations& operations) { typedef typename std::tuple_element<0, PossibleOperations>::type PossibleOperationType; if(op == PossibleOperationType()) { applyOverOperations(result, input, op, precision, std::tuple<PossibleOperationType>()); } else { typedef typename util::RemoveFirstType<PossibleOperations>::type RemainingOperations; applyOverOperations(result, input, op, precision, RemainingOperations()); } } void applyOverOperations(const DynamicView& result, const ConstDynamicView& input, const Operation& op, const Precision& precision) { applyOverOperations<AllUnaryOperations>(result, input, op, precision, AllUnaryOperations()); } } void apply(const DynamicView& result, const ConstDynamicView& input, const Operation& op) { detail::applyOverOperations(result, input, op, input.precision()); } void apply(Matrix& result, const Matrix& input, const Operation& op) { apply(DynamicView(result), ConstDynamicView(input), op); } Matrix apply(const Matrix& input, const Operation& op) { Matrix result(input.size(), input.precision()); apply(result, input, op); return result; } namespace detail { static const size_t tileSize = 8; template<typename NativeType, typename ActualOperation> class ReduceAllDimensionsStepLambda { public: CUDA_DECORATOR void operator()(parallel::ThreadGroup threadGroup) const { for(size_t i = threadGroup.id(); i < elements; i += threadGroup.size()) { NativeType value = rawInput[i]; for(size_t inputElement = i + elements; inputElement < inputElements; inputElement += elements) { value = nativeOperation(value, rawInput[inputElement]); } rawResult[i] = value; } } public: NativeType* rawResult; const NativeType* rawInput; public: size_t elements; size_t inputElements; public: ActualOperation nativeOperation; }; template <typename ActualOperation, typename ActualPrecision> void reduceAllDimensionsStep(Matrix& result, const Matrix& input, 
const ActualOperation& nativeOperation, const ActualPrecision& p) { typedef typename ActualPrecision::type NativeType; NativeType* rawResult = static_cast<NativeType*>(result.data()); const NativeType* rawInput = static_cast<const NativeType*>(input.data()); auto lambda = ReduceAllDimensionsStepLambda<NativeType, ActualOperation>{rawResult, rawInput, result.elements(), input.elements(), nativeOperation}; parallel::multiBulkSynchronousParallel(lambda); } template <typename ActualOperation, typename ActualPrecision> void reduceAllDimensions(Matrix& result, const Matrix& input, const ActualOperation& nativeOperation, const ActualPrecision& p) { if(input.elements() < tileSize) { return reduceAllDimensionsStep(result, input, nativeOperation, p); } Matrix temporary({(input.elements() + tileSize - 1) / tileSize}, p); reduceAllDimensionsStep(temporary, input, nativeOperation, p); reduceAllDimensions(result, temporary, nativeOperation, p); } template<typename NativeType, typename ActualOperation> class ReduceFirstDimensionLambda { public: CUDA_DECORATOR void operator()(parallel::ThreadGroup threadGroup) const { auto innerGroup = parallel::partitionThreadGroupAtLevel(threadGroup, 2); auto relativeGroup = parallel::getRelativeGroup(innerGroup, threadGroup); size_t rows = inputElements / columns; for(size_t column = relativeGroup.id(); column < columns; column += relativeGroup.size()) { size_t row = innerGroup.id(); size_t position = column * rows + row; size_t endPosition = (column + 1) * rows; if(innerGroup.size() <= rows) { NativeType value = rawInput[position]; for(position += innerGroup.size(); position < endPosition; position += innerGroup.size()) { value = nativeOperation(value, rawInput[position]); } value = reduce(innerGroup, value, nativeOperation); if(innerGroup.id() == 0) { rawResult[column] = value; } } else if(innerGroup.id() == 0) { NativeType value = rawInput[position]; ++position; for(size_t row = 1; row < rows; ++row, ++position) { value = nativeOperation(value, rawInput[position]); } rawResult[column] = value; } } } public: NativeType* rawResult; const NativeType* rawInput; public: size_t columns; size_t inputElements; public: ActualOperation nativeOperation; }; template <typename ActualOperation, typename ActualPrecision> void reduceFirstDimension(Matrix& result, const Matrix& input, const ActualOperation& nativeOperation, const ActualPrecision& p) { typedef typename ActualPrecision::type NativeType; NativeType* rawResult = static_cast<NativeType*>(result.data()); const NativeType* rawInput = static_cast<const NativeType*>(input.data()); auto lambda = ReduceFirstDimensionLambda<NativeType, ActualOperation>{rawResult, rawInput, result.elements(), input.elements(), nativeOperation}; parallel::multiBulkSynchronousParallel(lambda); } template<typename NativeType, typename ActualOperation> class ReduceSecondDimensionLambda { public: CUDA_DECORATOR void operator()(parallel::ThreadGroup threadGroup) const { auto warp = partitionThreadGroupAtLevel(threadGroup, 1); auto cta = partitionThreadGroupAtLevel(threadGroup, 2); auto ctaInKernel = getRelativeGroup(cta, threadGroup); auto warpInCta = getRelativeGroup(warp, cta); auto* memory = parallel::SharedMemoryAllocator<NativeType, parallel::GroupLevelSize<2>::size()>::allocate(); size_t start = ctaInKernel.id() * warp.size(); for(size_t startingRow = start; startingRow < rows; startingRow += warp.size() * ctaInKernel.size()) { size_t row = startingRow + warp.id(); size_t startingColumnPosition = rows * warpInCta.id(); size_t columnStep = rows * 
warpInCta.size(); size_t startingPosition = startingColumnPosition + row; size_t position = startingPosition; NativeType value = 0; if(row < rows && position < inputElements) { value = rawInput[position]; for(position += columnStep; position < inputElements; position += columnStep) { value = nativeOperation(value, rawInput[position]); } } memory[cta.id()] = value; barrier(cta); if(warpInCta.id() == 0 && row < rows) { for(size_t sharedPosition = warp.id() + warp.size(), warpCounter = 1, position = row + rows; warpCounter < warpInCta.size() && position < inputElements; ++warpCounter, position += rows, sharedPosition += warp.size()) { value = nativeOperation(value, memory[sharedPosition]); } if (startingPosition < inputElements) { rawResult[row] = value; } } barrier(cta); } } public: NativeType* rawResult; const NativeType* rawInput; public: size_t rows; size_t inputElements; public: ActualOperation nativeOperation; }; template <typename ActualOperation, typename ActualPrecision> void reduceSecondDimension(Matrix& result, const Matrix& input, const ActualOperation& nativeOperation, const ActualPrecision& p) { typedef typename ActualPrecision::type NativeType; NativeType* rawResult = static_cast<NativeType*>(result.data()); const NativeType* rawInput = static_cast<const NativeType*>(input.data()); auto lambda = ReduceSecondDimensionLambda<NativeType, ActualOperation>{rawResult, rawInput, result.elements(), input.elements(), nativeOperation}; parallel::multiBulkSynchronousParallel(lambda); } template<typename NativeType, typename ActualOperation> class GenericReduceLambda { public: CUDA_DECORATOR void operator()(parallel::ThreadGroup threadGroup) const { auto innerGroup = parallel::partitionThreadGroupAtLevel(threadGroup, 2); auto relativeGroup = parallel::getRelativeGroup(innerGroup, threadGroup); for(size_t i = relativeGroup.id(); i < elements; i += relativeGroup.size()) { auto resultIndex = linearToDimension(i, resultView.size()); // find the start of the input slice auto inputBegin = selectNamedDimensions(dimensions, resultIndex, zeros(inputView.size())); // find the end of the input slice auto inputEnd = selectNamedDimensions(dimensions, resultIndex + ones(resultView.size()), inputView.size()); auto inputSlice = slice(inputView, inputBegin, inputEnd); // find the total size of the slice size_t sliceSize = inputSlice.elements(); // iterate over i linearly from 0 to size NativeType resultValue; if(innerGroup.size() <= sliceSize) { auto inputIndex = linearToDimension(innerGroup.id(), inputSlice.size()); resultValue = inputSlice(inputIndex); for(size_t inputLinearIndex = innerGroup.size() + innerGroup.id(); inputLinearIndex < sliceSize; inputLinearIndex += innerGroup.size()) { // get index for i in the input's space auto inputIndex = linearToDimension(inputLinearIndex, inputSlice.size()); // apply operator to resultValue, input[index] resultValue = nativeOperation(resultValue, inputSlice(inputIndex)); } resultValue = reduce(innerGroup, resultValue, nativeOperation); } else if(innerGroup.id() == 0) { auto inputIndex = linearToDimension(0, inputSlice.size()); resultValue = inputSlice(inputIndex); for(size_t inputLinearIndex = 1; inputLinearIndex < sliceSize; ++inputLinearIndex) { // get index for i in the input's space auto inputIndex = linearToDimension(inputLinearIndex, inputSlice.size()); // apply operator to resultValue, input[index] resultValue = nativeOperation(resultValue, inputSlice(inputIndex)); } } // save the result if(innerGroup.id() == 0) { resultView(resultIndex) = resultValue; } } 
} public: MatrixView<NativeType> resultView; ConstMatrixView<NativeType> inputView; public: size_t elements; public: ActualOperation nativeOperation; public: Dimension dimensions; }; template <typename ActualOperation, typename ActualPrecision> void reduce(Matrix& result, const Matrix& input, const Dimension& unsortedDimensions, const Operation& op, const std::tuple<ActualPrecision>& p) { typedef typename ActualPrecision::type NativeType; Dimension dimensions = unsortedDimensions; std::sort(dimensions.begin(), dimensions.end()); assert(ActualPrecision() == result.precision()); assert(result.precision() == input.precision()); assert(result.size() == removeDimensions(input.size(), dimensions)); size_t elements = result.elements(); auto nativeOperation = static_cast<const ActualOperation&>(op); // try to simplify the operation to a 2d reduction auto reshapedInput = input; auto reshapedResult = result; if(elements > 1 && input.isContiguous() && result.isContiguous() && isContiguous(dimensions) && (dimensions.front() == 0 || dimensions.back() == (input.size().size() - 1))) { size_t reducedElements = selectDimensions(input.size(), dimensions).product(); auto newInputSize = dimensions.front() == 0 ? Dimension(reducedElements, elements) : Dimension(elements, reducedElements); reshapedInput = reshape(input, newInputSize); reshapedResult = reshape(result, {result.elements()} ); dimensions = dimensions.front() == 0 ? Dimension(0) : Dimension(1); } // special case reduce down to a single element, and 2d reductions if(elements == 1 && reshapedInput.isContiguous() && reshapedResult.isContiguous()) { reduceAllDimensions(reshapedResult, reshapedInput, nativeOperation, ActualPrecision()); } else if(reshapedInput.size().size() == 2 && dimensions.size() == 1 && dimensions[0] == 0 && reshapedInput.isContiguous() && reshapedResult.isContiguous()) { reduceFirstDimension(reshapedResult, reshapedInput, nativeOperation, ActualPrecision()); } else if(reshapedInput.size().size() == 2 && dimensions.size() == 1 && dimensions[0] == 1 && reshapedInput.isContiguous() && reshapedResult.isContiguous()) { reduceSecondDimension(reshapedResult, reshapedInput, nativeOperation, ActualPrecision()); } else { // handle other types of reductions (note that this is much slower) MatrixView<NativeType> resultView(result); ConstMatrixView<NativeType> inputView(input); auto lambda = GenericReduceLambda<NativeType, ActualOperation>{resultView, inputView, elements, nativeOperation, dimensions}; parallel::multiBulkSynchronousParallel(lambda); } } template <typename ActualOperation, typename PossiblePrecisions> void reduce(Matrix& result, const Matrix& input, const Dimension& dimensions, const Operation& op, const PossiblePrecisions& possiblePrecisions) { typedef typename std::tuple_element<0, PossiblePrecisions>::type PossiblePrecisionType; if(result.precision() == PossiblePrecisionType()) { reduce<ActualOperation>(result, input, dimensions, op, std::tuple<PossiblePrecisionType>()); } else { typedef typename util::RemoveFirstType<PossiblePrecisions>::type RemainingPrecisions; reduce<ActualOperation>(result, input, dimensions, op, RemainingPrecisions()); } } template <typename PossibleOperation> void reduce(Matrix& result, const Matrix& input, const Dimension& dimensions, const Operation& op, const std::tuple<PossibleOperation>& p) { assert(PossibleOperation() == op); reduce<PossibleOperation>(result, input, dimensions, op, AllPrecisions()); } template <typename PossibleOperations> void reduce(Matrix& result, const Matrix& input, const 
Dimension& dimensions, const Operation& op, const PossibleOperations& possibleOperations) { typedef typename std::tuple_element<0, PossibleOperations>::type PossibleOperationType; if(op == PossibleOperationType()) { reduce(result, input, dimensions, op, std::tuple<PossibleOperationType>()); } else { typedef typename util::RemoveFirstType<PossibleOperations>::type RemainingOperations; reduce(result, input, dimensions, op, RemainingOperations()); } } } void reduce(Matrix& result, const Matrix& input, const Dimension& dimensions, const Operation& op) { detail::reduce(result, input, dimensions, op, AllBinaryOperations()); } Matrix reduce(const Matrix& input, const Dimension& dimensions, const Operation& op) { Matrix result(removeDimensions(input.size(), dimensions), input.precision()); reduce(result, input, dimensions, op); return result; } namespace detail { template <typename NativeType, typename ActualOperation> class BroadcastFirstOfTwoDimensionsLambda { public: CUDA_DECORATOR void operator()(parallel::ThreadGroup threadGroup) const { if(rows > columns) { for(size_t row = threadGroup.id(); row < rows; row += threadGroup.size()) { size_t offset = row; for(size_t column = 0; column < columns; ++column, offset += rows) { rawResult[offset] = nativeOperation(rawLeft[offset], rawRight[row]); } } } else { for(size_t column = threadGroup.id(); column < columns; column += threadGroup.size()) { size_t offset = column * rows; for(size_t row = 0; row < rows; ++row, ++offset) { rawResult[offset] = nativeOperation(rawLeft[offset], rawRight[row]); } } } } public: NativeType* rawResult; const NativeType* rawLeft; const NativeType* rawRight; public: size_t rows; size_t columns; public: ActualOperation nativeOperation; }; template <typename NativeType, typename ActualOperation> class BroadcastFirstOfTwoDimensionsWarpStrideLambda { public: CUDA_DECORATOR void operator()(parallel::ThreadGroup threadGroup) const { auto innerGroup = parallel::partitionThreadGroupAtLevel(threadGroup, 1); auto relativeGroup = parallel::getRelativeGroup(innerGroup, threadGroup); int globalOffset = relativeGroup.id() * rows; for(int column = relativeGroup.id(); column < columns; column += relativeGroup.size(), globalOffset += rows * relativeGroup.size()) { int offset = globalOffset + innerGroup.id(); for(int row = innerGroup.id(); row < rows; row += innerGroup.size(), offset += innerGroup.size()) { //std::printf("thread (%d, %d) handling (%d, %d)\n", threadGroup.id(), threadGroup.size(), row, column); rawResult[offset] = nativeOperation(rawLeft[offset], rawRight[row]); } } } public: NativeType* rawResult; const NativeType* rawLeft; const NativeType* rawRight; public: int rows; int columns; public: ActualOperation nativeOperation; }; template <typename ActualOperation, typename NativeType> void broadcastFirstOfTwoDimensions(Matrix& result, const Matrix& left, const Matrix& right, const ActualOperation& op) { NativeType* rawResult = static_cast<NativeType*>(result.data()); const NativeType* rawLeft = static_cast<const NativeType*>(left.data()); const NativeType* rawRight = static_cast<const NativeType*>(right.data()); size_t rows = left.size()[0]; size_t columns = left.size()[1]; if(rows < columns && rows >= 32 && (rows <= std::numeric_limits<int>::max() && columns <= std::numeric_limits<int>::max())) { auto lambda = BroadcastFirstOfTwoDimensionsWarpStrideLambda<NativeType, ActualOperation>{ rawResult, rawLeft, rawRight, static_cast<int>(rows), static_cast<int>(columns), op}; parallel::multiBulkSynchronousParallel(lambda); } else { 
auto lambda = BroadcastFirstOfTwoDimensionsLambda<NativeType, ActualOperation>{rawResult, rawLeft, rawRight, rows, columns, op}; parallel::multiBulkSynchronousParallel(lambda); } } template <typename NativeType, typename ActualOperation> class BroadcastSecondOfTwoDimensionsLambda { public: CUDA_DECORATOR void operator()(parallel::ThreadGroup threadGroup) const { if(rows > columns) { for(size_t row = threadGroup.id(); row < rows; row += threadGroup.size()) { size_t offset = row; for(size_t column = 0; column < columns; ++column, offset += rows) { rawResult[offset] = nativeOperation(rawLeft[offset], rawRight[column]); } } } else { for(size_t column = threadGroup.id(); column < columns; column += threadGroup.size()) { size_t offset = column * rows; for(size_t row = 0; row < rows; ++row, ++offset) { rawResult[offset] = nativeOperation(rawLeft[offset], rawRight[column]); } } } } public: NativeType* rawResult; const NativeType* rawLeft; const NativeType* rawRight; public: size_t rows; size_t columns; public: ActualOperation nativeOperation; }; template <typename ActualOperation, typename NativeType> void broadcastSecondOfTwoDimensions(Matrix& result, const Matrix& left, const Matrix& right, const ActualOperation& op) { NativeType* rawResult = static_cast<NativeType*>(result.data()); const NativeType* rawLeft = static_cast<const NativeType*>(left.data()); const NativeType* rawRight = static_cast<const NativeType*>(right.data()); auto lambda = BroadcastSecondOfTwoDimensionsLambda<NativeType, ActualOperation>{rawResult, rawLeft, rawRight, left.size()[0], left.size()[1], op}; parallel::multiBulkSynchronousParallel(lambda); } static Dimension fillOutDimension(const Dimension& d, const Dimension& leftSize, const Dimension& rightSize) { if(d.size() != 0) { return d; } Dimension retVal; for(auto i = leftSize.begin(), j = rightSize.begin(); i != leftSize.end(); ++i) { if((j != rightSize.end()) && (*i == *j)) { ++j; continue; } retVal.push_back(std::distance(leftSize.begin(), i)); } return retVal; } static Dimension invert(const Dimension& original, const Dimension& removed) { Dimension result; for(size_t i = 0; i < original.size(); ++i) { if(!isContained(removed, i)) { result.push_back(i); } } return result; } template <typename NativeType, typename ActualOperation> class BroadcastLambda { public: CUDA_DECORATOR void operator()(parallel::ThreadGroup threadGroup) const { for(size_t i = threadGroup.id(); i < elements; i += threadGroup.size()) { auto fullDimension = linearToDimension(i, resultView.size()); auto reducedDimension = selectDimensions(fullDimension, dimension); resultView(fullDimension) = nativeOperation(leftView(fullDimension), rightView(reducedDimension)); } } public: MatrixView<NativeType> resultView; ConstMatrixView<NativeType> leftView; ConstMatrixView<NativeType> rightView; public: size_t elements; public: ActualOperation nativeOperation; public: Dimension dimension; }; static bool isNotInnerDimension(const Dimension& size, const Dimension& dimension) { assert(dimension.front() < size.size()); bool leftOkay = true; // everything preceding it is one for(size_t i = 0; i < dimension.front(); ++i) { if(size[i] != 1) { leftOkay = false; break; } } if(leftOkay) { return true; } // everything after it is one for(size_t i = dimension.front() + 1; i < size.size(); ++i) { if(size[i] != 1) { return false; } } return true; } template <typename ActualOperation, typename ActualPrecision> void broadcast(Matrix& result, const Matrix& left, const Matrix& right, const Dimension& d, const Operation& op, 
const std::tuple<ActualPrecision>& p) { auto dimension = invert(left.size(), fillOutDimension(d, left.size(), right.size())); typedef typename ActualPrecision::type NativeType; assert(ActualPrecision() == result.precision()); assert(result.precision() == left.precision()); assert(result.precision() == right.precision()); assert(result.size() == left.size()); auto nativeOperation = static_cast<const ActualOperation&>(op); // try to simplify the operation to a 2d broadcast auto reshapedLeft = left; auto reshapedRight = right; auto reshapedResult = result; if(left.isContiguous() && result.isContiguous() && right.isContiguous() && dimension.size() == 1 && isNotInnerDimension(result.size(), dimension)) { Dimension leftDimension({1, 1}); bool isLeft = true; for(size_t i = 0; i < dimension.front(); ++i) { if(left.size()[i] != 1) { isLeft = false; break; } } if(isLeft) { for(size_t i = 0; i <= dimension.front(); ++i) { leftDimension[0] *= left.size()[i]; } leftDimension[1] = left.size().product() / leftDimension[0]; reshapedRight = reshape(right, leftDimension[0]); dimension.front() = 0; } else { for(size_t i = dimension.front(); i < left.size().size(); ++i) { leftDimension[1] *= left.size()[i]; } leftDimension[0] = left.size().product() / leftDimension[1]; reshapedRight = reshape(right, leftDimension[1]); dimension.front() = 1; } reshapedLeft = reshape(left, leftDimension); reshapedResult = reshape(result, leftDimension); if(isLeft) { broadcastFirstOfTwoDimensions<ActualOperation, NativeType>(reshapedResult, reshapedLeft, reshapedRight, nativeOperation); } else { broadcastSecondOfTwoDimensions<ActualOperation, NativeType>(reshapedResult, reshapedLeft, reshapedRight, nativeOperation); } } else { size_t elements = reshapedResult.elements(); MatrixView<NativeType> resultView(reshapedResult); ConstMatrixView<NativeType> leftView(reshapedLeft); ConstMatrixView<NativeType> rightView(reshapedRight); auto lambda = BroadcastLambda<NativeType, ActualOperation>{resultView, leftView, rightView, elements, nativeOperation, dimension}; parallel::multiBulkSynchronousParallel(lambda); } } template <typename ActualOperation, typename PossiblePrecisions> void broadcast(Matrix& result, const Matrix& left, const Matrix& right, const Dimension& d, const Operation& op, const PossiblePrecisions& possiblePrecisions) { typedef typename std::tuple_element<0, PossiblePrecisions>::type PossiblePrecisionType; if(result.precision() == PossiblePrecisionType()) { broadcast<ActualOperation>(result, left, right, d, op, std::tuple<PossiblePrecisionType>()); } else { typedef typename util::RemoveFirstType<PossiblePrecisions>::type RemainingPrecisions; broadcast<ActualOperation>(result, left, right, d, op, RemainingPrecisions()); } } template <typename PossibleOperation> void broadcast(Matrix& result, const Matrix& left, const Matrix& right, const Dimension& d, const Operation& op, const std::tuple<PossibleOperation>& p) { assert(PossibleOperation() == op); broadcast<PossibleOperation>(result, left, right, d, op, AllPrecisions()); } template <typename PossibleOperations> void broadcast(Matrix& result, const Matrix& left, const Matrix& right, const Dimension& d, const Operation& op, const PossibleOperations& possibleOperations) { typedef typename std::tuple_element<0, PossibleOperations>::type PossibleOperationType; if(op == PossibleOperationType()) { broadcast(result, left, right, d, op, std::tuple<PossibleOperationType>()); } else { typedef typename util::RemoveFirstType<PossibleOperations>::type RemainingOperations; 
broadcast(result, left, right, d, op, RemainingOperations()); } } } void broadcast(Matrix& result, const Matrix& left, const Matrix& right, const Dimension& d, const Operation& op) { detail::broadcast(result, left, right, d, op, AllBinaryOperations()); } Matrix broadcast(const Matrix& left, const Matrix& right, const Dimension& d, const Operation& op) { Matrix retVal(left.size(), left.precision()); broadcast(retVal, left, right, d, op); return retVal; } void zeros(const DynamicView& result) { apply(result, result, Fill(0.0)); } void zeros(Matrix& result) { zeros(DynamicView(result)); } Matrix zeros(const Dimension& size, const Precision& precision) { Matrix result(size, precision); zeros(result); return result; } void ones(Matrix& result) { apply(result, result, Fill(1.0)); } Matrix ones(const Dimension& size, const Precision& precision) { Matrix result(size, precision); ones(result); return result; } } }
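// The public entry points defined above (apply, reduce, broadcast, zeros, ones)
// compose into simple element-wise pipelines. The sketch below shows a plausible
// usage under the assumption that the surrounding prnn headers provide an Add
// operation and a SinglePrecision tag; those two names, like the matrix shapes,
// are assumptions for illustration and are not defined in this file.
#include <prnn/detail/matrix/matrix.h>
#include <prnn/detail/matrix/matrix_operations.h>
#include <prnn/detail/matrix/operation.h>

void exampleUsage()
{
    using namespace prnn::matrix;

    // A 4x3 matrix of ones and a matching matrix of zeros.
    Matrix a = ones ({4, 3}, SinglePrecision());
    Matrix b = zeros({4, 3}, SinglePrecision());

    // Element-wise binary apply: c = a + b.
    Matrix c = apply(a, b, Add());

    // Reduce away dimension 0: the 4 rows collapse into a length-3 vector.
    Matrix columnSums = reduce(c, {0}, Add());

    // Broadcast the per-column sums back along dimension 0 and add them to c.
    Matrix shifted = broadcast(c, columnSums, {0}, Add());

    (void)shifted;
}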
#include "utils.hpp" #include "kernels.cu" void benchmark( complex_t *sigma_in, complex_t *sigma_out, complex_t *hamiltonian, size_t size_sigma, size_t size_hamiltonian, complex_t *sigma_reference, complex_t *sigma_reference_transformed , const int dim, const int num, // global_work_size const int kernel_id, size_t vec_length, decltype(&transform_matrices_aos_to_aosoa) transformation_sigma, bool scale_hamiltonian, decltype(&transform_matrix_aos_to_soa) transformation_hamiltonian) { initialise_hamiltonian(hamiltonian, dim); if (scale_hamiltonian) transform_matrix_scale_aos(hamiltonian, dim); // pre-scale hamiltonian if (transformation_hamiltonian) transformation_hamiltonian(hamiltonian, dim); initialise_sigma(sigma_in, sigma_out, dim, num); std::memcpy(sigma_reference_transformed, sigma_reference, size_sigma * sizeof(complex_t)); // transform memory layout if a transformation is specified if (transformation_sigma) { // transform reference for comparison transformation_sigma(sigma_reference_transformed, dim, num, vec_length); // transform sigma transformation_sigma(sigma_in, dim, num, vec_length); } // extract the real and imag data real_2_t* ham = allocate_aligned<real_2_t>(size_hamiltonian); real_2_t* sin = allocate_aligned<real_2_t>(size_sigma); real_2_t* sout = allocate_aligned<real_2_t>(size_sigma); for (size_t i = 0; i < size_hamiltonian; i++) { ham[i].x = hamiltonian[i].real(); ham[i].y = hamiltonian[i].imag(); } for (size_t i = 0; i < size_sigma; i++) { sin[i].x = sigma_in[i].real(); sin[i].y = sigma_in[i].imag(); } for (size_t i = 0; i < size_sigma; i++) { sout[i].x = sigma_out[i].real(); sout[i].y = sigma_out[i].imag(); } // allocate device memory real_2_t *d_hamiltonian; real_2_t *d_sigma_in; real_2_t *d_sigma_out; cudaMalloc((void**)&d_hamiltonian, sizeof(real_2_t) * size_hamiltonian); cudaMemcpy(d_hamiltonian, ham, sizeof(real_2_t) * size_hamiltonian, cudaMemcpyHostToDevice); cudaMalloc((void**)&d_sigma_in, sizeof(real_2_t) * size_sigma); cudaMemcpy(d_sigma_in, sin, sizeof(real_2_t) * size_sigma, cudaMemcpyHostToDevice); cudaMalloc((void**)&d_sigma_out, sizeof(real_2_t) * size_sigma); // benchmark loop for (size_t i = 0; i < NUM_ITERATIONS; ++i) { // clear output cudaMemcpy(d_sigma_out, sout, sizeof(real_2_t) * size_sigma, cudaMemcpyHostToDevice); // empty kernel switch(kernel_id) { case 0: { dim3 k0_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG)); dim3 k0_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG); comm_empty<<<k0_gws, k0_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian); break; } // initial kernel case 1: { dim3 k1_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG)); dim3 k1_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG); comm_init<<<k1_gws, k1_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian, dim); break; } // refactored initial kernel case 2: { dim3 k2_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG)); dim3 k2_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG); comm_refactor<<<k2_gws, k2_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian, dim); break; } // refactored initial kernel with direct store case 3: { dim3 k3_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG)); dim3 k3_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG); comm_refactor_direct_store<<<k3_gws, k3_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian, dim); break; } // vectorised kernel with 1D range case 4: { dim3 k4_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG)); dim3 k4_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG); comm_aosoa_naive<<<k4_gws, k4_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian, dim); break; } // vectorised kernel with 1D range and compile time constants 
case 5: { dim3 k5_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG)); dim3 k5_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG); comm_aosoa_naive_constants<<<k5_gws, k5_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian, dim); break; } // vectorised kernel with 1D range, compile time constants, and permuted loops with temporaries case 6: { dim3 k6_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG)); dim3 k6_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG); comm_aosoa_naive_constants_perm<<<k6_gws, k6_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian); break; } // vectorised kernel with 1D range and direct store case 7: { dim3 k7_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG)); dim3 k7_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG); comm_aosoa_naive_direct<<<k7_gws, k7_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian, dim); break; } // vectorised kernel with 1D range, compile time constants, and direct store case 8: { dim3 k8_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG)); dim3 k8_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG); comm_aosoa_naive_constants_direct<<<k8_gws, k8_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian); break; } // vectorised kernel with 1D range, compile time constants, direct store, and permuted loops with temporaries case 9: { dim3 k9_gws (num / (VEC_LENGTH_AUTO * PACKAGES_PER_WG)); dim3 k9_lws (VEC_LENGTH_AUTO * PACKAGES_PER_WG); comm_aosoa_naive_constants_direct_perm<<<k9_gws, k9_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian); break; } // vectorised kernel with 2D-range case 10: { dim3 k10_gws (1, num / VEC_LENGTH_AUTO / PACKAGES_PER_WG); dim3 k10_lws (VEC_LENGTH_AUTO, PACKAGES_PER_WG); comm_aosoa<<<k10_gws, k10_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian, dim); break; } // vectorised kernel with 2D-range and compile-time constants case 11: { dim3 k11_gws (1, num / VEC_LENGTH_AUTO / PACKAGES_PER_WG); dim3 k11_lws (VEC_LENGTH_AUTO, PACKAGES_PER_WG); comm_aosoa_constants<<<k11_gws, k11_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian); break; } // vectorised kernel with 2D-range, compile-time constants, and permuted loops with temporaries case 12: { dim3 k12_gws (1, num / VEC_LENGTH_AUTO / PACKAGES_PER_WG); dim3 k12_lws (VEC_LENGTH_AUTO, PACKAGES_PER_WG); comm_aosoa_constants_perm<<<k12_gws, k12_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian); break; } // vectorised kernel with 2D range and direct store case 13: { dim3 k13_gws (1, num / VEC_LENGTH_AUTO / PACKAGES_PER_WG); dim3 k13_lws (VEC_LENGTH_AUTO, PACKAGES_PER_WG); comm_aosoa_direct<<<k13_gws, k13_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian, dim); break; } // vectorised kernel with 2D range, compile-time constants, and direct store case 14: { dim3 k14_gws (1, num / VEC_LENGTH_AUTO / PACKAGES_PER_WG); dim3 k14_lws (VEC_LENGTH_AUTO, PACKAGES_PER_WG); comm_aosoa_constants_direct<<<k14_gws, k14_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian); break; } // vectorised kernel with compile-time constants, direct store, and permuted loops with temporaries case 15: { dim3 k15_gws (1, num / VEC_LENGTH_AUTO / PACKAGES_PER_WG); dim3 k15_lws (VEC_LENGTH_AUTO, PACKAGES_PER_WG); comm_aosoa_constants_direct_perm<<<k15_gws, k15_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian); break; } // manually vectorised kernel case 16: { dim3 k16_gws (num / (VEC_LENGTH * VEC_LENGTH)); dim3 k16_lws (VEC_LENGTH); comm_manual_aosoa<<<k16_gws, k16_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian, dim); break; } // manually vectorised kernel with compile-time constants case 17: { dim3 k17_gws (num / (VEC_LENGTH * VEC_LENGTH)); dim3 k17_lws (VEC_LENGTH); comm_manual_aosoa_constants<<<k17_gws, 
k17_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian); break; } // manually vectorised kernel with compile-time constants and permuted loops with temporaries case 18: { dim3 k18_gws (num / (VEC_LENGTH * VEC_LENGTH)); dim3 k18_lws (VEC_LENGTH); comm_manual_aosoa_constants_perm<<<k18_gws, k18_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian); break; } // manually vectorised kernel with compile-time constants and prefetch case 19: { dim3 k19_gws (num / (VEC_LENGTH * VEC_LENGTH)); dim3 k19_lws (VEC_LENGTH); comm_manual_aosoa_constants_perm_prefetch<<<k19_gws, k19_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian); break; } // manually vectorised kernel with direct store case 20: { dim3 k20_gws (num / (VEC_LENGTH * VEC_LENGTH)); dim3 k20_lws (VEC_LENGTH); comm_manual_aosoa_direct<<<k20_gws, k20_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian, dim); break; } // manually vectorised kernel with compile time constants and direct store case 21: { dim3 k21_gws (num / (VEC_LENGTH * VEC_LENGTH)); dim3 k21_lws (VEC_LENGTH); comm_manual_aosoa_constants_direct<<<k21_gws, k21_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian); break; } // manually vectorised kernel with compile time constants, direct store, and prefetch case 22: { dim3 k22_gws (num / (VEC_LENGTH * VEC_LENGTH)); dim3 k22_lws (VEC_LENGTH); comm_manual_aosoa_constants_direct_prefetch<<<k22_gws, k22_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian); break; } // manually vectorised kernel with compile time constants, direct store, and permuted loops with temporaries case 23: { dim3 k23_gws (num / (VEC_LENGTH * VEC_LENGTH)); dim3 k23_lws (VEC_LENGTH); comm_manual_aosoa_constants_direct_perm<<<k23_gws, k23_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian); break; } // final GPGPU kernel optimised for an Nvidia GPU case 24: { size_t block_dim_x = (dim * dim + WARP_SIZE - 1) / WARP_SIZE * WARP_SIZE; size_t block_dim_y = NUM_SUB_GROUPS; dim3 k24_gws (num / (block_dim_y * CHUNK_SIZE), 1); dim3 k24_lws (block_dim_x, block_dim_y); final_gpu_kernel<<<k24_gws, k24_lws>>>(d_sigma_in, d_sigma_out, d_hamiltonian, num); break; } default: std::cerr << "ERROR: **** benchmark kernel unavailable **** \n"; } } real_t deviation = 0; // the deviation of an empty kernel does not make sense if (kernel_id > 0) { cudaMemcpy(sout, d_sigma_out, sizeof(real_2_t) * size_sigma, cudaMemcpyDeviceToHost); for (size_t i = 0; i < size_sigma; i++) { sigma_out[i] = {sout[i].x, sout[i].y}; } // measure the differences between the CPU and GPU results deviation = compare_matrices(sigma_out, sigma_reference_transformed, dim, num); std::cout << "Deviation of kernel " << look_up(kernel_id) << ": " << deviation << std::endl; } cudaFree(d_hamiltonian); cudaFree(d_sigma_in); cudaFree(d_sigma_out); free(sin); free(sout); free(ham); } int main(int argc, char* argv[]) { // debugging print_compile_config(std::cout); // constants const size_t dim = DIM; const size_t num = NUM; // allocate host memory size_t size_hamiltonian = dim * dim; size_t size_sigma = size_hamiltonian * num; size_t size_sigma_byte = sizeof(complex_t) * size_sigma; complex_t* hamiltonian = allocate_aligned<complex_t>(size_hamiltonian); complex_t* sigma_in = allocate_aligned<complex_t>(size_sigma); complex_t* sigma_out = allocate_aligned<complex_t>(size_sigma); complex_t* sigma_reference = allocate_aligned<complex_t>(size_sigma); complex_t* sigma_reference_transformed = allocate_aligned<complex_t>(size_sigma); // perform reference computation for correctness analysis initialise_hamiltonian(hamiltonian, dim); initialise_sigma(sigma_in, sigma_out, 
dim, num); commutator_reference(sigma_in, sigma_out, hamiltonian, dim, num); // copy reference results std::memcpy(sigma_reference, sigma_out, size_sigma_byte); // The macro "BENCHMARK(...)" is defined in utils.hpp BENCHMARK(0, VEC_LENGTH, NO_TRANSFORM, NO_SCALE_HAMILT, NO_TRANSFORM); BENCHMARK(1, VEC_LENGTH, NO_TRANSFORM, NO_SCALE_HAMILT, NO_TRANSFORM); BENCHMARK(2, VEC_LENGTH, NO_TRANSFORM, NO_SCALE_HAMILT, NO_TRANSFORM); BENCHMARK(3, VEC_LENGTH, NO_TRANSFORM, SCALE_HAMILT, NO_TRANSFORM); BENCHMARK(4, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa); BENCHMARK(5, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa); BENCHMARK(6, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa); BENCHMARK(7, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa); BENCHMARK(8, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa); BENCHMARK(9, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa); BENCHMARK(10, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa); BENCHMARK(11, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa); BENCHMARK(12, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa); BENCHMARK(13, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa); BENCHMARK(14, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa); BENCHMARK(15, VEC_LENGTH_AUTO, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa); BENCHMARK(16, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa); BENCHMARK(17, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa); BENCHMARK(18, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa); BENCHMARK(19, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa); BENCHMARK(20, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa); BENCHMARK(21, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa); BENCHMARK(22, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa); BENCHMARK(23, VEC_LENGTH, &transform_matrices_aos_to_aosoa, SCALE_HAMILT, &transform_matrix_aos_to_soa); BENCHMARK(24, 2, NO_TRANSFORM, SCALE_HAMILT, NO_TRANSFORM); free(hamiltonian); free(sigma_in); free(sigma_out); free(sigma_reference); free(sigma_reference_transformed); return 0; }
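// Illustrative sketch (not the utils.hpp implementation) of the AoS -> AoSoA
// re-layout that transform_matrices_aos_to_aosoa performs before the "aosoa"
// kernels above are launched. The real routine works in place and its exact
// indexing may differ; this version uses separate buffers purely to show the
// idea: matrices are grouped into packages of vec_length, and element (i, j)
// of all matrices in a package becomes contiguous, so neighbouring threads
// read consecutive addresses.
#include <complex>
#include <cstddef>

using complex_sketch_t = std::complex<double>;   // stand-in for the benchmark's complex_t

void aos_to_aosoa_sketch(const complex_sketch_t* aos, complex_sketch_t* aosoa,
                         std::size_t dim, std::size_t num, std::size_t vec_length)
{
    const std::size_t mat_size = dim * dim;
    for (std::size_t m = 0; m < num; ++m) {
        const std::size_t package = m / vec_length;   // which group of matrices
        const std::size_t lane    = m % vec_length;   // slot inside the group
        for (std::size_t e = 0; e < mat_size; ++e) {
            // AoS:   aos[m * mat_size + e]                         (matrix-major)
            // AoSoA: aosoa[(package * mat_size + e) * vec_length + lane]
            aosoa[(package * mat_size + e) * vec_length + lane] = aos[m * mat_size + e];
        }
    }
}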
// ConnectRegion.cu // 实现图像的连通区域操作 #include "ConnectRegion.h" #include <iostream> #include <fstream> #include <cmath> using namespace std; // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块的尺寸。 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // 宏:CONNREGION_PACK_LEVEL // 定义了一个线程中计算的像素点个数,若该值为4,则在一个线程中计算2 ^ 4 = 16 // 个像素点 #define CONNREGION_PACK_LEVEL 5 #define CONNREGION_PACK_NUM (1 << CONNREGION_PACK_LEVEL) #define CONNREGION_PACK_MASK (CONNREGION_PACK_LEVEL - 1) #if (CONNREGION_PACK_LEVEL < 1 || CONNREGION_PACK_LEVEL > 5) # error Unsupport CONNREGION_PACK_LEVEL Value!!! #endif // 宏:CONNREGION_DIFF_INT // 比较两个值的绝对值之差是否小于给定值,若是返回1,若不是,返回0 #define CONNREGION_DIFF_INT(p1, p2, t1,t2) ((p1 >= t1 && p1 <= t2) && (p2 >= t1 && p2 <=t2)) // 宏:CONNREGION_INI_IFI // 定义了一个无穷大 #define CONNREGION_INI_IFI 0x7fffffff // Device 子程序: _findRootDev // 查找根节点标记值算法,根据给定的 label 数组和坐标值 // 返回该坐标对应的根节点坐标值。该函数是为了便于其他 Kernel 函数调用。 static __device__ int // 返回值:根节点标记值 _findRootDev( int *label, // 输入的标记数组 int idx // 输入点的标记值 ); // Device 子程序: _unionDev // 合并两个不同像素点以使它们位于同一连通区域中 static __device__ void // Device 程序无返回值 _unionDev( int *label, // 标记值数组 unsigned char elenum1, // 第一个像素点灰度值 unsigned char elenum2, // 第二个像素点灰度值 int elelabel1, // 第一个像素点标记值 int elelabel2, // 第二个像素点标记值 int mingray, int maxgray, // 给定阈值 int *flag // 变换标记,当这两个输入像素点被合并到一个 // 区域后,该标记值将被设为 1。 ); // Device 子程序: _findePreNumDev // 计算当前行前所有行中的根节点个数。 static __device__ int // 返回值:根节点个数 _findPreNumDev( int rownum, // 当前行号 int *elenum // 一个长度与总行数一致的一维数组,用来记录每行中根节 // 点个数。 ); // Kernel 函数: _initLabelPerBlockKer (初始化每个块内像素点的标记值) // 初始化每个线程块内点的标记值。该过程主要分为两个部分,首先,每个节点的标记值为 // 其在源图像中的索引值,如对于坐标为 (c, r) 点,其初始标记值为 r * width + c , // 其中 width 为图像宽;然后,将各点标记值赋值为该点满足阈值关系的八邻域点中的最 // 小标记值。该过程在一个线程块中进行。 static __global__ void // Kernel 函数无返回值 _initLabelPerBlockKer( ImageCuda inimg, // 输入图像 int *label, // 输入标记数组 int mingray, int maxgray // 指定阈值 ); // Kernel 函数: _mergeBordersKer (合并不同块内像素点的标记值) // 不同线程块的合并过程。该过程主要合并每两个线程块边界的点, // 在这里我们主要采用每次合并 4 × 4 个线程块的策略。 static __global__ void // Kernel 函数无返回值 _mergeBordersKer( ImageCuda inimg, // 输入图像 int *label, // 输入标记数组 int blockw, // 应合并线程块的长度 int blockh, // 应合并线程块的宽度 int threadz_z, // 合并水平方向线程块时,z 向线程最大值 int threadz_y, // 合并竖直方向线程块时,z 向线程最大值 int mingray, int maxgray // 指定阈值 ); // Kernel 函数: _preComputeAreaKer (计算面积预处理) // 为每个节点找到其对应的根节点标记值。为下一步计算面积做准备。 static __global__ void // Kernel 函数无返回值 _perComputeAreaKer( int *label, // 输入标记数组 int width, // 输入图像长度 int height // 输入图像宽度 ); // Kernel 函数: _computeAreaKer (计算区域面积) // 计算各个区域的面积值。 static __global__ void // Kernel 函数无返回值 _computeAreaKer( int *label, // 输入标记数组 int *area, // 输出各区域面积值数组 int width, // 输入图像长度 int height // 输入图像宽度 ); // Kernel 函数: _areaAnalysisKer (区域面积大小判断) // 进行区域面积大小判断。其中不满足给定范围的区域的根节点标记值将被赋值为 -1。 static __global__ void // Kernel 函数无返回值 _areaAnalysisKer( int *label, // 输入标记数组 int *area, // 输入面积数组 int width, // 输入图像长度 int height, // 输入图像宽度 int minArea, // 区域最小面积 int maxArea // 区域最大面积 ); // Kernel 函数: _findRootLabelKer (寻找根节点标记值) // 经过面积判断后,为每个节点找到其根节点。其中区域面积超出范围的 // 所有点标记值将被置为 -1。 static __global__ void // Kernel 函数无返回值 _findRootLabelKer( int *label, // 输入标记数组 int *tmplabel, // 输入存储临时标记数组 int width, // 输入图像长度 int height // 输入图像宽度 ); // Kernel 函数: _reIndexKer (根据最终结果重新标记图像) // 将输入标记数组中每行中的根节点个数输出到 elenum 数组中。 static __global__ void // Kernel 函数无返回值 _reIndexKer( int *label, // 输入标记数组 int *labelri, // 记录重新标记前标记值的数组 int *elenum, // 记录各行根节点个数的数组 int width, // 输入图像长度 int height // 输入图像宽度 ); // Kernel 函数: _reIndexFinalKer () // 进行区域标记值的重新赋值。 static __global__ void // Kernel 函数无返回值 
_reIndexFinalKer( int *label, // 输入标记数组 int *labelri, // 记录重新标记前标记值的数组 int *elenum, // 记录各行根节点个数的数组 int width, // 输入图像长度 int height // 输入图像宽度 ); // Kernel 函数: _markFinalLabelKer (将最终标记结果输出到一幅灰度图像上) // 将最终标记值输出到目标图像上。 static __global__ void // Kernel 函数无返回值 _markFinalLabelKer( ImageCuda outimg, // 输出图像 int *label, // 标记数组 int *tmplabel // 临时标记数组 ); // Device 子程序:_findRootDev (查找根节点标记值) static __device__ int _findRootDev(int *label, int idx) { // 在 label 数组中查找 idx 下标对应的最小标记值, // 并将该值作为返回值。 int nexidx; do { nexidx = idx; idx = label[nexidx]; } while (idx < nexidx); // 处理完毕,返回根节点标记值。 return idx; } // Kernel 函数:_initLabelPerBlockKer (初始化各线程块内像素点的标记值) static __global__ void _initLabelPerBlockKer(ImageCuda inimg, int *label, int mingray, int maxgray) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量 (其中, c 表示 column; r 表示 row)。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; int i, j, k; // 计算输入坐标点在label数组中对应的数组下标 int idx = r * inimg.imgMeta.width + c; // 计算输入坐标点对应的图像数据数组下标 int inidx = r * inimg.pitchBytes + c, newidx; // 计算应申请的 shared memory 的步长 int spitch = blockDim.x + 2; // 计算当前坐标点在 shared memory 中对应的下标 int localidx = (threadIdx.y + 1) * spitch + threadIdx.x + 1; // oldlabel 用来记录当前点未经过八邻域判断前的标记值, // newlabel 用来记录经过一轮判断后当前点的最新标记值, // 当一个点的 oldlabel 与 newlabel 一致时,当前点对应的标记值为最终标记 // 初始时,每个点的标记值设为其在 shared memory 中的对应下标 int oldlabel, newlabel = localidx; // curvalue 用来记录当前点的灰度值,newvalue 用来记录其八邻域点的灰度值 unsigned char curvalue, newvalue; curvalue = inimg.imgMeta.imgData[inidx]; // 共享内存数据区,该部分包含了存储在共享内存中的像素点的标记值。 // 由于未对 Kernel 的尺寸做出假设,这里使用动态申请的 Shared // Memory(共享内存)。 extern __shared__ int slabel[]; // 共享变量 sflag 数组用来存储是否应停止循环信息。 // 当 sflag[0] 的值为 0 时,表示块内的迭代已经完成。 __shared__ int sflag[1]; // 由于 shared memory 的大小为 (blockDim.x + 2) * (blockDim.y + 2) // 在这里将 shared memory 的边界点(即 shared memory 中超出线程块的点) // 的标记值设为无穷大。 if (threadIdx.x == 0) slabel[localidx - 1] = CONNREGION_INI_IFI; if (threadIdx.x == blockDim.x - 1) slabel[localidx + 1] = CONNREGION_INI_IFI; if (threadIdx.y == 0) { slabel[localidx - spitch] = CONNREGION_INI_IFI; if (threadIdx.x == 0) slabel[localidx - spitch - 1] = CONNREGION_INI_IFI; if (threadIdx.x == blockDim.x - 1) slabel[localidx - spitch + 1] = CONNREGION_INI_IFI; } if (threadIdx.y == blockDim.y - 1) { slabel[localidx + spitch] = CONNREGION_INI_IFI; if (threadIdx.x == 0) slabel[localidx + spitch - 1] = CONNREGION_INI_IFI; if (threadIdx.x == blockDim.x - 1) slabel[localidx + spitch + 1] = CONNREGION_INI_IFI; } while (1) { // 将当前点的标记值设为其在 shared memory 中的数组下标 slabel[localidx] = newlabel; // 将 sflag[0] 标记值设为 0 if ((threadIdx.x | threadIdx.y) == 0) sflag[0] = 0; // 初始时,将 newlabel 值赋给 oldlabel oldlabel = newlabel; __syncthreads(); // 在当前点的八邻域范围内查找与其灰度值之差的绝对值小于阈值的点, // 并将这些点的最小标记值赋予记录在 newlabel 中 for (i = r - 1;i <= r + 1;i++) { for (j = c - 1;j <= c + 1;j++) { if (j == c && i == r) continue; newidx = i * inimg.pitchBytes + j; newvalue = inimg.imgMeta.imgData[newidx]; if ((i >= 0 && i < inimg.imgMeta.height && j >= 0 && j < inimg.imgMeta.width) && (CONNREGION_DIFF_INT(curvalue, newvalue, mingray, maxgray))) { k = localidx + (i - r) * spitch + j - c; newlabel = min(newlabel, slabel[k]); } } } __syncthreads(); // 若当前点的 oldlabel 值大于 newlabel 值, // 表明当前点的标记值不是最终的标记值 // 则将 sflag[0] 值设为 1,来继续进行循环判断,并通过原子操作 // 将 newlabel 与 slabel[oldlabel] 的较小值赋予 slabel[oldlabel] if (oldlabel > newlabel) { 
atomicMin(&slabel[oldlabel], newlabel); sflag[0] = 1; } __syncthreads(); // 当线程块内所有像素点对应的标记值不再改变, // 即 sflag[0] 的值为 0 时,循环结束。 if (sflag[0] == 0) break; // 计算 newlabel 对应的根节点标记值,并将该值赋给 newlabel newlabel = _findRootDev(slabel, newlabel); __syncthreads(); } // 将 newlabel 的值转换为其在 label 数组中的数组下标 j = newlabel / spitch; i = newlabel % spitch; i += blockIdx.x * blockDim.x - 1; j += blockIdx.y * blockDim.y - 1; newlabel = j * inimg.imgMeta.width + i; label[idx] = newlabel; } // Device 子程序:_unionDev (合并两个不同像素点以使它们位于同一连通区域中) static __device__ void _unionDev( int *label, unsigned char elenum1, unsigned char elenum2, int label1, int label2, int mingray, int maxgray, int *flag) { int newlabel1, newlabel2; // 比较两个输入像素点的灰度值是否满足给定的阈值范围 if (CONNREGION_DIFF_INT(elenum1, elenum2, mingray, maxgray)) { // 若两个点满足指定条件,则分别计算这两个点的根节点标记值 // 计算第一个点的根节点标记值 newlabel1 = _findRootDev(label, label1); // 计算第二个点的根节点标记值 newlabel2 = _findRootDev(label, label2); // 将较小的标记值赋值给另一点在标记数组中的值 // 并将 flag[0] 置为 1 if (newlabel1 > newlabel2) { // 使用原子操作以保证操作的唯一性与正确性 atomicMin(&label[newlabel1], newlabel2); flag[0] = 1; } else if (newlabel2 > newlabel1) { atomicMin(&label[newlabel2], newlabel1); flag[0] = 1; } } } static __global__ void _mergeBordersKer( ImageCuda inimg, int *label, int blockw, int blockh, int threadz_x, int threadz_y, int mingray, int maxgray) { int idx, iterateTimes, i; int x, y; int curidx, newidx; unsigned char curvalue, newvalue; // 在这里以每次合并 4 * 4 = 16 个线程块的方式合并线程块 // 分别计算待合并线程块在 GRID 中的 x 和 y 向分量 int threadidx_x = blockDim.x * blockIdx.x + threadIdx.x; int threadidx_y = blockDim.y * blockIdx.y + threadIdx.y; // 共享数组变量,只含有一个元素,每当有两个像素点合并时,该数组 // 变量值置为 1。 __shared__ int sflag[1]; while (1) { // 设置 sflag[0] 的值为 0。 if ((threadIdx.x | threadIdx.y | threadIdx.z) == 0) sflag[0] = 0; __syncthreads(); // 合并上下相邻线程块的水平方向边界点 // 由于位于 GRID 中最后一行的线程块向下没有待合并的线程块 // 因而这里不处理最后一行的线程块 if ((threadIdx.y < blockDim.y - 1)) { // 计算为了合并一行线程块的迭代次数 iterateTimes = blockw / threadz_x; // 计算待合并像素点在源图像中的像素点坐标 x = threadidx_x * blockw + threadIdx.z; y = threadidx_y * blockh + blockh - 1; for (i = 0; i < iterateTimes; i++) { if (threadIdx.z < threadz_x && x < inimg.imgMeta.width && y < inimg.imgMeta.height) { idx = y * inimg.imgMeta.width + x; // 计算当前像素点灰度值 curidx = y * inimg.pitchBytes + x; curvalue = inimg.imgMeta.imgData[curidx]; // 计算位于当前像素点下方像素点的灰度值, // 其坐标值为 (x, y + 1)。 newidx = curidx + inimg.pitchBytes; newvalue = inimg.imgMeta.imgData[newidx]; // 合并这两个像素点 _unionDev(label, curvalue, newvalue, idx, idx + inimg.imgMeta.width, mingray, maxgray, sflag); // 若当前像素点不为最左侧像素点时,即 x != 0 时,合并 // 位于当前像素点左下方像素点,其坐标值为 (x - 1, y + 1)。 if (x - 1 >= 0) { newidx -= 1; newvalue = inimg.imgMeta.imgData[newidx]; _unionDev(label, curvalue, newvalue, idx, idx + inimg.imgMeta.width - 1, mingray, maxgray, sflag); } // 若当前像素点不为最右侧像素点时,x != inimg.imgMeta.width // 时,合并位于当前像素点右下方像素点,其坐标值为 // (x + 1, y + 1)。 if (x + 1 < inimg.imgMeta.width) { newidx += 2; newvalue = inimg.imgMeta.imgData[newidx]; _unionDev(label, curvalue, newvalue, idx, idx + inimg.imgMeta.width + 1, mingray, maxgray, sflag); } } // 计算下次迭代的起始像素点的 x 坐标 x += threadz_x; } } // 合并左右相邻线程块的竖直方向边界点 // 由于位于 GRID 中最后一列的线程块向右没有待合并的线程块 // 因而这里不处理最后一列的线程块 if ((threadIdx.x < blockDim.x - 1)) { // 计算为了合并一列线程块的迭代次数 iterateTimes = blockh / threadz_y; // 计算待合并像素点在源图像中的像素点坐标, // 由于处理的是每个线程块的最右一列像素点, // 因此 x 坐标值因在原基础上加上线程块宽度 - 1 x = threadidx_x * blockw + blockw - 1; y = threadidx_y * blockh + threadIdx.z; for (i = 0;i < iterateTimes;i++) { if (threadIdx.z < threadz_y && x < inimg.imgMeta.width && y < 
inimg.imgMeta.height) { idx = y * inimg.imgMeta.width + x; // 计算当前像素点灰度值 curidx = y * inimg.pitchBytes + x; curvalue = inimg.imgMeta.imgData[curidx]; // 计算位于当前像素点右侧像素点的灰度值, // 其坐标值为 (x + 1, y)。 newidx = curidx + 1; newvalue = inimg.imgMeta.imgData[newidx]; // 合并这两个像素点 _unionDev(label, curvalue, newvalue, idx, idx + 1, mingray, maxgray, sflag); // 若当前像素点不为最上侧像素点时,即 y != 0 时,合并 // 位于当前像素点右上方像素点,其坐标值为 (x + 1, y - 1)。 if (y - 1 >= 0) { newidx -= inimg.pitchBytes; newvalue = inimg.imgMeta.imgData[newidx]; _unionDev(label, curvalue, newvalue, idx, idx - inimg.imgMeta.width + 1, mingray, maxgray, sflag); } // 若当前像素点不为最下侧像素点时,y != inimg.imgMeta.height // 时,合并位于当前像素点右下方像素点,其坐标值为 // (x + 1, y + 1)。 if (y + 1 < inimg.imgMeta.height) { newidx = curidx + inimg.pitchBytes + 1; newvalue = inimg.imgMeta.imgData[newidx]; _unionDev(label, curvalue, newvalue, idx, idx + inimg.imgMeta.width + 1, mingray, maxgray, sflag); } } // 计算下次迭代的起始像素点的 y 坐标 y += threadz_y; } } __syncthreads(); if (sflag[0] == 0) break; } } static __global__ void _perComputeAreaKer( int *label, int width, int height) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量 (其中, c 表示 column; r 表示 row)。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= width || r >= height) return; // 计算输入坐标点在label数组中对应的数组下标 int inidx = r * width + c; // 计算当前像素点的标记值 int curlabel = label[inidx]; // 将当前像素点标记值的根节点值赋给原像素点 int newlabel = _findRootDev(label, curlabel); label[inidx] = newlabel; } static __global__ void _computeAreaKer( int *label, int *area, int width, int height) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的 // 坐标的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并 // 行度缩减的策略,默认令一个线程处理 16 个输出像素,这四个像素位于统一列 // 的相邻 16 行上,因此,对于 r 需要进行右移计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) << CONNREGION_PACK_LEVEL; int inidx = r * width + c; int curlabel, nexlabel; int cursum = 0; do { // 线程中处理第一个点。 // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资 // 源,一方面防止由于段错误导致的程序崩溃。 if (r >= height || c >= width) break; // 得到第一个输入坐标点对应的标记值。 curlabel = label[inidx]; cursum = 1; // 处理第二个点。 // 此后的像素点,每个像素点都在前一个的下一行,而 x 分量保持不变。因 // 此,需要检查这个像素点是否越界。检查只针对 y 分量即可,x 分量在各点 // 之间没有变化,故不用检查。 if (++r >= height) break; // 得到第二个点的像素值。 // 根据上一个像素点,计算当前像素点的对应的输出图像的下标。由于只有 y // 分量增加 1,所以下标只需要加上一个 pitch 即可,不需要在进行乘法计算。 inidx += width; nexlabel = label[inidx]; // 若当前第二个点的标记值不等于前一个,把当前临时变量 cursum 中的统计结 // 果增加到共享内存中的相应区域;若该值等于前一个点的标记值,则临时变量 // cursum 加 1,继续检查下一个像素点。 if (curlabel != nexlabel) { atomicAdd(&area[curlabel], cursum); curlabel = nexlabel; } else { cursum++; } // 宏:CONNREGION_KERNEL_MAIN_PHASE // 定义计算下一个像素点的程序片段。使用这个宏可以实现获取下一个点的像素 // 值,并累加到共享内存,并且简化编码量 #define CONNREGION_KERNEL_MAIN_PHASE \ if (++r >= height) \ break; \ inidx += width; \ nexlabel = label[inidx]; \ if (curlabel != nexlabel) { \ atomicAdd(&area[curlabel], cursum); \ curlabel = nexlabel; \ cursum = 1; \ } else { \ cursum++; \ } #define CONNREGION_KERNEL_MAIN_PHASEx2 \ CONNREGION_KERNEL_MAIN_PHASE \ CONNREGION_KERNEL_MAIN_PHASE #define CONNREGION_KERNEL_MAIN_PHASEx4 \ CONNREGION_KERNEL_MAIN_PHASEx2 \ CONNREGION_KERNEL_MAIN_PHASEx2 #define CONNREGION_KERNEL_MAIN_PHASEx8 \ CONNREGION_KERNEL_MAIN_PHASEx4 \ CONNREGION_KERNEL_MAIN_PHASEx4 #define CONNREGION_KERNEL_MAIN_PHASEx16 \ CONNREGION_KERNEL_MAIN_PHASEx8 \ CONNREGION_KERNEL_MAIN_PHASEx8 // 对于不同的 CONNREGION_PACK_LEVEL ,定义不同的执行次数,从而使一个线程内部 // 实现对多个点的像素值的统计。 #if (CONNREGION_PACK_LEVEL >= 2) CONNREGION_KERNEL_MAIN_PHASEx2 # if 
(CONNREGION_PACK_LEVEL >= 3) CONNREGION_KERNEL_MAIN_PHASEx4 # if (CONNREGION_PACK_LEVEL >= 4) CONNREGION_KERNEL_MAIN_PHASEx8 # if (CONNREGION_PACK_LEVEL >= 5) CONNREGION_KERNEL_MAIN_PHASEx16 # endif # endif # endif #endif // 取消前面的宏定义。 #undef CONNREGION_KERNEL_MAIN_PHASEx16 #undef CONNREGION_KERNEL_MAIN_PHASEx8 #undef CONNREGION_KERNEL_MAIN_PHASEx4 #undef CONNREGION_KERNEL_MAIN_PHASEx2 #undef CONNREGION_KERNEL_MAIN_PHASE } while (0); // 使用原子操作来保证操作的正确性 if (cursum != 0) atomicAdd(&area[curlabel], cursum); } static __global__ void _areaAnalysisKer( int *label, int *area, int width, int height, int minArea, int maxArea) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的 // 坐标的 x 和 y 分量(其中,c 表示 column;r 表示 row)。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= width || r >= height) return; // 计算坐标点对应的 label 数组下标 int idx = r * width + c; // 若面积值大于最大面积值或小于指定最小面积值,则将当前点的标记值设为 -1 if (area[idx]) { if (area[idx] < minArea || area[idx] > maxArea) label[idx] = -1; } } static __global__ void _findRootLabelKer( int *label, int *tmplabel, int width, int height) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的 // 坐标的 x 和 y 分量(其中,c 表示 column;r 表示 row)。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= width || r >= height) return; // 计算坐标点对应的 label 数组下标 int idx = r * width + c; // 计算当前点根节点的标记值 int nexidx = label[idx]; // 将根节点值为 -1 的点的标记值设为 -1,表明该点不应被标记 if (nexidx >= 0 && label[nexidx] == -1) label[idx] = -1; // 将像素点最终标记值赋值到 tmplabel 数组中 tmplabel[idx] = label[idx]; } static __device__ int _findPreNumDev(int rownum, int *elenum) { int n = rownum; // 将最终值初始化为 0 int finalnum = 0; // 计算由第 0 行至第 n-1 行内根节点的总数和,并将其值赋值给 finalnum。 while (--n >= 0) { finalnum += elenum[n]; } return finalnum; } static __global__ void _reIndexKer( int *label, int *labelri, int *elenum, int width, int height) { // 计算线程对应的点位置,其中 colnum 和 rownum 分别表示线程处理的像素点的 // 列号和行号。 int rownum = blockIdx.y, colnum = threadIdx.x; // 检查像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (colnum >= width || rownum >= height) return; // 当输入图像的宽度大于 1024,即给定线程块时, // 应采取迭代的方式完成该步骤标记值的重新赋值 // 计算迭代次数,采用向上取整的方式 int iteratetimes = (width - 1) / blockDim.x + 1; int idx, rankidx = 0, i, j; // 共享内存数据区,该部分包含了记录点是否为根节点信息,当一个点是根节点时, // 对应元素值为 1,否则值为 0。由于未对 Kernel 的尺寸做出假设, // 这里使用动态申请的 Shared Memory(共享内存)。 extern __shared__ int srowinfo[]; // 用来记录每个线程块(即一行)中根节点的总个数 __shared__ int selenum[1]; if(colnum == 0) selenum[0] = 0; __syncthreads(); i = colnum; // 计算每行中根节点的个数 for (j = 0;j < iteratetimes;j++) { if (i < width) { // 计算 labelri 数组下标 idx = rownum * width + i; // 将 labelri 中所有元素值为 -1 labelri[idx] = -1; // 将当前点是否为根节点的信息返回至 srowinfo 数组中 srowinfo[i] = (label[idx] == idx); // 若当前点为根节点则使用原子操作使得 selenum[0] 值加 1 if (srowinfo[i] == 1) atomicAdd(&selenum[0], 1); } i += 1024; } __syncthreads(); // 将根节点信息存入 elenum 数组中 if (colnum == 0) elenum[rownum] = selenum[0]; //__syncthreads(); // 计算每个根节点在它所在行中属于第几个根节点 for (j = 0;j < iteratetimes;j++) { // 若当前点是根节点则进行如下判断 if ((colnum < width) && (srowinfo[colnum] == 1)) { rankidx = 1; idx = rownum * width + colnum; // 计算在当前行,根节点前的其他根节点的总个数 for (i = 0;i < colnum;i++) { // 若点为根节点,则使得 rankidx 值加 1。 if (srowinfo[i] == 1) rankidx++; } // 将根节点索引值返回至数组 labelri 中 labelri[idx] = rankidx - 1; } colnum += 1024; } } static __global__ void _reIndexFinalKer( int *label, int *labelri, int *elenum, int width, int height) { // 计算线程对应的点位置,其中 colnum 
和 rownum 分别表示线程处理的像素点的 // 列号和行号。 int rownum = blockIdx.y, colnum = threadIdx.x; // 检查像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (colnum >= width || rownum >= height) return; int idx, i, j; // 当输入图像的宽度大于 1024,即给定线程块时, // 应采取迭代的方式完成该步骤标记值的重新赋值 // 计算迭代次数,采用向上取整的方式 int iteratetimes = (width - 1) / blockDim.x + 1; i = colnum; // 将第 256 及以后根节点的标记值设为 -1。 for (j = 0;j < iteratetimes;j++) { if (i < width) { idx = rownum * width + i; if (labelri[idx] >= 0) { // 计算根节点的标记值 label[idx] = labelri[idx] + _findPreNumDev(rownum, elenum); // 若标记值大于 256,则将根节点标记值设为 -1,表示该点不应被标记 if (label[idx] >= 256) label[idx] = -1; } } i += 1024; } } static __global__ void _markFinalLabelKer( ImageCuda outimg, int *label, int *tmplabel) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的 // 坐标的 x 和 y 分量(其中,c 表示 column;r 表示 row)。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= outimg.imgMeta.width || r >= outimg.imgMeta.height) return; // 计算坐标点对应的 label 数组下标 int inidx = r * outimg.imgMeta.width + c; // 计算第一个输出坐标点对应的图像数据数组下标。 int outidx = r * outimg.pitchBytes + c; // 计算每个像素点的最终标记值 if (tmplabel[inidx] != -1) { tmplabel[inidx] = label[tmplabel[inidx]]; } // 由于标记值应由 1 开始,而在 tmplabel 中未标记区域的标记值为 -1。 // 因此输出图像的标记值为 tmplabel 在该位置的标记值加 1。 outimg.imgMeta.imgData[outidx] = tmplabel[inidx] + 1; } // Host 成员方法:connectRegion(连通区域) __host__ int ConnectRegion::connectRegion(Image *inimg, Image *outimg) { int mingray = 10; int maxgray = 250; // 检查输入输出图像是否为 NULL,如果为 NULL 直接报错返回。 if (inimg == NULL || outimg == NULL) return NULL_POINTER; // 这一段代码进行图像的预处理工作。图像的预处理主要完成在 Device 内存上为 // 输入和输出图像准备内存空间,以便盛放数据。 int errcode; // 局部变量,错误码 // 将输入图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // 将输出图像拷贝入 Device 内存。 errcode = ImageBasicOp::copyToCurrentDevice(outimg); if (errcode != NO_ERROR) { // 如果输出图像无数据(故上面的拷贝函数会失败),则会创建一个和输入图 // 像的 ROI 子图像尺寸相同的图像。 errcode = ImageBasicOp::makeAtCurrentDevice( outimg, inimg->roiX2 - inimg->roiX1, inimg->roiY2 - inimg->roiY1); // 如果创建图像也操作失败,则说明操作彻底失败,报错退出。 if (errcode != NO_ERROR) return errcode; } // 提取输入图像的 ROI 子图像。 ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) return errcode; // 提取输出图像的 ROI 子图像。 ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) return errcode; // 根据子图像的大小对长,宽进行调整,选择长度小的长,宽进行子图像的统一 if (insubimgCud.imgMeta.width > outsubimgCud.imgMeta.width) insubimgCud.imgMeta.width = outsubimgCud.imgMeta.width; else outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width; if (insubimgCud.imgMeta.height > outsubimgCud.imgMeta.height) insubimgCud.imgMeta.height = outsubimgCud.imgMeta.height; else outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height; // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y; // 计算初始化块内内存时,共享内存的大小。 int smsize = sizeof (int) * (blocksize.x + 2) * (blocksize.y + 2); // 计算各标记数组的存储数据大小 int data_size = insubimgCud.imgMeta.width * insubimgCud.imgMeta.height * sizeof (int); // 存储中间标记值的数组,其大小与输入图像大小一致 int *devtmplabel; // 存储输入图像各行根节点数目的数组,其下标表示图像行号,元素值为 // 各行根节点个数,数组大小与图像总行数一致 int *devElenumPerRow; // 存储最终标记值的数组,其大小与输入图像大小一致 int *devLabel; // 存储记录重新标记前标记值的数组,其大小与输入图像大小一致 int *devlabelri; // 
存储各区域面积大小的数组,其大小与输入图像大小一致,其中各区域 // 面积记录在根节点对应元素中。 int *devArea; cudaError_t cudaerrcode; // 为标记数组分配大小。 cudaerrcode = cudaMalloc((void **)&devLabel, data_size); if (cudaerrcode != cudaSuccess) { cudaFree(devLabel); return cudaerrcode; } // 为记录各行根节点个数的数组分配大小。 cudaerrcode = cudaMalloc((void **)&devElenumPerRow, insubimgCud.imgMeta.height * sizeof (int)); if (cudaerrcode != cudaSuccess) { cudaFree(devElenumPerRow); return cudaerrcode; } // 为临时标记数组分配大小。 cudaerrcode = cudaMalloc((void **)(&devtmplabel), data_size); if (cudaerrcode != cudaSuccess) { cudaFree(devtmplabel); return cudaerrcode; } // 为记录重新标记前的标记数组分配大小。 cudaerrcode = cudaMalloc((void **)(&devlabelri), data_size); if (cudaerrcode != cudaSuccess) { cudaFree(devlabelri); return cudaerrcode; } // 为面积数组分配大小。 cudaerrcode = cudaMalloc((void **)(&devArea), data_size); if (cudaerrcode != cudaSuccess) { cudaFree(devArea); return cudaerrcode; } // 将面积数组中所有面积值初始化为 0. cudaerrcode = cudaMemset(devArea, 0, data_size); if (cudaerrcode != cudaSuccess) { cudaFree(devArea); return cudaerrcode; } // 调用核函数,初始化每个线程块内标记值 _initLabelPerBlockKer<<<gridsize, blocksize, smsize>>>( insubimgCud, devLabel, mingray, maxgray); // 合并线程块时每次合并线程块的长、宽和高 int blockw, blockh, blockz; // 计算第一次合并时,应合并线程块的长、宽和高 // 第一次合并时,应合并线程块的长应为初始线程块长,宽为初始线程块宽 blockw = blocksize.x; blockh = blocksize.y; // 由于这里采用的是 3 维线程块,线程块的高设为初始线程块长和宽的较大者。 blockz = blockw; if (blockw < blockh) blockz = blockh; // 计算每次合并的线程块个数,在这里我们采用的是每次合并 4 × 4 的线程块, // 由于采用这种方式合并所需的迭代次数最少。 int xtiles = 4, ytiles = 4; // 计算合并线程块前 GRID 的长 int tilesizex = gridsize.x; // 计算合并线程块前 GRID 的宽 int tilesizey = gridsize.y; // 定义为进行线程块合并而采用的线程块与网格。 dim3 blockformerge, gridformerge; // 由于每个线程块的大小限制为 1024,而 tilesizex * tilesizey * blockz 的值 // 为每次用来进行合并操作的三维线程块的最大大小,因此当该值不大于 1024 时, // 可将所有线程块放在一个三维线程块中合并,这样,我们就可以以该值是否 // 不大于 1024 来作为是否终止循环的判断条件。 while (tilesizex * tilesizey * blockz > 1024) { // 计算每次合并线程块时 GRID 的长,这里采用向上取整的方式 tilesizex = (tilesizex - 1) / xtiles + 1; // 计算每次合并线程块时 GRID 的宽,这里采用向上取整的方式 tilesizey = (tilesizey - 1) / ytiles + 1; // 设置为了合并而采用的三维线程块大小,这里采用的是 4 × 4 的方式, // 因此线程块的长为 4,宽也为 4,高则为 32。 blockformerge.x = xtiles; blockformerge.y = ytiles; blockformerge.z = blockz; // 设置为了合并而采用的二维网格的大小。 gridformerge.x = tilesizex; gridformerge.y = tilesizey; gridformerge.z = 1; // 调用核函数,每次合并4 × 4 个线程块内的标记值 _mergeBordersKer<<<gridformerge, blockformerge>>>( insubimgCud, devLabel, blockw, blockh, blocksize.x, blocksize.y, mingray, maxgray); // 在每次迭代后,修改应合并线程块的长和宽,因为每次合并 4 * 4 个线程块, // 因此,经过迭代后,应合并线程块的长和宽应分别乘 4。 blockw *= xtiles; blockh *= ytiles; } // 进行最后一轮线程块的合并 // 计算该轮应采用的三维线程块大小 blockformerge.x = tilesizex; blockformerge.y = tilesizey; blockformerge.z = blockz; // 设置该论应采用的网格大小,长宽高分别为1。 gridformerge.x = 1; gridformerge.y = 1;gridformerge.z = 1; // 调用核函数,进行最后一轮线程块合并 _mergeBordersKer<<<gridformerge, blockformerge>>>( insubimgCud, devLabel, blockw, blockh, blocksize.x, blocksize.y, mingray,maxgray); // 调用核函数,进行计算面积前的预处理,即找出每个结点对应的标记值, // 其中根节点的标记值与其自身在数组中的索引值一致 _perComputeAreaKer<<<gridsize, blocksize>>>( devLabel, insubimgCud.imgMeta.width, insubimgCud.imgMeta.height); // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blockforcalarea, gridforcalarea; int height = (insubimgCud.imgMeta.height + CONNREGION_PACK_MASK) / CONNREGION_PACK_NUM; blockforcalarea.x = DEF_BLOCK_X; blockforcalarea.y = DEF_BLOCK_Y; gridforcalarea.x = (insubimgCud.imgMeta.width + blockforcalarea.x - 1) / blockforcalarea.x; gridforcalarea.y = (height + blockforcalarea.y - 1) / blockforcalarea.y; // 调用核函数,计算各个区域的面积大小 _computeAreaKer<<<gridforcalarea, blockforcalarea>>>( 
        devLabel, devArea, insubimgCud.imgMeta.width, insubimgCud.imgMeta.height);

    // Invoke the kernel that checks region areas; regions whose area falls
    // outside the given range have their root label set to -1.
    _areaAnalysisKer<<<gridsize, blocksize>>>(
            devLabel, devArea, insubimgCud.imgMeta.width,
            insubimgCud.imgMeta.height, minArea, maxArea);

    // Invoke the kernel that finds the final root label for every region.
    _findRootLabelKer<<<gridsize, blocksize>>>(
            devLabel, devtmplabel, insubimgCud.imgMeta.width,
            insubimgCud.imgMeta.height);

    // Set up the block and grid sizes for re-indexing the labels. One thread
    // block processes one image row; since a block is limited to 1024 threads,
    // the block width is set to 1024.
    blockforcalarea.x = 1024;
    blockforcalarea.y = 1;
    blockforcalarea.z = 1;
    gridforcalarea.x = 1;
    gridforcalarea.y = insubimgCud.imgMeta.height;
    gridforcalarea.z = 1;

    // Invoke the kernel that counts the root nodes of each row and returns the
    // result in the devElenumPerRow array.
    _reIndexKer<<<gridforcalarea, blockforcalarea,
                  insubimgCud.imgMeta.width * sizeof (int)>>>(
            devLabel, devlabelri, devElenumPerRow,
            insubimgCud.imgMeta.width, insubimgCud.imgMeta.height);

    // Invoke the kernel that computes the final, consecutively ordered label of
    // each region.
    _reIndexFinalKer<<<gridforcalarea, blockforcalarea>>>(
            devLabel, devlabelri, devElenumPerRow,
            insubimgCud.imgMeta.width, insubimgCud.imgMeta.height);

    // Invoke the kernel that writes the final labels to the output image.
    _markFinalLabelKer<<<gridsize, blocksize>>>(
            outsubimgCud, devLabel, devtmplabel);

    // Free all allocated device arrays to avoid memory leaks.
    cudaFree(devtmplabel);
    cudaFree(devArea);
    cudaFree(devLabel);
    cudaFree(devlabelri);
    cudaFree(devElenumPerRow);

    if (cudaGetLastError() != cudaSuccess)
        return CUDA_ERROR;

    // Done, return.
    return NO_ERROR;
}
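// Host-side sketch of the union-find scheme used by the labeling kernels in
// ConnectRegion.cu above (not part of that file). Every pixel starts as its
// own root (label[i] == i), a region's root always carries the smallest index
// in the region, and _findRootDev / _unionDev correspond to findRoot / merge
// below, with atomicMin replacing the plain assignment on the device.
#include <vector>

static int findRoot(const std::vector<int>& label, int idx)
{
    // Follow the chain while the label keeps decreasing; the index where it
    // stops decreasing is the root of the region.
    int next = label[idx];
    while (next < idx) {
        idx  = next;
        next = label[idx];
    }
    return idx;
}

static void merge(std::vector<int>& label, int a, int b)
{
    // Hang the tree with the larger root index under the one with the smaller
    // root index, mirroring atomicMin(&label[bigRoot], smallRoot) on the GPU.
    int ra = findRoot(label, a);
    int rb = findRoot(label, b);
    if (ra < rb)
        label[rb] = ra;
    else if (rb < ra)
        label[ra] = rb;
}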
* \file * Thread utilities for reading memory using PTX cache modifiers. */ #pragma once #include <cuda.h> #include <iterator> #include "../util_ptx.cuh" #include "../util_type.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \addtogroup UtilIo * @{ */ //----------------------------------------------------------------------------- // Tags and constants //----------------------------------------------------------------------------- /** * \brief Enumeration of cache modifiers for memory load operations. */ enum CacheLoadModifier { LOAD_DEFAULT, ///< Default (no modifier) LOAD_CA, ///< Cache at all levels LOAD_CG, ///< Cache at global level LOAD_CS, ///< Cache streaming (likely to be accessed once) LOAD_CV, ///< Cache as volatile (including cached system lines) LOAD_LDG, ///< Cache as texture LOAD_VOLATILE, ///< Volatile (any memory space) }; /** * \name Thread I/O (cache modified) * @{ */ /** * \brief Thread utility for reading memory using cub::CacheLoadModifier cache modifiers. Can be used to load any data type. * * \par Example * \code * #include <cub/cub.cuh> // or equivalently <cub/thread/thread_load.cuh> * * // 32-bit load using cache-global modifier: * int *d_in; * int val = cub::ThreadLoad<cub::LOAD_CA>(d_in + threadIdx.x); * * // 16-bit load using default modifier * short *d_in; * short val = cub::ThreadLoad<cub::LOAD_DEFAULT>(d_in + threadIdx.x); * * // 256-bit load using cache-volatile modifier * double4 *d_in; * double4 val = cub::ThreadLoad<cub::LOAD_CV>(d_in + threadIdx.x); * * // 96-bit load using cache-streaming modifier * struct TestFoo { bool a; short b; }; * TestFoo *d_struct; * TestFoo val = cub::ThreadLoad<cub::LOAD_CS>(d_in + threadIdx.x); * \endcode * * \tparam MODIFIER <b>[inferred]</b> CacheLoadModifier enumeration * \tparam InputIteratorT <b>[inferred]</b> Input iterator type \iterator */ template < CacheLoadModifier MODIFIER, typename InputIteratorT> __device__ __forceinline__ typename std::iterator_traits<InputIteratorT>::value_type ThreadLoad(InputIteratorT itr); //@} end member group #ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document /// Helper structure for templated load iteration (inductive case) template <int COUNT, int MAX> struct IterateThreadLoad { template <CacheLoadModifier MODIFIER, typename T> static __device__ __forceinline__ void Load(T const *ptr, T *vals) { vals[COUNT] = ThreadLoad<MODIFIER>(ptr + COUNT); IterateThreadLoad<COUNT + 1, MAX>::template Load<MODIFIER>(ptr, vals); } template <typename InputIteratorT, typename T> static __device__ __forceinline__ void Dereference(InputIteratorT itr, T *vals) { vals[COUNT] = itr[COUNT]; IterateThreadLoad<COUNT + 1, MAX>::Dereference(itr, vals); } }; /// Helper structure for templated load iteration (termination case) template <int MAX> struct IterateThreadLoad<MAX, MAX> { template <CacheLoadModifier MODIFIER, typename T> static __device__ __forceinline__ void Load(T const * /*ptr*/, T * /*vals*/) {} template <typename InputIteratorT, typename T> static __device__ __forceinline__ void Dereference(InputIteratorT /*itr*/, T * /*vals*/) {} }; /** * Define a uint4 (16B) ThreadLoad specialization for the given Cache load modifier */ #define _CUB_LOAD_16(cub_modifier, ptx_modifier) \ template<> \ __device__ __forceinline__ uint4 ThreadLoad<cub_modifier, uint4 const *>(uint4 const *ptr) \ { \ uint4 retval; \ asm volatile ("ld."#ptx_modifier".v4.u32 {%0, %1, %2, %3}, [%4];" : \ "=r"(retval.x), \ "=r"(retval.y), \ "=r"(retval.z), \ 
"=r"(retval.w) : \ _CUB_ASM_PTR_(ptr)); \ return retval; \ } \ template<> \ __device__ __forceinline__ ulonglong2 ThreadLoad<cub_modifier, ulonglong2 const *>(ulonglong2 const *ptr) \ { \ ulonglong2 retval; \ asm volatile ("ld."#ptx_modifier".v2.u64 {%0, %1}, [%2];" : \ "=l"(retval.x), \ "=l"(retval.y) : \ _CUB_ASM_PTR_(ptr)); \ return retval; \ } /** * Define a uint2 (8B) ThreadLoad specialization for the given Cache load modifier */ #define _CUB_LOAD_8(cub_modifier, ptx_modifier) \ template<> \ __device__ __forceinline__ ushort4 ThreadLoad<cub_modifier, ushort4 const *>(ushort4 const *ptr) \ { \ ushort4 retval; \ asm volatile ("ld."#ptx_modifier".v4.u16 {%0, %1, %2, %3}, [%4];" : \ "=h"(retval.x), \ "=h"(retval.y), \ "=h"(retval.z), \ "=h"(retval.w) : \ _CUB_ASM_PTR_(ptr)); \ return retval; \ } \ template<> \ __device__ __forceinline__ uint2 ThreadLoad<cub_modifier, uint2 const *>(uint2 const *ptr) \ { \ uint2 retval; \ asm volatile ("ld."#ptx_modifier".v2.u32 {%0, %1}, [%2];" : \ "=r"(retval.x), \ "=r"(retval.y) : \ _CUB_ASM_PTR_(ptr)); \ return retval; \ } \ template<> \ __device__ __forceinline__ unsigned long long ThreadLoad<cub_modifier, unsigned long long const *>(unsigned long long const *ptr) \ { \ unsigned long long retval; \ asm volatile ("ld."#ptx_modifier".u64 %0, [%1];" : \ "=l"(retval) : \ _CUB_ASM_PTR_(ptr)); \ return retval; \ } /** * Define a uint (4B) ThreadLoad specialization for the given Cache load modifier */ #define _CUB_LOAD_4(cub_modifier, ptx_modifier) \ template<> \ __device__ __forceinline__ unsigned int ThreadLoad<cub_modifier, unsigned int const *>(unsigned int const *ptr) \ { \ unsigned int retval; \ asm volatile ("ld."#ptx_modifier".u32 %0, [%1];" : \ "=r"(retval) : \ _CUB_ASM_PTR_(ptr)); \ return retval; \ } /** * Define a unsigned short (2B) ThreadLoad specialization for the given Cache load modifier */ #define _CUB_LOAD_2(cub_modifier, ptx_modifier) \ template<> \ __device__ __forceinline__ unsigned short ThreadLoad<cub_modifier, unsigned short const *>(unsigned short const *ptr) \ { \ unsigned short retval; \ asm volatile ("ld."#ptx_modifier".u16 %0, [%1];" : \ "=h"(retval) : \ _CUB_ASM_PTR_(ptr)); \ return retval; \ } /** * Define an unsigned char (1B) ThreadLoad specialization for the given Cache load modifier */ #define _CUB_LOAD_1(cub_modifier, ptx_modifier) \ template<> \ __device__ __forceinline__ unsigned char ThreadLoad<cub_modifier, unsigned char const *>(unsigned char const *ptr) \ { \ unsigned short retval; \ asm volatile ( \ "{" \ " .reg .u8 datum;" \ " ld."#ptx_modifier".u8 datum, [%1];" \ " cvt.u16.u8 %0, datum;" \ "}" : \ "=h"(retval) : \ _CUB_ASM_PTR_(ptr)); \ return (unsigned char) retval; \ } /** * Define powers-of-two ThreadLoad specializations for the given Cache load modifier */ #define _CUB_LOAD_ALL(cub_modifier, ptx_modifier) \ _CUB_LOAD_16(cub_modifier, ptx_modifier) \ _CUB_LOAD_8(cub_modifier, ptx_modifier) \ _CUB_LOAD_4(cub_modifier, ptx_modifier) \ _CUB_LOAD_2(cub_modifier, ptx_modifier) \ _CUB_LOAD_1(cub_modifier, ptx_modifier) \ /** * Define powers-of-two ThreadLoad specializations for the various Cache load modifiers */ #if CUB_PTX_ARCH >= 200 _CUB_LOAD_ALL(LOAD_CA, ca) _CUB_LOAD_ALL(LOAD_CG, cg) _CUB_LOAD_ALL(LOAD_CS, cs) _CUB_LOAD_ALL(LOAD_CV, cv) #else _CUB_LOAD_ALL(LOAD_CA, global) // Use volatile to ensure coherent reads when this PTX is JIT'd to run on newer architectures with L1 _CUB_LOAD_ALL(LOAD_CG, volatile.global) _CUB_LOAD_ALL(LOAD_CS, global) _CUB_LOAD_ALL(LOAD_CV, volatile.global) #endif #if CUB_PTX_ARCH >= 
350 _CUB_LOAD_ALL(LOAD_LDG, global.nc) #else _CUB_LOAD_ALL(LOAD_LDG, global) #endif // Macro cleanup #undef _CUB_LOAD_ALL #undef _CUB_LOAD_1 #undef _CUB_LOAD_2 #undef _CUB_LOAD_4 #undef _CUB_LOAD_8 #undef _CUB_LOAD_16 /** * ThreadLoad definition for LOAD_DEFAULT modifier on iterator types */ template <typename InputIteratorT> __device__ __forceinline__ typename std::iterator_traits<InputIteratorT>::value_type ThreadLoad( InputIteratorT itr, Int2Type<LOAD_DEFAULT> /*modifier*/, Int2Type<false> /*is_pointer*/) { return *itr; } /** * ThreadLoad definition for LOAD_DEFAULT modifier on pointer types */ template <typename T> __device__ __forceinline__ T ThreadLoad( T *ptr, Int2Type<LOAD_DEFAULT> /*modifier*/, Int2Type<true> /*is_pointer*/) { return *ptr; } /** * ThreadLoad definition for LOAD_VOLATILE modifier on primitive pointer types */ template <typename T> __device__ __forceinline__ T ThreadLoadVolatilePointer( T *ptr, Int2Type<true> /*is_primitive*/) { T retval = *reinterpret_cast<volatile T*>(ptr); return retval; } /** * ThreadLoad definition for LOAD_VOLATILE modifier on non-primitive pointer types */ template <typename T> __device__ __forceinline__ T ThreadLoadVolatilePointer( T *ptr, Int2Type<false> /*is_primitive*/) { typedef typename UnitWord<T>::VolatileWord VolatileWord; // Word type for memcopying const int VOLATILE_MULTIPLE = sizeof(T) / sizeof(VolatileWord); /* VolatileWord words[VOLATILE_MULTIPLE]; IterateThreadLoad<0, VOLATILE_MULTIPLE>::Dereference( reinterpret_cast<volatile VolatileWord*>(ptr), words); return *reinterpret_cast<T*>(words); */ T retval; VolatileWord *words = reinterpret_cast<VolatileWord*>(&retval); IterateThreadLoad<0, VOLATILE_MULTIPLE>::Dereference( reinterpret_cast<volatile VolatileWord*>(ptr), words); return retval; } /** * ThreadLoad definition for LOAD_VOLATILE modifier on pointer types */ template <typename T> __device__ __forceinline__ T ThreadLoad( T *ptr, Int2Type<LOAD_VOLATILE> /*modifier*/, Int2Type<true> /*is_pointer*/) { // Apply tags for partial-specialization return ThreadLoadVolatilePointer(ptr, Int2Type<Traits<T>::PRIMITIVE>()); } /** * ThreadLoad definition for generic modifiers on pointer types */ template <typename T, int MODIFIER> __device__ __forceinline__ T ThreadLoad( T const *ptr, Int2Type<MODIFIER> /*modifier*/, Int2Type<true> /*is_pointer*/) { typedef typename UnitWord<T>::DeviceWord DeviceWord; const int DEVICE_MULTIPLE = sizeof(T) / sizeof(DeviceWord); DeviceWord words[DEVICE_MULTIPLE]; IterateThreadLoad<0, DEVICE_MULTIPLE>::template Load<CacheLoadModifier(MODIFIER)>( reinterpret_cast<DeviceWord*>(const_cast<T*>(ptr)), words); return *reinterpret_cast<T*>(words); } /** * ThreadLoad definition for generic modifiers */ template < CacheLoadModifier MODIFIER, typename InputIteratorT> __device__ __forceinline__ typename std::iterator_traits<InputIteratorT>::value_type ThreadLoad(InputIteratorT itr) { // Apply tags for partial-specialization return ThreadLoad( itr, Int2Type<MODIFIER>(), Int2Type<IsPointer<InputIteratorT>::VALUE>()); } #endif // DOXYGEN_SHOULD_SKIP_THIS /** @} */ // end group UtilIo } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
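// Usage sketch for the cub::ThreadLoad utility defined above, following the
// examples in its documentation block. d_in, d_out and n are assumed to be
// supplied by the caller; any modifier from CacheLoadModifier can be
// substituted for LOAD_CS.
#include <cub/thread/thread_load.cuh>

__global__ void CopyStreamingKernel(const int *d_in, int *d_out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
    {
        // LOAD_CS hints that each element is read once and should be streamed
        // through the cache rather than kept resident.
        int val = cub::ThreadLoad<cub::LOAD_CS>(d_in + i);
        d_out[i] = val;
    }
}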
* \file * DeviceScan provides device-wide, parallel operations for computing a prefix scan across a sequence of data items residing within device-accessible memory. */ #pragma once #include <iterator> #include "../../agent/agent_scan_by_key.cuh" #include "../../thread/thread_operators.cuh" #include "../../config.cuh" #include "../../util_debug.cuh" #include "../../util_device.cuh" #include "../../util_math.cuh" #include "dispatch_scan.cuh" #include <thrust/system/cuda/detail/core/triple_chevron_launch.h> CUB_NAMESPACE_BEGIN /****************************************************************************** * Kernel entry points *****************************************************************************/ /** * Scan kernel entry point (multi-block) */ template < typename ChainedPolicyT, ///< Chained tuning policy typename KeysInputIteratorT, ///< Random-access input iterator type typename ValuesInputIteratorT, ///< Random-access input iterator type typename ValuesOutputIteratorT, ///< Random-access output iterator type typename ScanByKeyTileStateT, ///< Tile status interface type typename EqualityOp, ///< Equality functor type typename ScanOpT, ///< Scan functor type typename InitValueT, ///< The init_value element for ScanOpT type (cub::NullType for inclusive scan) typename OffsetT> ///< Signed integer type for global offsets __launch_bounds__ (int(ChainedPolicyT::ActivePolicy::ScanByKeyPolicyT::BLOCK_THREADS)) __global__ void DeviceScanByKeyKernel( KeysInputIteratorT d_keys_in, ///< Input keys data ValuesInputIteratorT d_values_in, ///< Input values data ValuesOutputIteratorT d_values_out, ///< Output values data ScanByKeyTileStateT tile_state, ///< Tile status interface int start_tile, ///< The starting tile for the current grid EqualityOp equality_op, ///< Binary equality functor ScanOpT scan_op, ///< Binary scan functor InitValueT init_value, ///< Initial value to seed the exclusive scan OffsetT num_items) ///< Total number of scan items for the entire problem { typedef typename ChainedPolicyT::ActivePolicy::ScanByKeyPolicyT ScanByKeyPolicyT; // Thread block type for scanning input tiles typedef AgentScanByKey< ScanByKeyPolicyT, KeysInputIteratorT, ValuesInputIteratorT, ValuesOutputIteratorT, EqualityOp, ScanOpT, InitValueT, OffsetT> AgentScanByKeyT; // Shared memory for AgentScanByKey __shared__ typename AgentScanByKeyT::TempStorage temp_storage; // Process tiles AgentScanByKeyT( temp_storage, d_keys_in, d_values_in, d_values_out, equality_op, scan_op, init_value ).ConsumeRange( num_items, tile_state, start_tile); } /****************************************************************************** * Policy ******************************************************************************/ template <typename KeysInputIteratorT, typename ValuesInputIteratorT, typename InitValueT> struct DeviceScanByKeyPolicy { using KeyT = cub::detail::value_t<KeysInputIteratorT>; using ValueT = cub::detail::conditional_t< std::is_same<InitValueT, NullType>::value, cub::detail::value_t<ValuesInputIteratorT>, InitValueT>; static constexpr size_t MaxInputBytes = (sizeof(KeyT) > sizeof(ValueT) ? sizeof(KeyT) : sizeof(ValueT)); static constexpr size_t CombinedInputBytes = sizeof(KeyT) + sizeof(ValueT); // SM350 struct Policy350 : ChainedPolicy<350, Policy350, Policy350> { enum { NOMINAL_4B_ITEMS_PER_THREAD = 6, ITEMS_PER_THREAD = ((MaxInputBytes <= 8) ? 
6 : Nominal4BItemsToItemsCombined(NOMINAL_4B_ITEMS_PER_THREAD, CombinedInputBytes)), }; typedef AgentScanByKeyPolicy< 128, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE, LOAD_LDG, BLOCK_SCAN_WARP_SCANS, BLOCK_STORE_WARP_TRANSPOSE> ScanByKeyPolicyT; }; // SM520 struct Policy520 : ChainedPolicy<520, Policy520, Policy350> { enum { NOMINAL_4B_ITEMS_PER_THREAD = 9, ITEMS_PER_THREAD = ((MaxInputBytes <= 8) ? 9 : Nominal4BItemsToItemsCombined(NOMINAL_4B_ITEMS_PER_THREAD, CombinedInputBytes)), }; typedef AgentScanByKeyPolicy< 256, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE, LOAD_LDG, BLOCK_SCAN_WARP_SCANS, BLOCK_STORE_WARP_TRANSPOSE> ScanByKeyPolicyT; }; /// MaxPolicy typedef Policy520 MaxPolicy; }; /****************************************************************************** * Dispatch ******************************************************************************/ /** * Utility class for dispatching the appropriately-tuned kernels for DeviceScan */ template < typename KeysInputIteratorT, ///< Random-access input iterator type typename ValuesInputIteratorT, ///< Random-access input iterator type typename ValuesOutputIteratorT, ///< Random-access output iterator type typename EqualityOp, ///< Equality functor type typename ScanOpT, ///< Scan functor type typename InitValueT, ///< The init_value element for ScanOpT type (cub::NullType for inclusive scan) typename OffsetT, ///< Signed integer type for global offsets typename SelectedPolicy = DeviceScanByKeyPolicy<KeysInputIteratorT, ValuesInputIteratorT, InitValueT> > struct DispatchScanByKey: SelectedPolicy { //--------------------------------------------------------------------- // Constants and Types //--------------------------------------------------------------------- enum { INIT_KERNEL_THREADS = 128 }; // The input key type using KeyT = cub::detail::value_t<KeysInputIteratorT>; // The input value type using InputT = cub::detail::value_t<ValuesInputIteratorT>; // The output value type -- used as the intermediate accumulator // Per https://wg21.link/P0571, use InitValueT if provided, otherwise the // input iterator's value type. using OutputT = cub::detail::conditional_t<std::is_same<InitValueT, NullType>::value, InputT, InitValueT>; void* d_temp_storage; ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t& temp_storage_bytes; ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation KeysInputIteratorT d_keys_in; ///< [in] Iterator to the input sequence of key items ValuesInputIteratorT d_values_in; ///< [in] Iterator to the input sequence of value items ValuesOutputIteratorT d_values_out; ///< [out] Iterator to the input sequence of value items EqualityOp equality_op; ///< [in]Binary equality functor ScanOpT scan_op; ///< [in] Binary scan functor InitValueT init_value; ///< [in] Initial value to seed the exclusive scan OffsetT num_items; ///< [in] Total number of input items (i.e., the length of \p d_in) cudaStream_t stream; ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous; int ptx_version; CUB_RUNTIME_FUNCTION __forceinline__ DispatchScanByKey( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation KeysInputIteratorT d_keys_in, ///< [in] Iterator to the input sequence of key items ValuesInputIteratorT d_values_in, ///< [in] Iterator to the input sequence of value items ValuesOutputIteratorT d_values_out, ///< [out] Iterator to the input sequence of value items EqualityOp equality_op, ///< [in] Binary equality functor ScanOpT scan_op, ///< [in] Binary scan functor InitValueT init_value, ///< [in] Initial value to seed the exclusive scan OffsetT num_items, ///< [in] Total number of input items (i.e., the length of \p d_in) cudaStream_t stream, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous, int ptx_version ): d_temp_storage(d_temp_storage), temp_storage_bytes(temp_storage_bytes), d_keys_in(d_keys_in), d_values_in(d_values_in), d_values_out(d_values_out), equality_op(equality_op), scan_op(scan_op), init_value(init_value), num_items(num_items), stream(stream), debug_synchronous(debug_synchronous), ptx_version(ptx_version) {} template <typename ActivePolicyT, typename InitKernel, typename ScanKernel> CUB_RUNTIME_FUNCTION __host__ __forceinline__ cudaError_t Invoke(InitKernel init_kernel, ScanKernel scan_kernel) { #ifndef CUB_RUNTIME_ENABLED (void)init_kernel; (void)scan_kernel; // Kernel launch not supported from this device return CubDebug(cudaErrorNotSupported); #else typedef typename ActivePolicyT::ScanByKeyPolicyT Policy; typedef ReduceByKeyScanTileState<OutputT, OffsetT> ScanByKeyTileStateT; cudaError error = cudaSuccess; do { // Get device ordinal int device_ordinal; if (CubDebug(error = cudaGetDevice(&device_ordinal))) break; // Number of input tiles int tile_size = Policy::BLOCK_THREADS * Policy::ITEMS_PER_THREAD; int num_tiles = static_cast<int>(cub::DivideAndRoundUp(num_items, tile_size)); // Specify temporary storage allocation requirements size_t allocation_sizes[1]; if (CubDebug(error = ScanByKeyTileStateT::AllocationSize(num_tiles, allocation_sizes[0]))) break; // bytes needed for tile status descriptors // Compute allocation pointers into the single storage blob (or compute the necessary size of the blob) void* allocations[1] = {}; if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break; if (d_temp_storage == NULL) { // Return if the caller is simply requesting the size of the storage allocation break; } // Return if empty problem if (num_items == 0) break; // Construct the tile status interface ScanByKeyTileStateT tile_state; if (CubDebug(error = tile_state.Init(num_tiles, allocations[0], allocation_sizes[0]))) break; // Log init_kernel configuration int init_grid_size = cub::DivideAndRoundUp(num_tiles, INIT_KERNEL_THREADS); if (debug_synchronous) _CubLog("Invoking init_kernel<<<%d, %d, 0, %lld>>>()\n", init_grid_size, INIT_KERNEL_THREADS, (long long) stream); // Invoke init_kernel to initialize tile descriptors THRUST_NS_QUALIFIER::cuda_cub::launcher::triple_chevron( init_grid_size, INIT_KERNEL_THREADS, 0, stream ).doit(init_kernel, tile_state, num_tiles); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) break; // Sync the stream if specified to flush runtime errors if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; // Get SM occupancy for scan_kernel int scan_sm_occupancy; if (CubDebug(error = MaxSmOccupancy( scan_sm_occupancy, // out scan_kernel, Policy::BLOCK_THREADS))) break; // Get max 
x-dimension of grid int max_dim_x; if (CubDebug(error = cudaDeviceGetAttribute(&max_dim_x, cudaDevAttrMaxGridDimX, device_ordinal))) break; // Run grids in epochs (in case number of tiles exceeds max x-dimension int scan_grid_size = CUB_MIN(num_tiles, max_dim_x); for (int start_tile = 0; start_tile < num_tiles; start_tile += scan_grid_size) { // Log scan_kernel configuration if (debug_synchronous) _CubLog("Invoking %d scan_kernel<<<%d, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy\n", start_tile, scan_grid_size, Policy::BLOCK_THREADS, (long long) stream, Policy::ITEMS_PER_THREAD, scan_sm_occupancy); // Invoke scan_kernel THRUST_NS_QUALIFIER::cuda_cub::launcher::triple_chevron( scan_grid_size, Policy::BLOCK_THREADS, 0, stream ).doit( scan_kernel, d_keys_in, d_values_in, d_values_out, tile_state, start_tile, equality_op, scan_op, init_value, num_items); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) break; // Sync the stream if specified to flush runtime errors if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; } } while (0); return error; #endif // CUB_RUNTIME_ENABLED } template <typename ActivePolicyT> CUB_RUNTIME_FUNCTION __host__ __forceinline__ cudaError_t Invoke() { typedef typename DispatchScanByKey::MaxPolicy MaxPolicyT; typedef ReduceByKeyScanTileState<OutputT, OffsetT> ScanByKeyTileStateT; // Ensure kernels are instantiated. return Invoke<ActivePolicyT>( DeviceScanInitKernel<ScanByKeyTileStateT>, DeviceScanByKeyKernel< MaxPolicyT, KeysInputIteratorT, ValuesInputIteratorT, ValuesOutputIteratorT, ScanByKeyTileStateT, EqualityOp, ScanOpT, InitValueT, OffsetT> ); } /** * Internal dispatch routine */ CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t Dispatch( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation KeysInputIteratorT d_keys_in, ///< [in] Iterator to the input sequence of key items ValuesInputIteratorT d_values_in, ///< [in] Iterator to the input sequence of value items ValuesOutputIteratorT d_values_out, ///< [out] Iterator to the input sequence of value items EqualityOp equality_op, ///< [in]Binary equality functor ScanOpT scan_op, ///< [in] Binary scan functor InitValueT init_value, ///< [in] Initial value to seed the exclusive scan OffsetT num_items, ///< [in] Total number of input items (i.e., the length of \p d_in) cudaStream_t stream, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous) { typedef typename DispatchScanByKey::MaxPolicy MaxPolicyT; cudaError_t error; do { // Get PTX version int ptx_version = 0; if (CubDebug(error = PtxVersion(ptx_version))) break; // Create dispatch functor DispatchScanByKey dispatch( d_temp_storage, temp_storage_bytes, d_keys_in, d_values_in, d_values_out, equality_op, scan_op, init_value, num_items, stream, debug_synchronous, ptx_version ); // Dispatch to chained policy if (CubDebug(error = MaxPolicyT::Invoke(ptx_version, dispatch))) break; } while (0); return error; } }; CUB_NAMESPACE_END
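// Usage sketch of the two-phase temporary-storage pattern that
// DispatchScanByKey implements. It is written against the public
// cub::DeviceScan::ExclusiveSumByKey wrapper, which forwards to this dispatch
// in the CUB releases that ship this header; treat the exact wrapper name and
// signature as an assumption if your version differs.
#include <cub/device/device_scan.cuh>
#include <cuda_runtime.h>

cudaError_t ExclusiveSumByKeySketch(const int *d_keys, const float *d_values,
                                    float *d_out, int num_items,
                                    cudaStream_t stream)
{
    void   *d_temp_storage     = nullptr;
    size_t  temp_storage_bytes = 0;

    // First call: d_temp_storage == NULL, so only the required allocation size
    // is written to temp_storage_bytes and no scan work is done.
    cub::DeviceScan::ExclusiveSumByKey(d_temp_storage, temp_storage_bytes,
                                       d_keys, d_values, d_out, num_items,
                                       cub::Equality(), stream);

    cudaMalloc(&d_temp_storage, temp_storage_bytes);

    // Second call: identical arguments, now the tiles are actually scanned.
    cudaError_t error =
        cub::DeviceScan::ExclusiveSumByKey(d_temp_storage, temp_storage_bytes,
                                           d_keys, d_values, d_out, num_items,
                                           cub::Equality(), stream);

    cudaFree(d_temp_storage);
    return error;
}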
#include <gtest/gtest.h> #include <cuda_runtime.h> #include <stdio.h> #include <opencv2/imgcodecs.hpp> #include <opencv2/imgproc.hpp> #include <random> #include <string> #include <vector> #include "dali/core/dev_buffer.h" #include "dali/core/format.h" #include "dali/core/math_util.h" #include "dali/core/tensor_shape_print.h" #include "dali/kernels/imgproc/resample/separable.h" #include "dali/kernels/test/test_data.h" #include "dali/kernels/scratch.h" #include "dali/kernels/imgproc/resample.h" #include "dali/kernels/imgproc/resample_cpu.h" #include "dali/kernels/test/resampling_test/resampling_test_params.h" #include "dali/test/cv_mat_utils.h" #include "dali/test/tensor_test_utils.h" #include "dali/test/test_tensors.h" using std::cout; using std::endl; namespace dali { namespace kernels { namespace resample_test { static constexpr int kMaxChannels = 16; struct Bubble { vec3 centre; float color[kMaxChannels]; float frequency; float decay; }; template <typename T> __global__ void DrawBubblesKernel(T *data, ivec3 size, int nch, const Bubble *bubbles, int nbubbles) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; if (x >= size.x || y >= size.y || z >= size.z) return; T *pixel = &data[nch * (x + size.x * (y + size.y * z))]; vec3 pos(x + 0.5f, y + 0.5f, z + 0.5f); float color[kMaxChannels] = { 0 }; for (int i = 0; i < nbubbles; i++) { float dsq = (bubbles[i].centre - pos).length_square(); float d = dsq*rsqrt(dsq); float magnitude = expf(bubbles[i].decay * dsq); float phase = bubbles[i].frequency * d; for (int c = 0; c < nch; c++) color[c] += bubbles[i].color[c] * (1 + cos(phase)) * magnitude * 0.5f; } for (int c = 0; c < nch; c++) pixel[c] = ConvertSatNorm<T>(color[c]); } template <typename T> struct TestDataGenerator { DeviceBuffer<Bubble> gpu_bubbles; template <int ndim> void DrawBubbles(const TensorView<StorageGPU, T, ndim> &tensor, span<const Bubble> bubbles, cudaStream_t stream) { static_assert(ndim == 4 || ndim == DynamicDimensions, "Tensor must be 4D or dynamic (and 4D)"); assert(tensor.dim() == 4 && "Tensor must be 4D"); gpu_bubbles.from_host(bubbles.data(), bubbles.size(), stream); ivec3 size(tensor.shape[2], tensor.shape[1], tensor.shape[0]); int nch = tensor.shape[3]; assert(tensor.shape[3] <= kMaxChannels); dim3 block(32, 32, 1); dim3 grid(div_ceil(size.x, 32), div_ceil(size.y, 32), size.z); DrawBubblesKernel<<<grid, block, 0, stream>>>(tensor.data, size, nch, gpu_bubbles, bubbles.size()); } template <int ndim> void GenerateTestData(const TensorView<StorageGPU, T, ndim> &tensor, int num_bubbles = 5, cudaStream_t stream = 0) { static_assert(ndim == 4 || ndim == DynamicDimensions, "Tensor must be 4D or dynamic (and 4D)"); assert(tensor.dim() == 4 && "Tensor must be 4D"); std::mt19937_64 rng(1234); std::uniform_real_distribution<float> dist(0, 1); std::uniform_real_distribution<float> freq_dist(M_PI/10, M_PI/3); std::uniform_real_distribution<float> sigma_dist(10, 100); auto shape = tensor.shape; int nch = shape[3]; assert(nch <= kMaxChannels); std::vector<Bubble> bubbles(num_bubbles); for (int i = 0; i < num_bubbles; i++) { bubbles[i].centre = { shape[2] * dist(rng), shape[1] * dist(rng), shape[0] * dist(rng) }; for (int c = 0; c < nch; c++) bubbles[i].color[c] = dist(rng); bubbles[i].frequency = freq_dist(rng); bubbles[i].decay = -1/(M_SQRT2 * sigma_dist(rng)); } DrawBubbles(tensor, make_span(bubbles), stream); } }; // Slices - duplicate params and shapes for depth slices as if they were 
additional samples template <int ndim> TensorListShape<ndim == DynamicDimensions ? DynamicDimensions : ndim-1> GetSliceShapes(const TensorListShape<ndim> &tls) { TensorListShape<ndim == DynamicDimensions ? DynamicDimensions : ndim-1> slice_tls; int N = tls.num_samples(); int total_slices = 0; for (int i = 0; i < N; i++) { total_slices += tls.tensor_shape_span(i)[0]; } int D = tls.sample_dim() - 1; slice_tls.resize(total_slices, D); for (int i = 0, slice = 0; i < N; i++) { auto ts = tls.tensor_shape_span(i); for (int z = 0; z < ts[0]; z++, slice++) { auto slice_ts = slice_tls.tensor_shape_span(slice); for (int d = 0; d < D; d++) { slice_ts[d] = ts[d+1]; } } } return slice_tls; } template <typename Storage, typename T, int ndim> auto GetSliceImages(const TensorListView<Storage, T, ndim> &volumes) { return reshape(volumes, GetSliceShapes(volumes.shape), true); } template <int ndim> void GetSliceParams(vector<ResamplingParams2D> &slice_params, span<const ResamplingParams3D> params, const TensorListShape<ndim> &in_shape) { slice_params.clear(); int N = in_shape.num_samples(); assert(static_cast<int>(params.size()) == N); for (int i = 0; i < N; i++) { int depth = in_shape.tensor_shape_span(i)[0]; ResamplingParams2D p; p[0] = params[i][1]; p[1] = params[i][2]; for (int z = 0; z < depth; z++) { slice_params.push_back(p); } } } // ZShapes, ZImages - resize Z dim, fuse XY and keep old size template <int ndim> auto GetZShapes(const TensorListShape<ndim> &tls) { return collapse_dim(tls, 1); } template <typename Storage, typename T, int ndim> auto GetZImages(const TensorListView<Storage, T, ndim> &volumes) { return reshape(volumes, GetZShapes(volumes.shape), true); } /** * @param z_params - parameters for resizing along Z axis, keeping fused XY intact * @param params - original parameters * @param in_shape - input shape for _this stage_ (if Z is resized after XY, it is tmp_shape) * * @remarks This function cannot work with ROI in X/Y axes - it must be run as the second stage * (after resizing all the slices). */ template <int ndim> void GetZParams(vector<ResamplingParams2D> &z_params, span<const ResamplingParams3D> params, const TensorListShape<ndim> &in_shape) { z_params.clear(); int N = in_shape.num_samples(); assert(static_cast<int>(params.size()) == N); for (int i = 0; i < N; i++) { auto sample_shape = in_shape.tensor_shape_span(i); int depth = sample_shape[0]; ResamplingParams2D p = {}; p[0] = params[i][0]; p[1].output_size = sample_shape[1] * sample_shape[2]; p[1].roi.start = 0; p[1].roi.end = p[1].output_size; z_params.push_back(p); } } /** * @brief Use 2x 2D resampling to achieve 3D * * The first step decomposes the resampling into slices and resamples XY dimensions, fusing depth * and batch dim. * The second step fuses XY dimensions into generalized rows - which is OK, since we don't resize * that dimension and ROI is already applied. The Z dimension becomes the new Y. * * The result may differ slightly between this and true 3D resampling, because the order of * operations is not optimized and may be different. 
*/ template <typename Out, typename In> void Resample3Dvia2D(TestTensorList<Out> &out, TestTensorList<In> &in, span<const ResamplingParams3D> params, cudaStream_t stream) { TestTensorList<float> tmp; auto in_view = in.template gpu<4>(stream); const auto &in_shape = in_view.shape; assert(in_shape.sample_dim() == 4); TensorListShape<4> tmp_shape, out_shape; int N = in_shape.num_samples(); tmp_shape.resize(N); out_shape.resize(N); for (int i = 0; i < N; i++) { auto in_sample_shape = in_shape.tensor_shape_span(i); auto tmp_sample_shape = tmp_shape.tensor_shape_span(i); auto out_sample_shape = out_shape.tensor_shape_span(i); for (int d = 0; d < 3; d++) { out_sample_shape[d] = params[i][d].output_size; if (out_sample_shape[d] == KeepOriginalSize) out_sample_shape[d] = in_sample_shape[d]; tmp_sample_shape[d] = d == 0 ? in_sample_shape[d] : out_sample_shape[d]; } tmp_sample_shape[3] = out_sample_shape[3] = in_sample_shape[3]; // number of channels } tmp.reshape(tmp_shape); out.reshape(out_shape); auto tmp_view = tmp.gpu<4>(stream); auto out_view = out.template gpu<4>(stream); vector<ResamplingParams2D> params_xy; vector<ResamplingParams2D> params_z; GetSliceParams(params_xy, params, in_shape); auto in_slices = GetSliceImages(in_view); auto tmp_slices = GetSliceImages(tmp_view); assert(in_slices.num_samples() == tmp_slices.num_samples()); ScratchpadAllocator sa; { ResampleGPU<float, In, 2> res_xy; KernelContext ctx; ctx.gpu.stream = stream; auto req = res_xy.Setup(ctx, in_slices, make_span(params_xy)); sa.Reserve(req.scratch_sizes); auto scratch = sa.GetScratchpad(); ctx.scratchpad = &scratch; assert(req.output_shapes[0] == tmp_slices.shape); res_xy.Run(ctx, tmp_slices, in_slices, make_span(params_xy)); } GetZParams(params_z, params, tmp_shape); auto tmp_z = GetZImages(tmp_view); auto out_z = GetZImages(out_view); { ResampleGPU<Out, float, 2> res_z; KernelContext ctx; ctx.gpu.stream = stream; auto req = res_z.Setup(ctx, tmp_z, make_span(params_z)); sa.Reserve(req.scratch_sizes); auto scratch = sa.GetScratchpad(); ctx.scratchpad = &scratch; assert(req.output_shapes[0] == out_z.shape); res_z.Run(ctx, out_z, tmp_z, make_span(params_z)); } } template <typename TestParams> class Resample3DTest; template <typename Out, typename In, ResamplingFilterType interp> struct ResamplingTestParams { using OutputType = Out; using InputType = In; static constexpr ResamplingFilterType interp_type() { return interp; } }; template <typename Out, typename In, ResamplingFilterType interp> class Resample3DTest<ResamplingTestParams<Out, In, interp>> : public ::testing::Test { public: Resample3DTest() { InitShapes(); } protected: void InitShapes() { in_shapes.resize(3); out_shapes.resize(3); // NOTE: The shapes are chosen as to avoid source pixel centers exactly halfway // between original pixels, because it can lead to rounding discrepancies between // cpu and gpu variants (and we're using two-pass GPU as a reference here). 
// 3 channels in_shapes[0] = {{ { 40, 60, 50, 3 }, { 32, 80, 120, 3 }, }}; out_shapes[0] = {{ { 51, 40, 70, 3 }, { 73, 87, 29, 3 }, }}; // variable number of channels in_shapes[1] = {{ { 10, 200, 120, 1 }, { 100, 10, 10, 3 }, { 70, 80, 90, 6 }, }}; out_shapes[1] = {{ { 31, 200, 120, 1 }, { 51, 27, 33, 3 }, { 73, 181, 43, 6 }, }}; // many channels in_shapes[2] = {{ { 40, 40, 40, 11 }, }}; out_shapes[2] = {{ { 51, 51, 51, 11 }, }}; } vector<ResamplingParams3D> GenerateParams(const TensorListShape<4> &out_shape, const TensorListShape<4> &in_shape) { vector<ResamplingParams3D> params; params.resize(in_shape.num_samples()); std::bernoulli_distribution dist; std::uniform_real_distribution<float> start_dist(0.05, 0.3); std::uniform_real_distribution<float> end_dist(0.7, 0.95); for (int i = 0; i < in_shape.num_samples(); i++) { auto in_sample_shape = in_shape.tensor_shape_span(i); for (int d = 0; d < 3; d++) { params[i][d].min_filter = interp; params[i][d].mag_filter = interp; params[i][d].output_size = out_shape.tensor_shape_span(i)[d]; if (d == 2) { do { params[i][d].roi.use_roi = true; params[i][d].roi.start = start_dist(rng) * in_sample_shape[d]; params[i][d].roi.end = end_dist(rng) * in_sample_shape[d]; if (dist(rng)) std::swap(params[i][d].roi.start, params[i][d].roi.end); } while (interp == ResamplingFilterType::Nearest && !CheckNN(params[i][d].output_size, params[i][d].roi.start, params[i][d].roi.end)); } } } return params; } // Checks for possible rounding problems leading to selecting different source pixel // when running NN resampling. static bool CheckNN(int size, float start, float end) { float step = (end - start) / size; float x = start + step * 0.5f; for (int i = 0; i < size; i++, x += step) { if (std::abs(x - std::floor(x)) < 0.01f) return false; } return true; } void RunGPU() { cudaStream_t stream = 0; ResampleGPU<Out, In, 3> kernel; KernelContext ctx; ctx.gpu.stream = stream; ScratchpadAllocator sa; TestDataGenerator<In> tdg; TestTensorList<In> in; TestTensorList<Out> out, ref; int niter = NumIter(); for (int iter = 0; iter < niter; iter++) { const TensorListShape<4> &in_shape = in_shapes[iter]; int N = in_shape.num_samples(); in.reshape(in_shape); for (int i = 0; i < N; i++) tdg.GenerateTestData(in.gpu(stream)[i], 30, stream); const TensorListShape<4> &out_shape = out_shapes[iter]; vector<ResamplingParams3D> params = GenerateParams(out_shape, in_shape); Resample3Dvia2D(ref, in, make_span(params), stream); auto ref_cpu = ref.cpu(stream); assert(ref_cpu.shape == out_shape); auto req = kernel.Setup(ctx, in.template gpu<4>(stream), make_span(params)); ASSERT_EQ(req.output_shapes.size(), 1u) << "Expected only 1 output"; ASSERT_EQ(req.output_shapes[0], out_shape) << "Unexpected output shape"; out.reshape(out_shape); CUDA_CALL( cudaMemsetAsync(out.gpu(stream).data[0], 0, sizeof(Out)*out_shape.num_elements(), stream)); sa.Reserve(req.scratch_sizes); auto scratchpad = sa.GetScratchpad(); ctx.scratchpad = &scratchpad; auto out_gpu = out.template gpu<4>(stream); kernel.Run(ctx, out_gpu, in.template gpu<4>(stream), make_span(params)); auto out_cpu = out.cpu(stream); if (interp == ResamplingFilterType::Nearest) { Check(out_cpu, ref_cpu); } else { // Epsilons are quite big because, processing order in the reference is forced to be XYZ // or YXZ, whereas the tested implementation can use any order. double eps = std::is_integral<Out>::value ? 
1 : 1e-3; Check(out_cpu, ref_cpu, EqualEpsRel(eps, 1e-4)); } } } void RunCPU() { cudaStream_t stream = 0; ResampleCPU<Out, In, 3> kernel; KernelContext ctx; ScratchpadAllocator sa; TestDataGenerator<In> tdg; TestTensorList<In> in; TestTensorList<Out> out, ref; int niter = NumIter(); for (int iter = 0; iter < niter; iter++) { const TensorListShape<4> &in_shape = in_shapes[iter]; int N = in_shape.num_samples(); in.reshape(in_shape); for (int i = 0; i < N; i++) tdg.GenerateTestData(in.gpu(stream)[i], 10, stream); const TensorListShape<4> &out_shape = out_shapes[iter]; out.reshape(out_shape); memset(out.cpu(stream).data[0], 0, sizeof(Out)*out_shape.num_elements()); vector<ResamplingParams3D> params = GenerateParams(out_shape, in_shape); if (iter != 1) continue; Resample3Dvia2D(ref, in, make_span(params), stream); auto ref_cpu = ref.cpu(stream); ref.invalidate_gpu(); assert(ref_cpu.shape == out_shape); auto in_cpu = in.template cpu<4>(stream); auto out_cpu = out.template cpu<4>(stream); for (int i = 0; i < N; i++) { auto req = kernel.Setup(ctx, in_cpu[i], params[i]); ASSERT_EQ(req.output_shapes.size(), 1u) << "Expected only 1 output"; ASSERT_EQ(req.output_shapes[0][0], out_shape[i]) << "Unexpected output shape"; sa.Reserve(req.scratch_sizes); auto scratchpad = sa.GetScratchpad(); ctx.scratchpad = &scratchpad; kernel.Run(ctx, out_cpu[i], in_cpu[i], params[i]); if (interp == ResamplingFilterType::Nearest) { Check(out_cpu[i], ref_cpu[i]); } else { // Epsilons are quite big because: // - GPU uses fma // - GPU uses different rounding // - processing order in the reference is forced to be XYZ or YXZ, whereas // the tested implementation can use any order. double eps = std::is_integral<Out>::value ? 1 : std::is_integral<In>::value ? max_value<In>()*1e-6 : 1e-5; Check(out_cpu[i], ref_cpu[i], EqualEpsRel(eps, 2e-3)); } } } } vector<TensorListShape<4>> in_shapes, out_shapes; int NumIter() const { return in_shapes.size(); } std::mt19937_64 rng{1234}; }; using Resample3DTestTypes = ::testing::Types< ResamplingTestParams<uint8_t, uint8_t, ResamplingFilterType::Nearest>, ResamplingTestParams<float, uint8_t, ResamplingFilterType::Linear>, ResamplingTestParams<int16_t, int16_t, ResamplingFilterType::Cubic>, ResamplingTestParams<float, uint16_t, ResamplingFilterType::Lanczos3> >; TYPED_TEST_SUITE(Resample3DTest, Resample3DTestTypes); TYPED_TEST(Resample3DTest, TestGPU) { this->RunGPU(); } TYPED_TEST(Resample3DTest, TestCPU) { this->RunCPU(); } } // namespace resample_test } // namespace kernels } // namespace dali
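// ---------------------------------------------------------------------------
// Illustrative sketch (not part of DALI): the shape bookkeeping behind the
// two-pass Resample3Dvia2D reference above, using plain std::array instead of
// TensorListShape. Layout is DHWC and an output extent of 0 stands in for
// KeepOriginalSize (an assumption for this sketch). Pass 1 resizes H/W of
// every depth slice and keeps D; pass 2 resizes D with the fused H*W extent
// treated as generalized rows.
#include <array>
#include <cassert>
#include <cstdio>

using Shape4D = std::array<int, 4>;  // {D, H, W, C}

struct TwoPassShapes {
  Shape4D tmp;  // after pass 1: original depth, resized H/W
  Shape4D out;  // after pass 2: resized depth as well
};

inline TwoPassShapes TwoPassShapeFor(const Shape4D &in, const std::array<int, 3> &out_size) {
  TwoPassShapes s{};
  for (int d = 0; d < 3; d++) {
    int o = out_size[d] > 0 ? out_size[d] : in[d];  // 0 -> keep original extent
    s.out[d] = o;
    s.tmp[d] = (d == 0) ? in[d] : o;                // pass 1 leaves depth untouched
  }
  s.tmp[3] = s.out[3] = in[3];                      // channel count never changes
  return s;
}

int main() {
  Shape4D in = {40, 60, 50, 3};                // D, H, W, C
  auto s = TwoPassShapeFor(in, {51, 40, 70});  // resize all three spatial dims
  assert((s.tmp == Shape4D{40, 40, 70, 3}));   // H/W resized, depth kept
  assert((s.out == Shape4D{51, 40, 70, 3}));
  // Pass 1 sees in[0] independent 2D slices; pass 2 sees one image of
  // tmp[0] rows, each tmp[1]*tmp[2] elements wide (per channel).
  std::printf("pass-1 slices: %d, pass-2 input: %d x %d\n",
              in[0], s.tmp[0], s.tmp[1] * s.tmp[2]);
  return 0;
}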
#include <cudpp_globals.h> #include "cudpp_stringsort.h" #include <cudpp.h> #include <stdio.h> #include <cudpp_util.h> #include <math.h> #include "sharedmem.h" /** * @file * stringsort_cta.cu * * @brief CUDPP CTA-level sort routines */ /** \addtogroup cudpp_cta * @{ */ /** @name Merge Sort Functions * @{ */ #define BLOCKSORT_SIZE 1024 #define CTA_BLOCK 128 #define DEPTH_simple 2 #define DEPTH_multi 2 #define CTASIZE_simple 256 #define CTASIZE_multi 256 #define INTERSECT_A_BLOCK_SIZE_simple DEPTH_simple*CTASIZE_simple #define INTERSECT_B_BLOCK_SIZE_simple 2*DEPTH_simple*CTASIZE_simple #define INTERSECT_A_BLOCK_SIZE_multi DEPTH_multi*CTASIZE_multi #define INTERSECT_B_BLOCK_SIZE_multi 2*DEPTH_multi*CTASIZE_multi typedef unsigned int uint; /** @brief Breaks ties in keys (first four characters) returns true if cmpVal > myVal false otherwise * @param[in] myLoc, cmpLoc Location of the two inputs * @param[in] myBound, cmpBound Local memory bounds for the two addresses * @param[in] myAdd Address into global memory of our current value * @param[in] cmpAdd Address into global memory of the value we are comparing against * @param[in] stringLoc Global memory array (input string) * @param[in] stringSize Size of our input string * @param[in] termC Termination character for our strings * @return Returns 1 if cmpVal > myVal 0 otherwise **/ __device__ int tie_break_simp(unsigned int myLoc, unsigned int cmpLoc, unsigned int myBound, unsigned int cmpBound, unsigned int myAdd, unsigned int cmpAdd, unsigned int* stringLoc, unsigned int stringSize, unsigned char termC) { if(myLoc >= myBound && cmpLoc >= cmpBound) return cmpLoc > myLoc; else if(myLoc >= myBound) return 0; else if(cmpLoc >= cmpBound) return 1; //Our tie is in bounds therefore we can break the tie using traditional means if(myAdd >= stringSize) return 0; else if(cmpAdd >= stringSize) return 1; unsigned int a = stringLoc[myAdd]; unsigned int b = stringLoc[cmpAdd]; while(a == b && ((a&255) != termC) && ((b&255) != termC) && myAdd < (stringSize-1) && cmpAdd < (stringSize-1) ) { a = stringLoc[++myAdd]; b = stringLoc[++cmpAdd]; } if(a==b) return (myAdd > cmpAdd ? 0 : 1); return a > b ? 0 : 1; } /** @brief Binary search within a single block (blockSort) * @param[in,out] cmpValue Value being considered from other partition * @param[in] tmpVal My Value * @param[in] in input keys * @param[in] addressPad addresses of string locations in case of tie breaks * @param[in] stringVals global string array used to break ties * @param[in,out] j The index we are considering * @param[in] bump The offset we update by * @param[in] sizeRemain Size of our block (if it's smaller than blockSize) * @param[in] stringSize Size of our global string array (for tie breaks) * @param[in] termC Termination character for our strings **/ template<class T, int depth> __device__ void bin_search_block_string(T &cmpValue, T tmpVal, T* in, T* addressPad, T* stringVals, int & j, int bump, int sizeRemain, unsigned int stringSize, unsigned char termC) { cmpValue = in[j]; __syncthreads(); __threadfence(); if(cmpValue == tmpVal) { unsigned int myAdd = addressPad[depth*threadIdx.x]; unsigned int cmpAdd = addressPad[j]; j = (tie_break_simp(depth*threadIdx.x, j, sizeRemain, sizeRemain, myAdd, cmpAdd, stringVals, stringSize, termC) == 0 ? j + bump : j - bump); } else j = (cmpValue < tmpVal ? 
j + bump : j - bump); __syncthreads(); } /** @brief Linear search within a single block (blockSort) * @param[in,out] cmpValue Value being considered from other partition * @param[in,out] tmpVal Temporary register which is used initially to compare our value, and then to store the final address * after our search * @param[in] in, addressPad, stringVals in = keys, addressPad = values, stringVals = global string array for tie breaks * @param[in] j index in B partition we are considering * @param[in] offset Since this is register packed, offset is the ith iteration of linear search * @param[in] last The end of partition B we are allowed to look upto * @param[in] startAddress The beginning of our partition * @param[in] stringSize Size of our global string array * @param[in] termC Termination character for our strings **/ template<class T, int depth> __device__ void lin_search_block_string(T &cmpValue, T &tmpVal, T* in, T* addressPad, T* stringVals, int &j, int offset, int last, int startAddress, int stringSize, unsigned char termC){ while (cmpValue < tmpVal && j < last) cmpValue = in[++j]; __threadfence(); __syncthreads(); //If we need to tie break while linearly searching while(cmpValue == tmpVal && j < last) { unsigned int myAdd = addressPad[depth*threadIdx.x+offset]; unsigned int cmpAdd = addressPad[j]; T myTmp = 0, cmpTmp = 0; //printf("tie break occured in linear at index %d for value %u compared to index %d (%d) unless corner case comparing addresses %d %d\n", // depth*threadIdx.x+offset, cmpValue, j, j+startAddress+offset, myAdd, cmpAdd); while( (myAdd != cmpAdd) &&(++myAdd) < stringSize && (++cmpAdd) < stringSize && myTmp == cmpTmp) { myTmp = stringVals[myAdd]; cmpTmp = stringVals[cmpAdd]; } if(myAdd > stringSize || (cmpAdd > stringSize)) break; if((cmpTmp < myTmp) && j < last) cmpValue = in[++j]; else if (cmpTmp > myTmp || j == last) break; } __syncthreads(); //Corner case to handle being at the edge of our shared memory search j = ((j==last && cmpValue < tmpVal) ? j+1 : j); if (j == last && cmpValue == tmpVal) { int myLoc = depth*threadIdx.x + offset; int cmpLoc = j; int myAdd = addressPad[depth*threadIdx.x+offset]; int cmpAdd = addressPad[j]; j = tie_break_simp(myLoc, cmpLoc, BLOCKSORT_SIZE, BLOCKSORT_SIZE, myAdd, cmpAdd, stringVals, stringSize, termC) == 0 ? j+1 : j; } tmpVal = j+startAddress+offset; } /** @brief For blockSort. 
Compares two values and decides to swap if A1 > A2 * @param[in,out] A1 First value being compared * @param[in,out] A2 Second value being compared * @param[in] index1 Local address of A1 * @param[in] index2 Local address of A2 * @param[in,out] scratch Scratch memory storing the addresses * @param[in] stringVals String Values for tie breaks * @param[in] size size of our array * @param[in] termC Termination character for our strings * **/ template<class T> __device__ void compareSwapVal(T &A1, T &A2, const int index1, const int index2, T* scratch, T* stringVals, unsigned int size, unsigned char termC) { if(A1 > A2) { T tmp = A1; A1 = A2; A2 = tmp; tmp = scratch[index1]; scratch[index1] = scratch[index2]; scratch[index2] = tmp; } else if(A1 == A2 && index1 < size && index2 < size) { //bad case (hopefully infrequent) we have to gather from global memory to find out if we wanna swap T tmp = stringVals[scratch[index1]+1]; T tmp2 = stringVals[scratch[index2]+1]; int i = 2; while(tmp == tmp2) { tmp = stringVals[scratch[index1]+i]; tmp2 = stringVals[scratch[index2]+i]; i++; } if(tmp > tmp2) { tmp = A1; A1 = A2; A2 = tmp; tmp = scratch[index1]; scratch[index1] = scratch[index2]; scratch[index2] = tmp; } } } /** @brief Performs a binary search in our shared memory, with tie breaks for strings * @param[in] keys, address Keys and address from our array * @param[in] offset, mid The current "middle" we are searching and the offset we will move to next * @param[in] cmpValue, testValue testValue is the value we are searching for from array A, cmpValue the value we have currently in B * @param[in] myAddress, myLoc, cmpLoc, myBound, cmpBound Same values from tie_break_simp which will be passed along * @param[in] globalStringArray, stringSize Our string array for breaking ties, and stringSize so we don't go out of bounds * @param[in] termC Termination character for our strings **/ template<class T, int depth> __device__ void binSearch_fragment(T* keys, T* address, int offset, int &mid, T cmpValue, T testValue, T myAddress, int myLoc, int cmpLoc, int myBound, int cmpBound, T* globalStringArray, int stringSize, unsigned char termC) { cmpValue = keys[mid]; if(cmpValue != testValue) mid = (cmpValue > testValue ? mid-offset : mid+offset); T cmpKey = cmpValue; if(cmpKey == testValue) { unsigned int cmpAdd = address[mid]; mid = tie_break_simp(myLoc, cmpLoc, myBound, cmpBound, myAddress, cmpAdd, globalStringArray, stringSize, termC) == 0 ? mid + offset : mid - offset; } } //TODO: merge binsearch_mult w/ regular template<class T, int depth> __device__ void binSearch_frag_mult(T* keyArraySmem, T* valueArraySmem, int offset, int &mid, T cmpValue, T testValue, int myAddress, T* globalStringArray, int myStartIdxA, int myStartIdxB, int aIndex, int bIndex, int size, int stringSize, unsigned char termC) { cmpValue = keyArraySmem[mid]; if(cmpValue != testValue) mid = (cmpValue > testValue ? mid-offset : mid+offset); if(cmpValue == testValue) { int myLoc = myStartIdxA + aIndex + depth*threadIdx.x; int cmpLoc = myStartIdxB + bIndex + mid; mid = (tie_break_simp(myLoc, cmpLoc, size, size, myAddress, valueArraySmem[mid], globalStringArray, stringSize, termC) == 0 ? 
mid + offset : mid - offset); } } /** @brief Performs a linear search in our shared memory (done after binary search), with tie breaks for strings * @param[in, out] cmpValue The current value we are looking at in our B array * @param[in] myKey, myAddress Keys and address from our array * @param[in] index Current index we are considering in our B array * @param[in] BKeys, BValues Keys and Addresses for array B * @param[in, out] stringValues, A_keys, A_values, A_keys_out, A_values_out Global arrays for our strings, keys, values * @param[in] myStartIdxA, myStartIdxB, myStartIdxC Beginning indices for our partitions * @param[in] localMinB, localMaxB The minimum and maximum values in our B partition * @param[in] aCont, bCont, totalSize, mySizeA, mySizeB, stringSize Address bounds and calculation helpers * @param[in] i The index of the local element we are merging * @param[in] stepNum Debug helper * @param[in] placed Whether value has been placed yet or not * @param[in] termC Termination character for our strings **/ template<class T, int depth> __device__ void lin_merge_simple(T& cmpValue, T myKey, T myAddress, int& index, T* BKeys, T* BValues, T* stringValues, T* A_keys, T* A_values, T*A_keys_out, T*A_values_out, int myStartIdxA, int myStartIdxB, int myStartIdxC, T localMinB, T localMaxB, int aCont, int bCont, int totalSize, int mySizeA, int mySizeB, unsigned int stringSize, int i, int stepNum, bool &placed, unsigned char termC) { int tid = threadIdx.x; //Here we keep climbing until we either reach the end of our partitions //Or we pass a value greater than ours while(cmpValue < myKey && index < INTERSECT_B_BLOCK_SIZE_simple) { index++; if(index < INTERSECT_B_BLOCK_SIZE_simple) cmpValue = BKeys[index]; else if(index == INTERSECT_B_BLOCK_SIZE_simple && bCont+index < mySizeB) { cmpValue = A_keys[myStartIdxB+bCont+index]; } else cmpValue = UINT_MAX; } //If we have a tie, brea it while(cmpValue == myKey && index < INTERSECT_B_BLOCK_SIZE_simple) { int myLoc = myStartIdxA + depth*threadIdx.x + i; int cmpLoc = myStartIdxB + bCont + index; int cmpAdd = BValues[index]; //if(myKey == 1820065792) // printf("B: (%d %d %d) (%d %d %d) %u %u\n", myLoc, cmpLoc, totalSize, cmpAdd, myAddress, stringSize, stringValues[cmpAdd], stringValues[myAddress]); if(tie_break_simp(myLoc, cmpLoc, totalSize, totalSize, myAddress, cmpAdd, stringValues, stringSize, termC) == 0) { index = index + 1; if(index < INTERSECT_B_BLOCK_SIZE_simple) cmpValue = BKeys[index]; else if(index == INTERSECT_B_BLOCK_SIZE_simple-1 && bCont+index < mySizeB) cmpValue = A_keys[myStartIdxB+bCont+index]; else cmpValue = UINT_MAX; } else break; } int globalCAddress = myStartIdxC + bCont + index + aCont + i; bool isInWindow = (index > 0 && index < (INTERSECT_B_BLOCK_SIZE_simple)); isInWindow = (isInWindow || (index == 0 && myKey > localMinB)); isInWindow = (isInWindow || (index >= (INTERSECT_B_BLOCK_SIZE_simple-1) && myKey < localMaxB)); isInWindow = (isInWindow || (bCont+index >= mySizeB)); //if(myKey >= 1820065792 && myKey <= 1820916440) // printf("key %u %u index %u placed %u %d (min %u max %u %d)\n", myKey, cmpValue, globalCAddress, placed, myStartIdxA+aCont+i, localMinB, localMaxB, index); if(!isInWindow && index == 0 && myKey <= localMinB) { if(!placed) isInWindow = true; } else if(!isInWindow && index >= (INTERSECT_B_BLOCK_SIZE_simple-1) && myKey == localMaxB && cmpValue <= myKey) { //Here we must check if our string is greater than our tie @ index INTERSECT_B_BLOCK_SIZE_simple (or our shared memory partition) unsigned int myLoc = myStartIdxA + 
depth*tid + i; unsigned int cmpLoc = myStartIdxB + bCont + index; unsigned int cmpAdd = (bCont+index < mySizeB ? A_values[cmpLoc] : UINT_MAX); if(cmpAdd > totalSize || tie_break_simp(myLoc, cmpLoc, totalSize, totalSize, myAddress, cmpAdd, stringValues, stringSize, termC) == 1) isInWindow = true; } if(globalCAddress >= totalSize) return; //Save Value if it is valid (correct window) //If we are on the edge of a window, and we are tied with the localMax or localMin value //we must go to global memory to find out if we are valid if(!placed && isInWindow) { A_keys_out[globalCAddress] = myKey; A_values_out[globalCAddress] = myAddress; placed = true; } } /** @brief Performs a linear search in our shared memory, used by multiMerge kernel * @param[in] BKeys, BValues Keys and Addresses for array B * @param[in] myKey, myAddress Keys and address from our array * @param[in] placed Whether value has been placed yet or not * @param[in] index Current index we are considering in our B array * @param[in, out] cmpValue The current value we are looking at in our B array * @param[in, out] stringValues, A_keys, A_values, A_keys_out, A_values_out Global arrays for our strings, keys, values * @param[in] myStartIdxA, myStartIdxB, myStartIdxC Beginning indices for our partitions * @param[in] localAPartSize, localBPartSize, localCPartSize Array of partition sizes for our inputs and outputs * @param[in] localMinB, localMaxB The minimum and maximum values in our B partition * @param[in] tid thread ID * @param[in] aIndex, bIndex, totalSize, stringSize Address bounds and calculation helpers * @param[in] i The index of the local element we are merging * @param[in] termC Termination character for our strings **/ template<class T, int depth> __device__ void linearStringMerge(T* BKeys, T* BValues, T myKey, T myAddress, bool &placed, int &index, T &cmpValue, T* A_keys, T* A_values, T* A_keys_out, T* A_values_out, T* stringValues, int myStartIdxC, int myStartIdxA, int myStartIdxB, int localAPartSize, int localBPartSize, int localCPartSize, T localMaxB, T localMinB, int tid, int aIndex, int bIndex, int i, int stringSize, int totalSize, unsigned char termC) { while(cmpValue < myKey && index < INTERSECT_B_BLOCK_SIZE_multi ) { index++; if(index < INTERSECT_B_BLOCK_SIZE_multi) cmpValue = BKeys[index]; else if(index == INTERSECT_B_BLOCK_SIZE_multi && bIndex+index < localBPartSize) { cmpValue = A_keys[myStartIdxB+bIndex+index]; } else cmpValue = UINT_MAX; } while(cmpValue == myKey && index < INTERSECT_B_BLOCK_SIZE_multi) { int myLoc = myStartIdxA + depth*threadIdx.x + i; int cmpLoc = myStartIdxB + bIndex + index; int cmpAdd = BValues[index]; if(tie_break_simp(myLoc, cmpLoc, totalSize, totalSize, myAddress, cmpAdd, stringValues, stringSize, termC) == 0) { index = index + 1; if(index < INTERSECT_B_BLOCK_SIZE_multi) cmpValue = BKeys[index]; else if(index == INTERSECT_B_BLOCK_SIZE_multi-1 && bIndex+index < localBPartSize) cmpValue = A_keys[myStartIdxB+bIndex+index]; else cmpValue = UINT_MAX; } else break; } int globalCAddress = myStartIdxC + index + bIndex + aIndex + i + tid*depth; bool isInWindow = (index > 0 && index < (INTERSECT_B_BLOCK_SIZE_multi)); isInWindow = (isInWindow || (index == 0 && myKey > localMinB)); isInWindow = (isInWindow || (index >= (INTERSECT_B_BLOCK_SIZE_multi-1) && myKey < localMaxB)); isInWindow = (isInWindow || (bIndex+index >= localBPartSize)); if((myKey == localMaxB) && index >= (INTERSECT_B_BLOCK_SIZE_multi-1) && globalCAddress <= (myStartIdxC+localCPartSize)) { //Here we must check if our string is greater 
than our tie @ index INTERSECT_B_BLOCK_SIZE_simple (or our shared memory partition) unsigned int myLoc = myStartIdxA + depth*tid + i; unsigned int cmpLoc = myStartIdxB + bIndex + index; unsigned int cmpAdd = (bIndex+index < localBPartSize ? A_values[cmpLoc] : UINT_MAX); if(cmpAdd > totalSize || tie_break_simp(myLoc, cmpLoc, totalSize, totalSize, myAddress, cmpAdd, stringValues, stringSize, termC) == 1) isInWindow = true; } else if(!isInWindow && index == 0 && myKey <= localMinB) { //Here we must check if our string is greater than our tie @ index 0 (or our shared memory partition) int myLoc = myStartIdxA + depth*tid + i; int cmpLoc = myStartIdxB + bIndex; int cmpAdd = BValues[0]; if(!placed || tie_break_simp(myLoc, cmpLoc, totalSize, totalSize, myAddress, cmpAdd, stringValues, stringSize, termC) == 0) isInWindow = true; } if(!placed && isInWindow) { A_keys_out [globalCAddress] = myKey; A_values_out[globalCAddress] = myAddress; placed = true; } } /** @} */ // end stringsort functions /** @} */ // end cudpp_cta
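// ---------------------------------------------------------------------------
// Illustrative host-side sketch (not part of CUDPP): the tie-breaking idea
// used by tie_break_simp above. Keys hold the first four characters of each
// string packed into one uint; when two keys compare equal, the winner is
// found by walking the packed global string array one uint at a time until
// the values differ or the low byte hits the terminator. The packing helper
// and names below are assumptions for this sketch, not the library's layout.
#include <cstdio>
#include <vector>

using uint = unsigned int;

// Pack up to 4 chars into a uint, most significant byte first, padding with termC.
static uint Pack4(const char *s, unsigned char termC) {
  uint v = 0;
  for (int i = 0; i < 4; i++) {
    unsigned char c = *s ? static_cast<unsigned char>(*s++) : termC;
    v = (v << 8) | c;
  }
  return v;
}

// Returns true if the string starting at addB sorts after the one at addA.
static bool TieBreakGreater(const std::vector<uint> &packed, uint addA, uint addB,
                            unsigned char termC) {
  uint a = packed[addA], b = packed[addB];
  while (a == b && (a & 255u) != termC && (b & 255u) != termC &&
         addA + 1 < packed.size() && addB + 1 < packed.size()) {
    a = packed[++addA];
    b = packed[++addB];
  }
  if (a == b) return addB > addA;  // identical up to the end: fall back to position
  return b > a;
}

int main() {
  const unsigned char termC = 0;
  // "banana" and "banal" agree on their first four characters ("bana"), so the
  // key comparison ties and the walk over the packed data decides the order.
  std::vector<uint> packed = {Pack4("bana", termC), Pack4("na", termC),   // "banana" at 0
                              Pack4("bana", termC), Pack4("l", termC)};   // "banal"  at 2
  std::printf("'banal' sorts after 'banana'? %d\n", TieBreakGreater(packed, 0, 2, termC));
  return 0;
}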
* * Code and text by Sean Baxter, NVIDIA Research * See http://nvlabs.github.io/moderngpu for repository and documentation. * ******************************************************************************/ #pragma once #include "../mgpuhost.cuh" #include "../kernels/segreduce.cuh" #include "../kernels/bulkinsert.cuh" namespace mgpu { template<size_t Size, bool LoadLeft> struct SpmvTuningNormal { enum { Indirect = false }; typedef LaunchBox< SegReduceTuning<128, 11, 0, false, false>, SegReduceTuning<128, 11, 0, true, false>, SegReduceTuning<128, 7, 0, true, false> > Tuning; }; template<size_t Size, bool LoadLeft> struct SpmvTuningIndirect { enum { Indirect = true }; typedef LaunchBox< SegReduceTuning<128, 11, 0, false, false>, SegReduceTuning<128, 11, 0, true, false>, SegReduceTuning<128, 7, 0, true, false> > Tuning; }; template<size_t Size, bool LoadLeft> struct SpmvTuningPreprocess { enum { Indirect = false }; typedef LaunchBox< SegReduceTuning<128, 11, 0, false, false>, SegReduceTuning<128, 11, 0, true, false>, SegReduceTuning<128, (Size > 4) ? 11 : 7, 0, true, false> > Tuning; }; //////////////////////////////////////////////////////////////////////////////// // CTASpmvLoad // Loads matrix values and column indices and gathers vector values. Finds // products and transposes terms into register output in thread order. template<int NT, int VT, bool LoadLeft, bool HalfCapacity, typename T, typename MulOp> struct CTASpmvLoad { enum { NV = NT * VT, Capacity = HalfCapacity ? (NV / 2) : NV }; typedef CTASegReduce<NT, VT, HalfCapacity, T, MulOp> SegReduce; union Storage { int sources[NV]; T data[Capacity]; typename SegReduce::Storage segReduceStorage; }; template<typename MatrixIt, typename ColumnsIt, typename VecIt> MGPU_DEVICE static void LoadDirect(int count2, int tid, int gid, MatrixIt matrix_global, ColumnsIt cols_global, VecIt vec_global, T identity, MulOp mulOp, T data[VT], Storage& storage) { // Load columns directly from cols_global. int columns[VT]; DeviceGlobalToRegDefault<NT, VT>(count2, cols_global + gid, tid, columns, 0); // Load data into stridedData. T matrixData[VT]; if(LoadLeft) DeviceGlobalToRegDefault<NT, VT>(count2, matrix_global + gid, tid, matrixData, identity); // Use ldg to load vector data in strided order. T vecData[VT]; #pragma unroll for(int i = 0; i < VT; ++i) vecData[i] = ldg(vec_global + columns[i]); // Clear out the out-of-range inputs. if(count2 < NV) { #pragma unroll for(int i = 0; i < VT; ++i) if(NT * i + tid >= count2) vecData[i] = identity; } // Multiply matrix and vector values together. T stridedData[VT]; #pragma unroll for(int i = 0; i < VT; ++i) stridedData[i] = LoadLeft ? mulOp(matrixData[i], vecData[i]) : vecData[i]; // Transpose from strided to thread order. if(HalfCapacity) HalfSmemTranspose<NT, VT>(stridedData, tid, storage.data, data); else { DeviceRegToShared<NT, VT>(stridedData, tid, storage.data); DeviceSharedToThread<VT>(storage.data, tid, data); } } template<typename SourcesIt, typename MatrixIt, typename ColumnsIt, typename VecIt> MGPU_DEVICE static void LoadIndirect(int count2, int tid, int gid, int numRows, int startRow, const int rows[VT], const int rowStarts[VT], SourcesIt sources_global, MatrixIt matrix_global, ColumnsIt cols_global, VecIt vec_global, T identity, MulOp mulOp, T data[VT], Storage& storage) { // Load source offsets from sources_global into smem. DeviceGlobalToSharedLoop<NT, VT>(numRows, sources_global + startRow, tid, storage.sources); // Compute the offset of each element within its row. 
int indices[VT]; #pragma unroll for(int i = 0; i < VT; ++i) { int index = VT * tid + i; int rowOffset = gid + index - rowStarts[i]; int source = storage.sources[rows[i]]; indices[i] = source + rowOffset; } __syncthreads(); // Transpose indices through shared memory into strided order. DeviceThreadToShared<VT>(indices, tid, storage.sources); DeviceSharedToReg<NT, VT>(storage.sources, tid, indices); // Gather columns from cols_global. int columns[VT]; DeviceGatherDefault<NT, VT>(count2, cols_global, indices, tid, columns, 0); // Gather data into stridedData. T matrixData[VT]; if(LoadLeft) DeviceGatherDefault<NT, VT>(count2, matrix_global, indices, tid, matrixData, identity); // Use ldg to load vector data in strided order. T vecData[VT]; #pragma unroll for(int i = 0; i < VT; ++i) vecData[i] = ldg(vec_global + columns[i]); // Multiply matrix and vector values together. T stridedData[VT]; #pragma unroll for(int i = 0; i < VT; ++i) stridedData[i] = LoadLeft ? mulOp(matrixData[i], vecData[i]) : vecData[i]; // Transpose from strided to thread order. if(HalfCapacity) HalfSmemTranspose<NT, VT>(stridedData, tid, storage.data, data); else { DeviceRegToShared<NT, VT>(stridedData, tid, storage.data); DeviceSharedToThread<VT>(storage.data, tid, data); } } }; //////////////////////////////////////////////////////////////////////////////// // KernelSpmvCsr template<typename Tuning, bool Indirect, bool LoadLeft, typename MatrixIt, typename ColsIt, typename CsrIt, typename SourcesIt, typename VecIt, typename DestIt, typename T, typename MulOp, typename AddOp> MGPU_LAUNCH_BOUNDS void KernelSpmvCsr(MatrixIt matrix_global, ColsIt cols_global, int nz, CsrIt csr_global, SourcesIt sources_global, VecIt vec_global, const int* limits_global, DestIt dest_global, T* carryOut_global, T identity, MulOp mulOp, AddOp addOp) { typedef MGPU_LAUNCH_PARAMS Params; const int NT = Params::NT; const int VT = Params::VT; const int NV = NT * VT; const bool HalfCapacity = (sizeof(T) > sizeof(int)) && Params::HalfCapacity; typedef CTAReduce<NT, AddOp> FastReduce; typedef CTASegReduce<NT, VT, HalfCapacity, T, AddOp> SegReduce; typedef CTASpmvLoad<NT, VT, LoadLeft, HalfCapacity, T, MulOp> SpmvLoad; union Shared { int csr[NV + 1]; typename SegReduce::Storage segReduceStorage; typename SpmvLoad::Storage spmvLoadStorage; }; __shared__ Shared shared; int tid = threadIdx.x; int block = blockIdx.x; int gid = NV * block; int count2 = min(NV, nz - gid); // Retrieve the left and right row limits. int limit0 = limits_global[block]; int limit1 = limits_global[block + 1]; SegReduceRange range; SegReduceTerms terms; int rows[VT + 1], rowStarts[VT]; T data[VT]; if(Indirect) { // Transform the row limits into ranges. range = DeviceShiftRange(limit0, limit1); int numRows = range.end - range.begin; // Load the CSR interval. DeviceGlobalToSharedLoop<NT, VT>(numRows, csr_global + range.begin, tid, shared.csr); // Flatten CSR->COO and return the segmented scan terms. terms = DeviceSegReducePrepare<NT, VT>(shared.csr, numRows, tid, gid, range.flushLast, rows, rowStarts); // Load tile of data in thread order from row IDs. SpmvLoad::LoadIndirect(count2, tid, gid, numRows, range.begin, rows, rowStarts, sources_global, matrix_global, cols_global, vec_global, identity, mulOp, data, shared.spmvLoadStorage); } else { // This is a direct load so we don't have a data-dependency on the // limits. SpmvLoad::LoadDirect(count2, tid, gid, matrix_global, cols_global, vec_global, identity, mulOp, data, shared.spmvLoadStorage); // Transform the row limits into ranges. 
range = DeviceShiftRange(limit0, limit1); int numRows = range.end - range.begin; // Load the CSR interval. DeviceGlobalToSharedLoop<NT, VT>(numRows, csr_global + range.begin, tid, shared.csr); // Flatten CSR->COO and return the segmented scan terms. terms = DeviceSegReducePrepare<NT, VT>(shared.csr, numRows, tid, gid, range.flushLast, rows, rowStarts); } // Reduce tile data and store to dest_global. Write tile's carry-out // term to carryOut_global. SegReduce::ReduceToGlobal(rows, range.total, terms.tidDelta, range.begin, block, tid, data, dest_global, carryOut_global, identity, addOp, shared.segReduceStorage); } //////////////////////////////////////////////////////////////////////////////// // SpmvCsrHost template<typename Tuning, bool Indirect, bool LoadLeft, typename MatrixIt, typename ColsIt, typename CsrIt, typename SourcesIt, typename VecIt, typename DestIt, typename T, typename MulOp, typename AddOp> MGPU_HOST void SpmvCsrInner(MatrixIt matrix_global, ColsIt cols_global, int nz, CsrIt csr_global, SourcesIt sources_global, int numRows, const int* numRows2_global, VecIt vec_global, DestIt dest_global, T identity, MulOp mulOp, AddOp addOp, CudaContext& context) { int2 launch = Tuning::GetLaunchParams(context); int NV = launch.x * launch.y; int numBlocks = MGPU_DIV_UP(nz, NV); // Use upper-bound binary search to partition the CSR structure into tiles. MGPU_MEM(int) limitsDevice = PartitionCsrSegReduce(nz, NV, csr_global, numRows, numRows2_global, numBlocks + 1, context); // Evaluate the Spmv product. MGPU_MEM(T) carryOutDevice = context.Malloc<T>(numBlocks); KernelSpmvCsr<Tuning, Indirect, LoadLeft> <<<numBlocks, launch.x, 0, context.Stream()>>>(matrix_global, cols_global, nz, csr_global, sources_global, vec_global, limitsDevice->get(), dest_global, carryOutDevice->get(), identity, mulOp, addOp); MGPU_SYNC_CHECK("KernelSpmvCsr"); // Add the carry-in values. SegReduceSpine(limitsDevice->get(), numBlocks, dest_global, carryOutDevice->get(), identity, addOp, context); } template<typename Tuning, bool Indirect, bool LoadLeft, typename MatrixIt, typename ColsIt, typename CsrIt, typename SourcesIt, typename VecIt, typename DestIt, typename T, typename MulOp, typename AddOp> MGPU_HOST void SpmvCsrHost(MatrixIt matrix_global, ColsIt cols_global, int nz, CsrIt csr_global, SourcesIt sources_global, int numRows, VecIt vec_global, bool supportEmpty, DestIt dest_global, T identity, MulOp mulOp, AddOp addOp, CudaContext& context) { if(supportEmpty) { // Allocate space for CSR2 and Sources2. MGPU_MEM(int) csr2Device = context.Malloc<int>(numRows + 1); MGPU_MEM(int) sources2Device; if(Indirect) sources2Device = context.Malloc<int>(numRows); // Strip the empties from CSR and store in CSR2. CsrStripEmpties<Indirect>(nz, csr_global, sources_global, numRows, csr2Device->get(), Indirect ? sources2Device->get() : (int*)0, (int*)0, context); // Run the Spmv in the CSR2 coordinate space. MGPU_MEM(T) destDevice = context.Malloc<T>(numRows); SpmvCsrInner<Tuning, Indirect, LoadLeft>(matrix_global, cols_global, nz, csr2Device->get(), Indirect ? sources2Device->get() : (const int*)0, -1, csr2Device->get() + numRows, vec_global, destDevice->get(), identity, mulOp, addOp, context); // Transform into the CSR space with BulkInsert. 
CsrBulkInsert(csr2Device->get(), numRows, destDevice->get(), identity, dest_global, context); } else { SpmvCsrInner<Tuning, Indirect, LoadLeft>(matrix_global, cols_global, nz, csr_global, sources_global, numRows, (const int*)0, vec_global, dest_global, identity, mulOp, addOp, context); } } //////////////////////////////////////////////////////////////////////////////// // Spmv host functions template<typename T> struct spmv_pass_through : public std::binary_function<T, T, T> { MGPU_HOST_DEVICE T operator()(T a, T b) { return a; } }; template<typename ColsIt, typename CsrIt, typename VecIt, typename DestIt, typename T, typename AddOp> MGPU_HOST void SpmvCsrUnary( ColsIt cols_global, int nz, CsrIt csr_global, int numRows, VecIt vec_global, bool supportEmpty, DestIt dest_global, T identity, AddOp addOp, CudaContext& context) { typedef typename SpmvTuningNormal<sizeof(T), false>::Tuning Tuning; SpmvCsrHost<Tuning, false, false>((const T*)0, cols_global, nz, csr_global, (const int*)0, numRows, vec_global, supportEmpty, dest_global, identity, spmv_pass_through<T>(), addOp, context); } template<typename MatrixIt, typename ColsIt, typename CsrIt, typename VecIt, typename DestIt, typename T, typename MulOp, typename AddOp> MGPU_HOST void SpmvCsrBinary(MatrixIt matrix_global, ColsIt cols_global, int nz, CsrIt csr_global, int numRows, VecIt vec_global, bool supportEmpty, DestIt dest_global, T identity, MulOp mulOp, AddOp addOp, CudaContext& context) { typedef typename SpmvTuningNormal<sizeof(T), true>::Tuning Tuning; SpmvCsrHost<Tuning, false, true>(matrix_global, cols_global, nz, csr_global, (const int*)0, numRows, vec_global, supportEmpty, dest_global, identity, mulOp, addOp, context); } template<typename ColsIt, typename CsrIt, typename SourcesIt, typename VecIt, typename DestIt, typename T, typename AddOp> MGPU_HOST void SpmvCsrIndirectUnary(ColsIt cols_global, int nz, CsrIt csr_global, SourcesIt sources_global, int numRows, VecIt vec_global, bool supportEmpty, DestIt dest_global, T identity, AddOp addOp, CudaContext& context) { typedef typename SpmvTuningIndirect<sizeof(T), false>::Tuning Tuning; SpmvCsrHost<Tuning, true, false>((const T*)0, cols_global, nz, csr_global, sources_global, numRows, vec_global, supportEmpty, dest_global, identity, spmv_pass_through<T>(), addOp, context); } template<typename MatrixIt, typename ColsIt, typename CsrIt, typename SourcesIt, typename VecIt, typename DestIt, typename T, typename MulOp, typename AddOp> MGPU_HOST void SpmvCsrIndirectBinary(MatrixIt matrix_global, ColsIt cols_global, int nz, CsrIt csr_global, SourcesIt sources_global, int numRows, VecIt vec_global, bool supportEmpty, DestIt dest_global, T identity, MulOp mulOp, AddOp addOp, CudaContext& context) { typedef typename SpmvTuningIndirect<sizeof(T), true>::Tuning Tuning; SpmvCsrHost<Tuning, true, true>(matrix_global, cols_global, nz, csr_global, sources_global, numRows, vec_global, supportEmpty, dest_global, identity, mulOp, addOp, context); } //////////////////////////////////////////////////////////////////////////////// // Spmv preprocessing template<typename T, typename CsrIt> MGPU_HOST void SpmvPreprocessUnary(int nz, CsrIt csr_global, int numRows, bool supportEmpty, std::auto_ptr<SpmvPreprocessData>* ppData, CudaContext& context) { typedef typename SpmvTuningPreprocess<sizeof(T), false>::Tuning Tuning; SegReducePreprocess<Tuning>(nz, csr_global, numRows, supportEmpty, ppData, context); } template<typename T, typename CsrIt> MGPU_HOST void SpmvPreprocessBinary(int nz, CsrIt csr_global, int 
numRows, bool supportEmpty, std::auto_ptr<SpmvPreprocessData>* ppData, CudaContext& context) { typedef typename SpmvTuningPreprocess<sizeof(T), true>::Tuning Tuning; SegReducePreprocess<Tuning>(nz, csr_global, numRows, supportEmpty, ppData, context); } //////////////////////////////////////////////////////////////////////////////// // KernelSpmvApply template<typename Tuning, bool LoadLeft, typename MatrixIt, typename ColsIt, typename VecIt, typename DestIt, typename T, typename MulOp, typename AddOp> MGPU_LAUNCH_BOUNDS void KernelSpmvApply(const int* threadCodes_global, MatrixIt matrix_global, ColsIt cols_global, int nz, VecIt vec_global, const int* limits_global, DestIt dest_global, T* carryOut_global, T identity, MulOp mulOp, AddOp addOp) { typedef MGPU_LAUNCH_PARAMS Params; const int NT = Params::NT; const int VT = Params::VT; const int NV = NT * VT; const bool HalfCapacity = (sizeof(T) > sizeof(int)) && Params::HalfCapacity; typedef CTASegReduce<NT, VT, HalfCapacity, T, AddOp> SegReduce; typedef CTASpmvLoad<NT, VT, LoadLeft, HalfCapacity, T, MulOp> SpmvLoad; union Shared { typename SegReduce::Storage segReduceStorage; typename SpmvLoad::Storage spmvLoadStorage; }; __shared__ Shared shared; int tid = threadIdx.x; int block = blockIdx.x; int gid = NV * block; int count2 = min(NV, nz - gid); // Retrieve the left and right row limits and thread codes.. int limit0 = limits_global[block]; int limit1 = limits_global[block + 1]; int threadCodes = threadCodes_global[NT * block + tid]; // Load the tile's data before dereferencing limit0/limit1. T data[VT]; SpmvLoad::LoadDirect(count2, tid, gid, matrix_global, cols_global, vec_global, identity, mulOp, data, shared.spmvLoadStorage); // Transform the row limits into ranges. SegReduceRange range = DeviceShiftRange(limit0, limit1); // Expand the row indices. int rows[VT + 1]; DeviceExpandFlagsToRows<VT>(threadCodes>> 20, threadCodes, rows); // Reduce tile data and store to dest_global. Write tile's carry-out // term to carryOut_global. int tidDelta = 0x7f & (threadCodes>> 13); SegReduce::ReduceToGlobal(rows, range.total, tidDelta, range.begin, block, tid, data, dest_global, carryOut_global, identity, addOp, shared.segReduceStorage); } template<bool LoadLeft, typename MatrixIt, typename ColsIt, typename VecIt, typename DestIt, typename T, typename MulOp, typename AddOp> MGPU_HOST void SpmvApplyHost(const SpmvPreprocessData& preprocess, MatrixIt matrix_global, ColsIt cols_global, VecIt vec_global, DestIt dest_global, T identity, MulOp mulOp, AddOp addOp, CudaContext& context) { typedef typename SpmvTuningPreprocess<sizeof(T), LoadLeft>::Tuning Tuning; int2 launch = Tuning::GetLaunchParams(context); if(preprocess.csr2Device.get()) { // Support empties. MGPU_MEM(T) destDevice = context.Malloc<T>(preprocess.numSegments2); MGPU_MEM(T) carryOutDevice = context.Malloc<T>(preprocess.numBlocks); KernelSpmvApply<Tuning, LoadLeft> <<<preprocess.numBlocks, launch.x, 0, context.Stream()>>>( preprocess.threadCodesDevice->get(), matrix_global, cols_global, preprocess.count, vec_global, preprocess.limitsDevice->get(), destDevice->get(), carryOutDevice->get(), identity, mulOp, addOp); // Add the carry-in values. SegReduceSpine(preprocess.limitsDevice->get(), preprocess.numBlocks, destDevice->get(), carryOutDevice->get(), identity, addOp, context); // Transform into the CSR space with BulkInsert. CsrBulkInsert(preprocess.csr2Device->get(), preprocess.numSegments, destDevice->get(), identity, dest_global, context); } else { // No empties. // Evaluate the Spmv product. 
MGPU_MEM(T) carryOutDevice = context.Malloc<T>(preprocess.numBlocks); KernelSpmvApply<Tuning, LoadLeft> <<<preprocess.numBlocks, launch.x, 0, context.Stream()>>>( preprocess.threadCodesDevice->get(), matrix_global, cols_global, preprocess.count, vec_global, preprocess.limitsDevice->get(), dest_global, carryOutDevice->get(), identity, mulOp, addOp); MGPU_SYNC_CHECK("KernelSpmvApply"); // Add the carry-in values. SegReduceSpine(preprocess.limitsDevice->get(), preprocess.numBlocks, dest_global, carryOutDevice->get(), identity, addOp, context); } } template<typename ColsIt, typename VecIt, typename DestIt, typename T, typename MulOp, typename AddOp> MGPU_HOST void SpmvUnaryApply(const SpmvPreprocessData& preprocess, ColsIt cols_global, VecIt vec_global, DestIt dest_global, T identity, AddOp addOp, CudaContext& context) { SpmvApplyHost<false>(preprocess, (const T*)0, cols_global, vec_global, dest_global, identity, spmv_pass_through<T>(), addOp, context); } template<typename MatrixIt, typename ColsIt, typename VecIt, typename DestIt, typename T, typename MulOp, typename AddOp> MGPU_HOST void SpmvBinaryApply(const SpmvPreprocessData& preprocess, MatrixIt matrix_global, ColsIt cols_global, VecIt vec_global, DestIt dest_global, T identity, MulOp mulOp, AddOp addOp, CudaContext& context) { SpmvApplyHost<true>(preprocess, matrix_global, cols_global, vec_global, dest_global, identity, mulOp, addOp, context); } } // namespace mgpu
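// ---------------------------------------------------------------------------
// Illustrative host-side sketch (not part of ModernGPU): the result the
// SpmvCsr* entry points above compute, written as a plain serial loop. Each
// row reduces mulOp(matrix[i], vec[cols[i]]) over its CSR range starting from
// the caller-supplied identity, so an empty row yields the identity. Unlike
// the csr_global/nz pair used above, csr here holds numRows + 1 offsets (row
// starts plus total nnz), an assumption made to keep the sketch small; the
// real kernels tile the work and use a segmented reduction instead.
#include <cassert>
#include <cstdio>
#include <vector>

template <typename T, typename MulOp, typename AddOp>
void SpmvCsrReference(const std::vector<T> &matrix, const std::vector<int> &cols,
                      const std::vector<int> &csr, const std::vector<T> &vec,
                      T identity, MulOp mulOp, AddOp addOp, std::vector<T> &dest) {
  int numRows = static_cast<int>(csr.size()) - 1;
  dest.assign(numRows, identity);
  for (int row = 0; row < numRows; ++row) {
    T acc = identity;
    for (int i = csr[row]; i < csr[row + 1]; ++i)
      acc = addOp(acc, mulOp(matrix[i], vec[cols[i]]));
    dest[row] = acc;
  }
}

int main() {
  // 3x3 matrix [[1 0 2], [0 0 0], [3 4 0]] multiplied by x = [1, 1, 1].
  std::vector<double> matrix = {1, 2, 3, 4};
  std::vector<int> cols = {0, 2, 0, 1};
  std::vector<int> csr = {0, 2, 2, 4};  // row 1 is empty
  std::vector<double> x = {1, 1, 1}, y;
  SpmvCsrReference(matrix, cols, csr, x, 0.0,
                   [](double a, double b) { return a * b; },
                   [](double a, double b) { return a + b; }, y);
  assert(y[0] == 3 && y[1] == 0 && y[2] == 7);
  std::printf("y = [%g, %g, %g]\n", y[0], y[1], y[2]);
  return 0;
}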
#include "lead_lag_nested_detail.cuh" #include "rolling/rolling_collect_list.cuh" #include "rolling/rolling_detail.hpp" #include "rolling/rolling_jit_detail.hpp" #include "rolling_detail.hpp" #include <cudf/aggregation.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/column/column_view.hpp> #include <cudf/detail/aggregation/aggregation.cuh> #include <cudf/detail/aggregation/aggregation.hpp> #include <cudf/detail/copy.hpp> #include <cudf/detail/gather.hpp> #include <cudf/detail/groupby/sort_helper.hpp> #include <cudf/detail/unary.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/detail/utilities/device_operators.cuh> #include <cudf/dictionary/dictionary_column_view.hpp> #include <cudf/dictionary/dictionary_factories.hpp> #include <cudf/lists/detail/drop_list_duplicates.hpp> #include <cudf/types.hpp> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/traits.hpp> #include <jit/cache.hpp> #include <jit/parser.hpp> #include <jit/type.hpp> #include <jit_preprocessed_files/rolling/jit/kernel.cu.jit.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_scalar.hpp> #include <rmm/exec_policy.hpp> #include <thrust/find.h> #include <thrust/iterator/counting_iterator.h> #include <cuda/std/limits> #include <memory> namespace cudf { namespace detail { namespace { // anonymous /** * @brief Operator for applying a generic (non-specialized) rolling aggregation on a single window. */ template <typename InputType, aggregation::Kind op> struct DeviceRolling { size_type min_periods; // what operations do we support template <typename T = InputType, aggregation::Kind O = op> static constexpr bool is_supported() { return cudf::detail::is_valid_aggregation<T, O>() && has_corresponding_operator<O>() && // TODO: Delete all this extra logic once is_valid_aggregation<> cleans up some edge // cases it isn't handling. 
// MIN/MAX supports all fixed width types (((O == aggregation::MIN || O == aggregation::MAX) && cudf::is_fixed_width<T>()) || // SUM supports all fixed width types except timestamps ((O == aggregation::SUM) && (cudf::is_fixed_width<T>() && !cudf::is_timestamp<T>())) || // MEAN supports numeric and duration ((O == aggregation::MEAN) && (cudf::is_numeric<T>() || cudf::is_duration<T>()))); } // operations we do support template <typename T = InputType, aggregation::Kind O = op> DeviceRolling(size_type _min_periods, typename std::enable_if_t<is_supported<T, O>()>* = nullptr) : min_periods(_min_periods) { } // operations we don't support template <typename T = InputType, aggregation::Kind O = op> DeviceRolling(size_type _min_periods, typename std::enable_if_t<!is_supported<T, O>()>* = nullptr) : min_periods(_min_periods) { CUDF_FAIL("Invalid aggregation/type pair"); } // perform the windowing operation template <typename OutputType, bool has_nulls> bool __device__ operator()(column_device_view const& input, column_device_view const&, mutable_column_device_view& output, size_type start_index, size_type end_index, size_type current_index) { using AggOp = typename corresponding_operator<op>::type; AggOp agg_op; // declare this as volatile to avoid some compiler optimizations that lead to incorrect results // for CUDA 10.0 and below (fixed in CUDA 10.1) volatile cudf::size_type count = 0; OutputType val = AggOp::template identity<OutputType>(); for (size_type j = start_index; j < end_index; j++) { if (!has_nulls || input.is_valid(j)) { OutputType element = input.element<device_storage_type_t<InputType>>(j); val = agg_op(element, val); count++; } } bool output_is_valid = (count >= min_periods); // store the output value, one per thread cudf::detail::rolling_store_output_functor<OutputType, op == aggregation::MEAN>{}( output.element<OutputType>(current_index), val, count); return output_is_valid; } }; /** * @brief Operator for applying an ARGMAX/ARGMIN rolling aggregation on a single window. */ template <typename InputType, aggregation::Kind op> struct DeviceRollingArgMinMax { size_type min_periods; // what operations do we support template <typename T = InputType, aggregation::Kind O = op> static constexpr bool is_supported() { // strictly speaking, I think it would be ok to make this work // for comparable types as well. but right now the only use case is // for MIN/MAX on strings. return std::is_same_v<T, cudf::string_view>; } DeviceRollingArgMinMax(size_type _min_periods) : min_periods(_min_periods) {} template <typename OutputType, bool has_nulls> bool __device__ operator()(column_device_view const& input, column_device_view const&, mutable_column_device_view& output, size_type start_index, size_type end_index, size_type current_index) { using AggOp = typename corresponding_operator<op>::type; AggOp agg_op; // declare this as volatile to avoid some compiler optimizations that lead to incorrect results // for CUDA 10.0 and below (fixed in CUDA 10.1) volatile cudf::size_type count = 0; InputType val = AggOp::template identity<InputType>(); OutputType val_index = (op == aggregation::ARGMIN) ? 
ARGMIN_SENTINEL : ARGMAX_SENTINEL; for (size_type j = start_index; j < end_index; j++) { if (!has_nulls || input.is_valid(j)) { InputType element = input.element<InputType>(j); val = agg_op(element, val); if (val == element) { val_index = j; } count++; } } bool output_is_valid = (count >= min_periods); // -1 will help identify null elements while gathering for Min and Max // In case of count, this would be null, so doesn't matter. output.element<OutputType>(current_index) = (output_is_valid) ? val_index : -1; // The gather mask shouldn't contain null values, so // always return zero return true; } }; /** * @brief Operator for applying a COUNT_VALID rolling aggregation on a single window. */ template <typename InputType> struct DeviceRollingCountValid { size_type min_periods; // what operations do we support template <typename T = InputType, aggregation::Kind O = aggregation::COUNT_VALID> static constexpr bool is_supported() { return true; } DeviceRollingCountValid(size_type _min_periods) : min_periods(_min_periods) {} template <typename OutputType, bool has_nulls> bool __device__ operator()(column_device_view const& input, column_device_view const&, mutable_column_device_view& output, size_type start_index, size_type end_index, size_type current_index) { // declare this as volatile to avoid some compiler optimizations that lead to incorrect results // for CUDA 10.0 and below (fixed in CUDA 10.1) volatile cudf::size_type count = 0; bool output_is_valid = ((end_index - start_index) >= min_periods); if (output_is_valid) { if (!has_nulls) { count = end_index - start_index; } else { count = thrust::count_if(thrust::seq, thrust::make_counting_iterator(start_index), thrust::make_counting_iterator(end_index), [&input](auto i) { return input.is_valid_nocheck(i); }); } output.element<OutputType>(current_index) = count; } return output_is_valid; } }; /** * @brief Operator for applying a COUNT_ALL rolling aggregation on a single window. */ template <typename InputType> struct DeviceRollingCountAll { size_type min_periods; // what operations do we support template <typename T = InputType, aggregation::Kind O = aggregation::COUNT_ALL> static constexpr bool is_supported() { return true; } DeviceRollingCountAll(size_type _min_periods) : min_periods(_min_periods) {} template <typename OutputType, bool has_nulls> bool __device__ operator()(column_device_view const&, column_device_view const&, mutable_column_device_view& output, size_type start_index, size_type end_index, size_type current_index) { cudf::size_type count = end_index - start_index; bool output_is_valid = count >= min_periods; output.element<OutputType>(current_index) = count; return output_is_valid; } }; /** * @brief Operator for applying a VAR rolling aggregation on a single window. 
*/ template <typename InputType> struct DeviceRollingVariance { size_type const min_periods; size_type const ddof; // what operations do we support template <typename T = InputType, aggregation::Kind O = aggregation::VARIANCE> static constexpr bool is_supported() { return is_fixed_width<InputType>() and not is_chrono<InputType>(); } DeviceRollingVariance(size_type _min_periods, size_type _ddof) : min_periods(_min_periods), ddof{_ddof} { } template <typename OutputType, bool has_nulls> bool __device__ operator()(column_device_view const& input, column_device_view const&, mutable_column_device_view& output, size_type start_index, size_type end_index, size_type current_index) const { using DeviceInputType = device_storage_type_t<InputType>; // valid counts in the window cudf::size_type const count = has_nulls ? thrust::count_if(thrust::seq, thrust::make_counting_iterator(start_index), thrust::make_counting_iterator(end_index), [&input](auto i) { return input.is_valid_nocheck(i); }) : end_index - start_index; // Result will be null if any of the following conditions are met: // - All inputs are null // - Number of valid inputs is less than `min_periods` bool output_is_valid = count > 0 and (count >= min_periods); if (output_is_valid) { if (count >= ddof) { // Welford algorithm // See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance OutputType m{0}, m2{0}; size_type running_count{0}; for (size_type i = start_index; i < end_index; i++) { if (has_nulls and input.is_null_nocheck(i)) { continue; } OutputType const x = static_cast<OutputType>(input.element<DeviceInputType>(i)); running_count++; OutputType const tmp1 = x - m; m += tmp1 / running_count; OutputType const tmp2 = x - m; m2 += tmp1 * tmp2; } if constexpr (is_fixed_point<InputType>()) { // For fixed_point types, the previous computed value used unscaled rep-value, // the final result should be multiplied by the square of decimal `scale`. OutputType scaleby = exp10(static_cast<double>(input.type().scale())); scaleby *= scaleby; output.element<OutputType>(current_index) = m2 / (count - ddof) * scaleby; } else { output.element<OutputType>(current_index) = m2 / (count - ddof); } } else { output.element<OutputType>(current_index) = cuda::std::numeric_limits<OutputType>::signaling_NaN(); } } return output_is_valid; } }; /** * @brief Operator for applying a ROW_NUMBER rolling aggregation on a single window. 
*/ template <typename InputType> struct DeviceRollingRowNumber { size_type min_periods; // what operations do we support template <typename T = InputType, aggregation::Kind O = aggregation::ROW_NUMBER> static constexpr bool is_supported() { return true; } DeviceRollingRowNumber(size_type _min_periods) : min_periods(_min_periods) {} template <typename OutputType, bool has_nulls> bool __device__ operator()(column_device_view const&, column_device_view const&, mutable_column_device_view& output, size_type start_index, size_type end_index, size_type current_index) { bool output_is_valid = end_index - start_index >= min_periods; output.element<OutputType>(current_index) = current_index - start_index + 1; return output_is_valid; } }; struct agg_specific_empty_output { template <typename InputType, aggregation::Kind op> std::unique_ptr<column> operator()(column_view const& input, rolling_aggregation const&) const { using target_type = cudf::detail::target_type_t<InputType, op>; if constexpr (std::is_same_v<cudf::detail::target_type_t<InputType, op>, void>) { CUDF_FAIL("Unsupported combination of column-type and aggregation."); } if constexpr (cudf::is_fixed_width<target_type>()) { return cudf::make_empty_column(data_type{type_to_id<target_type>()}); } if constexpr (op == aggregation::COLLECT_LIST) { return cudf::make_lists_column( 0, make_empty_column(data_type{type_to_id<offset_type>()}), empty_like(input), 0, {}); } return empty_like(input); } }; std::unique_ptr<column> empty_output_for_rolling_aggregation(column_view const& input, rolling_aggregation const& agg) { // TODO: // Ideally, for UDF aggregations, the returned column would match // the agg's return type. It currently returns empty_like(input), because: // 1. This preserves prior behavior for empty input columns. // 2. There is insufficient information to construct nested return columns. // `cudf::make_udf_aggregation()` expresses the return type as a `data_type` // which cannot express recursively nested types (e.g. `STRUCT<LIST<INT32>>`.) // 3. In any case, UDFs that return nested types are not currently supported. // Constructing a more accurate return type for UDFs will be taken up // at a later date. return agg.kind == aggregation::CUDA || agg.kind == aggregation::PTX ? empty_like(input) : cudf::detail::dispatch_type_and_aggregation( input.type(), agg.kind, agg_specific_empty_output{}, input, agg); } /** * @brief Operator for applying a LEAD rolling aggregation on a single window. */ template <typename InputType> struct DeviceRollingLead { size_type row_offset; // what operations do we support template <typename T = InputType, aggregation::Kind O = aggregation::LEAD> static constexpr bool is_supported() { return cudf::is_fixed_width<T>(); } template <typename T = InputType, typename std::enable_if_t<is_supported<T>()>* = nullptr> DeviceRollingLead(size_type _row_offset) : row_offset(_row_offset) { } template <typename T = InputType, typename std::enable_if_t<!is_supported<T>()>* = nullptr> DeviceRollingLead(size_type _row_offset) : row_offset(_row_offset) { CUDF_FAIL("Invalid aggregation/type pair"); } template <typename OutputType, bool has_nulls> bool __device__ operator()(column_device_view const& input, column_device_view const& default_outputs, mutable_column_device_view& output, size_type, size_type end_index, size_type current_index) { // Offsets have already been normalized. // Check if row is invalid. if (row_offset > (end_index - current_index - 1)) { // Invalid row marked. Use default value, if available. 
if (default_outputs.size() == 0 || default_outputs.is_null(current_index)) { return false; } output.element<OutputType>(current_index) = default_outputs.element<OutputType>(current_index); return true; } // Not an invalid row. auto index = current_index + row_offset; auto is_null = input.is_null(index); if (!is_null) { output.element<OutputType>(current_index) = input.element<device_storage_type_t<InputType>>(index); } return !is_null; } }; /** * @brief Operator for applying a LAG rolling aggregation on a single window. */ template <typename InputType> struct DeviceRollingLag { size_type row_offset; // what operations do we support template <typename T = InputType, aggregation::Kind O = aggregation::LAG> static constexpr bool is_supported() { return cudf::is_fixed_width<T>(); } template <typename T = InputType, typename std::enable_if_t<is_supported<T>()>* = nullptr> DeviceRollingLag(size_type _row_offset) : row_offset(_row_offset) { } template <typename T = InputType, typename std::enable_if_t<!is_supported<T>()>* = nullptr> DeviceRollingLag(size_type _row_offset) : row_offset(_row_offset) { CUDF_FAIL("Invalid aggregation/type pair"); } template <typename OutputType, bool has_nulls> bool __device__ operator()(column_device_view const& input, column_device_view const& default_outputs, mutable_column_device_view& output, size_type start_index, size_type, size_type current_index) { // Offsets have already been normalized. // Check if row is invalid. if (row_offset > (current_index - start_index)) { // Invalid row marked. Use default value, if available. if (default_outputs.size() == 0 || default_outputs.is_null(current_index)) { return false; } output.element<OutputType>(current_index) = default_outputs.element<OutputType>(current_index); return true; } // Not an invalid row. auto index = current_index - row_offset; auto is_null = input.is_null(index); if (!is_null) { output.element<OutputType>(current_index) = input.element<device_storage_type_t<InputType>>(index); } return !is_null; } }; /** * @brief Maps an `InputType and `aggregation::Kind` value to it's corresponding * rolling window operator. 
* * @tparam InputType The input type to map to its corresponding operator * @tparam k The `aggregation::Kind` value to map to its corresponding operator */ template <typename InputType, aggregation::Kind k> struct corresponding_rolling_operator { using type = DeviceRolling<InputType, k>; }; template <typename InputType> struct corresponding_rolling_operator<InputType, aggregation::ARGMIN> { using type = DeviceRollingArgMinMax<InputType, aggregation::ARGMIN>; }; template <typename InputType> struct corresponding_rolling_operator<InputType, aggregation::ARGMAX> { using type = DeviceRollingArgMinMax<InputType, aggregation::ARGMAX>; }; template <typename InputType> struct corresponding_rolling_operator<InputType, aggregation::COUNT_VALID> { using type = DeviceRollingCountValid<InputType>; }; template <typename InputType> struct corresponding_rolling_operator<InputType, aggregation::COUNT_ALL> { using type = DeviceRollingCountAll<InputType>; }; template <typename InputType> struct corresponding_rolling_operator<InputType, aggregation::ROW_NUMBER> { using type = DeviceRollingRowNumber<InputType>; }; template <typename InputType> struct corresponding_rolling_operator<InputType, aggregation::Kind::LEAD> { using type = DeviceRollingLead<InputType>; }; template <typename InputType> struct corresponding_rolling_operator<InputType, aggregation::Kind::VARIANCE> { using type = DeviceRollingVariance<InputType>; }; template <typename InputType> struct corresponding_rolling_operator<InputType, aggregation::Kind::LAG> { using type = DeviceRollingLag<InputType>; }; /** * @brief Functor for creating a device rolling operator based on input type and aggregation type. */ template <typename InputType, aggregation::Kind op, typename Enable = void> struct create_rolling_operator { auto operator()(size_type min_periods, rolling_aggregation const& agg) { CUDF_FAIL("Invalid aggregation/type pair"); } }; template <typename InputType, aggregation::Kind op> struct create_rolling_operator< InputType, op, std::enable_if_t<corresponding_rolling_operator<InputType, op>::type::is_supported()>> { template <typename T = InputType, aggregation::Kind O = op, std::enable_if_t<O != aggregation::Kind::LEAD && O != aggregation::Kind::LAG && O != aggregation::Kind::VARIANCE>* = nullptr> auto operator()(size_type min_periods, rolling_aggregation const&) { return typename corresponding_rolling_operator<InputType, op>::type(min_periods); } template <typename T = InputType, aggregation::Kind O = op, std::enable_if_t<O == aggregation::Kind::VARIANCE>* = nullptr> auto operator()(size_type min_periods, rolling_aggregation const& agg) { return DeviceRollingVariance<InputType>{ min_periods, dynamic_cast<cudf::detail::var_aggregation const&>(agg)._ddof}; } template <typename T = InputType, aggregation::Kind O = op, std::enable_if_t<O == aggregation::Kind::LEAD>* = nullptr> auto operator()(size_type, rolling_aggregation const& agg) { return DeviceRollingLead<InputType>{ dynamic_cast<cudf::detail::lead_lag_aggregation const&>(agg).row_offset}; } template <typename T = InputType, aggregation::Kind O = op, std::enable_if_t<O == aggregation::Kind::LAG>* = nullptr> auto operator()(size_type, rolling_aggregation const& agg) { return DeviceRollingLag<InputType>{ dynamic_cast<cudf::detail::lead_lag_aggregation const&>(agg).row_offset}; } }; /** * @brief Rolling window specific implementation of simple_aggregations_collector. 
* * The purpose of this class is to preprocess incoming aggregation/type pairs and * potentially transform them into other aggregation/type pairs. Typically when this * happens, the equivalent aggregation/type implementation of finalize() will perform * some postprocessing step. * * An example of this would be applying a MIN aggregation to strings. This cannot be done * directly in the rolling operation, so instead the following happens: * * - the rolling_aggregation_preprocessor transforms the incoming MIN/string pair to * an ARGMIN/int pair. * - The ARGMIN/int has the rolling operation applied to it, generating a list of indices * that can then be used as a gather map. * - The rolling_aggregation_postprocessor then takes this gather map and performs a final * gather() on the input string data to generate the final output. * * Another example is COLLECT_LIST. COLLECT_LIST is odd in that it doesn't go through the * normal gpu rolling kernel at all. It has a completely custom implementation. So the * following happens: * * - the rolling_aggregation_preprocessor transforms the COLLECT_LIST aggregation into nothing, * since no actual rolling window operation will be performed. * - the rolling_aggregation_postprocessor calls the specialized rolling_collect_list() * function to generate the final output. * */ class rolling_aggregation_preprocessor final : public cudf::detail::simple_aggregations_collector { public: using cudf::detail::simple_aggregations_collector::visit; // NOTE : all other aggregations are passed through unchanged via the default // visit() function in the simple_aggregations_collector. // MIN aggregations with strings are processed in 2 passes. The first pass performs // the rolling operation on a ARGMIN aggregation to generate indices instead of values. // Then a second pass uses those indices to gather the final strings. This step // translates the the MIN -> ARGMIN aggregation std::vector<std::unique_ptr<aggregation>> visit(data_type col_type, cudf::detail::min_aggregation const&) override { std::vector<std::unique_ptr<aggregation>> aggs; aggs.push_back(col_type.id() == type_id::STRING ? make_argmin_aggregation() : make_min_aggregation()); return aggs; } // MAX aggregations with strings are processed in 2 passes. The first pass performs // the rolling operation on a ARGMAX aggregation to generate indices instead of values. // Then a second pass uses those indices to gather the final strings. This step // translates the the MAX -> ARGMAX aggregation std::vector<std::unique_ptr<aggregation>> visit(data_type col_type, cudf::detail::max_aggregation const&) override { std::vector<std::unique_ptr<aggregation>> aggs; aggs.push_back(col_type.id() == type_id::STRING ? make_argmax_aggregation() : make_max_aggregation()); return aggs; } // COLLECT_LIST aggregations do not perform a rolling operation at all. They get processed // entirely in the finalize() step. std::vector<std::unique_ptr<aggregation>> visit( data_type, cudf::detail::collect_list_aggregation const&) override { return {}; } // COLLECT_SET aggregations do not perform a rolling operation at all. They get processed // entirely in the finalize() step. std::vector<std::unique_ptr<aggregation>> visit( data_type, cudf::detail::collect_set_aggregation const&) override { return {}; } // STD aggregations depends on VARIANCE aggregation. Each element is applied // with sqaured-root in the finalize() step. 
std::vector<std::unique_ptr<aggregation>> visit(data_type, cudf::detail::std_aggregation const& agg) override { std::vector<std::unique_ptr<aggregation>> aggs; aggs.push_back(make_variance_aggregation(agg._ddof)); return aggs; } // LEAD and LAG have custom behaviors for non fixed-width types. std::vector<std::unique_ptr<aggregation>> visit( data_type col_type, cudf::detail::lead_lag_aggregation const& agg) override { // no rolling operation for non-fixed width. just a postprocess step at the end if (!cudf::is_fixed_width(col_type)) { return {}; } // otherwise, pass through std::vector<std::unique_ptr<aggregation>> aggs; aggs.push_back(agg.clone()); return aggs; } }; /** * @brief Rolling window specific implementation of aggregation_finalizer. * * The purpose of this class is to postprocess rolling window data depending on the * aggregation/type pair. See the description of rolling_aggregation_preprocessor for * a detailed description. * */ template <typename PrecedingWindowIterator, typename FollowingWindowIterator> class rolling_aggregation_postprocessor final : public cudf::detail::aggregation_finalizer { public: using cudf::detail::aggregation_finalizer::visit; rolling_aggregation_postprocessor(column_view const& _input, column_view const& _default_outputs, data_type _result_type, PrecedingWindowIterator _preceding_window_begin, FollowingWindowIterator _following_window_begin, int _min_periods, std::unique_ptr<column>&& _intermediate, rmm::cuda_stream_view _stream, rmm::mr::device_memory_resource* _mr) : input(_input), default_outputs(_default_outputs), result_type(_result_type), preceding_window_begin(_preceding_window_begin), following_window_begin(_following_window_begin), min_periods(_min_periods), intermediate(std::move(_intermediate)), result(nullptr), stream(_stream), mr(_mr) { } // all non-specialized aggregation types simply pass the intermediate result through. void visit(aggregation const&) override { result = std::move(intermediate); } // perform a final gather on the generated ARGMIN data void visit(cudf::detail::min_aggregation const&) override { if (result_type.id() == type_id::STRING) { // The rows that represent null elements will have negative values in gather map, // and that's why nullify_out_of_bounds/ignore_out_of_bounds is true. auto output_table = detail::gather(table_view{{input}}, intermediate->view(), cudf::out_of_bounds_policy::NULLIFY, detail::negative_index_policy::NOT_ALLOWED, stream, mr); result = std::make_unique<cudf::column>(std::move(output_table->get_column(0))); } else { result = std::move(intermediate); } } // perform a final gather on the generated ARGMAX data void visit(cudf::detail::max_aggregation const&) override { if (result_type.id() == type_id::STRING) { // The rows that represent null elements will have negative values in gather map, // and that's why nullify_out_of_bounds/ignore_out_of_bounds is true. auto output_table = detail::gather(table_view{{input}}, intermediate->view(), cudf::out_of_bounds_policy::NULLIFY, detail::negative_index_policy::NOT_ALLOWED, stream, mr); result = std::make_unique<cudf::column>(std::move(output_table->get_column(0))); } else { result = std::move(intermediate); } } // perform the actual COLLECT_LIST operation entirely. void visit(cudf::detail::collect_list_aggregation const& agg) override { result = rolling_collect_list(input, default_outputs, preceding_window_begin, following_window_begin, min_periods, agg._null_handling, stream, mr); } // perform the actual COLLECT_SET operation entirely. 
void visit(cudf::detail::collect_set_aggregation const& agg) override { auto const collected_list = rolling_collect_list(input, default_outputs, preceding_window_begin, following_window_begin, min_periods, agg._null_handling, stream, rmm::mr::get_current_device_resource()); result = lists::detail::drop_list_duplicates( lists_column_view(collected_list->view()), agg._nulls_equal, agg._nans_equal, stream, mr); } // perform the element-wise square root operation on result of VARIANCE void visit(cudf::detail::std_aggregation const&) override { result = detail::unary_operation(intermediate->view(), unary_operator::SQRT, stream, mr); } std::unique_ptr<column> get_result() { CUDF_EXPECTS(result != nullptr, "Calling result on rolling aggregation postprocessor that has not been visited in " "rolling_window"); return std::move(result); } // LEAD and LAG have custom behaviors for non fixed-width types. void visit(cudf::detail::lead_lag_aggregation const& agg) override { // if this is non-fixed width, run the custom lead-lag code if (!cudf::is_fixed_width(result_type)) { result = cudf::detail::compute_lead_lag_for_nested<PrecedingWindowIterator, FollowingWindowIterator>( agg.kind, input, default_outputs, preceding_window_begin, following_window_begin, agg.row_offset, stream, mr); } // otherwise just pass through the intermediate else { result = std::move(intermediate); } } private: column_view input; column_view default_outputs; data_type result_type; PrecedingWindowIterator preceding_window_begin; FollowingWindowIterator following_window_begin; int min_periods; std::unique_ptr<column> intermediate; std::unique_ptr<column> result; rmm::cuda_stream_view stream; rmm::mr::device_memory_resource* mr; }; /** * @brief Computes the rolling window function * * @tparam InputType Datatype of `input` * @tparam OutputType Datatype of `output` * @tparam op The aggregation operator (enum value) * @tparam block_size CUDA block size for the kernel * @tparam has_nulls true if the input column has nulls * @tparam DeviceRollingOperator An operator that performs a single windowing operation * @tparam PrecedingWindowIterator iterator type (inferred) * @tparam FollowingWindowIterator iterator type (inferred) * @param input Input column device view * @param default_outputs A column of per-row default values to be returned instead * of nulls for certain aggregation types. 
* @param output Output column device view * @param output_valid_count Output count of valid values * @param device_operator The operator used to perform a single window operation * @param[in] preceding_window_begin Rolling window size iterator, accumulates from * in_col[i-preceding_window] to in_col[i] inclusive * @param[in] following_window_begin Rolling window size iterator in the forward * direction, accumulates from in_col[i] to * in_col[i+following_window] inclusive */ template <typename InputType, typename OutputType, aggregation::Kind op, int block_size, bool has_nulls, typename DeviceRollingOperator, typename PrecedingWindowIterator, typename FollowingWindowIterator> __launch_bounds__(block_size) __global__ void gpu_rolling(column_device_view input, column_device_view default_outputs, mutable_column_device_view output, size_type* __restrict__ output_valid_count, DeviceRollingOperator device_operator, PrecedingWindowIterator preceding_window_begin, FollowingWindowIterator following_window_begin) { size_type i = blockIdx.x * block_size + threadIdx.x; size_type stride = block_size * gridDim.x; size_type warp_valid_count{0}; auto active_threads = __ballot_sync(0xffffffff, i < input.size()); while (i < input.size()) { size_type preceding_window = preceding_window_begin[i]; size_type following_window = following_window_begin[i]; // compute bounds size_type start = min(input.size(), max(0, i - preceding_window + 1)); size_type end = min(input.size(), max(0, i + following_window + 1)); size_type start_index = min(start, end); size_type end_index = max(start, end); // aggregate // TODO: We should explore using shared memory to avoid redundant loads. // This might require separating the kernel into a special version // for dynamic and static sizes. volatile bool output_is_valid = false; output_is_valid = device_operator.template operator()<OutputType, has_nulls>( input, default_outputs, output, start_index, end_index, i); // set the mask cudf::bitmask_type result_mask{__ballot_sync(active_threads, output_is_valid)}; // only one thread writes the mask if (0 == threadIdx.x % cudf::detail::warp_size) { output.set_mask_word(cudf::word_index(i), result_mask); warp_valid_count += __popc(result_mask); } // process next element i += stride; active_threads = __ballot_sync(active_threads, i < input.size()); } // sum the valid counts across the whole block size_type block_valid_count = cudf::detail::single_lane_block_sum_reduce<block_size, 0>(warp_valid_count); if (threadIdx.x == 0) { atomicAdd(output_valid_count, block_valid_count); } } /** * @brief Type/aggregation dispatched functor for launching the gpu rolling window * kernel. 
*/ template <typename InputType> struct rolling_window_launcher { template <aggregation::Kind op, typename PrecedingWindowIterator, typename FollowingWindowIterator> std::enable_if_t<corresponding_rolling_operator<InputType, op>::type::is_supported(), std::unique_ptr<column>> operator()(column_view const& input, column_view const& default_outputs, PrecedingWindowIterator preceding_window_begin, FollowingWindowIterator following_window_begin, int min_periods, rolling_aggregation const& agg, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto const output_type = target_type(input.type(), op); auto device_operator = create_rolling_operator<InputType, op>{}(min_periods, agg); auto output = make_fixed_width_column(output_type, input.size(), mask_state::UNINITIALIZED, stream, mr); cudf::mutable_column_view output_view = output->mutable_view(); size_type valid_count{0}; { using Type = device_storage_type_t<InputType>; using OutType = device_storage_type_t<target_type_t<InputType, op>>; constexpr cudf::size_type block_size = 256; cudf::detail::grid_1d grid(input.size(), block_size); auto input_device_view = column_device_view::create(input, stream); auto output_device_view = mutable_column_device_view::create(output_view, stream); auto default_outputs_device_view = column_device_view::create(default_outputs, stream); rmm::device_scalar<size_type> device_valid_count{0, stream}; if (input.has_nulls()) { gpu_rolling<Type, OutType, op, block_size, true> <<<grid.num_blocks, block_size, 0, stream.value()>>>(*input_device_view, *default_outputs_device_view, *output_device_view, device_valid_count.data(), device_operator, preceding_window_begin, following_window_begin); } else { gpu_rolling<Type, OutType, op, block_size, false> <<<grid.num_blocks, block_size, 0, stream.value()>>>(*input_device_view, *default_outputs_device_view, *output_device_view, device_valid_count.data(), device_operator, preceding_window_begin, following_window_begin); } valid_count = device_valid_count.value(stream); // check the stream for debugging CHECK_CUDA(stream.value()); } output->set_null_count(output->size() - valid_count); return output; } template <aggregation::Kind op, typename PrecedingWindowIterator, typename FollowingWindowIterator> std::enable_if_t<!corresponding_rolling_operator<InputType, op>::type::is_supported(), std::unique_ptr<column>> operator()(column_view const&, column_view const&, PrecedingWindowIterator, FollowingWindowIterator, int, rolling_aggregation const&, rmm::cuda_stream_view, rmm::mr::device_memory_resource*) { CUDF_FAIL("Invalid aggregation type/pair"); } }; /** * @brief Functor for performing the high level rolling logic. 
* * This does 3 basic things: * * - It calls the preprocess step on incoming aggregation/type pairs * - It calls the aggregation-dispatched gpu-rolling operation * - It calls the final postprocess step */ struct dispatch_rolling { template <typename InputType, typename PrecedingWindowIterator, typename FollowingWindowIterator> std::unique_ptr<column> operator()(column_view const& input, column_view const& default_outputs, PrecedingWindowIterator preceding_window_begin, FollowingWindowIterator following_window_begin, size_type min_periods, rolling_aggregation const& agg, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // do any preprocessing of aggregations (eg, MIN -> ARGMIN, COLLECT_LIST -> nothing) rolling_aggregation_preprocessor preprocessor; auto preprocessed_aggs = agg.get_simple_aggregations(input.type(), preprocessor); CUDF_EXPECTS(preprocessed_aggs.size() <= 1, "Encountered a non-trivial rolling aggregation result"); // perform the rolling window if we produced an aggregation to use auto intermediate = preprocessed_aggs.size() > 0 ? aggregation_dispatcher( dynamic_cast<rolling_aggregation const&>(*preprocessed_aggs[0]).kind, rolling_window_launcher<InputType>{}, input, default_outputs, preceding_window_begin, following_window_begin, min_periods, dynamic_cast<rolling_aggregation const&>(*preprocessed_aggs[0]), stream, mr) : nullptr; // finalize. auto const result_type = target_type(input.type(), agg.kind); rolling_aggregation_postprocessor postprocessor(input, default_outputs, result_type, preceding_window_begin, following_window_begin, min_periods, std::move(intermediate), stream, mr); agg.finalize(postprocessor); return postprocessor.get_result(); } }; } // namespace // Applies a user-defined rolling window function to the values in a column. template <typename PrecedingWindowIterator, typename FollowingWindowIterator> std::unique_ptr<column> rolling_window_udf(column_view const& input, PrecedingWindowIterator preceding_window, std::string const& preceding_window_str, FollowingWindowIterator following_window, std::string const& following_window_str, size_type min_periods, rolling_aggregation const& agg, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { static_assert(warp_size == cudf::detail::size_in_bits<cudf::bitmask_type>(), "bitmask_type size does not match CUDA warp size"); if (input.has_nulls()) { CUDF_FAIL("Currently the UDF version of rolling window does NOT support inputs with nulls."); } min_periods = std::max(min_periods, 0); auto& udf_agg = dynamic_cast<udf_aggregation const&>(agg); std::string hash = "prog_rolling." + std::to_string(std::hash<std::string>{}(udf_agg._source)); std::string cuda_source; switch (udf_agg.kind) { case aggregation::Kind::PTX: cuda_source += cudf::jit::parse_single_function_ptx(udf_agg._source, udf_agg._function_name, cudf::jit::get_type_name(udf_agg._output_type), {0, 5}); // args 0 and 5 are pointers. 
break; case aggregation::Kind::CUDA: cuda_source += cudf::jit::parse_single_function_cuda(udf_agg._source, udf_agg._function_name); break; default: CUDF_FAIL("Unsupported UDF type."); } std::unique_ptr<column> output = make_numeric_column( udf_agg._output_type, input.size(), cudf::mask_state::UNINITIALIZED, stream, mr); auto output_view = output->mutable_view(); rmm::device_scalar<size_type> device_valid_count{0, stream}; std::string kernel_name = jitify2::reflection::Template("cudf::rolling::jit::gpu_rolling_new") // .instantiate(cudf::jit::get_type_name(input.type()), // list of template arguments cudf::jit::get_type_name(output->type()), udf_agg._operator_name, preceding_window_str.c_str(), following_window_str.c_str()); cudf::jit::get_program_cache(*rolling_jit_kernel_cu_jit) .get_kernel( kernel_name, {}, {{"rolling/jit/operation-udf.hpp", cuda_source}}, {"-arch=sm_."}) // ->configure_1d_max_occupancy(0, 0, 0, stream.value()) // ->launch(input.size(), cudf::jit::get_data_ptr(input), input.null_mask(), cudf::jit::get_data_ptr(output_view), output_view.null_mask(), device_valid_count.data(), preceding_window, following_window, min_periods); output->set_null_count(output->size() - device_valid_count.value(stream)); // check the stream for debugging CHECK_CUDA(stream.value()); return output; } /** * @copydoc cudf::rolling_window(column_view const& input, * PrecedingWindowIterator preceding_window_begin, * FollowingWindowIterator following_window_begin, * size_type min_periods, * rolling_aggregation const& agg, * rmm::mr::device_memory_resource* mr) * * @param stream CUDA stream used for device memory operations and kernel launches. */ template <typename PrecedingWindowIterator, typename FollowingWindowIterator> std::unique_ptr<column> rolling_window(column_view const& input, column_view const& default_outputs, PrecedingWindowIterator preceding_window_begin, FollowingWindowIterator following_window_begin, size_type min_periods, rolling_aggregation const& agg, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { static_assert(warp_size == cudf::detail::size_in_bits<cudf::bitmask_type>(), "bitmask_type size does not match CUDA warp size"); if (input.is_empty()) { return cudf::detail::empty_output_for_rolling_aggregation(input, agg); } if (cudf::is_dictionary(input.type())) { CUDF_EXPECTS(agg.kind == aggregation::COUNT_ALL || agg.kind == aggregation::COUNT_VALID || agg.kind == aggregation::ROW_NUMBER || agg.kind == aggregation::MIN || agg.kind == aggregation::MAX || agg.kind == aggregation::LEAD || agg.kind == aggregation::LAG, "Invalid aggregation for dictionary column"); } if (agg.kind != aggregation::LEAD && agg.kind != aggregation::LAG) { CUDF_EXPECTS(default_outputs.is_empty(), "Only LEAD/LAG window functions support default values."); } min_periods = std::max(min_periods, 0); auto input_col = cudf::is_dictionary(input.type()) ? 
dictionary_column_view(input).get_indices_annotated() : input; auto output = cudf::type_dispatcher(input_col.type(), dispatch_rolling{}, input_col, default_outputs, preceding_window_begin, following_window_begin, min_periods, agg, stream, mr); if (!cudf::is_dictionary(input.type())) return output; // dictionary column post processing if (agg.kind == aggregation::COUNT_ALL || agg.kind == aggregation::COUNT_VALID || agg.kind == aggregation::ROW_NUMBER) { return output; } // output is new dictionary indices (including nulls) auto keys = std::make_unique<column>(dictionary_column_view(input).keys(), stream, mr); auto const indices_type = output->type(); // capture these auto const output_size = output->size(); // before calling auto const null_count = output->null_count(); // release() auto contents = output->release(); // create indices column from output column data auto indices = std::make_unique<column>(indices_type, output_size, std::move(*(contents.data.release())), rmm::device_buffer{0, stream, mr}, 0); // create dictionary from keys and indices return make_dictionary_column( std::move(keys), std::move(indices), std::move(*(contents.null_mask.release())), null_count); } } // namespace detail } // namespace cudf
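/*
 * Illustrative aside (not libcudf code): the gpu_rolling kernel above clamps each
 * row's window to the half-open range [start_index, end_index) and
 * DeviceRollingVariance accumulates it with Welford's update. The host-side sketch
 * below mirrors that arithmetic for a null-free float array, so it can serve as a
 * CPU reference when checking the device operator. The function name
 * rolling_variance_reference and the NaN-for-null convention are assumptions made
 * for this sketch only.
 */
#include <algorithm>
#include <cmath>
#include <vector>

std::vector<double> rolling_variance_reference(std::vector<float> const& in,
                                               int preceding,    // window size counting the current row
                                               int following,
                                               int min_periods,
                                               int ddof)
{
  int const n = static_cast<int>(in.size());
  std::vector<double> out(n, std::nan(""));  // NaN stands in for a null output row here
  for (int i = 0; i < n; ++i) {
    // same clamping as gpu_rolling: the window is [start, end)
    int const start = std::min(n, std::max(0, i - preceding + 1));
    int const end   = std::min(n, std::max(0, i + following + 1));
    int const count = end - start;
    if (count < min_periods) continue;  // row stays "null", as in the device operator
    if (count < ddof) continue;         // device code emits NaN here; this sketch leaves NaN too
    // Welford's algorithm, as in DeviceRollingVariance
    double m = 0.0, m2 = 0.0;
    int running_count = 0;
    for (int j = start; j < end; ++j) {
      double const x    = in[j];
      double const tmp1 = x - m;
      running_count++;
      m += tmp1 / running_count;
      m2 += tmp1 * (x - m);
    }
    out[i] = m2 / (count - ddof);
  }
  return out;
}
// Example: rolling_variance_reference(values, /*preceding=*/3, /*following=*/0,
// /*min_periods=*/1, /*ddof=*/1) reproduces a trailing 3-row variance window.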
* \file * dnn/src/cuda/convolution_helper/block_tile_consumer/iconv_block_consumer_unroll_width.cuh * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #pragma once #include "src/cuda/utils.cuh" namespace megdnn { namespace cuda { namespace convolution { template <typename RegBlockConfig_, typename ThreadConfig_, bool pipelined> struct IConvBlockConsumerUnrollWidth; template <typename RegBlockConfig_, typename ThreadConfig_> struct IConvBlockConsumerUnrollWidth<RegBlockConfig_, ThreadConfig_, true> { using ThreadConfig = ThreadConfig_; using RegBlockConfig = RegBlockConfig_; int32_t reg_src[RegBlockConfig::reg_n][RegBlockConfig::reg_width][2]; int32_t reg_filter[RegBlockConfig::reg_m][2]; int32_t reg_acc[RegBlockConfig::reg_n][RegBlockConfig::reg_width] [RegBlockConfig::reg_m]; __device__ __forceinline__ void init_accumulator() { #pragma unroll for (int i = 0; i < RegBlockConfig::reg_n; ++i) { #pragma unroll for (int j = 0; j < RegBlockConfig::reg_width; ++j) { #pragma unroll for (int k = 0; k < RegBlockConfig::reg_m; ++k) { reg_acc[i][j][k] = 0; } } } } template < typename DataGlobal2ShareMemVisitor, typename FilterGlobal2ShareMemVisitor> __device__ __forceinline__ void consume_block( DataGlobal2ShareMemVisitor data_gl2sh_visitor, FilterGlobal2ShareMemVisitor filter_gl2sh_visitor) { const int tidx = threadIdx.x; const int tidy = threadIdx.y; using smem_storage_dtype = typename DataGlobal2ShareMemVisitor::smem_storage_dtype; static bool const use_wide_store = !(RegBlockConfig::reg_n & 0x1); if (use_wide_store) { #pragma unroll for (int i = 0; i < (RegBlockConfig::reg_n >> 1); ++i) { #pragma unroll for (int j = 0; j < RegBlockConfig::reg_width; ++j) { int i2 = (i << 1); int tidx2 = (tidx << 1); reg_src[i2][j][0] = *(data_gl2sh_visitor.sh_ptr( 0, j, tidx2 + i2 * ThreadConfig::nr_thread_x)); reg_src[i2 + 1][j][0] = *(data_gl2sh_visitor.sh_ptr( 0, j, tidx2 + i2 * ThreadConfig::nr_thread_x + 1)); } } } else { #pragma unroll for (int i = 0; i < RegBlockConfig::reg_n; ++i) { #pragma unroll for (int j = 0; j < RegBlockConfig::reg_width; ++j) { reg_src[i][j][0] = *(data_gl2sh_visitor.sh_ptr( 0, j, tidx + i * ThreadConfig::nr_thread_x)); } } } #pragma unroll for (int j = 0; j < RegBlockConfig::reg_m_packed; ++j) { int32_t* ker_sh_ptr = filter_gl2sh_visitor.sh_ptr( 0, tidy * RegBlockConfig::pack_size + j * ThreadConfig::nr_thread_y * RegBlockConfig::pack_size); #pragma unroll for (int packed = 0; packed < RegBlockConfig::pack_size; ++packed) { reg_filter[j * RegBlockConfig::pack_size + packed][0] = *(ker_sh_ptr++); } } #pragma unroll for (int ci_inner = 0; ci_inner < RegBlockConfig::reg_k_packed; ++ci_inner) { const int comp_idx = (ci_inner & 0x1); const int load_idx = 1 - comp_idx; if (ci_inner < RegBlockConfig::reg_k_packed - 1) { int32_t* ker_sh_ptr = filter_gl2sh_visitor.sh_ptr(ci_inner + 1, 0); if (use_wide_store) { #pragma unroll for (int i = 0; i < (RegBlockConfig::reg_n >> 1); ++i) { #pragma unroll for (int j = 0; j < RegBlockConfig::reg_width; ++j) { int i2 = (i << 1); int tidx2 = (tidx << 1); reg_src[i2][j][load_idx] = *(data_gl2sh_visitor.sh_ptr( ci_inner + 1, j, tidx2 + i2 * ThreadConfig::nr_thread_x)); reg_src[i2 + 1][j][load_idx] = *(data_gl2sh_visitor.sh_ptr( ci_inner + 1, j, tidx2 + i2 
* ThreadConfig::nr_thread_x + 1)); } } } else { #pragma unroll for (int i = 0; i < RegBlockConfig::reg_n; ++i) { #pragma unroll for (int j = 0; j < RegBlockConfig::reg_width; ++j) { reg_src[i][j][load_idx] = *(data_gl2sh_visitor.sh_ptr( ci_inner + 1, j, tidx + i * ThreadConfig::nr_thread_x)); } } } #pragma unroll for (int j = 0; j < RegBlockConfig::reg_m_packed; ++j) { int32_t* ker_sh_ptr_packed = &ker_sh_ptr [(tidy + j * ThreadConfig::nr_thread_y) * RegBlockConfig::pack_size]; #pragma unroll for (int packed = 0; packed < RegBlockConfig::pack_size; ++packed) { reg_filter[j * RegBlockConfig::pack_size + packed][load_idx] = *(ker_sh_ptr_packed++); } } } #pragma unroll for (int i = 0; i < RegBlockConfig::reg_n; ++i) { #pragma unroll for (int j = 0; j < RegBlockConfig::reg_width; ++j) { #pragma unroll for (int k = 0; k < RegBlockConfig::reg_m; ++k) { dot_prod( reg_src[i][j][comp_idx], reg_filter[k][comp_idx], reg_acc[i][j][k], reg_acc[i][j][k]); } } } } } }; template <typename RegBlockConfig_, typename ThreadConfig_> struct IConvBlockConsumerUnrollWidth<RegBlockConfig_, ThreadConfig_, false> { using ThreadConfig = ThreadConfig_; using RegBlockConfig = RegBlockConfig_; int32_t reg_src[RegBlockConfig::reg_n][RegBlockConfig::reg_width]; int32_t reg_filter[RegBlockConfig::reg_m]; int32_t reg_acc[RegBlockConfig::reg_n][RegBlockConfig::reg_width] [RegBlockConfig::reg_m]; __device__ __forceinline__ void init_accumulator() { #pragma unroll for (int i = 0; i < RegBlockConfig::reg_n; ++i) { #pragma unroll for (int j = 0; j < RegBlockConfig::reg_width; ++j) { #pragma unroll for (int k = 0; k < RegBlockConfig::reg_m; ++k) { reg_acc[i][j][k] = 0; } } } } template < typename DataGlobal2ShareMemVisitor, typename FilterGlobal2ShareMemVisitor> __device__ __forceinline__ void consume_block( DataGlobal2ShareMemVisitor data_gl2sh_visitor, FilterGlobal2ShareMemVisitor filter_gl2sh_visitor) { const int tidx = threadIdx.x; const int tidy = threadIdx.y; using smem_storage_dtype = typename DataGlobal2ShareMemVisitor::smem_storage_dtype; static bool const use_wide_store = !(RegBlockConfig::reg_n & 0x1); #pragma unroll for (int ci_inner = 0; ci_inner < RegBlockConfig::reg_k_packed; ++ci_inner) { int32_t* ker_sh_ptr = filter_gl2sh_visitor.sh_ptr(ci_inner, 0); if (use_wide_store) { #pragma unroll for (int i = 0; i < (RegBlockConfig::reg_n >> 1); ++i) { #pragma unroll for (int j = 0; j < RegBlockConfig::reg_width; ++j) { int i2 = (i << 1); int tidx2 = (tidx << 1); reg_src[i2][j] = *(data_gl2sh_visitor.sh_ptr( ci_inner, j, tidx2 + i2 * ThreadConfig::nr_thread_x)); reg_src[i2 + 1][j] = *(data_gl2sh_visitor.sh_ptr( ci_inner, j, tidx2 + i2 * ThreadConfig::nr_thread_x + 1)); } } } else { #pragma unroll for (int i = 0; i < RegBlockConfig::reg_n; ++i) { #pragma unroll for (int j = 0; j < RegBlockConfig::reg_width; ++j) { reg_src[i][j] = *(data_gl2sh_visitor.sh_ptr( ci_inner, j, tidx + i * ThreadConfig::nr_thread_x)); } } } #pragma unroll for (int j = 0; j < RegBlockConfig::reg_m_packed; ++j) { int32_t* ker_sh_ptr_packed = &ker_sh_ptr [(tidy + j * ThreadConfig::nr_thread_y) * RegBlockConfig::pack_size]; #pragma unroll for (int packed = 0; packed < RegBlockConfig::pack_size; ++packed) { reg_filter[j * RegBlockConfig::pack_size + packed] = *(ker_sh_ptr_packed++); } } #pragma unroll for (int i = 0; i < RegBlockConfig::reg_n; ++i) { #pragma unroll for (int j = 0; j < RegBlockConfig::reg_width; ++j) { #pragma unroll for (int k = 0; k < RegBlockConfig::reg_m; ++k) { dot_prod( reg_src[i][j], reg_filter[k], reg_acc[i][j][k], 
reg_acc[i][j][k]); } } } } } }; } // namespace convolution } // namespace cuda } // namespace megdnn // vim: ft=cpp syntax=cuda.doxygen foldmethod=marker foldmarker=f{{{,f}}}
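/*
 * Illustrative aside (not MegEngine code): the pipelined
 * IConvBlockConsumerUnrollWidth above keeps two register copies of the data and
 * filter fragments and ping-pongs between comp_idx and load_idx, so the next
 * k-slice is read from shared memory while the current one feeds dot_prod. The
 * kernel below strips that software-pipelining pattern down to a single dot
 * product. It assumes dot_prod behaves like a 4-way int8 multiply-accumulate,
 * modelled here with CUDA's __dp4a intrinsic (sm_61 and later).
 */
#include <cstdint>

__global__ void pipelined_dp4a_sketch(const int32_t* __restrict__ src,     // packed int8x4 activations, length K
                                      const int32_t* __restrict__ filter,  // packed int8x4 weights, length K
                                      int32_t* __restrict__ out,
                                      int K)
{
    if (K <= 0) return;

    int32_t reg_src[2];
    int32_t reg_flt[2];
    int32_t acc = 0;

    // prologue: stage slice 0 into buffer 0, mirroring the loads before the ci_inner loop
    reg_src[0] = src[0];
    reg_flt[0] = filter[0];

    for (int k = 0; k < K; ++k) {
        int const comp_idx = k & 0x1;       // buffer consumed this iteration
        int const load_idx = 1 - comp_idx;  // buffer refilled for the next one
        if (k + 1 < K) {                    // prefetch overlaps with the dp4a below
            reg_src[load_idx] = src[k + 1];
            reg_flt[load_idx] = filter[k + 1];
        }
        acc = __dp4a(reg_src[comp_idx], reg_flt[comp_idx], acc);
    }
    if (threadIdx.x == 0 && blockIdx.x == 0) { *out = acc; }
}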
#include <stdlib.h> #include <stdio.h> #include "cuda.h" #include <cufft.h> extern int nblock_size; extern int maxgsx; static cudaError_t crc; static cufftResult cfrc; static cufftHandle planrx, planxr, planrxn, planxrn; static cufftHandle plany, planyn; __global__ void gpuctpose4(float2 f[], float2 g[], int nx, int ny, int nxv, int nyv); __global__ void gpuctpose4n(float2 fn[], float2 gn[], int nx, int ny, int ndim, int nxv, int nyv); /*--------------------------------------------------------------------*/ __global__ void gpusctpose4(float2 f[], float2 g[], float ani, int nx, int ny, int nxv, int nyv) { /* scaled complex transpose using blocking algorithm with gaps */ /* local data */ int j, k, js, ks, joff, koff, mx, mxv; float2 a; extern __shared__ float2 shm2[]; mx = blockDim.x; mxv = mx + 1; joff = mx*blockIdx.x; koff = mx*blockIdx.y; js = threadIdx.x; ks = threadIdx.y; /* copy into block */ j = js + joff; k = ks + koff; if ((j < nx) && (k < ny)) { shm2[js+mxv*ks] = f[j+nxv*k]; } __syncthreads(); /* copy out from block with scaling */ j = ks + joff; k = js + koff; if ((j < nx) && (k < ny)) { a = shm2[ks+mxv*js]; a.x = ani*a.x; a.y = ani*a.y; g[k+nyv*j] = a; } return; } /*--------------------------------------------------------------------*/ __global__ void gpusctpose4n(float2 fn[], float2 gn[], float ani, int nx, int ny, int ndim, int nxv, int nyv) { /* scaled complex vector transpose using blocking algorithm with gaps */ /* ndim = vector dimension */ /* local data */ int i, j, k, js, ks, joff, koff, mx, mxv, nmxv, nnxv, nnyv, jj, kk; float2 a; extern __shared__ float2 shmn2[]; mx = blockDim.x; mxv = mx + 1; joff = mx*blockIdx.x; koff = mx*blockIdx.y; js = threadIdx.x; ks = threadIdx.y; nmxv = ndim*mxv; nnxv = ndim*nxv; nnyv = ndim*nyv; /* copy into block */ j = js + joff; k = ks + koff; if ((j < nx) && (k < ny)) { jj = j + nnxv*k; kk = js + nmxv*ks; for (i = 0; i < ndim; i++) { shmn2[kk+mxv*i] = fn[jj+nxv*i]; } } __syncthreads(); /* copy out from block with scaling */ j = ks + joff; k = js + koff; if ((j < nx) && (k < ny)) { kk = k + nnyv*j; jj = ks + nmxv*js; for (i = 0; i < ndim; i++) { a = shmn2[jj+mxv*i]; a.x = ani*a.x; a.y = ani*a.y; gn[kk+nyv*i] = a; } } return; } /*--------------------------------------------------------------------*/ extern "C" void gpufft2rrcuinit(int nx, int ny, int ndim) { cfrc = cufftPlan1d(&planrx,nx,CUFFT_R2C,ny); if (cfrc) { printf("cufftPlan1d planrx error=%d\n",cfrc); exit(1); } cfrc = cufftPlan1d(&planxr,nx,CUFFT_C2R,ny); if (cfrc) { printf("cufftPlan1d planxr error=%d\n",cfrc); exit(1); } cfrc = cufftPlan1d(&planrxn,nx,CUFFT_R2C,ndim*ny); if (cfrc) { printf("cufftPlan1d planrxn error=%d\n",cfrc); exit(1); } cfrc = cufftPlan1d(&planxrn,nx,CUFFT_C2R,ndim*ny); if (cfrc) { printf("cufftPlan1d planxrn error=%d\n",cfrc); exit(1); } return; } /*--------------------------------------------------------------------*/ extern "C" void gpufft2cuinit(int nx, int ny, int ndim) { int nxh1; nxh1 = nx/2 + 1; cfrc = cufftPlan1d(&plany,ny,CUFFT_C2C,nxh1); if (cfrc) { printf("cufftPlan1d plany error=%d\n",cfrc); exit(1); } cfrc = cufftPlan1d(&planyn,ny,CUFFT_C2C,ndim*nxh1); if (cfrc) { printf("cufftPlan1d planyn error=%d\n",cfrc); exit(1); } return; } /*--------------------------------------------------------------------*/ extern "C" void gpufft2rrcudel() { cfrc = cufftDestroy(planrx); if (cfrc) { printf("cufftDestroy planrx error=%d\n",cfrc); exit(1); } cfrc = cufftDestroy(planxr); if (cfrc) { printf("cufftDestroy planxr error=%d\n",cfrc); exit(1); } cfrc = 
cufftDestroy(planrxn); if (cfrc) { printf("cufftDestroy planrxn error=%d\n",cfrc); exit(1); } cfrc = cufftDestroy(planxrn); if (cfrc) { printf("cufftDestroy planxrn error=%d\n",cfrc); exit(1); } return; } /*--------------------------------------------------------------------*/ extern "C" void gpufft2cudel() { cfrc = cufftDestroy(plany); if (cfrc) { printf("cufftDestroy plany error=%d\n",cfrc); exit(1); } cfrc = cufftDestroy(planyn); if (cfrc) { printf("cufftDestroy planyn error=%d\n",cfrc); exit(1); } return; } /*--------------------------------------------------------------------*/ extern "C" void gpufft2rrcu(float2 f[], float2 g[], int isign, int indx, int indy, int nxh1d, int nyd) { /* wrapper function for real to complex fft, without packed data */ /* uses 1D real to complex and complex to complex NVIDIA FFTs */ /* nxh1d must be = nx/2+1 */ /* local data */ int nx, nxh1, ny, ns; int mx = 16; float ani; dim3 dimBlock(nblock_size); dim3 dimBlockt(mx,mx); /* calculate range of indices */ nx = 1L<<indx; nxh1 = nx/2 + 1; ny = 1L<<indy; dim3 dimGridtx((nxh1-1)/mx+1,(ny-1)/mx+1); dim3 dimGridty((ny-1)/mx+1,(nxh1-1)/mx+1); ns = (mx+1)*mx*sizeof(float2); /* inverse fourier transform */ if (isign < 0) { /* perform x fft */ cfrc = cufftExecR2C(planrx,(cufftReal *)f,(cufftComplex *)f); /* cudaThreadSynchronize(); */ if (cfrc) { printf("cufftExecR2C(-1) planrx error=%d\n",cfrc); exit(1); } /* transpose f to g and normalize */ ani = 1.0f/(((float) nx)*((float) ny)); crc = cudaGetLastError(); gpusctpose4<<<dimGridtx,dimBlockt,ns>>>(f,g,ani,nxh1,ny,nxh1d, nyd); /* cudaThreadSynchronize(); */ crc = cudaGetLastError(); if (crc) { printf("gpusctpose4 error=%d:%s\n",crc, cudaGetErrorString(crc)); exit(1); } /* perform y fft */ cfrc = cufftExecC2C(plany,(cufftComplex *)g,(cufftComplex *)g, CUFFT_FORWARD); cudaThreadSynchronize(); if (cfrc) { printf("cufftExecC2C(-1) plany error=%d\n",cfrc); exit(1); } } /* forward fourier transform */ else if (isign > 0) { /* perform y fft */ cfrc = cufftExecC2C(plany,(cufftComplex *)g,(cufftComplex *)g, CUFFT_INVERSE); /* cudaThreadSynchronize(); */ if (cfrc) { printf("cufftExecC2C(1) plany error=%d\n",cfrc); exit(1); } /* transpose g to f */ crc = cudaGetLastError(); gpuctpose4<<<dimGridty,dimBlockt,ns>>>(g,f,ny,nxh1,nyd,nxh1d); /* cudaThreadSynchronize(); */ crc = cudaGetLastError(); if (crc) { printf("gpuctpose4 error=%d:%s\n",crc,cudaGetErrorString(crc)); exit(1); } /* perform x fft */ cfrc = cufftExecC2R(planxr,(cufftComplex *)f,(cufftReal *)f); cudaThreadSynchronize(); if (cfrc) { printf("cufftExecC2R(1) planxr error=%d\n",cfrc); exit(1); } } return; } /*--------------------------------------------------------------------*/ extern "C" void gpufft2rrcun(float2 fn[], float2 gn[], int isign, int indx, int indy, int ndim, int nxh1d, int nyd) { /* wrapper function for real to complex fft, without packed data */ /* for vector data */ /* uses 1D real to complex and complex to complex NVIDIA FFTs */ /* ndim = vector dimension */ /* nxh1d must be = nx/2+1 */ /* local data */ int nx, nxh1, ny, ns; int mx = 16; float ani; dim3 dimBlock(nblock_size); dim3 dimBlockt(mx,mx); /* calculate range of indices */ nx = 1L<<indx; nxh1 = nx/2 + 1; ny = 1L<<indy; dim3 dimGridtx((nxh1-1)/mx+1,(ny-1)/mx+1); dim3 dimGridty((ny-1)/mx+1,(nxh1-1)/mx+1); ns = ndim*(mx+1)*mx*sizeof(float2); /* inverse fourier transform */ if (isign < 0) { /* perform x fft */ cfrc = cufftExecR2C(planrxn,(cufftReal *)fn,(cufftComplex *)fn); /* cudaThreadSynchronize(); */ if (cfrc) { printf("cufftExecR2C(-1) planrxn 
error=%d\n",cfrc); exit(1); } /* transpose f to g and normalize */ ani = 1.0f/(((float) nx)*((float) ny)); crc = cudaGetLastError(); gpusctpose4n<<<dimGridtx,dimBlockt,ns>>>(fn,gn,ani,nxh1,ny,ndim, nxh1d,nyd); /* cudaThreadSynchronize(); */ crc = cudaGetLastError(); if (crc) { printf("gpusctpose4n error=%d:%s\n",crc, cudaGetErrorString(crc)); exit(1); } /* perform y fft */ cfrc = cufftExecC2C(planyn,(cufftComplex *)gn,(cufftComplex *)gn, CUFFT_FORWARD); cudaThreadSynchronize(); if (cfrc) { printf("cufftExecC2C(-1) planyn error=%d\n",cfrc); exit(1); } } /* forward fourier transform */ else if (isign > 0) { /* perform y fft */ cfrc = cufftExecC2C(planyn,(cufftComplex *)gn,(cufftComplex *)gn, CUFFT_INVERSE); /* cudaThreadSynchronize(); */ if (cfrc) { printf("cufftExecC2C(1) planyn error=%d\n",cfrc); exit(1); } /* transpose g to f */ crc = cudaGetLastError(); gpuctpose4n<<<dimGridty,dimBlockt,ns>>>(gn,fn,ny,nxh1,ndim,nyd, nxh1d); /* cudaThreadSynchronize(); */ crc = cudaGetLastError(); if (crc) { printf("gpuctpose4n error=%d:%s\n",crc, cudaGetErrorString(crc)); exit(1); } /* perform x fft */ cfrc = cufftExecC2R(planxrn,(cufftComplex *)fn,(cufftReal *)fn); cudaThreadSynchronize(); if (cfrc) { printf("cufftExecC2R(1) planxrn error=%d\n",cfrc); exit(1); } } return; } /* Interfaces to Fortran */ /*--------------------------------------------------------------------*/ extern "C" void gpufft2rrcuinit_(int *nx, int *ny, int *ndim) { gpufft2rrcuinit(*nx,*ny,*ndim); return; } /*--------------------------------------------------------------------*/ extern "C" void gpufft2cuinit_(int *nx, int *ny, int *ndim) { gpufft2cuinit(*nx,*ny,*ndim); return; } /*--------------------------------------------------------------------*/ extern "C" void gpufft2rrcudel_() { gpufft2rrcudel(); return; } /*--------------------------------------------------------------------*/ extern "C" void gpufft2cudel_() { gpufft2cudel(); return; } /*--------------------------------------------------------------------*/ extern "C" void gpufft2rrcu_(unsigned long *gp_f, unsigned long *gp_g, int *isign, int *indx, int *indy, int *nxh1d, int *nyd) { float2 *f, *g; f = (float2 *)*gp_f; g = (float2 *)*gp_g; gpufft2rrcu(f,g,*isign,*indx,*indy,*nxh1d,*nyd); return; } /*--------------------------------------------------------------------*/ extern "C" void gpufft2rrcun_(unsigned long *gp_fn, unsigned long *gp_gn, int *isign, int *indx, int *indy, int *ndim, int *nxh1d, int *nyd) { float2 *fn, *gn; fn = (float2 *)*gp_fn; gn = (float2 *)*gp_gn; gpufft2rrcun(fn,gn,*isign,*indx,*indy,*ndim,*nxh1d,*nyd); return; }
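/*
 * Illustrative aside (not part of this file's API): gpufft2rrcu realizes the 2D
 * real-to-complex transform as batched 1D R2C FFTs along x, a scaled shared-memory
 * transpose (gpusctpose4), and batched 1D C2C FFTs along y. The outline below
 * restates that decomposition for the isign < 0 path using only documented cuFFT
 * calls; transpose_and_scale is a placeholder for the gpusctpose4 launch and error
 * checking is omitted for brevity.
 */
#include <cuda_runtime.h>
#include <cufft.h>

static void fft2_r2c_outline(float2* f /* device, nxh1 x ny */,
                             float2* g /* device, ny x nxh1 */,
                             int nx, int ny)
{
   int const nxh1 = nx/2 + 1;
   cufftHandle plan_x, plan_y;
   cufftPlan1d(&plan_x,nx,CUFFT_R2C,ny);    /* ny independent rows      */
   cufftPlan1d(&plan_y,ny,CUFFT_C2C,nxh1);  /* nxh1 independent columns */

/* 1) real-to-complex FFTs along x, in place on f */
   cufftExecR2C(plan_x,(cufftReal *)f,(cufftComplex *)f);

/* 2) transpose f(nxh1 x ny) into g(ny x nxh1) and scale by 1/(nx*ny);  */
/*    in the code above this is the gpusctpose4<<<...>>> launch         */
/* transpose_and_scale(f,g,1.0f/(((float) nx)*((float) ny)),nxh1,ny);   */

/* 3) complex-to-complex FFTs along y, in place on g */
   cufftExecC2C(plan_y,(cufftComplex *)g,(cufftComplex *)g,CUFFT_FORWARD);

   cufftDestroy(plan_x);
   cufftDestroy(plan_y);
}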
* CTA-processing functionality for radix sort downsweep scan kernels ******************************************************************************/ #pragma once #include "../../util/basic_utils.cuh" #include "../../util/cta_work_distribution.cuh" #include "../../util/tex_vector.cuh" #include "../../util/io/load_tile.cuh" #include "../../util/ns_umbrella.cuh" #include "../../radix_sort/sort_utils.cuh" #include "../../radix_sort/cta_radix_rank.cuh" #include "../../radix_sort/downsweep/kernel_policy.cuh" #include "../../radix_sort/downsweep/tex_ref.cuh" B40C_NS_PREFIX namespace b40c { namespace radix_sort { namespace downsweep { /** * Partitioning downsweep scan CTA */ template < typename KernelPolicy, typename SizeT, typename KeyType, typename ValueType> struct Cta { //--------------------------------------------------------------------- // Type definitions and constants //--------------------------------------------------------------------- // Appropriate unsigned-bits representation of KeyType typedef typename KeyTraits<KeyType>::UnsignedBits UnsignedBits; static const UnsignedBits MIN_KEY = KeyTraits<KeyType>::MIN_KEY; static const UnsignedBits MAX_KEY = KeyTraits<KeyType>::MAX_KEY; static const util::io::ld::CacheModifier LOAD_MODIFIER = KernelPolicy::LOAD_MODIFIER; static const util::io::st::CacheModifier STORE_MODIFIER = KernelPolicy::STORE_MODIFIER; static const ScatterStrategy SCATTER_STRATEGY = KernelPolicy::SCATTER_STRATEGY; enum { RADIX_BITS = KernelPolicy::RADIX_BITS, RADIX_DIGITS = 1 << RADIX_BITS, KEYS_ONLY = util::Equals<ValueType, util::NullType>::VALUE, CURRENT_BIT = KernelPolicy::CURRENT_BIT, CURRENT_PASS = KernelPolicy::CURRENT_PASS, // Direction of flow though ping-pong buffers: (FLOP_TURN) ? (d_keys1 --> d_keys0) : (d_keys0 --> d_keys1) FLOP_TURN = KernelPolicy::CURRENT_PASS & 0x1, LOG_CTA_THREADS = KernelPolicy::LOG_CTA_THREADS, CTA_THREADS = 1 << LOG_CTA_THREADS, LOG_WARP_THREADS = CUB_LOG_WARP_THREADS(__CUB_CUDA_ARCH__), WARP_THREADS = 1 << LOG_WARP_THREADS, LOG_WARPS = LOG_CTA_THREADS - LOG_WARP_THREADS, WARPS = 1 << LOG_WARPS, LOG_THREAD_ELEMENTS = KernelPolicy::LOG_THREAD_ELEMENTS, KEYS_PER_THREAD = 1 << LOG_THREAD_ELEMENTS, LOG_TILE_ELEMENTS = LOG_CTA_THREADS + LOG_THREAD_ELEMENTS, TILE_ELEMENTS = 1 << LOG_TILE_ELEMENTS, BYTES_PER_SIZET = sizeof(SizeT), LOG_BYTES_PER_SIZET = util::Log2<BYTES_PER_SIZET>::VALUE, LOG_MEM_BANKS = CUB_LOG_MEM_BANKS(__CUB_CUDA_ARCH__), MEM_BANKS = 1 << LOG_MEM_BANKS, // Whether or not to insert padding for exchanging keys. (Padding is // worse than bank conflicts on GPUs that need two-phase scattering) PADDED_EXCHANGE = (SCATTER_STRATEGY != SCATTER_WARP_TWO_PHASE), PADDING_ELEMENTS = (PADDED_EXCHANGE) ? 
(TILE_ELEMENTS >> LOG_MEM_BANKS) : 0, DIGITS_PER_SCATTER_PASS = CTA_THREADS / MEM_BANKS, SCATTER_PASSES = RADIX_DIGITS / DIGITS_PER_SCATTER_PASS, LOG_STORE_TXN_THREADS = LOG_MEM_BANKS, STORE_TXN_THREADS = 1 << LOG_STORE_TXN_THREADS, ELEMENTS_PER_TEX = Textures<KeyType, ValueType, KEYS_PER_THREAD>::ELEMENTS_PER_TEX, THREAD_TEX_LOADS = KEYS_PER_THREAD / ELEMENTS_PER_TEX, TILE_TEX_LOADS = CTA_THREADS * THREAD_TEX_LOADS, }; // Texture types typedef Textures<KeyType, ValueType, KEYS_PER_THREAD> Textures; typedef typename Textures::KeyTexType KeyTexType; typedef typename Textures::ValueTexType ValueTexType; // CtaRadixRank utility type typedef CtaRadixRank< LOG_CTA_THREADS, RADIX_BITS, CURRENT_BIT, KernelPolicy::SMEM_CONFIG> CtaRadixRank; /** * Shared memory storage layout */ struct SmemStorage { SizeT tex_offset; SizeT tex_offset_limit; util::CtaWorkLimits<SizeT> work_limits; unsigned int digit_prefixes[RADIX_DIGITS + 1]; union { unsigned char digit_offset_bytes[1]; SizeT digit_offsets[RADIX_DIGITS]; }; union { typename CtaRadixRank::SmemStorage ranking_storage; UnsignedBits key_exchange[TILE_ELEMENTS + PADDING_ELEMENTS]; ValueType value_exchange[TILE_ELEMENTS + PADDING_ELEMENTS]; }; }; //--------------------------------------------------------------------- // Thread fields //--------------------------------------------------------------------- // Shared storage for this CTA SmemStorage &smem_storage; // Input and output device pointers UnsignedBits *d_in_keys; UnsignedBits *d_out_keys; ValueType *d_in_values; ValueType *d_out_values; // The global scatter base offset for each digit (valid in the first RADIX_DIGITS threads) SizeT my_digit_offset; //--------------------------------------------------------------------- // Helper structure for templated iteration. (NVCC currently won't // unroll loops with "unexpected control flow".) 
//--------------------------------------------------------------------- /** * Iterate */ template <int COUNT, int MAX> struct Iterate { /** * Scatter items to global memory */ template <bool FULL_TILE, typename T> static __device__ __forceinline__ void ScatterGlobal( T items[KEYS_PER_THREAD], SizeT digit_offsets[KEYS_PER_THREAD], T *d_out, const SizeT &guarded_elements) { // Scatter if not out-of-bounds int tile_element = threadIdx.x + (COUNT * CTA_THREADS); T* scatter = d_out + threadIdx.x + (COUNT * CTA_THREADS) + digit_offsets[COUNT]; if (FULL_TILE || (tile_element < guarded_elements)) { util::io::ModifiedStore<STORE_MODIFIER>::St(items[COUNT], scatter); } // Iterate next element Iterate<COUNT + 1, MAX>::template ScatterGlobal<FULL_TILE>( items, digit_offsets, d_out, guarded_elements); } /** * Scatter items to global memory */ template <bool FULL_TILE, typename T> static __device__ __forceinline__ void ScatterGlobal( T items[KEYS_PER_THREAD], unsigned int ranks[KEYS_PER_THREAD], SizeT digit_offsets[KEYS_PER_THREAD], T *d_out, const SizeT &guarded_elements) { // Scatter if not out-of-bounds T* scatter = d_out + ranks[COUNT] + digit_offsets[COUNT]; if (FULL_TILE || (ranks[COUNT] < guarded_elements)) { util::io::ModifiedStore<STORE_MODIFIER>::St(items[COUNT], scatter); } // Iterate next element Iterate<COUNT + 1, MAX>::template ScatterGlobal<FULL_TILE>( items, ranks, digit_offsets, d_out, guarded_elements); } /** * Warp based scattering that does not cross alignment boundaries, e.g., for SM1.0-1.1 * coalescing rules */ template <typename T> static __device__ __forceinline__ void AlignedScatterPass( SmemStorage &smem_storage, T *buffer, T *d_out, const SizeT &valid_elements) { typedef typename CtaRadixRank::PackedCounter PackedCounter; int store_txn_idx = threadIdx.x & (STORE_TXN_THREADS - 1); int store_txn_digit = threadIdx.x >> LOG_STORE_TXN_THREADS; int my_digit = (COUNT * DIGITS_PER_SCATTER_PASS) + store_txn_digit; if (my_digit < RADIX_DIGITS) { int my_exclusive_scan = smem_storage.digit_prefixes[my_digit]; int my_inclusive_scan = smem_storage.digit_prefixes[my_digit + 1]; int my_carry = smem_storage.digit_offsets[my_digit] + my_exclusive_scan; int my_aligned_offset = store_txn_idx - (my_carry & (STORE_TXN_THREADS - 1)); int gather_offset; while ((gather_offset = my_aligned_offset + my_exclusive_scan) < my_inclusive_scan) { if ((my_aligned_offset >= 0) && (gather_offset < valid_elements)) { int padded_gather_offset = (PADDED_EXCHANGE) ? 
gather_offset = util::SHR_ADD(gather_offset, LOG_MEM_BANKS, gather_offset) : gather_offset; T datum = buffer[padded_gather_offset]; d_out[my_carry + my_aligned_offset] = datum; } my_aligned_offset += STORE_TXN_THREADS; } } // Next scatter pass Iterate<COUNT + 1, MAX>::AlignedScatterPass(smem_storage, buffer, d_out, valid_elements); } }; /** * Terminate iteration */ template <int MAX> struct Iterate<MAX, MAX> { // ScatterGlobal template <bool FULL_TILE, typename T> static __device__ __forceinline__ void ScatterGlobal(T[KEYS_PER_THREAD], SizeT[KEYS_PER_THREAD], T*, const SizeT &) {} // ScatterGlobal template <bool FULL_TILE, typename T> static __device__ __forceinline__ void ScatterGlobal(T[KEYS_PER_THREAD], unsigned int[KEYS_PER_THREAD], SizeT[KEYS_PER_THREAD], T*, const SizeT &) {} // AlignedScatterPass template <typename T> static __device__ __forceinline__ void AlignedScatterPass(SmemStorage&, T*, T*, const SizeT&) {} }; //--------------------------------------------------------------------- // Methods //--------------------------------------------------------------------- /** * Constructor */ __device__ __forceinline__ Cta( SmemStorage &smem_storage, KeyType *d_keys0, KeyType *d_keys1, ValueType *d_values0, ValueType *d_values1, SizeT *d_spine) : smem_storage(smem_storage), d_in_keys(reinterpret_cast<UnsignedBits*>(FLOP_TURN ? d_keys1 : d_keys0)), d_out_keys(reinterpret_cast<UnsignedBits*>(FLOP_TURN ? d_keys0 : d_keys1)), d_in_values(FLOP_TURN ? d_values1 : d_values0), d_out_values(FLOP_TURN ? d_values0 : d_values1) { if ((CTA_THREADS == RADIX_DIGITS) || (threadIdx.x < RADIX_DIGITS)) { // Read digit scatter base (in parallel) int spine_digit_offset = (gridDim.x * threadIdx.x) + blockIdx.x; my_digit_offset = d_spine[spine_digit_offset]; } } /** * Perform a bit-wise twiddling transformation on keys */ template <UnsignedBits TwiddleOp(UnsignedBits)> __device__ __forceinline__ void TwiddleKeys( UnsignedBits keys[KEYS_PER_THREAD], UnsignedBits twiddled_keys[KEYS_PER_THREAD]) // out parameter { #pragma unroll for (int KEY = 0; KEY < KEYS_PER_THREAD; KEY++) { twiddled_keys[KEY] = TwiddleOp(keys[KEY]); } } /** * Scatter ranked items to shared memory buffer */ template <typename T> __device__ __forceinline__ void ScatterRanked( unsigned int ranks[KEYS_PER_THREAD], T items[KEYS_PER_THREAD], T *buffer) { #pragma unroll for (int KEY = 0; KEY < KEYS_PER_THREAD; KEY++) { int offset = ranks[KEY]; if (PADDED_EXCHANGE) { // Workaround for (CUAD4.2+NVCC+abi+m64) bug when sorting 16-bit key-value pairs offset = (sizeof(ValueType) == 2) ? (offset >> LOG_MEM_BANKS) + offset : util::SHR_ADD(offset, LOG_MEM_BANKS, offset); } buffer[offset] = items[KEY]; } } /** * Gather items from shared memory buffer */ template <typename T> __device__ __forceinline__ void GatherShared( T items[KEYS_PER_THREAD], T *buffer) { #pragma unroll for (int KEY = 0; KEY < KEYS_PER_THREAD; KEY++) { int gather_offset = (PADDED_EXCHANGE) ? 
(util::SHR_ADD(threadIdx.x, LOG_MEM_BANKS, threadIdx.x) + (KEY * CTA_THREADS) + ((KEY * CTA_THREADS) >> LOG_MEM_BANKS)) : (threadIdx.x + (KEY * CTA_THREADS)); items[KEY] = buffer[gather_offset]; } } /** * Decodes given keys to lookup digit offsets in shared memory */ __device__ __forceinline__ void DecodeDigitOffsets( UnsignedBits twiddled_keys[KEYS_PER_THREAD], SizeT digit_offsets[KEYS_PER_THREAD]) { #pragma unroll for (int KEY = 0; KEY < KEYS_PER_THREAD; KEY++) { // Decode address of bin-offset in smem unsigned int byte_offset = Extract< CURRENT_BIT, RADIX_BITS, LOG_BYTES_PER_SIZET>(twiddled_keys[KEY]); // Lookup base digit offset from shared memory digit_offsets[KEY] = *(SizeT *)(smem_storage.digit_offset_bytes + byte_offset); } } /** * Load tile of keys from global memory */ template <bool FULL_TILE> __device__ __forceinline__ void LoadKeys( UnsignedBits keys[KEYS_PER_THREAD], SizeT tex_offset, const SizeT &guarded_elements) { if ((LOAD_MODIFIER == util::io::ld::tex) && FULL_TILE) { // Unguarded loads through tex KeyTexType *vectors = (KeyTexType *) keys; #pragma unroll for (int PACK = 0; PACK < THREAD_TEX_LOADS; PACK++) { vectors[PACK] = tex1Dfetch( (Cta::FLOP_TURN) ? TexKeys<KeyTexType>::ref1 : TexKeys<KeyTexType>::ref0, tex_offset + (threadIdx.x * THREAD_TEX_LOADS) + PACK); } } else { // Guarded loads with default assignment of MAX_KEY to out-of-bound keys util::io::LoadTile< 0, // log loads per tile LOG_THREAD_ELEMENTS, CTA_THREADS, LOAD_MODIFIER, false>::LoadValid( (UnsignedBits (*)[KEYS_PER_THREAD]) keys, d_in_keys, (tex_offset * ELEMENTS_PER_TEX), guarded_elements, MAX_KEY); } } /** * Load tile of values from global memory */ template <bool FULL_TILE> __device__ __forceinline__ void LoadValues( ValueType values[KEYS_PER_THREAD], SizeT tex_offset, const SizeT &guarded_elements) { if ((LOAD_MODIFIER == util::io::ld::tex) && (util::NumericTraits<ValueType>::BUILT_IN) && FULL_TILE) { // Unguarded loads through tex ValueTexType *vectors = (ValueTexType*) values; #pragma unroll for (int PACK = 0; PACK < THREAD_TEX_LOADS; PACK++) { vectors[PACK] = tex1Dfetch( (Cta::FLOP_TURN) ? 
TexValues<ValueTexType>::ref1 : TexValues<ValueTexType>::ref0, tex_offset + (threadIdx.x * THREAD_TEX_LOADS) + PACK); } } else { // Guarded loads with default assignment of -1 to out-of-bound values util::io::LoadTile< 0, // log loads per tile LOG_THREAD_ELEMENTS, CTA_THREADS, LOAD_MODIFIER, false>::LoadValid( (ValueType (*)[KEYS_PER_THREAD]) values, d_in_values, (tex_offset * ELEMENTS_PER_TEX), guarded_elements); } } /** * Gather keys from smem, decode base digit offsets for keys,4 * and scatter to global */ template <bool FULL_TILE> __device__ __forceinline__ void ScatterKeys( UnsignedBits twiddled_keys[KEYS_PER_THREAD], SizeT digit_offsets[KEYS_PER_THREAD], // (out parameter) unsigned int ranks[KEYS_PER_THREAD], const SizeT &guarded_elements) { if (SCATTER_STRATEGY == SCATTER_DIRECT) { // Scatter keys directly to global memory // Compute scatter offsets DecodeDigitOffsets(twiddled_keys, digit_offsets); // Untwiddle keys before outputting UnsignedBits keys[KEYS_PER_THREAD]; TwiddleKeys<KeyTraits<KeyType>::TwiddleOut>(twiddled_keys, keys); // Scatter to global Iterate<0, KEYS_PER_THREAD>::template ScatterGlobal<FULL_TILE>( keys, ranks, digit_offsets, d_out_keys, guarded_elements); } else if (SCATTER_STRATEGY == SCATTER_WARP_TWO_PHASE) { // Use warp-aligned scattering of sorted keys from shared memory // Untwiddle keys before outputting UnsignedBits keys[KEYS_PER_THREAD]; TwiddleKeys<KeyTraits<KeyType>::TwiddleOut>(twiddled_keys, keys); // Scatter to shared memory first ScatterRanked(ranks, keys, smem_storage.key_exchange); __syncthreads(); // Gather sorted keys from smem and scatter to global using warp-aligned scattering Iterate<0, SCATTER_PASSES>::AlignedScatterPass( smem_storage, smem_storage.key_exchange, d_out_keys, guarded_elements); } else { // Normal two-phase scatter: exchange through shared memory, then // scatter sorted keys to global // Scatter to shared memory first (for better write-coalescing during global scatter) ScatterRanked(ranks, twiddled_keys, smem_storage.key_exchange); __syncthreads(); // Gather sorted keys from shared memory GatherShared(twiddled_keys, smem_storage.key_exchange); // Compute scatter offsets DecodeDigitOffsets(twiddled_keys, digit_offsets); // Untwiddle keys before outputting UnsignedBits keys[KEYS_PER_THREAD]; TwiddleKeys<KeyTraits<KeyType>::TwiddleOut>(twiddled_keys, keys); // Scatter keys to global memory Iterate<0, KEYS_PER_THREAD>::template ScatterGlobal<FULL_TILE>( keys, digit_offsets, d_out_keys, guarded_elements); } } /** * Truck along associated values */ template <bool FULL_TILE, typename _ValueType> __device__ __forceinline__ void GatherScatterValues( _ValueType values[KEYS_PER_THREAD], SizeT digit_offsets[KEYS_PER_THREAD], unsigned int ranks[KEYS_PER_THREAD], SizeT tex_offset, const SizeT &guarded_elements) { // Load tile of values LoadValues<FULL_TILE>(values, tex_offset, guarded_elements); if (SCATTER_STRATEGY == SCATTER_DIRECT) { // Scatter values directly to global memory Iterate<0, KEYS_PER_THREAD>::template ScatterGlobal<FULL_TILE>( values, ranks, digit_offsets, d_out_values, guarded_elements); } else if (SCATTER_STRATEGY == SCATTER_WARP_TWO_PHASE) { __syncthreads(); // Exchange values through shared memory for better write-coalescing ScatterRanked(ranks, values, smem_storage.value_exchange); __syncthreads(); // Use explicitly warp-aligned scattering of values from shared memory Iterate<0, SCATTER_PASSES>::AlignedScatterPass( smem_storage, smem_storage.value_exchange, d_out_values, guarded_elements); } else { __syncthreads(); // 
Exchange values through shared memory for better write-coalescing ScatterRanked(ranks, values, smem_storage.value_exchange); __syncthreads(); // Gather values from shared GatherShared(values, smem_storage.value_exchange); // Scatter to global memory Iterate<0, KEYS_PER_THREAD>::template ScatterGlobal<FULL_TILE>( values, digit_offsets, d_out_values, guarded_elements); } } /** * Truck along associated values (specialized for key-only sorting) */ template <bool FULL_TILE> __device__ __forceinline__ void GatherScatterValues( util::NullType values[KEYS_PER_THREAD], SizeT digit_offsets[KEYS_PER_THREAD], unsigned int ranks[KEYS_PER_THREAD], SizeT tex_offset, const SizeT &guarded_elements) {} /** * Process tile */ template <bool FULL_TILE> __device__ __forceinline__ void ProcessTile( SizeT tex_offset, const SizeT &guarded_elements = TILE_ELEMENTS) { // Per-thread tile data UnsignedBits keys[KEYS_PER_THREAD]; // Keys UnsignedBits twiddled_keys[KEYS_PER_THREAD]; // Twiddled (if necessary) keys ValueType values[KEYS_PER_THREAD]; // Values unsigned int ranks[KEYS_PER_THREAD]; // For each key, the local rank within the CTA SizeT digit_offsets[KEYS_PER_THREAD]; // For each key, the global scatter base offset of the corresponding digit // Load tile of keys and twiddle bits if necessary LoadKeys<FULL_TILE>(keys, tex_offset, guarded_elements); __syncthreads(); // Twiddle keys TwiddleKeys<KeyTraits<KeyType>::TwiddleIn>(keys, twiddled_keys); // Rank the twiddled keys CtaRadixRank::RankKeys( smem_storage.ranking_storage, twiddled_keys, ranks, smem_storage.digit_prefixes); __syncthreads(); // Update global scatter base offsets for each digit if ((CTA_THREADS == RADIX_DIGITS) || (threadIdx.x < RADIX_DIGITS)) { my_digit_offset -= smem_storage.digit_prefixes[threadIdx.x]; smem_storage.digit_offsets[threadIdx.x] = my_digit_offset; my_digit_offset += smem_storage.digit_prefixes[threadIdx.x + 1]; } __syncthreads(); // Scatter keys ScatterKeys<FULL_TILE>(twiddled_keys, digit_offsets, ranks, guarded_elements); // Gather/scatter values GatherScatterValues<FULL_TILE>(values, digit_offsets, ranks, tex_offset, guarded_elements); } /** * Process work range of tiles */ __device__ __forceinline__ void ProcessWorkRange( util::CtaWorkLimits<SizeT> &work_limits) { // Make sure we get a local copy of the cta's offset (work_limits may be in smem) SizeT tex_offset = smem_storage.tex_offset; // Process full tiles of tile_elements while (tex_offset < smem_storage.tex_offset_limit) { ProcessTile<true>(tex_offset); tex_offset += TILE_TEX_LOADS; } // Clean up last partial tile with guarded-io if (work_limits.guarded_elements) { ProcessTile<false>(tex_offset, work_limits.guarded_elements); } } }; } // namespace downsweep } // namespace radix_sort } // namespace b40c B40C_NS_POSTFIX
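/*
 * A minimal host-side sketch of the digit-offset bookkeeping used by the downsweep CTA above:
 * keys are binned by the digit at `current_bit`, each digit gets a global base offset (the role
 * of the spine), and every key is scattered to base-offset-plus-rank. The helper name
 * `downsweep_pass_reference` and its signature are illustrative assumptions, not part of b40c.
 */
#include <cstdint>
#include <vector>

void downsweep_pass_reference(const std::vector<uint32_t> &in,
                              std::vector<uint32_t> &out,
                              int current_bit, int radix_bits)
{
    const uint32_t radix_digits = 1u << radix_bits;
    const uint32_t mask = radix_digits - 1;

    // Digit histogram (the upsweep's job in the real kernel pipeline).
    std::vector<uint32_t> digit_count(radix_digits, 0);
    for (uint32_t key : in) digit_count[(key >> current_bit) & mask]++;

    // Exclusive prefix sum -> per-digit global base offsets (the d_spine analogue).
    std::vector<uint32_t> digit_offset(radix_digits, 0);
    for (uint32_t d = 1; d < radix_digits; ++d)
        digit_offset[d] = digit_offset[d - 1] + digit_count[d - 1];

    // Scatter: bumping a digit's offset as keys are placed plays the role of
    // adding the CTA-local rank to that digit's base offset.
    out.resize(in.size());
    for (uint32_t key : in)
        out[digit_offset[(key >> current_bit) & mask]++] = key;
}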
namespace { /* * CUDA Kernel of the forward function for Node-Edge Multiplication(reduced on edge, designed for relative positional encoding). */ template <typename scalar_t> __global__ void node_mul_edge_forward_kernel(const int64_t* __restrict__ row, const int64_t* __restrict__ indptr, const int64_t* __restrict__ eid, const scalar_t* __restrict__ A, const scalar_t* __restrict__ B, scalar_t* __restrict__ y, const int d, const int n, const int h) { int i = blockIdx.x; int tx = threadIdx.x; if (i < n) { for (int j = indptr[i] + tx; j < indptr[i + 1]; j += blockDim.x) { for (int ko = 0; ko < h; ++ko) { scalar_t sum = 0; for (int ki = 0; ki < d; ++ki) { sum += A[(row[i] * h + ko) * d + ki] * B[eid[j] * d + ki]; } y[eid[j] * h + ko] = sum; } } } } /* * CUDA Kernel of the forward function for Masked Matrix Multiplication. (argument: csr format) */ template <typename scalar_t> __global__ void maskedmm_csr_forward_kernel(const int64_t* __restrict__ row, const int64_t* __restrict__ indptr, const int64_t* __restrict__ eid, const int64_t* __restrict__ indices, const scalar_t* __restrict__ A, const scalar_t* __restrict__ B, scalar_t* __restrict__ y, const int d, const int n, const int n_row, const int h) { int i = blockIdx.x; int tx = threadIdx.x; if (i < n_row) { for (int j = indptr[i] + tx; j < indptr[i + 1]; j += blockDim.x) { for (int ko = 0; ko < h; ++ko) { scalar_t sum = 0; for (int ki = 0; ki < d; ++ki) { sum += A[(row[i] * h + ko) * d + ki] * B[(ko * d + ki) * n + indices[j]]; } y[eid[j] * h + ko] = sum; } } } } /* * CUDA Kernel of the backward function for Node-Edge Multiplication(reduced on edge, designed for relative positional encoding). */ template <typename scalar_t> __global__ void node_mul_edge_backward_kernel_0(const int64_t* __restrict__ row, const int64_t* __restrict__ indptr, const int64_t* __restrict__ eid, const scalar_t* __restrict__ B, const scalar_t* __restrict__ dy, scalar_t* __restrict__ dA, const int d, const int n, const int h) { int tx = threadIdx.x; int i = blockIdx.x; if (i < n) { for (int j = tx; j < d * h; j += blockDim.x) { scalar_t sum = 0; for (int k = indptr[i]; k < indptr[i + 1]; ++k) sum += dy[eid[k] * h + j / d] * B[eid[k] * d + j % d]; dgl::AtomicAdd(dA + row[i] * d * h + j, sum); } } } /* * CUDA Kernel of the backward function for Node-Edge Multiplication(reduced on edge, designed for relative positional encoding). */ template <typename scalar_t> __global__ void node_mul_edge_backward_kernel_1(const int64_t* __restrict__ row, const int64_t* __restrict__ indptr, const int64_t* __restrict__ eid, const scalar_t* __restrict__ A, const scalar_t* __restrict__ dy, scalar_t* __restrict__ dB, const int d, const int n, const int h) { int tx = threadIdx.x; int i = blockIdx.x; if (i < n) { for (int j = tx; j < d; j += blockDim.x) { for (int k = indptr[i]; k < indptr[i + 1]; ++k) { scalar_t sum = 0; for (int ki = 0; ki < h; ++ki) { sum += dy[eid[k] * h + ki] * A[(row[i] * h + ki) * d + j]; } dB[eid[k] * d + j] = sum; } } } } /* * CUDA Kernel of the backward function for Masked Matrix Multiplication. 
(argument: csr format) */ template <typename scalar_t> __global__ void maskedmm_csr_backward_kernel(const int64_t* __restrict__ row, const int64_t* __restrict__ indptr, const int64_t* __restrict__ eid, const int64_t* __restrict__ indices, const scalar_t* __restrict__ B, const scalar_t* __restrict__ dy, scalar_t* __restrict__ dA, const int d, const int n, const int h) { int tx = threadIdx.x; int i = blockIdx.x; if (i < n) { for (int j = tx; j < d * h; j += blockDim.x) { scalar_t sum = 0; for (int k = indptr[i]; k < indptr[i + 1]; ++k) sum += dy[eid[k] * h + j / d] * B[indices[k] * d * h + j]; dgl::AtomicAdd(dA + row[i] * d * h + j, sum); } } } /* * CUDA Kernel of the forward function for Source Multiply Edge Function. * For `src_mul_edge` operation, the arguments are csr(column-major) representations. */ template <typename scalar_t> __global__ void vector_spmm_forward_kernel(const int64_t* __restrict__ row, const int64_t* __restrict__ indptr, const int64_t* __restrict__ eid, const int64_t* __restrict__ indices, const scalar_t* __restrict__ edata, const scalar_t* __restrict__ x, scalar_t* __restrict__ y, const int d, const int n, const int h) { int i = blockIdx.x; int tx = threadIdx.x; if (i < n) { for (int j = tx; j < d * h; j += blockDim.x) { scalar_t sum = 0; for (int k = indptr[i]; k < indptr[i + 1]; ++k) sum += edata[eid[k] * h + j / d] * x[indices[k] * d * h + j]; dgl::AtomicAdd(y + row[i] * d * h + j, sum); } } } /* * CUDA Kernel of the backward function for Source Multiply Edge Function. */ template <typename scalar_t> __global__ void vector_spmm_backward_kernel_0(const int64_t* __restrict__ row, const int64_t* __restrict__ indptr, const int64_t* __restrict__ eid, const int64_t* __restrict__ indices, const scalar_t* __restrict__ dy, const scalar_t* __restrict__ xt, scalar_t* __restrict__ dedata, const int d, const int n, const int n_row, const int h) { int i = blockIdx.x; int tx = threadIdx.x; if (i < n_row) { for (int j = indptr[i] + tx; j < indptr[i + 1]; j += blockDim.x) for (int ko = 0; ko < h; ++ko) { scalar_t sum = 0; for (int ki = 0; ki < d; ++ki) { sum += dy[(row[i] * h + ko) * d + ki] * xt[(ko * d + ki) * n + indices[j]]; } dedata[eid[j] * h + ko] = sum; } } } template <typename scalar_t> __global__ void vector_spmm_backward_kernel_1(const int64_t* __restrict__ row, const int64_t* __restrict__ indptr, const int64_t* __restrict__ eid, const int64_t* __restrict__ indices, const scalar_t* __restrict__ edata, const scalar_t* __restrict__ dy, scalar_t* __restrict__ dx, const int d, const int n_row, const int h) { int i = blockIdx.x; int tx = threadIdx.x; if (i < n_row) { for (int j = tx; j < d * h; j += blockDim.x) { scalar_t sum = 0; for (int k = indptr[i]; k < indptr[i + 1]; ++k) sum += edata[eid[k] * h + j / d] * dy[indices[k] * d * h + j]; dgl::AtomicAdd(dx + row[i] * d * h + j, sum); } } } /* * CUDA Kernel of forward function for Sparse Softmax * y = softmax(x), grouped by node. 
* indptr, eid: csr format */ template <typename scalar_t> __global__ void sparse_softmax_forward_kernel_max(const int64_t* __restrict__ row, const int64_t* __restrict__ indptr, const int64_t* __restrict__ eid, const scalar_t* __restrict__ x, scalar_t* __restrict__ max_val, const int n_row, const int h) { int i = blockIdx.x; int tx = threadIdx.x; if (i < n_row) { for (int k = indptr[i]; k < indptr[i + 1]; ++k) dgl::AtomicMax(max_val + row[i] * h + tx, x[eid[k] * h + tx]); } } template <typename scalar_t> __global__ void sparse_softmax_forward_kernel_minus_exp(const int64_t* __restrict__ row, const int64_t* __restrict__ indptr, const int64_t* __restrict__ eid, const scalar_t* __restrict__ x, const scalar_t* __restrict__ max_val, scalar_t* __restrict__ sum, scalar_t* __restrict__ y, const int n_row, const int h) { int i = blockIdx.x; int tx = threadIdx.x; if (i < n_row) { scalar_t max_v = max_val[row[i] * h + tx]; for (int k = indptr[i]; k < indptr[i + 1]; ++k) { scalar_t now = exp(x[eid[k] * h + tx] - max_v); y[eid[k] * h + tx] = now; dgl::AtomicAdd(sum + row[i] * h + tx, now); } } } template <typename scalar_t> __global__ void sparse_softmax_forward_kernel_norm(const int64_t* __restrict__ row, const int64_t* __restrict__ indptr, const int64_t* __restrict__ eid, const scalar_t* __restrict__ sum, scalar_t* __restrict__ y, const int n_row, const int h) { int i = blockIdx.x; int tx = threadIdx.x; if (i < n_row) { for (int k = indptr[i]; k < indptr[i + 1]; ++k) y[eid[k] * h + tx] /= sum[row[i] * h + tx]; } } /* * CUDA Kernel of backward function for Sparse Softmax. * indptr, eid: csr format */ template <typename scalar_t> __global__ void sparse_softmax_backward_kernel_0(const int64_t* __restrict__ row, const int64_t* __restrict__ indptr, const int64_t* __restrict__ eid, const scalar_t* __restrict__ dy, const scalar_t* __restrict__ y, scalar_t* __restrict__ aggre, const int n_row, const int h) { int i = blockIdx.x; int tx = threadIdx.x; if (i < n_row) { scalar_t sum = 0; for (int k = indptr[i]; k < indptr[i + 1]; ++k) { sum += dy[eid[k] * h + tx] * y[eid[k] * h + tx]; } dgl::AtomicAdd(aggre + row[i] * h + tx, sum); } } template <typename scalar_t> __global__ void sparse_softmax_backward_kernel_1(const int64_t* __restrict__ row, const int64_t* __restrict__ indptr, const int64_t* __restrict__ eid, const scalar_t* __restrict__ dy, const scalar_t* __restrict__ y, const scalar_t* __restrict__ aggre, scalar_t* __restrict__ dx, const int n_row, const int h) { int i = blockIdx.x; int tx = threadIdx.x; if (i < n_row) { for (int k = indptr[i]; k < indptr[i + 1]; ++k) { dx[eid[k] * h + tx] = dy[eid[k] * h + tx] * y[eid[k] * h + tx] - aggre[row[i] * h + tx] * y[eid[k] * h + tx] ; } } } } // End of namespace at::Tensor node_mul_edge_cuda_forward( const at::Tensor& row, const at::Tensor& indptr, const at::Tensor& eid, const at::Tensor& A, const at::Tensor& B) { // indptr: (n + 1); eid: (e); A: (n, d) or (n, h, d); B: (e, d); cudaSetDevice(indptr.get_device()); const auto e = eid.size(0); const auto n = row.size(0); const auto d = A.size(-1); const auto h = (A.dim() == 2) ? 1: A.size(1); auto y = (h == 1) ? 
at::zeros({e}, A.options()): at::zeros({e, h}, A.options()); const int threads = 32; const dim3 blocks(n); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES(A.type(), "node_mul_edge_cuda_forward", ([&] { node_mul_edge_forward_kernel<scalar_t><<<blocks, threads, 0, stream>>>( row.data<int64_t>(), indptr.data<int64_t>(), eid.data<int64_t>(), A.data<scalar_t>(), B.data<scalar_t>(), y.data<scalar_t>(), d, n, h); })); THCudaCheck(cudaGetLastError()); return y; } // __global__ void maskedmm_csr_forward_kernel(int64_t* __restrict__ indptr, int64_t* __restrict__ eid, int64_t* __restrict__ indices, scalar_t* __restrict__ A, scalar_t* __restrict__ B, scalar_t* __restrict__ y, int d, int n) { at::Tensor maskedmm_csr_cuda_forward( const at::Tensor& row, const at::Tensor& indptr, const at::Tensor& eid, const at::Tensor& indices, const at::Tensor& A, const at::Tensor& B) { // indptr: (n + 1); eid, indices: (e); A, B: (n, d) or (n, h, d); cudaSetDevice(indptr.get_device()); const auto e = eid.size(0); const auto n = A.size(0); const auto n_row = row.size(0); const auto d = A.size(-1); const auto h = (A.dim() == 2) ? 1: A.size(1); auto y = (h == 1) ? at::zeros({e}, A.options()): at::zeros({e, h}, A.options()); const int threads = 32; const dim3 blocks(n_row); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); auto Bt = (B.dim() == 2) ? B.transpose(0, 1).contiguous(): B.permute({1, 2, 0}).contiguous(); AT_DISPATCH_FLOATING_TYPES(A.type(), "maskedmm_csr_cuda_forward", ([&] { maskedmm_csr_forward_kernel<scalar_t><<<blocks, threads, 0, stream>>>( row.data<int64_t>(), indptr.data<int64_t>(), eid.data<int64_t>(), indices.data<int64_t>(), A.data<scalar_t>(), Bt.data<scalar_t>(), y.data<scalar_t>(), d, n, n_row, h); })); THCudaCheck(cudaGetLastError()); return y; } std::vector<at::Tensor> node_mul_edge_cuda_backward( const at::Tensor& row, const at::Tensor& indptr, const at::Tensor& eid, const at::Tensor& A, const at::Tensor& B, const at::Tensor& dy) { // indptr: (n + 1); eid: (e); dy: (e) or (e, h); A: (n, d) or (n, h, d); B: (e, d) cudaSetDevice(indptr.get_device()); const auto e = eid.size(0); const auto n = row.size(0); const auto d = A.size(-1); const auto h = (dy.dim() == 2) ? 
dy.size(1): 1; int threads = 128; const dim3 blocks(n); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); auto dA = at::zeros_like(A, A.options()); auto dB = at::zeros_like(B, B.options()); AT_DISPATCH_FLOATING_TYPES(A.type(), "node_mul_edge_cuda_backward_0", ([&] { node_mul_edge_backward_kernel_0<scalar_t><<<blocks, threads, 0, stream>>>( row.data<int64_t>(), indptr.data<int64_t>(), eid.data<int64_t>(), B.data<scalar_t>(), dy.data<scalar_t>(), dA.data<scalar_t>(), d, n, h); })); threads = d; AT_DISPATCH_FLOATING_TYPES(A.type(), "node_mul_edge_cuda_backward_1", ([&] { node_mul_edge_backward_kernel_1<scalar_t><<<blocks, threads, 0, stream>>>( row.data<int64_t>(), indptr.data<int64_t>(), eid.data<int64_t>(), A.data<scalar_t>(), dy.data<scalar_t>(), dB.data<scalar_t>(), d, n, h); })); THCudaCheck(cudaGetLastError()); return {dA, dB}; } // __global__ void maskedmm_csr_backward_kernel(int64_t* __restrict__ indptr_r, int64_t* __restrict__ eid_r, int64_t* __restrict__ indices_r, int64_t* __restrict__ indptr_c, int64_t* __restrict__ eid_c, int64_t* __restrict__ indices_c, scalar_t* __restrict__ A, scalar_t* __restrict__ B, scalar_t* __restrict__ dy, scalar_t* __restrict__ dA, scalar_t* __restrict__ dB, int d, int n) std::vector<at::Tensor> maskedmm_csr_cuda_backward( const at::Tensor& row, const at::Tensor& indptr_r, const at::Tensor& eid_r, const at::Tensor& indices_r, const at::Tensor& col, const at::Tensor& indptr_c, const at::Tensor& eid_c, const at::Tensor& indices_c, const at::Tensor& A, const at::Tensor& B, const at::Tensor& dy) { // indptr_r, indptr_c: (n + 1); eid_r, eid_c, indices_r, indices_c: (e); dy: (e) or (e, h); A, B: (n, d) or (n, h, d) cudaSetDevice(indptr_r.get_device()); const auto e = eid_r.size(0); const auto n_row = row.size(0); const auto d = A.size(-1); const auto h = (dy.dim() == 2) ? dy.size(1): 1; const int threads = 128; const dim3 blocks_row(n_row); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); auto dA = at::zeros_like(A, A.options()); auto dB = at::zeros_like(B, B.options()); AT_DISPATCH_FLOATING_TYPES(B.type(), "maskedmm_csr_cuda_backward", ([&] { maskedmm_csr_backward_kernel<scalar_t><<<blocks_row, threads, 0, stream>>>( row.data<int64_t>(), indptr_r.data<int64_t>(), eid_r.data<int64_t>(), indices_r.data<int64_t>(), B.data<scalar_t>(), dy.data<scalar_t>(), dA.data<scalar_t>(), d, n_row, h); })); THCudaCheck(cudaGetLastError()); const auto n_col = col.size(0); const dim3 blocks_col(n_col); AT_DISPATCH_FLOATING_TYPES(A.type(), "maskedmm_csr_cuda_backward", ([&] { maskedmm_csr_backward_kernel<scalar_t><<<blocks_col, threads, 0, stream>>>( col.data<int64_t>(), indptr_c.data<int64_t>(), eid_c.data<int64_t>(), indices_c.data<int64_t>(), A.data<scalar_t>(), dy.data<scalar_t>(), dB.data<scalar_t>(), d, n_col, h); })); return {dA, dB}; } at::Tensor sparse_softmax_cuda_forward( const at::Tensor& row, const at::Tensor& indptr, const at::Tensor& eid, const at::Tensor& x) { cudaSetDevice(indptr.get_device()); // indptr: (n + 1); eid: (e); x: (e) or (e, h); const auto n_row = row.size(0); const auto n = eid.size(0); // n <= e const auto h = (x.dim() == 2) ? x.size(1): 1; const dim3 threads(h); const dim3 blocks(n_row); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); auto sum = (h == 1) ? at::zeros({n}, x.options()): at::zeros({n, h}, x.options()); auto max_val = (h == 1) ? 
at::zeros({n}, x.options()): at::zeros({n, h}, x.options()); at::fill_(max_val, -1e9); const auto y = at::zeros_like(x, x.options()); AT_DISPATCH_FLOATING_TYPES(x.type(), "sparse_softmax_cuda_forward_0",([&] { sparse_softmax_forward_kernel_max<scalar_t><<<blocks, threads, 0, stream>>>( row.data<int64_t>(), indptr.data<int64_t>(), eid.data<int64_t>(), x.data<scalar_t>(), max_val.data<scalar_t>(), n_row, h); })); AT_DISPATCH_FLOATING_TYPES(x.type(), "sparse_softmax_cuda_forward_1",([&] { sparse_softmax_forward_kernel_minus_exp<scalar_t><<<blocks, threads, 0, stream>>>( row.data<int64_t>(), indptr.data<int64_t>(), eid.data<int64_t>(), x.data<scalar_t>(), max_val.data<scalar_t>(), sum.data<scalar_t>(), y.data<scalar_t>(), n_row, h); })); AT_DISPATCH_FLOATING_TYPES(x.type(), "sparse_softmax_cuda_forward_2",([&] { sparse_softmax_forward_kernel_norm<scalar_t><<<blocks, threads, 0, stream>>>( row.data<int64_t>(), indptr.data<int64_t>(), eid.data<int64_t>(), sum.data<scalar_t>(), y.data<scalar_t>(), n_row, h); })); THCudaCheck(cudaGetLastError()); return y; } at::Tensor sparse_softmax_cuda_backward( const at::Tensor& row, const at::Tensor& indptr, const at::Tensor& eid, const at::Tensor& y, const at::Tensor& dy) { cudaSetDevice(indptr.get_device()); // indptr: (n + 1); eid: (e); y: (e) or (e, h); dy: (e) or (e, h); const auto n_row = row.size(0); const auto n = eid.size(0); // n <= e const auto h = (dy.dim() == 2) ? dy.size(1): 1; const dim3 threads(h); const dim3 blocks(n_row); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); auto aggre = (h == 1) ? at::zeros({n}, dy.options()): at::zeros({n, h}, dy.options()); const auto dx = at::zeros_like(dy, dy.options()); AT_DISPATCH_FLOATING_TYPES(y.type(), "sparse_softmax_cuda_backward_0", ([&] { sparse_softmax_backward_kernel_0<scalar_t><<<blocks, threads, 0, stream>>>( row.data<int64_t>(), indptr.data<int64_t>(), eid.data<int64_t>(), dy.data<scalar_t>(), y.data<scalar_t>(), aggre.data<scalar_t>(), n_row, h); })); AT_DISPATCH_FLOATING_TYPES(y.type(), "sparse_softmax_cuda_backward_1", ([&] { sparse_softmax_backward_kernel_1<scalar_t><<<blocks, threads, 0, stream>>>( row.data<int64_t>(), indptr.data<int64_t>(), eid.data<int64_t>(), dy.data<scalar_t>(), y.data<scalar_t>(), aggre.data<scalar_t>(), dx.data<scalar_t>(), n_row, h); })); THCudaCheck(cudaGetLastError()); return dx; } at::Tensor vector_spmm_cuda_forward( const at::Tensor& row, const at::Tensor& indptr, const at::Tensor& eid, const at::Tensor& indices, const at::Tensor& edata, const at::Tensor& x) { // indptr: (n + 1); eid, indices: (e); edata: (e) or (e, h); x: (n, d) or (n, h, d); cudaSetDevice(indptr.get_device()); const auto n = row.size(0); const auto h = (edata.dim() == 2) ? 
edata.size(1): 1; const auto d = x.size(-1); const int threads = 32; const dim3 blocks(n); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); const auto y = at::zeros_like(x, x.options()); AT_DISPATCH_FLOATING_TYPES(x.type(), "vector_spmm_forward", ([&] { vector_spmm_forward_kernel<scalar_t><<<blocks, threads, 0, stream>>>( row.data<int64_t>(), indptr.data<int64_t>(), eid.data<int64_t>(), indices.data<int64_t>(), edata.data<scalar_t>(), x.data<scalar_t>(), y.data<scalar_t>(), d, n, h); })); THCudaCheck(cudaGetLastError()); return y; } std::vector<at::Tensor> vector_spmm_cuda_backward( const at::Tensor& row, const at::Tensor& indptr, const at::Tensor& eid, const at::Tensor& indices, const at::Tensor& col, const at::Tensor& indptr_t, const at::Tensor& eid_t, const at::Tensor& indices_t, const at::Tensor& edata, const at::Tensor& dy, const at::Tensor& x) { // indptr: (n + 1); eid, indices: (e); edata: (e) or (e, h); dy, x: (n, d) or (n, h, d); cudaSetDevice(indptr.get_device()); const auto n_row = row.size(0); const auto n_col = col.size(0); const auto n = x.size(0); const auto h = (edata.dim() == 2) ? edata.size(1): 1; const auto d = x.size(-1); int threads = 32; const dim3 blocks(n_row); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); const auto xt = (h == 1) ? x.transpose(0, 1).contiguous(): x.permute({1, 2, 0}).contiguous(); const auto dx = at::zeros_like(x, x.options()); const auto dedata = at::zeros_like(edata, edata.options()); AT_DISPATCH_FLOATING_TYPES(x.type(), "vector_spmm_backward_0", ([&] { vector_spmm_backward_kernel_0<scalar_t><<<blocks, threads, 0, stream>>>( row.data<int64_t>(), indptr.data<int64_t>(), eid.data<int64_t>(), indices.data<int64_t>(), dy.data<scalar_t>(), xt.data<scalar_t>(), dedata.data<scalar_t>(), d, n, n_row, h); })); threads = 128; AT_DISPATCH_FLOATING_TYPES(x.type(), "vector_spmm_backward_1", ([&] { vector_spmm_backward_kernel_1<scalar_t><<<blocks, threads, 0, stream>>>( col.data<int64_t>(), indptr_t.data<int64_t>(), eid_t.data<int64_t>(), indices_t.data<int64_t>(), edata.data<scalar_t>(), dy.data<scalar_t>(), dx.data<scalar_t>(), d, n_col, h); })); THCudaCheck(cudaGetLastError()); return {dedata, dx}; }
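/*
 * A minimal CPU reference of the three-phase sparse softmax implemented by the kernels above
 * (per-row max, exp-and-sum, normalize) over the same CSR-style indptr/eid layout, assuming a
 * single head (h == 1). The function name `sparse_softmax_reference` is illustrative only.
 */
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <vector>

void sparse_softmax_reference(const std::vector<int64_t> &indptr, // (n_row + 1)
                              const std::vector<int64_t> &eid,    // edge id per CSR slot
                              const std::vector<float> &x,        // per-edge logits
                              std::vector<float> &y)              // per-edge softmax output
{
    const std::size_t n_row = indptr.size() - 1;
    y.assign(x.size(), 0.f);
    for (std::size_t i = 0; i < n_row; ++i) {
        // Phase 1: row maximum (mirrors sparse_softmax_forward_kernel_max).
        float max_v = -1e9f;
        for (int64_t k = indptr[i]; k < indptr[i + 1]; ++k)
            max_v = std::max(max_v, x[eid[k]]);
        // Phase 2: exp(x - max) and row sum (mirrors sparse_softmax_forward_kernel_minus_exp).
        float sum = 0.f;
        for (int64_t k = indptr[i]; k < indptr[i + 1]; ++k) {
            const float e = std::exp(x[eid[k]] - max_v);
            y[eid[k]] = e;
            sum += e;
        }
        // Phase 3: normalize (mirrors sparse_softmax_forward_kernel_norm).
        for (int64_t k = indptr[i]; k < indptr[i + 1]; ++k)
            y[eid[k]] /= sum;
    }
}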
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <string.h> #include <math.h> #include <hip/hip_runtime.h> #include "options.h" #include "scoped_ptrs.h" using namespace aocl_utils; #define MANUAL_VECTOR 8 #define NUM_THREADS_PER_WG 64 #define BLOOM_1 5 #define BLOOM_2 0x7FFFF #define BLOOM_SIZE 14 #define docEndingTag 0xFFFFFFFF // Params uint block_size = 64; uint total_num_docs = 256*1024; uint total_doc_size = 0; uint total_doc_size_no_padding = 0; // Host Buffers scoped_aligned_ptr<uint> h_docWordFrequencies_dimm1; scoped_aligned_ptr<uint> h_docWordFrequencies_dimm2; scoped_aligned_ptr<ulong> h_profileWeights; scoped_aligned_ptr<ulong> h_docInfo; scoped_aligned_ptr<uint> h_isWordInProfileHash; scoped_aligned_ptr<uint> h_startingDocID; scoped_aligned_ptr<uint> h_numItemsPerThread; scoped_aligned_ptr<ulong> h_profileScore; scoped_aligned_ptr<uint> h_docSizes; static uint m_z = 1; static uint m_w = 1; static uint rand_desh() { m_z = 36969 * (m_z & 65535) + (m_z >> 16); m_w = 18000 * (m_w & 65535) + (m_w >> 16); return (m_z << 16) + m_w; } double sampleNormal() { double u = ((double) rand() / (RAND_MAX)) * 2 - 1; double v = ((double) rand() / (RAND_MAX)) * 2 - 1; double r = u * u + v * v; if (r == 0 || r > 1) return sampleNormal(); double c = sqrt(-2 * log(r) / r); return u * c; } #define DOC_LEN_SIGMA 100 #define AVG_DOC_LEN 350 uint get_doc_length() { int len = sampleNormal() * DOC_LEN_SIGMA + AVG_DOC_LEN; if (len < 10) { len = 10; } // Arbitray lower bound; return (uint) len; } // High-resolution timer. double getCurrentTimestamp() { #ifdef _WIN32 // Windows // Use the high-resolution performance counter. static LARGE_INTEGER ticks_per_second = {}; if(ticks_per_second.QuadPart == 0) { // First call - get the frequency. QueryPerformanceFrequency(&ticks_per_second); } LARGE_INTEGER counter; QueryPerformanceCounter(&counter); double seconds = double(counter.QuadPart) / double(ticks_per_second.QuadPart); return seconds; #else // Linux timespec a; clock_gettime(CLOCK_MONOTONIC, &a); return (double(a.tv_nsec) * 1.0e-9) + double(a.tv_sec); #endif } void setupData() { h_startingDocID.reset( total_num_docs ); h_numItemsPerThread.reset( total_num_docs ); h_profileScore.reset( total_num_docs ); h_docInfo.reset( total_num_docs ); h_docSizes.reset( total_num_docs ); total_doc_size = 0; total_doc_size_no_padding = 0; for (uint i=0; i<total_num_docs; i++) { uint unpadded_size = get_doc_length(); uint size = unpadded_size & (~(2*block_size-1)); if (unpadded_size & ((2*block_size-1))) size += 2*block_size; // Multiple of block_size h_startingDocID[i] = total_doc_size/2; h_numItemsPerThread[i] = size / (2*block_size); ulong start_line = total_doc_size / (2*block_size); ulong end_line = start_line + size / (2*block_size) - 1; total_doc_size += size; total_doc_size_no_padding += unpadded_size; h_docSizes[i] = unpadded_size; h_profileScore[i] = -1; h_docInfo[i] = (start_line << 32) | end_line; } h_isWordInProfileHash.reset( (1L << BLOOM_SIZE) ); h_docWordFrequencies_dimm1.reset( total_doc_size/2 ); h_docWordFrequencies_dimm2.reset( total_doc_size/2 ); printf("Creating Documents total_terms=%d (no_pad=%d)\n", total_doc_size, total_doc_size_no_padding); for (uint i=0; i<total_doc_size/2; i++) { h_docWordFrequencies_dimm1[i] = docEndingTag; h_docWordFrequencies_dimm2[i] = docEndingTag; } for (uint doci=0; doci < total_num_docs; doci++) { uint start = h_startingDocID[doci]; uint size = h_docSizes[doci]; for (uint i = 0; i < size/2; i++) { uint term = (rand_desh()%((1L << 24)-1)); uint freq = 
(rand_desh()%254)+1; h_docWordFrequencies_dimm1[start + i] = (term << 8) | freq; term = (rand_desh()%((1L << 24)-1)); freq = (rand_desh()%254)+1; h_docWordFrequencies_dimm2[start + i] = (term << 8) | freq; } if (size%2) { uint term = (rand_desh()%((1L << 24)-1)); uint freq = (rand_desh()%254)+1; h_docWordFrequencies_dimm1[start + size/2] = (term << 8) | freq; } } h_profileWeights.reset( (1L << 24) ); for (uint i=0; i<(1L << BLOOM_SIZE); i++) { h_isWordInProfileHash[i] = 0x0; } printf("Creating Profile\n"); for (uint i=0; i<(1L << 24); i++) { h_profileWeights[i] = 0; } for (uint i=0; i<16384; i++) { uint entry = (rand_desh()%(1<<24)); h_profileWeights[entry] = 10; uint hash1 = entry >> BLOOM_1; //this gives me the top 16 bits of the 24bit word id h_isWordInProfileHash[ hash1 >> 5 ] |= 1 << (hash1 & 0x1f); uint hash2 = entry & BLOOM_2; //this gives me the bottom 16 bits of the 24bit word id h_isWordInProfileHash[ hash2 >> 5 ] |= 1 << (hash2 & 0x1f); } } void runOnCPU() { // go through each document in turn, and compute the score scoped_aligned_ptr<ulong> cpu_profileScore; cpu_profileScore.reset( total_num_docs ); uint total = 0; uint falsies = 0; for (uint doci=0; doci < total_num_docs; doci++) { cpu_profileScore[doci] = 0.0; uint start = h_startingDocID[doci]; uint size = h_docSizes[doci]; for (uint i = 0; i < size/2 + (size%2); i++) { uint curr_entry = h_docWordFrequencies_dimm1[start + i]; uint frequency = curr_entry & 0x00ff; uint word_id = curr_entry >> 8; uint hash1 = word_id >> BLOOM_1; //this gives me the top 16 bits of the 24bit word id bool inh1 = h_isWordInProfileHash[ hash1 >> 5 ] & ( 1 << (hash1 & 0x1f)); uint hash2 = word_id & BLOOM_2; //this gives me the bottom 16 bits of the 24bit word id bool inh2 = h_isWordInProfileHash[ hash2 >> 5 ] & ( 1 << (hash2 & 0x1f)); if (inh1 && inh2) { total++; if (h_profileWeights[word_id] == 0) falsies++; cpu_profileScore[doci] += h_profileWeights[word_id] * (ulong)frequency; } } for (uint i = 0; i < size/2; i++) { uint curr_entry = h_docWordFrequencies_dimm2[start + i]; uint frequency = curr_entry & 0x00ff; uint word_id = curr_entry >> 8; uint hash1 = word_id >> BLOOM_1; //this gives me the top 16 bits of the 24bit word id bool inh1 = h_isWordInProfileHash[ hash1 >> 5 ] & ( 1 << (hash1 & 0x1f)); uint hash2 = word_id & BLOOM_2; //this gives me the bottom 16 bits of the 24bit word id bool inh2 = h_isWordInProfileHash[ hash2 >> 5 ] & ( 1 << (hash2 & 0x1f)); if (inh1 && inh2) { total++; if (h_profileWeights[word_id] == 0) falsies++; cpu_profileScore[doci] += h_profileWeights[word_id] * (ulong)frequency; } } } printf( "total_access = %d , falsies = %d, percentage = %f hit= %g\n", \ total, falsies, total * 1.0f / total_doc_size, (total-falsies)*1.0f/total_doc_size ); // compare the final scores for (uint doci = 0; doci < total_num_docs; doci++) { if (cpu_profileScore[doci] != h_profileScore[doci]) { printf("FAILED\n : doc[%d] score: CPU = %lu, Device = %lu\n", \ doci, cpu_profileScore[doci], h_profileScore[doci]); return; } } printf( "Verification: PASS\n" ); } __device__ ulong mulfp( ulong weight, uint freq ) { uint part1 = weight & 0xFFFFF; // lower 24-bits of weight uint part2 = (weight >> 24) & 0xFFFF; // next 16-bits uint res1 = part1 * freq; uint res2 = part2 * freq; return (ulong)res1 + (((ulong)res2) << 24); } __global__ void compute ( const uint* docWordFrequencies_dimm1, const uint* docWordFrequencies_dimm2, const ulong* profileWeights_dimm1, const ulong* profileWeights_dimm2, const uint* isWordInProfileHash, uint* 
profileScorePerGroup_highbits_dimm1, uint* profileScorePerGroup_lowbits_dimm2 ) { uint curr_entry[MANUAL_VECTOR]; uint word_id[MANUAL_VECTOR]; uint freq[MANUAL_VECTOR]; uint hash1[MANUAL_VECTOR]; uint hash2[MANUAL_VECTOR]; bool is_end[MANUAL_VECTOR]; bool make_access[MANUAL_VECTOR]; __shared__ ulong partial[NUM_THREADS_PER_WG/MANUAL_VECTOR]; int gid = blockIdx.x * blockDim.x + threadIdx.x; ulong sum = 0; //#pragma unroll for (uint i=0; i<MANUAL_VECTOR; i++) { curr_entry[i] = docWordFrequencies_dimm1[gid*MANUAL_VECTOR + i]; freq[i] = curr_entry[i] & 0xff; word_id[i] = curr_entry[i] >> 8; is_end[i] = curr_entry[i] == docEndingTag; hash1[i] = word_id[i] >> BLOOM_1; hash2[i] = word_id[i] & BLOOM_2; make_access[i] = !is_end[i] && ((isWordInProfileHash[ hash1[i] >> 5 ] >> (hash1[i] & 0x1f)) & 0x1) && ((isWordInProfileHash[ hash2[i] >> 5 ] >> (hash2[i] & 0x1f)) & 0x1); if (make_access[i]) { sum += mulfp(profileWeights_dimm1[word_id[i]],freq[i]); } } //#pragma unroll for (uint i=0; i<MANUAL_VECTOR; i++) { curr_entry[i] = docWordFrequencies_dimm2[gid*MANUAL_VECTOR + i]; freq[i] = curr_entry[i] & 0xff; word_id[i] = curr_entry[i] >> 8; is_end[i] = curr_entry[i] == docEndingTag; hash1[i] = word_id[i] >> BLOOM_1; hash2[i] = word_id[i] & BLOOM_2; make_access[i] = !is_end[i] && ((isWordInProfileHash[ hash1[i] >> 5 ] >> (hash1[i] & 0x1f)) & 0x1) && ((isWordInProfileHash[ hash2[i] >> 5 ] >> (hash2[i] & 0x1f)) & 0x1); if (make_access[i]) { sum += mulfp(profileWeights_dimm2[word_id[i]],freq[i]); } } partial[threadIdx.x] = sum; __syncthreads(); if (threadIdx.x == 0) { ulong4 res= *(ulong4*)&partial[0]; ulong4 res2= *(ulong4*)&partial[4]; ulong final_result = res.x + res.y + res.z + res.w + res2.x + res2.y + res2.z + res2.w; profileScorePerGroup_highbits_dimm1[blockIdx.x] = (uint) (final_result >> 32); profileScorePerGroup_lowbits_dimm2[blockIdx.x] = (uint) (final_result & 0xFFFFFFFF); } } __global__ void reduction( const ulong* docInfo, const uint* partial_highbits_dimm1, const uint* partial_lowbits_dimm2, ulong* result) { int gid = blockIdx.x * blockDim.x + threadIdx.x; ulong info = docInfo[gid]; unsigned start = info >> 32; unsigned end = info & 0xFFFFFFFF; ulong total = 0; #pragma unroll 2 for (unsigned i=start; i<=end; i++) { ulong upper = partial_highbits_dimm1[i]; ulong lower = partial_lowbits_dimm2[i]; ulong sum = (upper << 32) | lower; total += sum; } result[gid] = total; } int main(int argc, char** argv) { Options options(argc, argv); // Optional argument to specify the problem size. 
if(options.has("n")) { total_num_docs = options.get<uint>("n"); } printf("Total number of documents: %u\n", total_num_docs); srand(2); printf("RAND_MAX: %d\n", RAND_MAX); printf("Allocating and setting up data\n"); setupData(); printf("Setting up HIP\n"); size_t local_size = (block_size / MANUAL_VECTOR); size_t global_size = total_doc_size / 2 / MANUAL_VECTOR / local_size; size_t local_size_reduction = block_size; size_t global_size_reduction = total_num_docs / block_size; uint* d_docWordFrequencies_dimm1; uint* d_docWordFrequencies_dimm2; uint* d_partialSums_dimm1; uint* d_partialSums_dimm2; ulong* d_profileWeights_dimm1; ulong* d_profileWeights_dimm2; uint* d_isWordInProfileHash; ulong* d_docInfo; ulong* d_profileScore; hipMalloc((void**)&d_docWordFrequencies_dimm1, sizeof(uint) * total_doc_size/2); hipMalloc((void**)&d_docWordFrequencies_dimm2, sizeof(uint) * total_doc_size/2); hipMalloc((void**)&d_partialSums_dimm1, sizeof(uint) * total_doc_size/(2*block_size)); hipMalloc((void**)&d_partialSums_dimm2, sizeof(uint) * total_doc_size/(2*block_size)); hipMalloc((void**)&d_profileWeights_dimm1, sizeof(ulong) * (1L << 24)); hipMalloc((void**)&d_profileWeights_dimm2, sizeof(ulong) * (1L << 24)); hipMalloc((void**)&d_isWordInProfileHash, sizeof(uint) * (1L << BLOOM_SIZE)); hipMalloc((void**)&d_docInfo, sizeof(ulong) * total_num_docs); hipMalloc((void**)&d_profileScore, sizeof(ulong) * total_num_docs); hipMemcpy(d_docWordFrequencies_dimm1, h_docWordFrequencies_dimm1, sizeof(uint) * total_doc_size/2, hipMemcpyHostToDevice); hipMemcpy(d_docWordFrequencies_dimm2, h_docWordFrequencies_dimm2, sizeof(uint) * total_doc_size/2, hipMemcpyHostToDevice); hipMemcpy(d_profileWeights_dimm1, h_profileWeights, sizeof(ulong) * (1L << 24), hipMemcpyHostToDevice); hipMemcpy(d_profileWeights_dimm2, h_profileWeights, sizeof(ulong) * (1L << 24), hipMemcpyHostToDevice); hipMemcpy(d_isWordInProfileHash, h_isWordInProfileHash, sizeof(uint) * (1L << BLOOM_SIZE), hipMemcpyHostToDevice); hipMemcpy(d_docInfo, h_docInfo, sizeof(ulong) * total_num_docs, hipMemcpyHostToDevice); const double start_time = getCurrentTimestamp(); for (int i=0; i<100; i++) { hipLaunchKernelGGL(compute, global_size, local_size, 0, 0, d_docWordFrequencies_dimm1, d_docWordFrequencies_dimm2, d_profileWeights_dimm1, d_profileWeights_dimm2, d_isWordInProfileHash, d_partialSums_dimm1, d_partialSums_dimm2); hipLaunchKernelGGL(reduction, global_size_reduction, local_size_reduction, 0, 0, d_docInfo, d_partialSums_dimm1, d_partialSums_dimm2, d_profileScore); } hipDeviceSynchronize(); const double end_time = getCurrentTimestamp(); double kernelExecutionTime = (end_time - start_time)/100; printf("======================================================\n"); printf("Kernel Time = %f ms (averaged over 100 times)\n", kernelExecutionTime * 1000.0f ); printf("Throughput = %f\n", total_doc_size_no_padding / kernelExecutionTime / 1.0e+6f ); hipMemcpy(h_profileScore, d_profileScore, sizeof(ulong) * total_num_docs, hipMemcpyDeviceToHost); hipFree(d_docWordFrequencies_dimm1); hipFree(d_docWordFrequencies_dimm2); hipFree(d_partialSums_dimm1); hipFree(d_partialSums_dimm2); hipFree(d_profileWeights_dimm1); hipFree(d_profileWeights_dimm2); hipFree(d_isWordInProfileHash); hipFree(d_docInfo); hipFree(d_profileScore); printf("Done\n"); runOnCPU(); }
typedef std::chrono::high_resolution_clock::time_point TimePoint; struct Params { size_t natlig; size_t natpro; size_t ntypes; size_t nposes; std::vector<Atom> protein; std::vector<Atom> ligand; std::vector<FFParams> forcefield; std::array<std::vector<float>, 6> poses; size_t iterations; // XXX bring this back once all SYCL implementations implement 2020 spec // size_t posesPerWI; size_t wgSize; std::string deckDir; friend std::ostream &operator<<(std::ostream &os, const Params &params) { os << "natlig: " << params.natlig << "\n" << "natpro: " << params.natpro << "\n" << "ntypes: " << params.ntypes << "\n" << "nposes: " << params.nposes << "\n" << "iterations: " << params.iterations << "\n" << "posesPerWI: " << NUM_TD_PER_THREAD << "\n" << "wgSize: " << params.wgSize << "\n"; return os; } }; __global__ void fasten_main( // size_t posesPerWI, const size_t ntypes, const size_t nposes, const size_t natlig, const size_t natpro, const Atom *__restrict protein_molecule, const Atom *__restrict ligand_molecule, const float *__restrict transforms_0, const float *__restrict transforms_1, const float *__restrict transforms_2, const float *__restrict transforms_3, const float *__restrict transforms_4, const float *__restrict transforms_5, const FFParams *__restrict forcefield, float *__restrict etotals); double elapsedMillis( const TimePoint &start, const TimePoint &end){ auto elapsedNs = static_cast<double>( std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count()); return elapsedNs * 1e-6; } void printTimings(const Params &params, double millis) { // Average time per iteration double ms = (millis / params.iterations); double runtime = ms * 1e-3; // Compute FLOP/s double ops_per_wg = NUM_TD_PER_THREAD * 27 + params.natlig * (3 + NUM_TD_PER_THREAD * 18 + params.natpro * (11 + NUM_TD_PER_THREAD * 30)) + NUM_TD_PER_THREAD; double total_ops = ops_per_wg * ((double) params.nposes / NUM_TD_PER_THREAD); double flops = total_ops / runtime; double gflops = flops / 1e9; double interactions = (double) params.nposes * (double) params.natlig * (double) params.natpro; double interactions_per_sec = interactions / runtime; // Print stats std::cout.precision(3); std::cout << std::fixed; std::cout << "- Kernel time: " << (millis) << " ms\n"; std::cout << "- Average time: " << ms << " ms\n"; std::cout << "- Interactions/s: " << (interactions_per_sec / 1e9) << " billion\n"; std::cout << "- GFLOP/s: " << gflops << "\n"; } template<typename T> std::vector<T> readNStruct(const std::string &path) { std::fstream s(path, std::ios::binary | std::ios::in); if (!s.good()) { throw std::invalid_argument("Bad file: " + path); } s.ignore(std::numeric_limits<std::streamsize>::max()); auto len = s.gcount(); s.clear(); s.seekg(0, std::ios::beg); std::vector<T> xs(len / sizeof(T)); s.read(reinterpret_cast<char *>(xs.data()), len); s.close(); return xs; } Params loadParameters(const std::vector<std::string> &args) { Params params = {}; // Defaults params.iterations = DEFAULT_ITERS; params.nposes = DEFAULT_NPOSES; params.wgSize = DEFAULT_WGSIZE; params.deckDir = DATA_DIR; // params.posesPerWI = DEFAULT_PPWI; const auto readParam = [&args](size_t &current, const std::string &arg, const std::initializer_list<std::string> &matches, const std::function<void(std::string)> &handle) { if (matches.size() == 0) return false; if (std::find(matches.begin(), matches.end(), arg) != matches.end()) { if (current + 1 < args.size()) { current++; handle(args[current]); } else { std::cerr << "["; for (const auto &m : matches) std::cerr << 
m; std::cerr << "] specified but no value was given" << std::endl; std::exit(EXIT_FAILURE); } return true; } return false; }; const auto bindInt = [](const std::string &param, size_t &dest, const std::string &name) { try { auto parsed = std::stol(param); if (parsed < 0) { std::cerr << "positive integer required for <" << name << ">: `" << parsed << "`" << std::endl; std::exit(EXIT_FAILURE); } dest = parsed; } catch (...) { std::cerr << "malformed value, integer required for <" << name << ">: `" << param << "`" << std::endl; std::exit(EXIT_FAILURE); } }; for (size_t i = 0; i < args.size(); ++i) { using namespace std::placeholders; const auto arg = args[i]; if (readParam(i, arg, {"--iterations", "-i"}, std::bind(bindInt, _1, std::ref(params.iterations), "iterations"))) continue; if (readParam(i, arg, {"--numposes", "-n"}, std::bind(bindInt, _1, std::ref(params.nposes), "numposes"))) continue; // if (readParam(i, arg, {"--posesperwi", "-p"}, std::bind(bindInt, _1, std::ref(params.posesPerWI), "posesperwi"))) continue; if (readParam(i, arg, {"--wgsize", "-w"}, std::bind(bindInt, _1, std::ref(params.wgSize), "wgsize"))) continue; if (readParam(i, arg, {"--deck"}, [&](const std::string &param) { params.deckDir = param; })) continue; if (arg == "--help" || arg == "-h") { std::cout << "\n"; std::cout << "Usage: ./main [OPTIONS]\n\n" << "Options:\n" << " -h --help Print this message\n" << " -i --iterations I Repeat kernel I times (default: " << DEFAULT_ITERS << ")\n" << " -n --numposes N Compute energies for N poses (default: " << DEFAULT_NPOSES << ")\n" // << " -p --poserperwi PPWI Compute PPWI poses per work-item (default: " << DEFAULT_PPWI << ")\n" << " -w --wgsize WGSIZE Run with work-group size WGSIZE using nd_range, set to 0 for plain range (default: " << DEFAULT_WGSIZE << ")\n" << " --deck DECK Use the DECK directory as input deck (default: " << DATA_DIR << ")" << std::endl; std::exit(EXIT_SUCCESS); } std::cout << "Unrecognized argument '" << arg << "' (try '--help')" << std::endl; std::exit(EXIT_FAILURE); } params.ligand = readNStruct<Atom>(params.deckDir + FILE_LIGAND); params.natlig = params.ligand.size(); params.protein = readNStruct<Atom>(params.deckDir + FILE_PROTEIN); params.natpro = params.protein.size(); params.forcefield = readNStruct<FFParams>(params.deckDir + FILE_FORCEFIELD); params.ntypes = params.forcefield.size(); auto poses = readNStruct<float>(params.deckDir + FILE_POSES); if (poses.size() / 6 != params.nposes) { throw std::invalid_argument("Bad poses: " + std::to_string(poses.size())); } for (size_t i = 0; i < 6; ++i) { params.poses[i].resize(params.nposes); std::copy( std::next(poses.cbegin(), i * params.nposes), std::next(poses.cbegin(), i * params.nposes + params.nposes), params.poses[i].begin()); } return params; } std::vector<float> runKernel(Params params) { std::vector<float> energies(params.nposes); Atom *protein; hipMalloc((void**)&protein, params.natpro*sizeof(Atom)); hipMemcpy(protein, params.protein.data(), params.natpro*sizeof(Atom), hipMemcpyHostToDevice); Atom *ligand; hipMalloc((void**)&ligand, params.natlig*sizeof(Atom)); hipMemcpy(ligand, params.ligand.data(), params.natlig*sizeof(Atom), hipMemcpyHostToDevice); float *transforms_0; hipMalloc((void**)&transforms_0, params.nposes*sizeof(float)); hipMemcpy(transforms_0, params.poses[0].data(), params.nposes*sizeof(float), hipMemcpyHostToDevice); float *transforms_1; hipMalloc((void**)&transforms_1, params.nposes*sizeof(float)); hipMemcpy(transforms_1, params.poses[1].data(), params.nposes*sizeof(float), 
hipMemcpyHostToDevice); float *transforms_2; hipMalloc((void**)&transforms_2, params.nposes*sizeof(float)); hipMemcpy(transforms_2, params.poses[2].data(), params.nposes*sizeof(float), hipMemcpyHostToDevice); float *transforms_3; hipMalloc((void**)&transforms_3, params.nposes*sizeof(float)); hipMemcpy(transforms_3, params.poses[3].data(), params.nposes*sizeof(float), hipMemcpyHostToDevice); float *transforms_4; hipMalloc((void**)&transforms_4, params.nposes*sizeof(float)); hipMemcpy(transforms_4, params.poses[4].data(), params.nposes*sizeof(float), hipMemcpyHostToDevice); float *transforms_5; hipMalloc((void**)&transforms_5, params.nposes*sizeof(float)); hipMemcpy(transforms_5, params.poses[5].data(), params.nposes*sizeof(float), hipMemcpyHostToDevice); FFParams *forcefield; hipMalloc((void**)&forcefield, params.ntypes*sizeof(FFParams)); hipMemcpy(forcefield, params.forcefield.data(), params.ntypes*sizeof(FFParams), hipMemcpyHostToDevice); float *results; hipMalloc((void**)&results, params.nposes*sizeof(float)); size_t global = ceil((params.nposes) / static_cast<double> (NUM_TD_PER_THREAD)); global = ceil(static_cast<double> (global) / params.wgSize); dim3 grid (global); dim3 block (params.wgSize); // warmup hipLaunchKernelGGL(fasten_main, dim3(grid), dim3(block), params.ntypes * sizeof(FFParams) , 0, params.ntypes, params.nposes, params.natlig, params.natpro, protein, ligand, transforms_0, transforms_1, transforms_2, transforms_3, transforms_4, transforms_5, forcefield, results); auto kernelStart = std::chrono::high_resolution_clock::now(); for (size_t i = 0; i < params.iterations; ++i) { hipLaunchKernelGGL(fasten_main, dim3(grid), dim3(block), params.ntypes * sizeof(FFParams) , 0, params.ntypes, params.nposes, params.natlig, params.natpro, protein, ligand, transforms_0, transforms_1, transforms_2, transforms_3, transforms_4, transforms_5, forcefield, results); } hipDeviceSynchronize(); auto kernelEnd = std::chrono::high_resolution_clock::now(); hipMemcpy(energies.data(), results, params.nposes*sizeof(float), hipMemcpyDeviceToHost); printTimings(params, elapsedMillis(kernelStart, kernelEnd)); hipFree(protein); hipFree(ligand); hipFree(transforms_0); hipFree(transforms_1); hipFree(transforms_2); hipFree(transforms_3); hipFree(transforms_4); hipFree(transforms_5); hipFree(forcefield); hipFree(results); return energies; } int main(int argc, char *argv[]) { auto args = std::vector<std::string>(argv + 1, argv + argc); auto params = loadParameters(args); std::cout << "Poses : " << params.nposes << std::endl; std::cout << "Iterations: " << params.iterations << std::endl; std::cout << "Ligands : " << params.natlig << std::endl; std::cout << "Proteins : " << params.natpro << std::endl; std::cout << "Deck : " << params.deckDir << std::endl; std::cout << "WG : " << params.wgSize << std::endl; auto energies = runKernel(params); #ifdef DUMP // Keep the output format consistent with the original version FILE *output = fopen("result.out", "w+"); printf("\nEnergies\n"); for (size_t i = 0; i < params.nposes; i++) { fprintf(output, "%7.2f\n", energies[i]); if (i < 16) printf("%7.2f\n", energies[i]); } fclose(output); #endif // Validate energies std::ifstream refEnergies(params.deckDir + FILE_REF_ENERGIES); size_t nRefPoses = params.nposes; if (params.nposes > REF_NPOSES) { std::cout << "Only validating the first " << REF_NPOSES << " poses.\n"; nRefPoses = REF_NPOSES; } std::string line; float maxdiff = 0.0f; for (size_t i = 0; i < nRefPoses; i++) { if (!std::getline(refEnergies, line)) { throw 
std::logic_error("ran out of ref energies lines to verify"); } float e = std::stof(line); if (std::fabs(e) < 1.f && std::fabs(energies[i]) < 1.f) continue; float diff = std::fabs(e - energies[i]) / e; if (diff > maxdiff) maxdiff = diff; } std::cout << "Largest difference was " << std::setprecision(3) << (100 * maxdiff) << "%.\n\n"; // Expect numbers to be accurate to 2 decimal places refEnergies.close(); return 0; }
#include "trt_engine/trt_network_crt/plugins/adaptive_pooling_plugin/adaptive_pooling_plugin.h" #include <cuda_fp16.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <algorithm> #include <cstdint> #include <limits> #include "trt_engine/trt_network_crt/plugins/common/half_ext.cuh" #include "trt_engine/trt_network_crt/plugins/common/trt_tensor_info.h" FWD_TRT_NAMESPACE_BEGIN __device__ inline int start_index(int a, int b, int c) { return (int)floorf((float)(a * c) / b); } __device__ inline int end_index(int a, int b, int c) { return (int)ceilf((float)((a + 1) * c) / b); } // 4d tensor B x D x H x W /* * Description: * this function adaptively maxpools an input 4D tensor along dimensions 2 * and 3 4D input, 4D output, 4D argmax x and y */ template <typename T> __global__ void adaptive_max_pool2d(const T *input, T *output, int isizeH, int isizeW, int osizeH, int osizeW, int64_t istrideD, int64_t istrideH, int64_t istrideW) { // iterators int oh, ow; // compute offsets based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; int ostartW = threadIdx.x; int oendW = osizeW; const int ostepW = blockDim.x; int ostartH = blockDim.y * blockIdx.y + threadIdx.y; int oendH = osizeH; const int ostepH = blockDim.y * gridDim.y; // select input/output plane output = output + o_plane * osizeH * osizeW; input = input + i_plane * istrideD; // For all output pixels... for (oh = ostartH; oh < oendH; oh += ostepH) { int istartH = start_index(oh, osizeH, isizeH); int iendH = end_index(oh, osizeH, isizeH); int kH = iendH - istartH; for (ow = ostartW; ow < oendW; ow += ostepW) { int istartW = start_index(ow, osizeW, isizeW); int iendW = end_index(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the mean of the input image... const T *ptr_input = input + istartH * istrideH + istartW * istrideW; T *ptr_output = output + oh * osizeW + ow; T max = NumericLimits<T>::lowest(); int ih, iw; for (ih = 0; ih < kH; ih++) { for (iw = 0; iw < kW; iw++) { T val = ptr_input[iw * istrideW]; if ((val > max) || isnan(val)) { max = val; } } ptr_input += istrideH; // next input line } // Update output and argmax *ptr_output = max; } } } // 5d tensor B x D x T x H x W /* * Description: * this function adaptively maxpools an input 4D tensor along dimensions 2 * and 3 4D input, 4D output, 4D argmax x and y */ template <typename T> __global__ void adaptive_max_pool3d(const T *input, T *output, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW, int64_t istrideD, int64_t istrideT, int64_t istrideH, int64_t istrideW, int64_t offsetZ) { // iterators on output pixels int ot, oh, ow; // compute offsets based on thread/block ID int ostartH = blockIdx.y * blockDim.y + threadIdx.y; int oendH = osizeH; int ostepH = gridDim.y * blockDim.y; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; // select output plane int64_t o_plane = blockIdx.x + offsetZ; ot = o_plane % osizeT; // output frame/time int d = o_plane / osizeT; // slice/feature // input frame/time ramge is fixed. int istartT = start_index(ot, osizeT, isizeT); int iendT = end_index(ot, osizeT, isizeT); int kT = iendT - istartT; // input offset by slice/feature and earliest relevant frame/time const T *input_dt = input + d * istrideD + istartT * istrideT; // output offset by slice/feature and frame/time T *output_dt = output + o_plane * osizeH * osizeW; // For all output pixels... 
for (oh = ostartH; oh < oendH; oh += ostepH) { int istartH = start_index(oh, osizeH, isizeH); int iendH = end_index(oh, osizeH, isizeH); int kH = iendH - istartH; for (ow = ostartW; ow < oendW; ow += ostepW) { int istartW = start_index(ow, osizeW, isizeW); int iendW = end_index(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the average pooling from corresponding input pixels const T *ptr_input = input_dt + istartH * istrideH + istartW * istrideW; T *ptr_output = output_dt + oh * osizeW + ow; T max = NumericLimits<T>::lowest(); int it, ih, iw; for (it = 0; it < kT; ++it) { for (ih = 0; ih < kH; ++ih) { for (iw = 0; iw < kW; ++iw) { T val = ptr_input[ih * istrideH + iw * istrideW]; if ((val > max) || isnan(val)) { max = val; } } } ptr_input += istrideT; // next input frame } // Update output *ptr_output = max; } } } // 4d tensor B x D x H x W // All kernels view batch dim B and feature dim D as collapsed. /* * Description: * this function adaptively average pools an input 4D tensor along dimensions * 2 and 3 4D input, 4D output */ template <typename T> __global__ void adaptive_average_pool2d(const T *input, T *output, int isizeH, int isizeW, int osizeH, int osizeW, int64_t istrideD, int64_t istrideH, int64_t istrideW) { // iterators on output pixels int oh, ow; // select input/output plane based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; output = output + o_plane * osizeH * osizeW; input = input + i_plane * istrideD; int ostartH = blockDim.y * blockIdx.y + threadIdx.y; int oendH = osizeH; const int ostepH = blockDim.y * gridDim.y; int ostartW = threadIdx.x; int oendW = osizeW; const int ostepW = blockDim.x; // For all output pixels... for (oh = ostartH; oh < oendH; oh += ostepH) { int istartH = start_index(oh, osizeH, isizeH); int iendH = end_index(oh, osizeH, isizeH); int kH = iendH - istartH; for (ow = ostartW; ow < oendW; ow += ostepW) { int istartW = start_index(ow, osizeW, isizeW); int iendW = end_index(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the average pooling over corresponding input pixels const T *ptr_input = input + istartH * istrideH + istartW * istrideW; T *ptr_output = output + oh * osizeW + ow; T sum = 0; int ih, iw; for (ih = 0; ih < kH; ++ih) { for (iw = 0; iw < kW; ++iw) { T val = ptr_input[iw * istrideW]; sum += val; } ptr_input += istrideH; // next input line } // Update output *ptr_output = sum / kH / kW; } } } // 5d tensor B x D x T x H x W // All kernels view batch dim B and dim D as collapsed. /* * Description: * this function adaptively average pools an input 5D tensor along dimensions * 2, 3, and 4 5D input, 5D output * * gridDim.y blocks work together on a single 2D output plane specified by * (blockIdx.x + offsetZ). */ template <typename T> __global__ void adaptive_average_pool3d(const T *input, T *output, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW, int64_t istrideD, int64_t istrideT, int64_t istrideH, int64_t istrideW, int64_t offsetZ) { // iterates on output pixels int ot, oh, ow; // compute offsets based on thread/block ID int ostartH = blockIdx.y * blockDim.y + threadIdx.y; int oendH = osizeH; int ostepH = gridDim.y * blockDim.y; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; // select output plane int64_t o_plane = blockIdx.x + offsetZ; ot = o_plane % osizeT; // output frame/time int d = o_plane / osizeT; // slice/feature // input frame/time range is fixed. 
int istartT = start_index(ot, osizeT, isizeT); int iendT = end_index(ot, osizeT, isizeT); int kT = iendT - istartT; // input offset by slice/feature and earliest relevant frame/time const T *input_dt = input + d * istrideD + istartT * istrideT; // output offset by slice/feature and frame/time T *output_dt = output + o_plane * osizeH * osizeW; // For all output pixels... for (oh = ostartH; oh < oendH; oh += ostepH) { int istartH = start_index(oh, osizeH, isizeH); int iendH = end_index(oh, osizeH, isizeH); int kH = iendH - istartH; for (ow = ostartW; ow < oendW; ow += ostepW) { int istartW = start_index(ow, osizeW, isizeW); int iendW = end_index(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the average pooling from corresponding input pixels const T *ptr_input = input_dt + istartH * istrideH + istartW * istrideW; T *ptr_output = output_dt + oh * osizeW + ow; T sum = 0; int it, ih, iw; for (it = 0; it < kT; ++it) { for (ih = 0; ih < kH; ++ih) { for (iw = 0; iw < kW; ++iw) { T val = ptr_input[ih * istrideH + iw * istrideW]; sum += val; } } ptr_input += istrideT; // next input frame } // Update output *ptr_output = sum / kT / kH / kW; } } } // TODO: �� kernel ʵ�ֽ����Ż� template <typename T> void AdaptivePooling2DCuda(const TensorInfo<T> &input, TensorInfo<T> &output, const std::vector<int> &output_size, PoolingOperation type, cudaStream_t stream) { const int osizeH = output_size[0]; const int osizeW = output_size[1]; const int sizeB = input.Size(0); const int sizeD = input.Size(1); const int isizeH = input.Size(2); const int isizeW = input.Size(3); const int64_t istrideD = input.Stride(1); const int64_t istrideH = input.Stride(2); const int64_t istrideW = input.Stride(3); // cuda blocks & threads: int blocksH = std::max(16 / sizeD, 1); dim3 blocks(sizeB * sizeD, blocksH); dim3 threads(32, 8); if (type == PoolingOperation::MAX_POOLING) { adaptive_max_pool2d<T><<<blocks, threads, 0, stream>>>(input.DataPtr(), output.DataPtr(), isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); } else { adaptive_average_pool2d<T><<<blocks, threads, 0, stream>>>(input.DataPtr(), output.DataPtr(), isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); } } template <typename T> void AdaptivePooling3DCuda(const TensorInfo<T> &input, TensorInfo<T> &output, const std::vector<int> &output_size, PoolingOperation type, cudaStream_t stream) { const int osizeT = output_size[0]; const int osizeH = output_size[1]; const int osizeW = output_size[2]; const int sizeB = input.Size(0); const int sizeD = input.Size(1); const int isizeT = input.Size(2); const int isizeH = input.Size(3); const int isizeW = input.Size(4); const int64_t istrideD = input.Stride(1); const int64_t istrideT = input.Stride(2); const int64_t istrideH = input.Stride(3); const int64_t istrideW = input.Stride(4); int64_t totalZ = sizeB * sizeD * osizeT; int64_t offsetZ = 0; dim3 threads(32, 8); // each H*W plane is processed by blocksH thread blocks int blocksH = std::max(static_cast<int>(16L / totalZ), 1); while (totalZ > 0) { dim3 blocks(totalZ > 65535 ? 
65535 : totalZ, blocksH); if (type == PoolingOperation::MAX_POOLING) { adaptive_max_pool3d<T><<<blocks, threads, 0, stream>>>( input.DataPtr(), output.DataPtr(), isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, istrideD, istrideT, istrideH, istrideW, offsetZ); } else { adaptive_average_pool3d<T><<<blocks, threads, 0, stream>>>( input.DataPtr(), output.DataPtr(), isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, istrideD, istrideT, istrideH, istrideW, offsetZ); } totalZ -= 65535; offsetZ += 65535; } } template void AdaptivePooling2DCuda<float>(const TensorInfo<float> &input, TensorInfo<float> &output, const std::vector<int> &output_size, PoolingOperation type, cudaStream_t stream); template void AdaptivePooling2DCuda<half>(const TensorInfo<half> &input, TensorInfo<half> &output, const std::vector<int> &output_size, PoolingOperation type, cudaStream_t stream); template void AdaptivePooling3DCuda<float>(const TensorInfo<float> &input, TensorInfo<float> &output, const std::vector<int> &output_size, PoolingOperation type, cudaStream_t stream); template void AdaptivePooling3DCuda<half>(const TensorInfo<half> &input, TensorInfo<half> &output, const std::vector<int> &output_size, PoolingOperation type, cudaStream_t stream); FWD_TRT_NAMESPACE_END
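// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the plugin): the adaptive-pooling window
// mapping used by start_index()/end_index() above, restated on the host so the
// bin boundaries can be inspected. For an input extent isize reduced to osize
// bins, output bin o covers input indices
//   [ floor(o * isize / osize), ceil((o + 1) * isize / osize) ),
// so the windows cover the input end-to-end, adjacent windows overlap by at
// most one element, and window sizes differ by at most one.
// print_adaptive_bins() is a made-up helper used only for this example.
// ---------------------------------------------------------------------------
#include <cmath>
#include <cstdio>

static void print_adaptive_bins(int isize, int osize) {
  for (int o = 0; o < osize; ++o) {
    const int start = static_cast<int>(std::floor(static_cast<float>(o * isize) / osize));
    const int end = static_cast<int>(std::ceil(static_cast<float>((o + 1) * isize) / osize));
    std::printf("bin %d -> input [%d, %d)  (k = %d)\n", o, start, end, end - start);
  }
}

// Example: print_adaptive_bins(7, 3) prints [0, 3), [2, 5), [4, 7) -- three
// windows of size 3 over 7 inputs, matching what the kernels above compute for
// each output pixel.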
// cuda_check_status_cu ---------------------------------------------------- // extern "C" static void cuda_check_status_cu(cudaError_t status, int whiam) { if(status != cudaSuccess) { printf("cuda error: code %d, %s, line%d\n", status, cudaGetErrorString(status), whiam); exit(EXIT_FAILURE); } } // atomicMax --------------------------------------------------------------- __device__ float atomicMax(float* const address, const float val) { if ( *address >= val ) return *address; int* const address_as_i = (int*)address; int old = *address_as_i; int assumed = old; do { assumed = old; if ( __int_as_float(assumed) >= val ) break; old = atomicCAS(address_as_i, assumed, __float_as_int(val) ); } while (assumed != old); return __int_as_float(old); } // jacobi_kernel --------------------------------------------------------------- __global__ void jacobi_kernel( const float* const A_d, float* const Anew_d, const int n, const int m, float* const residue_d ) { int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; float residue = 0.0f; if ( j >= 1 && j < n-1 && i >= 1 && i < m-1 ) { Anew_d[j *m+ i] = 0.25f * ( A_d[j *m+ (i+1)] + A_d[j *m+ (i-1)] + A_d[(j-1) *m+ i] + A_d[(j+1) *m+ i]); residue = fabsf(Anew_d[j *m+ i]-A_d[j *m+ i]); atomicMax( residue_d, residue ); } } // copy_kernel --------------------------------------------------------------- __global__ void copy_kernel( float* const A_d, const float* const Anew_d, const int n, const int m ) { int i = blockIdx.x*blockDim.x+threadIdx.x; int j = blockIdx.y*blockDim.y+threadIdx.y; if ( j >= 1 && j < n-1 && i >= 1 && i < m-1 ) { A_d[j *m + i] = Anew_d[j *m + i]; } } // launch_jacobi_kernel --------------------------------------------------------------- extern "C" float launch_jacobi_kernel( const float* const A_d, float* const Anew_d, const int n, const int m, float* const residue_d ) { const dim3 dimBlock(16,16,1); const dim3 dimGrid((m/dimBlock.x)+1,(n/dimBlock.y)+1,1); float residue = 0.f; cudaMemcpy( residue_d, &residue, sizeof(float), cudaMemcpyHostToDevice ); jacobi_kernel<<<dimGrid,dimBlock>>>(A_d,Anew_d,n,m,residue_d); cudaMemcpy( &residue, residue_d, sizeof(float), cudaMemcpyDeviceToHost ); return residue; } // launch_jacobi_kernel_async --------------------------------------------------------------- extern "C" void launch_jacobi_kernel_async( const float* const A_d, float* const Anew_d, const int n, const int m, float* const residue_d ) { const dim3 dimBlock(16,16,1); const dim3 dimGrid((m/dimBlock.x)+1,(n/dimBlock.y)+1,1); float residue = 0.f; cudaMemcpy( residue_d, &residue, sizeof(float), cudaMemcpyHostToDevice ); if ( n > 0 && m > 0 ) { jacobi_kernel<<<dimGrid,dimBlock>>>(A_d,Anew_d,n,m,residue_d); } } // wait_jacobi_kernel --------------------------------------------------------------- extern "C" float wait_jacobi_kernel( const float* const residue_d ) { float residue = 0.f; cudaMemcpy( &residue, residue_d, sizeof(float), cudaMemcpyDeviceToHost ); return residue; } // launch_copy_kernel --------------------------------------------------------------- extern "C" void launch_copy_kernel( float* const A_d, const float* const Anew_d, const int n, const int m ) { const dim3 dimBlock(16,16,1); const dim3 dimGrid((m/dimBlock.x)+1,(n/dimBlock.y)+1,1); copy_kernel<<<dimGrid,dimBlock>>>(A_d,Anew_d,n,m); } // set_device --------------------------------------------------------------- extern "C" void set_device(int dev) { cudaSetDevice(dev); } // memcpy_h2d --------------------------------------------------------------- 
extern "C" void memcpy_h2d( float* const A, const float* const Anew, float** A_d, float** Anew_d, float** residue_d, const int n, const int m, const int gpu_start, const int n_cpu ) { cudaError_t rcm; // printf("%d \n",cudaGetLastError()); rcm = cudaMalloc( (void**)A_d, n*m * sizeof(float) ); cuda_check_status_cu(rcm, __LINE__); rcm = cudaMalloc( (void**)Anew_d, n*m * sizeof(float) ); cuda_check_status_cu(rcm, __LINE__); rcm = cudaMalloc( (void**)residue_d, sizeof(float) ); cuda_check_status_cu(rcm, __LINE__); rcm=cudaMemcpy( *A_d, A+gpu_start-1, m*(n-n_cpu)*sizeof(float), cudaMemcpyHostToDevice ); cuda_check_status_cu(rcm, __LINE__); //cuda error: code 11, invalid argument, line150 ==> Ben: dereference the pointer to pointer ! rcm=cudaMemcpy( *Anew_d, Anew+gpu_start-1, m*(n-n_cpu)*sizeof(float), cudaMemcpyHostToDevice ); cuda_check_status_cu(rcm, __LINE__); //cuda error: code 11, invalid argument, line150 ==> Ben: dereference the pointer to pointer ! //bug rcm = cudaMemcpy( &A_d, A+gpu_start-1, m*(n-n_cpu)*sizeof(float), cudaMemcpyHostToDevice );cuda_check_status_cu(rcm, __LINE__); //bug rcm = cudaMemcpy( &Anew_d, Anew+gpu_start-1, m*(n-n_cpu)*sizeof(float), cudaMemcpyHostToDevice );cuda_check_status_cu(rcm, __LINE__); //bug rcm = cudaMalloc( (void**)&A_d, n*m * sizeof(float) ); cuda_check_status_cu(rcm, __LINE__); //bug rcm = cudaMalloc( (void**)&Anew_d, n*m * sizeof(float) ); cuda_check_status_cu(rcm, __LINE__); //bug rcm = cudaMalloc( (void**)&residue_d, sizeof(float) ); cuda_check_status_cu(rcm, __LINE__); //bug rcm = cudaMemcpy( A_d, A+gpu_start-1, m*(n-n_cpu)*sizeof(float), cudaMemcpyHostToDevice ); cuda_check_status_cu(rcm, __LINE__); //bug rcm = cudaMemcpy( Anew_d, Anew+gpu_start-1, m*(n-n_cpu)*sizeof(float), cudaMemcpyHostToDevice ); cuda_check_status_cu(rcm, __LINE__); } // jacobi_memcpy ----------------------------------------------------------------- extern "C" void jacobi_memcpy( float* const A , const float* const Anew , float* const A_d, const float* const Anew_d, const int m, const int cpu_start, const int cpu_end, const int gpu_end, const int rank ) { cudaError_t error; if ( rank == 0 ) { error = cudaMemcpy( A_d+(gpu_end+1)*m+1, Anew +cpu_start*m+1, (m-2)*sizeof(float), cudaMemcpyHostToDevice ); cuda_check_status_cu(error, __LINE__); // cuda error: code 11, invalid argument, line173 error = cudaMemcpy( A +(cpu_start-1)*m+1, Anew_d+gpu_end*m+1, (m-2)*sizeof(float), cudaMemcpyDeviceToHost ); cuda_check_status_cu(error, __LINE__); } else { error = cudaMemcpy( A_d+0*m+1, Anew+cpu_end*m+1, (m-2)*sizeof(float), cudaMemcpyHostToDevice ); cuda_check_status_cu(error, __LINE__); error = cudaMemcpy( A+(cpu_end+1)*m+1, Anew_d+1*m+1, (m-2)*sizeof(float), cudaMemcpyDeviceToHost ); cuda_check_status_cu(error, __LINE__); } } /* cudaError_t cudaMemcpy ( void* dst, const void* src, size_t count, cudaMemcpyKind kind ) Copies data between host and device. dst - Destination memory address src - Source memory address count - Size in bytes to copy kind - Type of transfer Returns cudaSuccess, cudaErrorInvalidValue, cudaErrorInvalidDevicePointer, cudaErrorInvalidMemcpyDirection Copies count bytes from the memory area pointed to by src to the memory area pointed to by dst, where kind is one of cudaMemcpyHostToHost, cudaMemcpyHostToDevice, cudaMemcpyDeviceToHost, or cudaMemcpyDeviceToDevice, and specifies the direction of the copy. The memory areas may not overlap. Calling cudaMemcpy() with dst and src pointers that do not match the direction of the copy results in an undefined behavior. 
*/ // jacobi_memcpy_final ----------------------------------------------------------------- extern "C" void jacobi_memcpy_final( float* const A, float** const A_d, const int m, const int n, const int n_cpu, const int cpu_end, const int rank ) { // float* const A, const float* const A_d, cudaError_t error; if ( rank == 0 ) { error = cudaMemcpy( A+1*m+1, *(A_d)+1*m+1, (m*(n-n_cpu-1)-2)*sizeof(float), cudaMemcpyDeviceToHost ); cuda_check_status_cu(error, __LINE__); // cuda error: code 11, invalid argument, line227 } else { error = cudaMemcpy( A+cpu_end*m+1, *(A_d)+1*m+1, (m*(n-n_cpu-1)-2)*sizeof(float), cudaMemcpyDeviceToHost ); cuda_check_status_cu(error, __LINE__); } } // get_info_device --------------------------------------------------------------- extern "C" void get_info_device(char *gpu_string, int dev) { int driverVersion = 0, runtimeVersion = 0; struct cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); strcpy(gpu_string, deviceProp.name); // printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name); cudaDriverGetVersion(&driverVersion); cudaRuntimeGetVersion(&runtimeVersion); printf("\n CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10); printf(" CUDA Capability Major/Minor version number: %d.%d\n", deviceProp.major, deviceProp.minor); printf(" Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor); printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock); printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); } // init_cuda ------------------------------------------------------- #ifndef DEVS_PER_NODE #define DEVS_PER_NODE 1 // Devices per node #endif // void init_cuda(int rank) extern "C" void init_cuda( int rank, float* const A, const float* const Anew, float** const A_d, float** const Anew_d, const int n, const int m, const int gpu_start, const int n_cpu, float** const residue_d ) { // const float** const Anew_d, // int rank; // MPI_Comm_rank(MPI_COMM_WORLD, &rank); int dev = rank % DEVS_PER_NODE; // printf("rk=%d dev=%d\n",rank,dev); cudaError_t error; error = cudaSetDevice(dev); cuda_check_status_cu(error, __LINE__); //bug? ==> yes //bug: // fix in place: // for A_d: &A_d (=pointer to pointer to really pass, not only a copy) instead of A_d // for A: A //OKOKOKOK: memcpy_h2d(A, Anew, &A_d, &Anew_d, &residue_d, n, m, gpu_start, n_cpu); //memcpy_h2d(A, Anew, &A_d, &Anew_d, &residue_d, n, m, gpu_start, n_cpu); //bug: memcpy_h2d(A, Anew, A_d, Anew_d, residue_d, n, m, gpu_start, n_cpu); error = cudaMalloc( (void**)A_d, n*m * sizeof(float) ); cuda_check_status_cu(error, __LINE__); error = cudaMalloc( (void**)Anew_d, n*m * sizeof(float) ); cuda_check_status_cu(error, __LINE__); error = cudaMalloc( (void**)residue_d, sizeof(float) ); cuda_check_status_cu(error, __LINE__); error=cudaMemcpy( *A_d, A+gpu_start-1, m*(n-n_cpu)*sizeof(float), cudaMemcpyHostToDevice ); cuda_check_status_cu(error, __LINE__); //cuda error: code 11, invalid argument, line150 ==> Ben: dereference the pointer to pointer ! 
error=cudaMemcpy( *Anew_d, Anew+gpu_start-1, m*(n-n_cpu)*sizeof(float), cudaMemcpyHostToDevice ); cuda_check_status_cu(error, __LINE__); } // free_device ----------------------------------------------------- extern "C" void free_device(float* A_d, float* Anew_d, float* residue_d) { // cudaDeviceSynchronize(); cudaError_t error; error = cudaFree( residue_d ); cuda_check_status_cu(error, __LINE__); error = cudaFree(Anew_d); cuda_check_status_cu(error, __LINE__); error = cudaFree(A_d); cuda_check_status_cu(error, __LINE__); error = cudaDeviceReset(); cuda_check_status_cu(error, __LINE__); } // finalize_cuda --------------------------------------------------------------- extern "C" void finalize_cuda() { // cudaDeviceSynchronize(); cudaError_t error; error = cudaDeviceSynchronize(); cuda_check_status_cu(error, __LINE__); /* TODO: cudaFree( residue_d ); cudaFree(Anew_d); cudaFree(A_d); */ // free_device(A_d, Anew_d, residue_d); }
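// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this file): a minimal host loop that could
// drive the synchronous entry points defined above. The real application
// splits the domain between CPU and GPU and mixes in MPI halo exchanges (see
// memcpy_h2d/jacobi_memcpy); here everything is assumed to live on a single
// GPU, A_d/Anew_d/residue_d are assumed to be device buffers already filled by
// memcpy_h2d(), and the tolerance and iteration cap are made-up values.
// ---------------------------------------------------------------------------
static void jacobi_solve_sketch( float* const A_d, float* const Anew_d,
                                 float* const residue_d, const int n, const int m )
{
    const float tol      = 1.0e-5f;  // assumed convergence threshold
    const int   iter_max = 1000;     // assumed iteration cap
    float residue = 2.f * tol;
    for ( int iter = 0; iter < iter_max && residue > tol; iter++ ) {
        // one sweep: write Anew from A and return the max point-wise change
        residue = launch_jacobi_kernel( A_d, Anew_d, n, m, residue_d );
        // commit the sweep by copying the interior of Anew back into A
        launch_copy_kernel( A_d, Anew_d, n, m );
    }
}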
#ifndef HAVE_OPENCV_CUDEV #error "opencv_cudev is required" #else #include "opencv2/cudev.hpp" using namespace cv::cudev; void cmpScalar(const GpuMat& src, cv::Scalar val, bool inv, GpuMat& dst, const GpuMat&, double, Stream& stream, int cmpop); namespace { template <class Op, typename T> struct CmpOp : binary_function<T, T, uchar> { __device__ __forceinline__ uchar operator()(T a, T b) const { Op op; return -op(a, b); } }; #define MAKE_VEC(_type, _cn) typename MakeVec<_type, _cn>::type template <class Op, typename T, int cn> struct CmpScalarOp; template <class Op, typename T> struct CmpScalarOp<Op, T, 1> : unary_function<T, uchar> { T val; __device__ __forceinline__ uchar operator()(T src) const { CmpOp<Op, T> op; return op(src, val); } }; template <class Op, typename T> struct CmpScalarOp<Op, T, 2> : unary_function<MAKE_VEC(T, 2), MAKE_VEC(uchar, 2)> { MAKE_VEC(T, 2) val; __device__ __forceinline__ MAKE_VEC(uchar, 2) operator()(const MAKE_VEC(T, 2) & src) const { CmpOp<Op, T> op; return VecTraits<MAKE_VEC(uchar, 2)>::make(op(src.x, val.x), op(src.y, val.y)); } }; template <class Op, typename T> struct CmpScalarOp<Op, T, 3> : unary_function<MAKE_VEC(T, 3), MAKE_VEC(uchar, 3)> { MAKE_VEC(T, 3) val; __device__ __forceinline__ MAKE_VEC(uchar, 3) operator()(const MAKE_VEC(T, 3) & src) const { CmpOp<Op, T> op; return VecTraits<MAKE_VEC(uchar, 3)>::make(op(src.x, val.x), op(src.y, val.y), op(src.z, val.z)); } }; template <class Op, typename T> struct CmpScalarOp<Op, T, 4> : unary_function<MAKE_VEC(T, 4), MAKE_VEC(uchar, 4)> { MAKE_VEC(T, 4) val; __device__ __forceinline__ MAKE_VEC(uchar, 4) operator()(const MAKE_VEC(T, 4) & src) const { CmpOp<Op, T> op; return VecTraits<MAKE_VEC(uchar, 4)>::make(op(src.x, val.x), op(src.y, val.y), op(src.z, val.z), op(src.w, val.w)); } }; #undef TYPE_VEC template <typename ScalarDepth> struct TransformPolicy : DefaultTransformPolicy { }; template <> struct TransformPolicy<double> : DefaultTransformPolicy { enum { shift = 1 }; }; template <template <typename> class Op, typename T, int cn> void cmpScalarImpl(const GpuMat& src, cv::Scalar value, GpuMat& dst, Stream& stream) { typedef typename MakeVec<T, cn>::type src_type; typedef typename MakeVec<uchar, cn>::type dst_type; cv::Scalar_<T> value_ = value; CmpScalarOp<Op<T>, T, cn> op; op.val = VecTraits<src_type>::make(value_.val); gridTransformUnary_< TransformPolicy<T> >(globPtr<src_type>(src), globPtr<dst_type>(dst), op, stream); } } void cmpScalar(const GpuMat& src, cv::Scalar val, bool inv, GpuMat& dst, const GpuMat&, double, Stream& stream, int cmpop) { typedef void (*func_t)(const GpuMat& src, cv::Scalar value, GpuMat& dst, Stream& stream); static const func_t funcs[7][6][4] = { { {cmpScalarImpl<equal_to, uchar, 1>, cmpScalarImpl<equal_to, uchar, 2>, cmpScalarImpl<equal_to, uchar, 3>, cmpScalarImpl<equal_to, uchar, 4>}, {cmpScalarImpl<greater, uchar, 1>, cmpScalarImpl<greater, uchar, 2>, cmpScalarImpl<greater, uchar, 3>, cmpScalarImpl<greater, uchar, 4>}, {cmpScalarImpl<greater_equal, uchar, 1>, cmpScalarImpl<greater_equal, uchar, 2>, cmpScalarImpl<greater_equal, uchar, 3>, cmpScalarImpl<greater_equal, uchar, 4>}, {cmpScalarImpl<less, uchar, 1>, cmpScalarImpl<less, uchar, 2>, cmpScalarImpl<less, uchar, 3>, cmpScalarImpl<less, uchar, 4>}, {cmpScalarImpl<less_equal, uchar, 1>, cmpScalarImpl<less_equal, uchar, 2>, cmpScalarImpl<less_equal, uchar, 3>, cmpScalarImpl<less_equal, uchar, 4>}, {cmpScalarImpl<not_equal_to, uchar, 1>, cmpScalarImpl<not_equal_to, uchar, 2>, cmpScalarImpl<not_equal_to, uchar, 3>, 
cmpScalarImpl<not_equal_to, uchar, 4>} }, { {cmpScalarImpl<equal_to, schar, 1>, cmpScalarImpl<equal_to, schar, 2>, cmpScalarImpl<equal_to, schar, 3>, cmpScalarImpl<equal_to, schar, 4>}, {cmpScalarImpl<greater, schar, 1>, cmpScalarImpl<greater, schar, 2>, cmpScalarImpl<greater, schar, 3>, cmpScalarImpl<greater, schar, 4>}, {cmpScalarImpl<greater_equal, schar, 1>, cmpScalarImpl<greater_equal, schar, 2>, cmpScalarImpl<greater_equal, schar, 3>, cmpScalarImpl<greater_equal, schar, 4>}, {cmpScalarImpl<less, schar, 1>, cmpScalarImpl<less, schar, 2>, cmpScalarImpl<less, schar, 3>, cmpScalarImpl<less, schar, 4>}, {cmpScalarImpl<less_equal, schar, 1>, cmpScalarImpl<less_equal, schar, 2>, cmpScalarImpl<less_equal, schar, 3>, cmpScalarImpl<less_equal, schar, 4>}, {cmpScalarImpl<not_equal_to, schar, 1>, cmpScalarImpl<not_equal_to, schar, 2>, cmpScalarImpl<not_equal_to, schar, 3>, cmpScalarImpl<not_equal_to, schar, 4>} }, { {cmpScalarImpl<equal_to, ushort, 1>, cmpScalarImpl<equal_to, ushort, 2>, cmpScalarImpl<equal_to, ushort, 3>, cmpScalarImpl<equal_to, ushort, 4>}, {cmpScalarImpl<greater, ushort, 1>, cmpScalarImpl<greater, ushort, 2>, cmpScalarImpl<greater, ushort, 3>, cmpScalarImpl<greater, ushort, 4>}, {cmpScalarImpl<greater_equal, ushort, 1>, cmpScalarImpl<greater_equal, ushort, 2>, cmpScalarImpl<greater_equal, ushort, 3>, cmpScalarImpl<greater_equal, ushort, 4>}, {cmpScalarImpl<less, ushort, 1>, cmpScalarImpl<less, ushort, 2>, cmpScalarImpl<less, ushort, 3>, cmpScalarImpl<less, ushort, 4>}, {cmpScalarImpl<less_equal, ushort, 1>, cmpScalarImpl<less_equal, ushort, 2>, cmpScalarImpl<less_equal, ushort, 3>, cmpScalarImpl<less_equal, ushort, 4>}, {cmpScalarImpl<not_equal_to, ushort, 1>, cmpScalarImpl<not_equal_to, ushort, 2>, cmpScalarImpl<not_equal_to, ushort, 3>, cmpScalarImpl<not_equal_to, ushort, 4>} }, { {cmpScalarImpl<equal_to, short, 1>, cmpScalarImpl<equal_to, short, 2>, cmpScalarImpl<equal_to, short, 3>, cmpScalarImpl<equal_to, short, 4>}, {cmpScalarImpl<greater, short, 1>, cmpScalarImpl<greater, short, 2>, cmpScalarImpl<greater, short, 3>, cmpScalarImpl<greater, short, 4>}, {cmpScalarImpl<greater_equal, short, 1>, cmpScalarImpl<greater_equal, short, 2>, cmpScalarImpl<greater_equal, short, 3>, cmpScalarImpl<greater_equal, short, 4>}, {cmpScalarImpl<less, short, 1>, cmpScalarImpl<less, short, 2>, cmpScalarImpl<less, short, 3>, cmpScalarImpl<less, short, 4>}, {cmpScalarImpl<less_equal, short, 1>, cmpScalarImpl<less_equal, short, 2>, cmpScalarImpl<less_equal, short, 3>, cmpScalarImpl<less_equal, short, 4>}, {cmpScalarImpl<not_equal_to, short, 1>, cmpScalarImpl<not_equal_to, short, 2>, cmpScalarImpl<not_equal_to, short, 3>, cmpScalarImpl<not_equal_to, short, 4>} }, { {cmpScalarImpl<equal_to, int, 1>, cmpScalarImpl<equal_to, int, 2>, cmpScalarImpl<equal_to, int, 3>, cmpScalarImpl<equal_to, int, 4>}, {cmpScalarImpl<greater, int, 1>, cmpScalarImpl<greater, int, 2>, cmpScalarImpl<greater, int, 3>, cmpScalarImpl<greater, int, 4>}, {cmpScalarImpl<greater_equal, int, 1>, cmpScalarImpl<greater_equal, int, 2>, cmpScalarImpl<greater_equal, int, 3>, cmpScalarImpl<greater_equal, int, 4>}, {cmpScalarImpl<less, int, 1>, cmpScalarImpl<less, int, 2>, cmpScalarImpl<less, int, 3>, cmpScalarImpl<less, int, 4>}, {cmpScalarImpl<less_equal, int, 1>, cmpScalarImpl<less_equal, int, 2>, cmpScalarImpl<less_equal, int, 3>, cmpScalarImpl<less_equal, int, 4>}, {cmpScalarImpl<not_equal_to, int, 1>, cmpScalarImpl<not_equal_to, int, 2>, cmpScalarImpl<not_equal_to, int, 3>, cmpScalarImpl<not_equal_to, int, 4>} }, { 
{cmpScalarImpl<equal_to, float, 1>, cmpScalarImpl<equal_to, float, 2>, cmpScalarImpl<equal_to, float, 3>, cmpScalarImpl<equal_to, float, 4>}, {cmpScalarImpl<greater, float, 1>, cmpScalarImpl<greater, float, 2>, cmpScalarImpl<greater, float, 3>, cmpScalarImpl<greater, float, 4>}, {cmpScalarImpl<greater_equal, float, 1>, cmpScalarImpl<greater_equal, float, 2>, cmpScalarImpl<greater_equal, float, 3>, cmpScalarImpl<greater_equal, float, 4>}, {cmpScalarImpl<less, float, 1>, cmpScalarImpl<less, float, 2>, cmpScalarImpl<less, float, 3>, cmpScalarImpl<less, float, 4>}, {cmpScalarImpl<less_equal, float, 1>, cmpScalarImpl<less_equal, float, 2>, cmpScalarImpl<less_equal, float, 3>, cmpScalarImpl<less_equal, float, 4>}, {cmpScalarImpl<not_equal_to, float, 1>, cmpScalarImpl<not_equal_to, float, 2>, cmpScalarImpl<not_equal_to, float, 3>, cmpScalarImpl<not_equal_to, float, 4>} }, { {cmpScalarImpl<equal_to, double, 1>, cmpScalarImpl<equal_to, double, 2>, cmpScalarImpl<equal_to, double, 3>, cmpScalarImpl<equal_to, double, 4>}, {cmpScalarImpl<greater, double, 1>, cmpScalarImpl<greater, double, 2>, cmpScalarImpl<greater, double, 3>, cmpScalarImpl<greater, double, 4>}, {cmpScalarImpl<greater_equal, double, 1>, cmpScalarImpl<greater_equal, double, 2>, cmpScalarImpl<greater_equal, double, 3>, cmpScalarImpl<greater_equal, double, 4>}, {cmpScalarImpl<less, double, 1>, cmpScalarImpl<less, double, 2>, cmpScalarImpl<less, double, 3>, cmpScalarImpl<less, double, 4>}, {cmpScalarImpl<less_equal, double, 1>, cmpScalarImpl<less_equal, double, 2>, cmpScalarImpl<less_equal, double, 3>, cmpScalarImpl<less_equal, double, 4>}, {cmpScalarImpl<not_equal_to, double, 1>, cmpScalarImpl<not_equal_to, double, 2>, cmpScalarImpl<not_equal_to, double, 3>, cmpScalarImpl<not_equal_to, double, 4>} } }; if (inv) { // src1 is a scalar; swap it with src2 cmpop = cmpop == cv::CMP_LT ? cv::CMP_GT : cmpop == cv::CMP_LE ? cv::CMP_GE : cmpop == cv::CMP_GE ? cv::CMP_LE : cmpop == cv::CMP_GT ? cv::CMP_LT : cmpop; } const int depth = src.depth(); const int cn = src.channels(); CV_DbgAssert( depth <= CV_64F && cn <= 4 ); funcs[depth][cmpop][cn - 1](src, val, dst, stream); } #endif
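// ---------------------------------------------------------------------------
// Illustrative note (not part of the OpenCV sources): CmpOp returns -op(a, b)
// so that a "true" comparison becomes uchar(-1) == 255 and a "false" one
// becomes 0, i.e. the all-ones / all-zeros mask convention of cv::compare().
// The standalone host check below demonstrates the same idiom; cmp_mask() is a
// made-up name used only for this example.
// ---------------------------------------------------------------------------
#include <cassert>
#include <functional>

template <class Op, typename T>
static unsigned char cmp_mask(T a, T b)
{
    Op op;
    return static_cast<unsigned char>(-static_cast<int>(op(a, b)));  // true -> 0xFF, false -> 0x00
}

static void cmp_mask_selfcheck()
{
    assert((cmp_mask<std::equal_to<int>, int>(3, 3)) == 255);
    assert((cmp_mask<std::less<int>,     int>(5, 3)) == 0);
}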
/* ---------------------------------------------------------------------- factorial n table, size SNA::nmaxfactorial+1 ------------------------------------------------------------------------- */ const double nfac_table[] = { 1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880, 3628800, 39916800, 479001600, 6227020800, 87178291200, 1307674368000, 20922789888000, 355687428096000, 6.402373705728e+15, 1.21645100408832e+17, 2.43290200817664e+18, 5.10909421717094e+19, 1.12400072777761e+21, 2.5852016738885e+22, 6.20448401733239e+23, 1.5511210043331e+25, 4.03291461126606e+26, 1.08888694504184e+28, 3.04888344611714e+29, 8.8417619937397e+30, 2.65252859812191e+32, 8.22283865417792e+33, 2.63130836933694e+35, 8.68331761881189e+36, 2.95232799039604e+38, 1.03331479663861e+40, 3.71993326789901e+41, 1.37637530912263e+43, 5.23022617466601e+44, 2.03978820811974e+46, // nmaxfactorial = 39 }; /* ---------------------------------------------------------------------- the function delta given by VMK Eq. 8.2(1) ------------------------------------------------------------------------- */ double deltacg(int j1, int j2, int j) { double sfaccg = factorial((j1 + j2 + j) / 2 + 1); return sqrt(factorial((j1 + j2 - j) / 2) * factorial((j1 - j2 + j) / 2) * factorial((-j1 + j2 + j) / 2) / sfaccg); } /* ---------------------------------------------------------------------- assign Clebsch-Gordan coefficients using the quasi-binomial formula VMK 8.2.1(3) ------------------------------------------------------------------------- */ /* ---------------------------------------------------------------------- pre-compute table of sqrt[p/m2], p, q = 1,twojmax the p = 0, q = 0 entries are allocated and skipped for convenience. a second table is computed with +/-1 parity factor ------------------------------------------------------------------------- */ /* ---------------------------------------------------------------------- */ int compute_ncoeff(int twojmax) { int ncount; ncount = 0; for (int j1 = 0; j1 <= twojmax; j1++) for (int j2 = 0; j2 <= j1; j2++) for (int j = abs(j1 - j2); j <= MIN(twojmax, j1 + j2); j += 2) if (j >= j1) ncount++; return ncount; } /* ---------------------------------------------------------------------- */ __device__ double compute_sfac(double r, double rcut, const int switch_flag) { if (switch_flag == 0) return 1.0; if (switch_flag == 1) { if (r <= rmin0) return 1.0; else if (r > rcut) return 0.0; else { double rcutfac = MY_PI / (rcut - rmin0); return 0.5 * (cos((r - rmin0) * rcutfac) + 1.0); } } return 0.0; } /* ---------------------------------------------------------------------- */ __device__ double compute_dsfac(double r, double rcut, const int switch_flag) { if (switch_flag == 0) return 0.0; if (switch_flag == 1) { if (r <= rmin0) return 0.0; else if (r > rcut) return 0.0; else { double rcutfac = MY_PI / (rcut - rmin0); return -0.5 * sin((r - rmin0) * rcutfac) * rcutfac; } } return 0.0; } __device__ void compute_duarray(const int natom, const int nbor, const int num_atoms, const int num_nbor, const int twojmax, const int idxdu_max, const int jdimpq, const int switch_flag, const double x, const double y, const double z, const double z0, const double r, const double dz0dr, const double wj_in, const double rcut, const double* rootpqarray, const COMPLEX* ulist, COMPLEX* dulist) { double r0inv; double a_r, a_i, b_r, b_i; double da_r[3], da_i[3], db_r[3], db_i[3]; double dz0[3], dr0inv[3], dr0invdr; double rootpq; int jju, jjup, jjdu, jjdup; double rinv = 1.0 / r; double ux = x * rinv; double uy = y * rinv; double 
uz = z * rinv; r0inv = 1.0 / sqrt(r * r + z0 * z0); a_r = z0 * r0inv; a_i = -z * r0inv; b_r = y * r0inv; b_i = -x * r0inv; dr0invdr = -pow(r0inv, 3.0) * (r + z0 * dz0dr); dr0inv[0] = dr0invdr * ux; dr0inv[1] = dr0invdr * uy; dr0inv[2] = dr0invdr * uz; dz0[0] = dz0dr * ux; dz0[1] = dz0dr * uy; dz0[2] = dz0dr * uz; for (int k = 0; k < 3; k++) { da_r[k] = dz0[k] * r0inv + z0 * dr0inv[k]; da_i[k] = -z * dr0inv[k]; } da_i[2] += -r0inv; for (int k = 0; k < 3; k++) { db_r[k] = y * dr0inv[k]; db_i[k] = -x * dr0inv[k]; } db_i[0] += -r0inv; db_r[1] += r0inv; for (int k = 0; k < 3; ++k) dulist[DULIST_INDEX(natom, nbor, 0, k)] = { 0.0, 0.0 }; jju = 1; jjdu = 1; for (int j = 1; j <= twojmax; j++) { int deljju = j + 1; for (int mb = 0; 2 * mb <= j; mb++) { for (int k = 0; k < 3; ++k) dulist[DULIST_INDEX(natom, nbor, jjdu, k)] = { 0.0, 0.0 }; jju += deljju; jjdu += deljju; } int ncolhalf = deljju / 2; jju += deljju * ncolhalf; } jju = 1; jjdu = 1; jjup = 0; jjdup = 0; for (int j = 1; j <= twojmax; j++) { int deljju = j + 1; int deljjup = j; for (int mb = 0; 2 * mb < j; mb++) { for (int ma = 0; ma < j; ma++) { double up_r = ulist[ULIST_INDEX(natom, nbor, jjup)].re; double up_i = ulist[ULIST_INDEX(natom, nbor, jjup)].im; rootpq = rootpqarray[ROOTPQ_INDEX(j - ma, j - mb)]; for (int k = 0; k < 3; k++) { dulist[DULIST_INDEX(natom, nbor, jjdu, k)].re += rootpq * (da_r[k] * up_r + da_i[k] * up_i + a_r * dulist[DULIST_INDEX(natom, nbor, jjdup, k)].re + a_i * dulist[DULIST_INDEX(natom, nbor, jjdup, k)].im); dulist[DULIST_INDEX(natom, nbor, jjdu, k)].im += rootpq * (da_r[k] * up_i - da_i[k] * up_r + a_r * dulist[DULIST_INDEX(natom, nbor, jjdup, k)].im - a_i * dulist[DULIST_INDEX(natom, nbor, jjdup, k)].re); } rootpq = rootpqarray[ROOTPQ_INDEX(ma + 1, j - mb)]; for (int k = 0; k < 3; k++) { dulist[DULIST_INDEX(natom, nbor, jjdu + 1, k)].re = -rootpq * (db_r[k] * up_r + db_i[k] * up_i + b_r * dulist[DULIST_INDEX(natom, nbor, jjdup, k)].re + b_i * dulist[DULIST_INDEX(natom, nbor, jjdup, k)].im); dulist[DULIST_INDEX(natom, nbor, jjdu + 1, k)].im = -rootpq * (db_r[k] * up_i - db_i[k] * up_r + b_r * dulist[DULIST_INDEX(natom, nbor, jjdup, k)].im - b_i * dulist[DULIST_INDEX(natom, nbor, jjdup, k)].re); } // assign middle column i.e. 
mb+1 if (2 * (mb + 1) == j) { rootpq = rootpqarray[ROOTPQ_INDEX(j - ma, mb + 1)]; for (int k = 0; k < 3; k++) { dulist[DULIST_INDEX(natom, nbor, jjdu + deljju, k)].re += rootpq * (db_r[k] * up_r - db_i[k] * up_i + b_r * dulist[DULIST_INDEX(natom, nbor, jjdup, k)].re - b_i * dulist[DULIST_INDEX(natom, nbor, jjdup, k)].im); dulist[DULIST_INDEX(natom, nbor, jjdu + deljju, k)].im += rootpq * (db_r[k] * up_i + db_i[k] * up_r + b_r * dulist[DULIST_INDEX(natom, nbor, jjdup, k)].im + b_i * dulist[DULIST_INDEX(natom, nbor, jjdup, k)].re); } rootpq = rootpqarray[ROOTPQ_INDEX(ma + 1, mb + 1)]; for (int k = 0; k < 3; k++) { dulist[DULIST_INDEX(natom, nbor, jjdu + 1 + deljju, k)].re = rootpq * (da_r[k] * up_r - da_i[k] * up_i + a_r * dulist[DULIST_INDEX(natom, nbor, jjdup, k)].re - a_i * dulist[DULIST_INDEX(natom, nbor, jjdup, k)].im); dulist[DULIST_INDEX(natom, nbor, jjdu + 1 + deljju, k)].im = rootpq * (da_r[k] * up_i + da_i[k] * up_r + a_r * dulist[DULIST_INDEX(natom, nbor, jjdup, k)].im + a_i * dulist[DULIST_INDEX(natom, nbor, jjdup, k)].re); } } jju++; jjup++; jjdu++; jjdup++; } jju++; jjdu++; } if (j % 2 == 0) { jju += deljju; jjdu += deljju; } int ncolhalf = deljju / 2; jju += deljju * ncolhalf; int ncolhalfp = deljjup / 2; jjup += deljjup * ncolhalfp; } double sfac = compute_sfac(r, rcut, switch_flag); double dsfac = compute_dsfac(r, rcut, switch_flag); sfac *= wj_in; dsfac *= wj_in; jju = 0; jjdu = 0; for (int j = 0; j <= twojmax; j++) { int deljju = j + 1; for (int mb = 0; 2 * mb <= j; mb++) for (int ma = 0; ma <= j; ma++) { dulist[DULIST_INDEX(natom, nbor, jjdu, 0)].re = dsfac * ulist[ULIST_INDEX(natom, nbor, jju)].re * ux + sfac * dulist[DULIST_INDEX(natom, nbor, jjdu, 0)].re; dulist[DULIST_INDEX(natom, nbor, jjdu, 0)].im = dsfac * ulist[ULIST_INDEX(natom, nbor, jju)].im * ux + sfac * dulist[DULIST_INDEX(natom, nbor, jjdu, 0)].im; dulist[DULIST_INDEX(natom, nbor, jjdu, 1)].re = dsfac * ulist[ULIST_INDEX(natom, nbor, jju)].re * uy + sfac * dulist[DULIST_INDEX(natom, nbor, jjdu, 1)].re; dulist[DULIST_INDEX(natom, nbor, jjdu, 1)].im = dsfac * ulist[ULIST_INDEX(natom, nbor, jju)].im * uy + sfac * dulist[DULIST_INDEX(natom, nbor, jjdu, 1)].im; dulist[DULIST_INDEX(natom, nbor, jjdu, 2)].re = dsfac * ulist[ULIST_INDEX(natom, nbor, jju)].re * uz + sfac * dulist[DULIST_INDEX(natom, nbor, jjdu, 2)].re; dulist[DULIST_INDEX(natom, nbor, jjdu, 2)].im = dsfac * ulist[ULIST_INDEX(natom, nbor, jju)].im * uz + sfac * dulist[DULIST_INDEX(natom, nbor, jjdu, 2)].im; jju++; jjdu++; } int ncolhalf = deljju / 2; jju += deljju * ncolhalf; } } /* ---------------------------------------------------------------------- Elapsed Time ------------------------------------------------------------------------- */ inline double elapsedTime(timeval start_time, timeval end_time) { return ((end_time.tv_sec - start_time.tv_sec) + 1e-6 * (end_time.tv_usec - start_time.tv_usec)); } void options(int argc, char* argv[]) { for (int i = 1; i < argc; i++) { if ((strcmp(argv[i], "-h") == 0) || (strcmp(argv[i], "--help") == 0)) { printf("TestSNAP 1.0 (stand-alone SNAP force kernel)\n\n"); printf("The following optional command-line switches override default " "values\n"); printf("-ns, --nsteps <val>: set the number of force calls to val " "(default 1)\n"); exit(0); } else if ((strcmp(argv[i], "-ns") == 0) || (strcmp(argv[i], "--nsteps") == 0)) { nsteps = atoi(argv[++i]); } else { printf("ERROR: Unknown command line argument: %s\n", argv[i]); exit(1); } } } /* ---------------------------------------------------------------------- 
   factorial n, wrapper for precomputed table
------------------------------------------------------------------------- */

double factorial(int n)
{
  if (n < 0 || n > nmaxfactorial) {
    printf("Invalid argument to factorial %d\n", n);
    exit(1);
  }
  return nfac_table[n];
}
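/* ----------------------------------------------------------------------
   illustrative sketch (not part of TestSNAP): deltacg() above evaluates the
   triangle coefficient of VMK Eq. 8.2(1),
     Delta(j1,j2,j) = sqrt( (j1+j2-j)! (j1-j2+j)! (-j1+j2+j)! / (j1+j2+j+1)! ),
   with all arguments passed as doubled integers (2*j). The host check below
   exercises it for a couple of admissible triangles; deltacg_selfcheck() is a
   made-up helper, not part of the benchmark driver.
------------------------------------------------------------------------- */

static void deltacg_selfcheck()
{
  // (j1, j2, j) = (1, 1, 0), passed doubled as (2, 2, 0):
  //   Delta = sqrt( 2! * 0! * 0! / 3! ) = sqrt(2/6)
  const double expect = sqrt(2.0 / 6.0);
  if (fabs(deltacg(2, 2, 0) - expect) > 1e-12)
    printf("deltacg(2,2,0) mismatch\n");

  // the triangle coefficient is symmetric in its first two arguments
  if (fabs(deltacg(4, 2, 2) - deltacg(2, 4, 2)) > 1e-12)
    printf("deltacg symmetry violated\n");
}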
using namespace std; #ifndef MAP_FILE #define MAP_FILE MAP_SHARED #endif long fsize(int fd) { struct stat stat; int res = fstat(fd, &stat); return stat.st_size; } int printll(char *s) { while (*s != '\n' && *s != ',' && *s != '\t') { putchar(*s++); } return 0; } long hash(char *str0, int len) { unsigned char *str = (unsigned char *)str0; unsigned long hash = 5381; int c; while ((c = *str++) && len--) hash = ((hash << 5) + hash) + c; /* hash * 33 + c */ return hash; } long HEAP_SIZE_CPU = 1073741826; // 1048576; // 536870912; // 268435456; // 2097152; 1610612739; // 4294967304; // void *mallocBase = calloc(HEAP_SIZE_CPU, 1); void *mallocAddr = mallocBase; void *waterMark = mallocBase; void *myMalloc(size_t bytes) { void *res = mallocAddr; mallocAddr = (void *)((char *)mallocAddr + bytes); if ((long)mallocAddr >= (long)mallocBase + HEAP_SIZE_CPU) fprintf(stderr, "CPU memory breached limit of HEAP_SIZE_CPU\n"); return res; } long HEAP_SIZE = 8589934608; // 4294967304; // this is for GPU int timeval_subtract(struct timeval *result, struct timeval *t2, struct timeval *t1) { long int diff = (t2->tv_usec + 1000000 * t2->tv_sec) - (t1->tv_usec + 1000000 * t1->tv_sec); result->tv_sec = diff / 1000000; result->tv_usec = diff % 1000000; return (diff < 0); } #define CUDA_CALL(f) { \ cudaError_t err = (f); \ if (err != cudaSuccess) { \ fprintf(stderr, "CUDA error occurred: %s (%s:%d)\n", \ cudaGetErrorString(err), __FILE__, __LINE__); \ exit(err); \ } \ } #define CUBLAS_CALL(f) { \ cublasStatus_t stat = (f); \ if (stat != CUBLAS_STATUS_SUCCESS) { \ fprintf(stderr, "cuBLAS error occurred: %d (%s:%d)\n", \ stat, __FILE__, __LINE__); \ exit(stat); \ } \ } void *gpuMallocBase; void *gpuMallocAddr; // Alignment boundary size, in bytes. constexpr int N = 4; // 16 void *myGpuMalloc(size_t bytes) { bytes = ((bytes + (1 << N) - 1) >> N) << N; void *res = gpuMallocAddr; gpuMallocAddr = (void *)((char *)gpuMallocAddr + bytes); if ((long)gpuMallocAddr >= (long)gpuMallocBase + HEAP_SIZE) fprintf(stderr, "GPU breached memory limit of HEAP_SIZE\n"); return res; } void myGpuFree(size_t bytes) { bytes = ((bytes + (1 << N) - 1) >> N) << N; gpuMallocAddr = (void *)((char *)gpuMallocAddr - bytes); cudaMemset((void*)gpuMallocAddr, 0, bytes); return; } template <typename T> __global__ void arrayUpdate(T *data, int index, T value) { data[index] = value; } __global__ void arrayFill(float* data, float value, int size) { int stride = gridDim.x * blockDim.x; int tid = threadIdx.x + blockIdx.x * blockDim.x; for (int i = tid; i < size; i += stride) data[i] = value; } __global__ void hardTanh(float* in, float* out, float min_val, float max_val, int size) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = gridDim.x * blockDim.x; for (int i = tid; i < size; i += stride) { out[i] = in[i] < min_val ? min_val : (in[i] > max_val ? 
max_val : in[i]); } } __global__ void hardTanh_grad(float* in_x, float* in_d, float* out_d, float min_val, float max_val, int size, bool inplace) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = gridDim.x * blockDim.x; for (int i = tid; i < size; i += stride) { if (inplace) { if (in_x[i] < min_val || in_x[i] > max_val) in_d[i] = 0; } else { if (in_x[i] >= min_val && in_x[i] <= max_val) in_d[i] += out_d[i]; } } } __global__ void nllLoss(float *x, int x_stride, float *y, int* target) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int offset = tid * x_stride + target[tid]; y[tid] = -1 * x[offset]; } __global__ void nllLoss_grad(int x_stride, float *yGrad, int* target, float* xGrad) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int offset = tid * x_stride + target[tid]; xGrad[offset] += -1 * yGrad[tid]; } // only for 4D tensor in and 3D tensor out (TODO: incorrect!) __global__ void sum_optimization(float* in, int inStr0, int inStr1, int inStr2, int inStr3, float* out, int outStr0, int outStr1, int outStr2, int dim, int nElementOut, int dimSize) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (int i = tid; i < nElementOut; i += stride) { int outOff0 = i / outStr0; int outOff1temp = i - outOff0 * outStr0; int outOff1 = outOff1temp / outStr1; int outOff2 = outOff1temp - outOff1 * outStr1; for (int j = 0; j < dimSize; j++) { int inOff; if (dim == 0) inOff = j * inStr0 + outOff0 * inStr1 + outOff1 * inStr2 + outOff2 * inStr3; if (dim == 1) inOff = outOff0 * inStr0 + j * inStr1 + outOff1 * inStr2 + outOff2 * inStr3; if (dim == 2) inOff = outOff0 * inStr0 + outOff1 * inStr1 + j * inStr2 + outOff2 * inStr3; if (dim == 3) inOff = outOff0 * inStr0 + outOff1 * inStr1 + outOff2 * inStr2 + j * inStr3; out[i] += in[inOff]; } } } // only for 4D tensor in and 3D tensor out __global__ void sum_grad(float* in, int inSize0, int inSize1, int inSize2, int inSize3, int nElement, float* out, int outStride0, int outStride1, int outStride2, int dim) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (int i = tid; i < nElement; i += stride) { int inOff2 = i / inSize3; int inDim3 = i - inOff2 * inSize3; int inOff1 = inOff2 / inSize2; int inDim2 = inOff2 - inOff1 * inSize2; int inDim0 = inOff1 / inSize1; int inDim1 = inOff1 - inDim0 * inSize1; int outOff = 0; if (dim == 0) outOff = inDim1 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2; if (dim == 1) outOff = inDim0 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2; if (dim == 2) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim3 * outStride2; if (dim == 3) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim2 * outStride2; in[i] += out[outOff]; } } //following - https://github.com/torch/cutorch/blob/master/lib/THC/THCTensorMath.cuh#L49 template <int Dims> static inline __device__ int compute(const int outputSizes[Dims], const int outputStrides[Dims], const int dimSize, const int concatDim, int linearIndex) { int offset = 0; #pragma unroll for (int i = Dims - 1; i >= 1; --i) { int curDimSize = i == concatDim? 
dimSize : outputSizes[i]; int nextDimIndex = linearIndex / curDimSize; int curDimIndex = linearIndex - curDimSize * nextDimIndex; int curDimOffset = curDimIndex * outputStrides[i]; offset += curDimOffset; linearIndex = nextDimIndex; } return offset + linearIndex * outputStrides[0]; } // TODO: Only for Dim of rank 4, and only for 2 inputs __global__ void concat2D_1D_greg(float* in1, int dimSize1, int nElement1, float* in2, int dimSize2, int nElement2, float* out, int concatDim, int outSize0, int outSize1, int outSize2, int outSize3, int outStride0, int outStride1, int outStride2, int outStride3) { int outSizes[] = {outSize0, outSize1, outSize2, outSize3}; int outStrides[] = {outStride0, outStride1, outStride2, outStride3}; int tid = blockIdx.x * blockDim.x + threadIdx.x; int nElement = blockIdx.y == 0 ? nElement1 : nElement2; if (tid >= nElement) return; float* data = blockIdx.y == 0 ? in1 : in2; int offset = blockIdx.y == 0 ? 0 : dimSize1; int dimSize = blockIdx.y == 0 ? dimSize1 : dimSize2; int dataOffset = offset * outStrides[concatDim]; int stride = gridDim.x * blockDim.x; for (; tid < nElement; tid += stride) { int elementOffset = compute<4>(outSizes, //0, outSize1, outSize2, outSize3, outStrides, //0, outStride1, outStride2, outStride3, dimSize, concatDim, tid); out[dataOffset + elementOffset] = data[tid]; } } // TODO: Only for Dim of rank 4, and only for 2 inputs, and only for concat at dim = 1 __global__ void concat2D_1D_greg_grad(float* in1, int dimSize1, int nElement1, float* in2, int dimSize2, int nElement2, float* out, int concatDim, int outSize0, int outSize1, int outSize2, int outSize3, int outStride0, int outStride1, int outStride2, int outStride3) { int outSizes[] = {outSize0, outSize1, outSize2, outSize3}; int outStrides[] = {outStride0, outStride1, outStride2, outStride3}; int tid = blockIdx.x * blockDim.x + threadIdx.x; int nElement = blockIdx.y == 0 ? nElement1 : nElement2; if (tid >= nElement) return; float* data = blockIdx.y == 0 ? in1 : in2; int offset = blockIdx.y == 0 ? 0 : dimSize1; int dimSize = blockIdx.y == 0 ? 
dimSize1 : dimSize2; int dataOffset = offset * outStride1; int stride = gridDim.x * blockDim.x; for (; tid < nElement; tid += stride) { int elementOffset = compute<4>(outSizes, //0, outSize1, outSize2, outSize3, outStrides, //0, outStride1, outStride2, outStride3, dimSize, concatDim, tid); data[tid] += out[dataOffset + elementOffset]; } } __global__ void repeat0(float* in, float* out, int outStride0, int outStride1, int outScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < outScalarCount; tid += stride) { int linearIndex = tid; int outIndex0 = linearIndex / outStride0; linearIndex = linearIndex - outIndex0 * outStride0; int outIndex1 = linearIndex / outStride1; int outIndex2 = linearIndex - outIndex1 * outStride1; int inIndex = outIndex2 + (outIndex0 + outIndex1) * outStride1; out[tid] = in[inIndex]; } } __global__ void shift0(float* in, float* out, int inDim0, int inStride0, int inStride1, int inScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < inScalarCount; tid += stride) { int linearIndex = tid; int inIndex0 = linearIndex / inStride0; linearIndex = linearIndex - inIndex0 * inStride0; int inIndex1 = linearIndex / inStride1; if (inIndex0 + inIndex1 >= inDim0) return; out[tid + inIndex1 * inStride0] = in[tid]; } } __global__ void adagrad_update_1D_1D(float* x, float* d, float* m, float clip, float lr, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) { if (d[tid] > clip) d[tid] = clip; if (d[tid] < -clip) d[tid] = -clip; m[tid] += d[tid] * d[tid]; x[tid] -= lr * d[tid] / sqrt(m[tid] + 0.00000001); d[tid] = 0; } } __global__ void momentum_update_1D_1D(float* x, float* d, float* m, float learning_rate, float momentum, float gradClip, bool nesterov, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) { float temp = d[tid]; if (temp > gradClip) temp = gradClip; if (temp < -gradClip) temp = -gradClip; m[tid] *= momentum; m[tid] += temp; if (nesterov) { temp += momentum * m[tid]; } else { temp = m[tid]; } x[tid] -= learning_rate * temp; d[tid] = 0; } } __global__ void addScalarInArrayInPlace(float* in, float* add, float scale, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in[tid] += add[0] * scale; } __global__ void addScalar(float* in, float* out, float add, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] + add; } __global__ void minusScalar(float* in, float* out, float minus, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] - minus; } __global__ void multScalar(float* in, float* out, float mult, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] * mult; } __global__ void divScalar(float* in, float* out, float div, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] / div; } __global__ void elementwise_1D_1D_mul(float* in1, float* in2, float* out, int 
size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] * in2[tid]; } __global__ void elementwise_1D_1D_mul_mutate(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] += in1[tid] * in2[tid]; } __global__ void elementwise_1D_1D_add(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] + in2[tid]; } __global__ void elementwise_1D_1D_minus(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] - in2[tid]; } __global__ void elementwise_1D_1D_div(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] / in2[tid]; } __global__ void elementwise_1D_1D_exp(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = exp(in[tid]); } __global__ void elementwise_1D_1D_log(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = log(in[tid]); } __global__ void elementwise_1D_1D_sqrt(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = sqrt(in[tid]); } __global__ void elementwise_1D_1D_square(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] * in[tid]; } __global__ void elementwise_1D_1D_exp_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] * out_x[tid]; } __global__ void elementwise_1D_1D_log_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] / in_x[tid]; } __global__ void elementwise_1D_1D_sqrt_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] / out_x[tid] / 2; } __global__ void elementwise_1D_1D_square_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] * 2 * in_x[tid]; } __global__ void clipAt(float* in, float bound, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) { if (in[tid] > bound) in[tid] = bound; 
if (in[tid] < -bound) in[tid] = -bound; } } __global__ void mask4D(float* in, int* mask, int xstrides0, int xstrides1, int xstrides2, int xstrides3, int scalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < scalarCount; tid += stride) { int linearIndex = tid; int xindex0 = linearIndex / xstrides0; linearIndex = linearIndex - xstrides0 * xindex0; int xindex1 = linearIndex / xstrides1; linearIndex = linearIndex - xstrides1 * xindex1; int xindex2 = linearIndex / xstrides2; int xindex3 = linearIndex - xstrides2 * xindex2; if (xindex3 >= mask[xindex0]) in[tid] = 0; } } __global__ void mul_sub(float* in1, float* in2, float* out, int in1ScalarCount, int in2ScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < in1ScalarCount; tid += stride) { out[tid] = in1[tid] * in2[tid % in2ScalarCount]; } } __global__ void mul_sub_grad(float* in1_x, float* in1_d, float* in2_x, float* in2_d, float* out, int in1ScalarCount, int in2ScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < in1ScalarCount; tid += stride) { int index = tid % in2ScalarCount; in1_d[tid] += out[tid] * in2_x[index]; in2_d[tid] = in1_x[tid] * out[tid]; // this is the temp array, need to be reduced! } } #define CUDNN_CALL(f) { \ cudnnStatus_t stat = (f); \ if (stat != CUDNN_STATUS_SUCCESS) { \ fprintf(stderr, "cuDNN error occurred: %d (%s:%d)\n", \ stat, __FILE__, __LINE__); \ exit(stat); \ } \ } void Snippet(char *); std::random_device rd{}; std::mt19937 gen{rd()}; std::normal_distribution<> d{0, 0.01}; int main(int argc, char *argv[]) { if (argc != 2) { printf("usage: query <filename>\n"); return 0; } Snippet(argv[1]); return 0; } /***************************************** Emitting C Generated Code *******************************************/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdbool.h> void Snippet(char* x0) { // Backend setup. cublasHandle_t cublasHandle; CUBLAS_CALL(cublasCreate(&cublasHandle)); CUDA_CALL(cudaMalloc(&gpuMallocBase, HEAP_SIZE)); CUDA_CALL(cudaMemset(gpuMallocBase, 0, HEAP_SIZE)); gpuMallocAddr = gpuMallocBase; cudnnHandle_t cudnnHandle; CUDNN_CALL(cudnnCreate(&cudnnHandle)); srand(42); struct timeval begin_0, end_0, diff_0; gettimeofday(&begin_0, NULL); float* x7 = (float*)myMalloc(14432 * sizeof(float));; for(int x9=0; x9 < 14432; x9++) { float x10 = (float)rand()/RAND_MAX; float x11 = x10 - 0.5f; float x12 = x11 * 0.23068394f; x7[x9] = x12; } // Tensor 'toGPU' invocation. float* x17 = (float*)myGpuMalloc(14432 * sizeof(float)); CUDA_CALL(cudaMemcpy(x17, x7, 14432 * sizeof(float), cudaMemcpyHostToDevice)); float* x19 = (float*)myGpuMalloc(14432 * sizeof(float)); float* x20 = (float*)myGpuMalloc(32 * sizeof(float)); arrayFill<<<28, 512>>>(x20, 1.0f, 32); float* x22 = (float*)myGpuMalloc(32 * sizeof(float)); float* x23 = (float*)myGpuMalloc(32 * sizeof(float)); float* x24 = (float*)myGpuMalloc(32 * sizeof(float)); float* x25 = (float*)myGpuMalloc(32 * sizeof(float)); float* x26 = (float*)myGpuMalloc(32 * sizeof(float)); float* x27 = (float*)myMalloc(236544 * sizeof(float));; for(int x29=0; x29 < 236544; x29++) { float x30 = (float)rand()/RAND_MAX; float x31 = x30 - 0.5f; float x32 = x31 * 0.05698029f; x27[x29] = x32; } // Tensor 'toGPU' invocation. 
float* x37 = (float*)myGpuMalloc(236544 * sizeof(float)); CUDA_CALL(cudaMemcpy(x37, x27, 236544 * sizeof(float), cudaMemcpyHostToDevice)); float* x39 = (float*)myGpuMalloc(236544 * sizeof(float)); float* x40 = (float*)myGpuMalloc(32 * sizeof(float)); arrayFill<<<28, 512>>>(x40, 1.0f, 32); float* x42 = (float*)myGpuMalloc(32 * sizeof(float)); float* x43 = (float*)myGpuMalloc(32 * sizeof(float)); float* x44 = (float*)myGpuMalloc(32 * sizeof(float)); float* x45 = (float*)myGpuMalloc(32 * sizeof(float)); float* x46 = (float*)myGpuMalloc(32 * sizeof(float)); printf("initial rnn input size is %d \n",672); float* x48 = (float*)myMalloc(3477504 * sizeof(float));; for(int x50=0; x50 < 3477504; x50++) { float x51 = (float)rand()/RAND_MAX; float x52 = x51 - 0.5f; float x53 = x52 * 0.01f; x48[x50] = x53; } // Tensor 'toGPU' invocation. float* x58 = (float*)myGpuMalloc(3477504 * sizeof(float)); CUDA_CALL(cudaMemcpy(x58, x48, 3477504 * sizeof(float), cudaMemcpyHostToDevice)); float* x60 = (float*)myGpuMalloc(3477504 * sizeof(float)); int32_t x61 = 0; int32_t x62 = x61; float* x63 = x58+x62; float* x64 = x60+x62; x61 += 688128; int32_t x66 = x61; float* x67 = x58+x66; float* x68 = x60+x66; x61 += 1048576; int32_t x70 = x61; float* x71 = x58+x70; float* x72 = x60+x70; x61 += 1024; int32_t x74 = x61; float* x75 = x58+x74; float* x76 = x60+x74; x61 += 1024; int32_t x78 = x61; float* x79 = x58+x78; float* x80 = x60+x78; x61 += 688128; int32_t x82 = x61; float* x83 = x58+x82; float* x84 = x60+x82; x61 += 1048576; int32_t x86 = x61; float* x87 = x58+x86; float* x88 = x60+x86; x61 += 1024; int32_t x90 = x61; float* x91 = x58+x90; float* x92 = x60+x90; x61 += 1024; float* x94 = (float*)myMalloc(4198400 * sizeof(float));; for(int x96=0; x96 < 4198400; x96++) { float x97 = (float)rand()/RAND_MAX; float x98 = x97 - 0.5f; float x99 = x98 * 0.01f; x94[x96] = x99; } // Tensor 'toGPU' invocation. float* x104 = (float*)myGpuMalloc(4198400 * sizeof(float)); CUDA_CALL(cudaMemcpy(x104, x94, 4198400 * sizeof(float), cudaMemcpyHostToDevice)); float* x106 = (float*)myGpuMalloc(4198400 * sizeof(float)); int32_t x107 = 0; int32_t x108 = x107; float* x109 = x104+x108; float* x110 = x106+x108; x107 += 1048576; int32_t x112 = x107; float* x113 = x104+x112; float* x114 = x106+x112; x107 += 1048576; int32_t x116 = x107; float* x117 = x104+x116; float* x118 = x106+x116; x107 += 1024; int32_t x120 = x107; float* x121 = x104+x120; float* x122 = x106+x120; x107 += 1024; int32_t x124 = x107; float* x125 = x104+x124; float* x126 = x106+x124; x107 += 1048576; int32_t x128 = x107; float* x129 = x104+x128; float* x130 = x106+x128; x107 += 1048576; int32_t x132 = x107; float* x133 = x104+x132; float* x134 = x106+x132; x107 += 1024; int32_t x136 = x107; float* x137 = x104+x136; float* x138 = x106+x136; x107 += 1024; float* x140 = (float*)myMalloc(4198400 * sizeof(float));; for(int x141=0; x141 < 4198400; x141++) { float x142 = (float)rand()/RAND_MAX; float x143 = x142 - 0.5f; float x144 = x143 * 0.01f; x140[x141] = x144; } // Tensor 'toGPU' invocation. 
float* x149 = (float*)myGpuMalloc(4198400 * sizeof(float)); CUDA_CALL(cudaMemcpy(x149, x140, 4198400 * sizeof(float), cudaMemcpyHostToDevice)); float* x151 = (float*)myGpuMalloc(4198400 * sizeof(float)); int32_t x152 = 0; int32_t x153 = x152; float* x154 = x149+x153; float* x155 = x151+x153; x152 += 1048576; int32_t x157 = x152; float* x158 = x149+x157; float* x159 = x151+x157; x152 += 1048576; int32_t x161 = x152; float* x162 = x149+x161; float* x163 = x151+x161; x152 += 1024; int32_t x165 = x152; float* x166 = x149+x165; float* x167 = x151+x165; x152 += 1024; int32_t x169 = x152; float* x170 = x149+x169; float* x171 = x151+x169; x152 += 1048576; int32_t x173 = x152; float* x174 = x149+x173; float* x175 = x151+x173; x152 += 1048576; int32_t x177 = x152; float* x178 = x149+x177; float* x179 = x151+x177; x152 += 1024; int32_t x181 = x152; float* x182 = x149+x181; float* x183 = x151+x181; x152 += 1024; float* x185 = (float*)myGpuMalloc(1024 * sizeof(float)); arrayFill<<<28, 512>>>(x185, 1.0f, 1024); float* x187 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x188 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x189 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x190 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x191 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x192 = (float*)myMalloc(29696 * sizeof(float));; for(int x194=0; x194 < 29696; x194++) { float x195 = (float)rand()/RAND_MAX; float x196 = x195 - 0.5f; float x197 = x196 * 0.03125f; x192[x194] = x197; } // Tensor 'toGPU' invocation. float* x202 = (float*)myGpuMalloc(29696 * sizeof(float)); CUDA_CALL(cudaMemcpy(x202, x192, 29696 * sizeof(float), cudaMemcpyHostToDevice)); float* x204 = (float*)myGpuMalloc(29696 * sizeof(float)); float* x205 = (float*)myGpuMalloc(14432 * sizeof(float)); float* x206 = (float*)myGpuMalloc(236544 * sizeof(float)); float* x207 = (float*)myGpuMalloc(32 * sizeof(float)); float* x208 = (float*)myGpuMalloc(32 * sizeof(float)); float* x209 = (float*)myGpuMalloc(32 * sizeof(float)); float* x210 = (float*)myGpuMalloc(32 * sizeof(float)); float* x211 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x212 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x213 = (float*)myGpuMalloc(29696 * sizeof(float)); float* x214 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x215 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x216 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x217 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x218 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x219 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x220 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x221 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x222 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x223 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x224 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x225 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x226 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x227 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x228 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x229 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x230 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x231 = (float*)myGpuMalloc(688128 * sizeof(float)); float* x232 = (float*)myGpuMalloc(688128 * sizeof(float)); float* x233 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x234 = (float*)myGpuMalloc(1048576 * sizeof(float)); float* x235 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x236 = 
(float*)myGpuMalloc(1024 * sizeof(float)); float* x237 = (float*)myGpuMalloc(1048576 * sizeof(float)); int32_t x238 = open("/scratch-ml00/wang603/deepspeechData/deepspeech_train.bin",0); int64_t x239 = fsize(x238); printf("file size is %ld\n",x239); char* x241 = (char*)mmap(0, x239, PROT_READ | PROT_WRITE, MAP_FILE | MAP_PRIVATE, x238, 0); int64_t x242 = (long)x241; int64_t x243 = x242; int64_t x244 = x243; int* x245 = (int32_t*) x244; int64_t x246 = (int64_t)4; x243 += x246; int32_t x248 = x245[0]; // int64_t x249 = x243; // int* x250 = (int32_t*) x249; x243 += x246; // int32_t x252 = x250[0]; printf("data size is %d batches, %d batch size\n",200,x248); int* x255 = (int32_t*)myMalloc(200 * sizeof(int32_t));; int* x256 = (int32_t*)myMalloc(200 * sizeof(int32_t));; float** x257 = (float**)myMalloc(200 * sizeof(float*));; float** x258 = (float**)myMalloc(200 * sizeof(float*));; int** x259 = (int**)myMalloc(200 * sizeof(int*));; int** x260 = (int**)myMalloc(200 * sizeof(int*));; // load data by batchs int32_t x286 = 4 * x248; int64_t x287 = (int64_t)x286; for(int x263=0; x263 < 200; x263++) { int64_t x264 = x243; int* x265 = (int32_t*) x264; x243 += x246; int32_t x267 = x265[0]; x255[x263] = x267; int64_t x269 = x243; int* x270 = (int32_t*) x269; x243 += x246; int32_t x272 = x270[0]; x256[x263] = x272; int32_t x274 = x255[x263]; int32_t x276 = x256[x263]; int64_t x278 = x243; float* x279 = (float*) x278; int32_t x275 = x248 * x274; int32_t x277 = x275 * x276; int32_t x280 = 4 * x277; int64_t x281 = (int64_t)x280; x243 += x281; x257[x263] = x279; int64_t x284 = x243; float* x285 = (float*) x284; x243 += x287; x258[x263] = x285; int64_t x290 = x243; int* x291 = (int32_t*) x290; x243 += x287; x259[x263] = x291; int* x294 = x259[x263]; int* x295 = x259[x263]; int32_t x296 = accumulate(x294, x295 + x248, 0); int64_t x297 = x243; int* x298 = (int32_t*) x297; int32_t x299 = 4 * x296; int64_t x300 = (int64_t)x299; x243 += x300; x260[x263] = x298; } gettimeofday(&end_0, NULL); timeval_subtract(&diff_0, &end_0, &begin_0);; int64_t x307 = ((diff_0.tv_sec * 1000000L) + (diff_0.tv_usec)); float x308 = (float)x307; float x309 = x308 / 1000000.0f; printf("Data reading (all prepare time) in %lf sec\n",x309); double* x311 = (double*)myMalloc(1 * sizeof(double));; double* x312 = (double*)myMalloc(1 * sizeof(double));; // training loop starts here int32_t x358 = x248 * 32; int32_t x450 = 2048 / 2; int32_t x454 = x248 * x450; int32_t x451 = 2 * x450; int32_t x452 = x248 * x451; int32_t x657 = x248 * 20; int32_t x253 = x248 * 200; double x662 = (double)x253; int64_t x685 = (int64_t)x253; float x692 = (float)x253; for(int x317=0; x317 < 1; x317++) { struct timeval begin_1, end_1, diff_1; int32_t x319 = 0; int32_t x320 = x319; int32_t x321 = x320; float x322 = 0.0f; float x323 = x322; float x324 = x323; int32_t x325 = x317 + 1; printf("Start training epoch %d\n",x325); // RNN descriptors refactored size_t dropoutStateSize_4; CUDNN_CALL(cudnnDropoutGetStatesSize(cudnnHandle, &dropoutStateSize_4)); void* dropoutStates_4 = NULL; cudnnDropoutDescriptor_t dropout_desc_4; CUDNN_CALL(cudnnCreateDropoutDescriptor(&dropout_desc_4)); CUDNN_CALL(cudnnSetDropoutDescriptor( dropout_desc_4, cudnnHandle, 0.0, dropoutStates_4, dropoutStateSize_4, time(NULL))); cudnnRNNDescriptor_t rnn_desc_4; CUDNN_CALL(cudnnCreateRNNDescriptor(&rnn_desc_4)); CUDNN_CALL(cudnnSetRNNDescriptor( cudnnHandle, rnn_desc_4, /*hiddenSize*/ 1024, /*numLayers*/ 1, dropout_desc_4, CUDNN_LINEAR_INPUT, CUDNN_BIDIRECTIONAL, CUDNN_RNN_RELU, 
CUDNN_RNN_ALGO_STANDARD, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetRNNMatrixMathType(rnn_desc_4, CUDNN_TENSOR_OP_MATH)); int batchSize_4 = 32; int inputSize_4 = 672; int hiddenSize_4 = 1024; cudnnTensorDescriptor_t x_desc_4; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc_4)); int x_dims_4[] = {batchSize_4, inputSize_4, 1}; int x_strides_4[] = {x_dims_4[1] * x_dims_4[2], x_dims_4[2], 1}; CUDNN_CALL(cudnnSetTensorNdDescriptor( x_desc_4, CUDNN_DATA_FLOAT, /*nbDims*/ 3, x_dims_4, x_strides_4)); size_t paramsSize_4; CUDNN_CALL(cudnnGetRNNParamsSize( cudnnHandle, rnn_desc_4, x_desc_4, &paramsSize_4, CUDNN_DATA_FLOAT)); //#ifdef DEBUG // assert(paramsSize_4 / sizeof(float) == 3477504 && "Expected parameter size mismatch"); //#endif cudnnFilterDescriptor_t w_desc_4; CUDNN_CALL(cudnnCreateFilterDescriptor(&w_desc_4)); int w_dims_4[] = {int(paramsSize_4 / sizeof(float)), 1, 1}; CUDNN_CALL(cudnnSetFilterNdDescriptor( w_desc_4, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, /*nbDims*/ 3, w_dims_4)); cudnnTensorDescriptor_t x_desc_5; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc_5)); int x_dims_5[] = {batchSize_4, hiddenSize_4, 1}; int x_strides_5[] = {x_dims_5[1] * x_dims_5[2], x_dims_5[2], 1}; CUDNN_CALL(cudnnSetTensorNdDescriptor( x_desc_5, CUDNN_DATA_FLOAT, /*nbDims*/ 3, x_dims_5, x_strides_5)); size_t paramsSize_5; CUDNN_CALL(cudnnGetRNNParamsSize( cudnnHandle, rnn_desc_4, x_desc_5, &paramsSize_5, CUDNN_DATA_FLOAT)); //#ifdef DEBUG // assert(paramsSize_5 / sizeof(float) == 4198400 && "Expected parameter size mismatch"); //#endif cudnnFilterDescriptor_t w_desc_5; CUDNN_CALL(cudnnCreateFilterDescriptor(&w_desc_5)); int w_dims_5[] = {int(paramsSize_5 / sizeof(float)), 1, 1}; CUDNN_CALL(cudnnSetFilterNdDescriptor( w_desc_5, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, /*nbDims*/ 3, w_dims_5)); cudnnTensorDescriptor_t hx_desc_4; CUDNN_CALL(cudnnCreateTensorDescriptor(&hx_desc_4)); int hx_dims_4[] = {2, batchSize_4, 1024}; int hx_strides_4[] = {hx_dims_4[1] * hx_dims_4[2], hx_dims_4[2], 1}; CUDNN_CALL(cudnnSetTensorNdDescriptor( hx_desc_4, CUDNN_DATA_FLOAT, /*nbDims*/ 3, hx_dims_4, hx_strides_4)); cudnnTensorDescriptor_t y_desc_4; CUDNN_CALL(cudnnCreateTensorDescriptor(&y_desc_4)); int y_dims_4[] = {batchSize_4, 2048, 1}; int y_strides_4[] = {y_dims_4[1] * y_dims_4[2], y_dims_4[2], 1}; CUDNN_CALL(cudnnSetTensorNdDescriptor( y_desc_4, CUDNN_DATA_FLOAT, /*nbDims*/ 3, y_dims_4, y_strides_4)); int seqLength_4 = 630; cudnnTensorDescriptor_t x_descs_4[seqLength_4]; for (int i = 0; i < seqLength_4; i++) { x_descs_4[i] = x_desc_4; } cudnnTensorDescriptor_t y_descs_4[seqLength_4]; for (int i = 0; i < seqLength_4; i++) { y_descs_4[i] = y_desc_4; } cudnnTensorDescriptor_t x_descs_5[seqLength_4]; for (int i = 0; i < seqLength_4; i++) { x_descs_5[i] = x_desc_5; } cudnnTensorDescriptor_t y_descs_5[seqLength_4]; for (int i = 0; i < seqLength_4; i++) { y_descs_5[i] = y_desc_4; } size_t workspaceSize_x; CUDNN_CALL(cudnnGetRNNWorkspaceSize( cudnnHandle, rnn_desc_4, seqLength_4, x_descs_5, &workspaceSize_x)); void* workspace_x = myGpuMalloc(workspaceSize_x); size_t reserveSize_x1; CUDNN_CALL(cudnnGetRNNTrainingReserveSize( cudnnHandle, rnn_desc_4, seqLength_4, x_descs_4, &reserveSize_x1)); void* reserveSpace_x1 = myGpuMalloc(reserveSize_x1); size_t reserveSize_x2; CUDNN_CALL(cudnnGetRNNTrainingReserveSize( cudnnHandle, rnn_desc_4, seqLength_4, x_descs_5, &reserveSize_x2)); void* reserveSpace_x2 = myGpuMalloc(reserveSize_x2); size_t reserveSize_x3; CUDNN_CALL(cudnnGetRNNTrainingReserveSize( cudnnHandle, rnn_desc_4, seqLength_4, x_descs_5, 
&reserveSize_x3)); void* reserveSpace_x3 = myGpuMalloc(reserveSize_x3); // CNN/batchNorm descriptor refactored cudnnTensorDescriptor_t in_desc_0; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc_0)); cudnnFilterDescriptor_t filt_desc_0; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc_0)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc_0, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 32, 1, 41, 11)); cudnnTensorDescriptor_t out_desc_0; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc_0)); cudnnConvolutionDescriptor_t conv_desc_0; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc_0)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc_0, 0, 0, 2, 2, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc_0, CUDNN_TENSOR_OP_MATH));; cudnnTensorDescriptor_t in_desc_1; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc_1)); cudnnTensorDescriptor_t out_desc_1; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc_1)); cudnnTensorDescriptor_t sbmv_desc_1; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc_1)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc_1, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 32, 1, 1)); cudnnTensorDescriptor_t in_desc_2; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc_2)); cudnnFilterDescriptor_t filt_desc_2; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc_2)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc_2, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 32, 32, 21, 11)); cudnnTensorDescriptor_t out_desc_2; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc_2)); cudnnConvolutionDescriptor_t conv_desc_2; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc_2)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc_2, 0, 0, 2, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc_2, CUDNN_TENSOR_OP_MATH));; cudnnTensorDescriptor_t in_desc_3; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc_3)); cudnnTensorDescriptor_t out_desc_3; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc_3)); cudnnTensorDescriptor_t sbmv_desc_3; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc_3)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc_3, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 32, 1, 1)); cudnnTensorDescriptor_t in_desc_7; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc_7)); cudnnTensorDescriptor_t sbmv_desc_7; CUDNN_CALL(cudnnCreateTensorDescriptor(&sbmv_desc_7)); CUDNN_CALL(cudnnSetTensor4dDescriptor( sbmv_desc_7, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1024, 1, 1)); // Other Workspace. 
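// The allocations and descriptors below provide the remaining scratch space for one
// epoch: a 234248-byte workspace shared by both convolutions, transpose/softmax/CTC
// tensor descriptors, a 70129408-byte CTC-loss workspace, and an AVG tensor-reduction
// descriptor (with a small 128-byte workspace) that collapses the per-utterance CTC
// losses into a single scalar batch loss.  All sizes are hard-coded literals matching
// the fixed batch geometry used by this generated program (batch size 32).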
size_t ws_size_1 = 234248; void *ws_data_1 = myGpuMalloc(ws_size_1); cudnnTensorDescriptor_t in_desc_trans; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc_trans)); cudnnTensorDescriptor_t out_desc_trans; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc_trans)); cudnnTensorDescriptor_t x_desc_soft; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc_soft)); cudnnTensorDescriptor_t probs_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&probs_desc)); cudnnCTCLossDescriptor_t ctc_desc; CUDNN_CALL(cudnnCreateCTCLossDescriptor(&ctc_desc)); CUDNN_CALL(cudnnSetCTCLossDescriptor(ctc_desc, CUDNN_DATA_FLOAT)); size_t wsSizeCTC = 70129408; void *wsCTC = myGpuMalloc(wsSizeCTC); cudnnTensorDescriptor_t x_desc_red; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc_red)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc_red, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 32, 1, 1, 1)); cudnnTensorDescriptor_t out_desc_red; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc_red)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc_red, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 1, 1, 1)); cudnnReduceTensorDescriptor_t reduce_desc; CUDNN_CALL(cudnnCreateReduceTensorDescriptor(&reduce_desc)); CUDNN_CALL(cudnnSetReduceTensorDescriptor( reduce_desc, CUDNN_REDUCE_TENSOR_AVG, CUDNN_DATA_FLOAT, CUDNN_PROPAGATE_NAN, CUDNN_REDUCE_TENSOR_NO_INDICES, CUDNN_32BIT_INDICES)); size_t ws_size_red = 128; void *ws_data_red = myGpuMalloc(ws_size_red); float* x362 = (float*)myMalloc(1 * sizeof(float));; x362[0] = 0.0f; float* x364 = (float*)myMalloc(1 * sizeof(float));; x364[0] = 1.0f; int64_t x313 = (long)mallocAddr; int64_t x314 = (long)gpuMallocAddr; gettimeofday(&begin_1, NULL); // loop for one epoch for(int x328=0; x328 < 200; x328++) { int32_t x329 = x256[x328]; int32_t x330 = x255[x328]; float* x331 = x257[x328]; float* x334 = x258[x328]; int* x335 = x260[x328]; int* x336 = x259[x328]; x321 += x248; // Tensor 'toGPU' invocation. 
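// Per-batch body (everything up to the allocator reset at the end of this loop):
//   1. copy this batch's feature tensor host->device (cudaMemcpyAsync below);
//   2. conv 41x11, stride 2x2 -> spatial batch-norm -> hardTanh clamp to [0, 20];
//   3. conv 21x11, stride 2x1 -> spatial batch-norm -> hardTanh clamp to [0, 20];
//   4. transpose to time-major and run three bidirectional ReLU RNN layers
//      (hidden size 1024), summing the forward/backward outputs after each;
//   5. per-activation batch-norm, 1024->29 linear projection (cublasSgemm),
//      softmax, CTC loss, and an AVG reduction to a scalar batch loss;
//   6. the matching backward passes, followed by momentum updates of all parameters.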
int32_t x332 = x330 * x329; int32_t x333 = x248 * x332; float* x339 = (float*)myGpuMalloc(x333 * sizeof(float)); CUDA_CALL(cudaMemcpyAsync(x339, x331, x333 * sizeof(float), cudaMemcpyHostToDevice)); float* x341 = (float*)myGpuMalloc(2 * sizeof(float)); float* x342 = (float*)myGpuMalloc(1 * sizeof(float)); float* x343 = (float*)myGpuMalloc(1 * sizeof(float)); // allocate memory to save the final loss in CPU Tensor float* x345 = (float*)myGpuMalloc(1 * sizeof(float)); int32_t x352 = x329 - 11; int32_t x353 = x352 / 2; int32_t x354 = x353 + 1; int32_t x349 = x330 - 41; int32_t x350 = x349 / 2; int32_t x351 = x350 + 1; int32_t x359 = x358 * x351; int32_t x360 = x359 * x354; float* x361 = (float*)myGpuMalloc(x360 * sizeof(float)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc_0, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, x248, 1, x330, x329)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc_0, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, x248, 32, x351, x354)); CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x364, in_desc_0, x339, filt_desc_0, x17, conv_desc_0, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM, ws_data_1, ws_size_1, x362, out_desc_0, x361)); float* x368 = (float*)myGpuMalloc(x360 * sizeof(float)); int32_t x355 = x351 * x354; int32_t x356 = 32 * x355; int32_t x357 = x248 * x356; float* x369 = (float*)myGpuMalloc(x357 * sizeof(float)); float* x370 = (float*)myGpuMalloc(32 * sizeof(float)); float* x371 = (float*)myGpuMalloc(32 * sizeof(float)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc_1, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, x248, 32, x351, x354)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc_1, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, x248, 32, x351, x354)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x364, x362, in_desc_1, x361, out_desc_1, x369, sbmv_desc_1, x20, x23, 0.1, x25, x26, 1.0E-5, x370, x371)); float* x378 = (float*)myGpuMalloc(x360 * sizeof(float)); hardTanh<<<28, 512>>>(x369, x369, 0.0, 20.0, true); int32_t x386 = x354 - 11; int32_t x387 = x386 / 1; int32_t x388 = x387 + 1; int32_t x383 = x351 - 21; int32_t x384 = x383 / 2; int32_t x385 = x384 + 1; int32_t x392 = x358 * x385; int32_t x393 = x392 * x388; float* x394 = (float*)myGpuMalloc(x393 * sizeof(float)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc_2, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, x248, 32, x351, x354)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc_2, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, x248, 32, x385, x388)); CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x364, in_desc_2, x369, filt_desc_2, x37, conv_desc_2, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM, ws_data_1, ws_size_1, x362, out_desc_2, x394)); float* x401 = (float*)myGpuMalloc(x393 * sizeof(float)); int32_t x389 = x385 * x388; int32_t x390 = 32 * x389; int32_t x391 = x248 * x390; float* x402 = (float*)myGpuMalloc(x391 * sizeof(float)); float* x403 = (float*)myGpuMalloc(32 * sizeof(float)); float* x404 = (float*)myGpuMalloc(32 * sizeof(float)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc_3, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, x248, 32, x385, x388)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc_3, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, x248, 32, x385, x388)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x364, x362, in_desc_3, x394, out_desc_3, x402, sbmv_desc_3, x40, x43, 0.1, x45, x46, 1.0E-5, x403, x404)); float* x411 = (float*)myGpuMalloc(x393 * sizeof(float)); hardTanh<<<28, 512>>>(x402, x402, 0.0, 20.0, true); // after conv ops int32_t x414 = 32 * 
x385; int32_t x415 = x414 * x388; int32_t x416 = x248 * x415; float* x417 = (float*)myGpuMalloc(x416 * sizeof(float)); int* x420 = (int32_t*)myMalloc(4 * sizeof(int32_t));; int32_t x418 = x248 * x414; x420[2] = x418; x420[0] = x414; x420[1] = 1; x420[3] = 1; int32_t x429 = x420[0]; int32_t x430 = x420[1]; int32_t x431 = x420[2]; int32_t x432 = x420[3]; CUDNN_CALL(cudnnSetTensor4dDescriptorEx( in_desc_trans, CUDNN_DATA_FLOAT, x248, x414, x388, 1, x415, x388, 1, 1)); CUDNN_CALL(cudnnSetTensor4dDescriptorEx( out_desc_trans, CUDNN_DATA_FLOAT, x248, x414, x388, 1, x429, x430, x431, x432)); CUDNN_CALL(cudnnTransformTensor( cudnnHandle, x364, in_desc_trans, x402, x362, out_desc_trans, x417)); int32_t x434 = x388 * x248; int32_t x435 = x434 * x414; float* x436 = (float*)myGpuMalloc(x435 * sizeof(float)); // after resize and permute float* x438 = (float*)NULL; float* x439 = (float*)NULL; float* x440 = (float*)NULL; int32_t x443 = x434 * 2048; float* x444 = (float*)myGpuMalloc(x443 * sizeof(float)); int32_t seqLength_4 = x388; // int32_t batchSize_4 = x248; // int32_t inputSize_4 = x414; CUDNN_CALL(cudnnRNNForwardTraining( cudnnHandle, rnn_desc_4, seqLength_4, x_descs_4, x417, hx_desc_4, x438, hx_desc_4, x439, w_desc_4, x58, y_descs_4, x444, hx_desc_4, x440, hx_desc_4, NULL, workspace_x, workspaceSize_x, reserveSpace_x1, reserveSize_x1)); float* x449 = (float*)myGpuMalloc(x443 * sizeof(float)); int32_t x455 = x388 * x454; float* x456 = (float*)myGpuMalloc(x455 * sizeof(float)); // optimization for dimension sum if size is small int32_t x458 = x434 * x450; sum_optimization<<<28, 512>>>(x444, x452, x451, x450, 1, x456, x454, x450, 1, 2, x458, 2); float* x460 = (float*)myGpuMalloc(x458 * sizeof(float)); float* x461 = (float*)NULL; float* x462 = (float*)NULL; float* x463 = (float*)NULL; float* x464 = (float*)myGpuMalloc(x443 * sizeof(float)); int32_t seqLength_5 = x388; // int32_t batchSize_5 = x248; // int32_t inputSize_5 = x450; CUDNN_CALL(cudnnRNNForwardTraining( cudnnHandle, rnn_desc_4, seqLength_5, x_descs_5, x456, hx_desc_4, x461, hx_desc_4, x462, w_desc_5, x104, y_descs_5, x464, hx_desc_4, x463, hx_desc_4, NULL, workspace_x, workspaceSize_x, reserveSpace_x2, reserveSize_x2)); float* x469 = (float*)myGpuMalloc(x443 * sizeof(float)); float* x470 = (float*)myGpuMalloc(x455 * sizeof(float)); // optimization for dimension sum if size is small sum_optimization<<<28, 512>>>(x464, x452, x451, x450, 1, x470, x454, x450, 1, 2, x458, 2); float* x473 = (float*)myGpuMalloc(x458 * sizeof(float)); float* x474 = (float*)NULL; float* x475 = (float*)NULL; float* x476 = (float*)NULL; float* x477 = (float*)myGpuMalloc(x443 * sizeof(float)); int32_t seqLength_6 = x388; // int32_t batchSize_6 = x248; // int32_t inputSize_6 = x450; CUDNN_CALL(cudnnRNNForwardTraining( cudnnHandle, rnn_desc_4, seqLength_6, x_descs_5, x470, hx_desc_4, x474, hx_desc_4, x475, w_desc_5, x149, y_descs_5, x477, hx_desc_4, x476, hx_desc_4, NULL, workspace_x, workspaceSize_x, reserveSpace_x3, reserveSize_x3)); float* x482 = (float*)myGpuMalloc(x443 * sizeof(float)); float* x483 = (float*)myGpuMalloc(x455 * sizeof(float)); // optimization for dimension sum if size is small sum_optimization<<<28, 512>>>(x477, x452, x451, x450, 1, x483, x454, x450, 1, 2, x458, 2); float* x486 = (float*)myGpuMalloc(x458 * sizeof(float)); float* x489 = (float*)myGpuMalloc(x458 * sizeof(float)); float* x490 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x491 = (float*)myGpuMalloc(1024 * sizeof(float)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc_7, 
CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, x434, x450, 1, 1)); CUDNN_CALL(cudnnBatchNormalizationForwardTraining( cudnnHandle, CUDNN_BATCHNORM_PER_ACTIVATION, x364, x362, in_desc_7, x483, in_desc_7, x489, sbmv_desc_7, x185, x188, 0.1, x190, x191, 1.0E-5, x490, x491)); float* x498 = (float*)myGpuMalloc(x458 * sizeof(float)); int32_t x499 = x434 * 29; float* x500 = (float*)myGpuMalloc(x499 * sizeof(float)); CUBLAS_CALL(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, 29,x434,1024,x364,x202,29,x489,1024,x362,x500,29)); float* x506 = (float*)myGpuMalloc(x499 * sizeof(float)); float* x513 = (float*)myGpuMalloc(x499 * sizeof(float)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc_soft, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, x434, 29, 1, 1)); CUDNN_CALL(cudnnSoftmaxForward( cudnnHandle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL, x364, x_desc_soft, x500, x362, x_desc_soft, x513)); float* x515 = (float*)myGpuMalloc(x499 * sizeof(float)); // before CTC loss int* x517 = (int32_t*)myMalloc(x248 * sizeof(int32_t));; float x521 = (float)x388; for(int x519=0; x519 < x248; x519++) { float x520 = x334[x519]; float x522 = x520 * x521; int32_t x523 = (int)x522; x517[x519] = x523; } float* x528 = (float*)myGpuMalloc(x248 * sizeof(float)); { int probs_dims[] = {x388, x248, 29}; int probs_strides[] = {probs_dims[1] * probs_dims[2], probs_dims[2], 1}; CUDNN_CALL(cudnnSetTensorNdDescriptor( probs_desc, CUDNN_DATA_FLOAT, /*nbDims*/ 3, probs_dims, probs_strides)); CUDNN_CALL(cudnnCTCLoss( cudnnHandle, probs_desc, x513, x335, x336, x517, x528, probs_desc, x515, CUDNN_CTC_LOSS_ALGO_DETERMINISTIC, ctc_desc, wsCTC, wsSizeCTC)); }; float* x530 = (float*)myGpuMalloc(1 * sizeof(float)); CUDNN_CALL(cudnnReduceTensor( cudnnHandle, reduce_desc, nullptr, 0, ws_data_red, ws_size_red, x364, x_desc_red, x528, x362, out_desc_red, x530)); // after CTC loss float* x537 = (float*)myGpuMalloc(1 * sizeof(float)); // make sure the size of loss is 1 arrayFill<<<28, 512>>>(x537, 1.0f, 1); // backend is lantern.TensorDslCudnn$BackendCudnn@50648033 CUDA_CALL(cudaMemcpyAsync(x345, x530, 1 * sizeof(float), cudaMemcpyDeviceToDevice)); CUDNN_CALL(cudnnSoftmaxBackward( cudnnHandle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL, x364, x_desc_soft, x513, x_desc_soft, x515, x364, x_desc_soft, x506)); // backprop of matrix-matrix-dot CUBLAS_CALL(cublasSgemm(cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, x450,x434,29,x364,x202,29,x506,29,x364,x498,x450)); CUBLAS_CALL(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, 29,x450,x434,x364,x506,29,x489,x450,x364,x204,29)); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_PER_ACTIVATION, x364, x364, x364, x364, in_desc_7, x483, in_desc_7, x498, in_desc_7, x486, sbmv_desc_7, x185, x187,x189, 1.0E-5, x490, x491)); // backprop for sum on dim op int32_t x453 = x388 * x452; sum_grad<<<28, 512>>>(x482, x388, x248, 2, x450, x453, x486, x454, x450, 1, 2); float* x563 = (float*)NULL; float* x564 = (float*)NULL; CUDNN_CALL(cudnnRNNBackwardData( cudnnHandle, rnn_desc_4, seqLength_6, y_descs_5, x477, y_descs_5, x482, hx_desc_4, NULL, hx_desc_4, NULL, w_desc_5, x149, hx_desc_4, x563, hx_desc_4, x564, x_descs_5, x473, hx_desc_4, NULL, hx_desc_4, NULL, workspace_x, workspaceSize_x, reserveSpace_x3, reserveSize_x3)); float* x566 = (float*)NULL; CUDNN_CALL(cudnnRNNBackwardWeights( cudnnHandle, rnn_desc_4, seqLength_6, x_descs_5, x470, hx_desc_4, x566, y_descs_5, x477, workspace_x, workspaceSize_x, w_desc_5, x151, reserveSpace_x3, reserveSize_x3)); // backprop for sum on dim op sum_grad<<<28, 
512>>>(x469, x388, x248, 2, x450, x453, x473, x454, x450, 1, 2); float* x570 = (float*)NULL; float* x571 = (float*)NULL; CUDNN_CALL(cudnnRNNBackwardData( cudnnHandle, rnn_desc_4, seqLength_5, y_descs_5, x464, y_descs_5, x469, hx_desc_4, NULL, hx_desc_4, NULL, w_desc_5, x104, hx_desc_4, x570, hx_desc_4, x571, x_descs_5, x460, hx_desc_4, NULL, hx_desc_4, NULL, workspace_x, workspaceSize_x, reserveSpace_x2, reserveSize_x2)); float* x573 = (float*)NULL; CUDNN_CALL(cudnnRNNBackwardWeights( cudnnHandle, rnn_desc_4, seqLength_5, x_descs_5, x456, hx_desc_4, x573, y_descs_5, x464, workspace_x, workspaceSize_x, w_desc_5, x106, reserveSpace_x2, reserveSize_x2)); // backprop for sum on dim op sum_grad<<<28, 512>>>(x449, x388, x248, 2, x450, x453, x460, x454, x450, 1, 2); float* x577 = (float*)NULL; float* x578 = (float*)NULL; CUDNN_CALL(cudnnRNNBackwardData( cudnnHandle, rnn_desc_4, seqLength_4, y_descs_4, x444, y_descs_4, x449, hx_desc_4, NULL, hx_desc_4, NULL, w_desc_4, x58, hx_desc_4, x577, hx_desc_4, x578, x_descs_4, x436, hx_desc_4, NULL, hx_desc_4, NULL, workspace_x, workspaceSize_x, reserveSpace_x1, reserveSize_x1)); float* x580 = (float*)NULL; CUDNN_CALL(cudnnRNNBackwardWeights( cudnnHandle, rnn_desc_4, seqLength_4, x_descs_4, x417, hx_desc_4, x580, y_descs_4, x444, workspace_x, workspaceSize_x, w_desc_4, x60, reserveSpace_x1, reserveSize_x1)); // backprop for permute WrappedArray(2, 0, 1) int* x583 = (int32_t*)myMalloc(4 * sizeof(int32_t));; x583[2] = x418; x583[0] = x414; x583[1] = 1; x583[3] = 1; int32_t x590 = x583[0]; int32_t x591 = x583[1]; int32_t x592 = x583[2]; int32_t x593 = x583[3]; CUDNN_CALL(cudnnTransformTensor( cudnnHandle, x364, out_desc_trans, x436, x364, in_desc_trans, x411)); hardTanh_grad<<<28, 512>>>(x402, x411, x411, 0.0, 20.0, x391, true); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x364, x364, x364, x364, in_desc_3, x394, out_desc_3, x411, in_desc_3, x401, sbmv_desc_3, x40, x42,x44, 1.0E-5, x403, x404)); // conv2D back-propagate CUDNN_CALL(cudnnConvolutionBackwardData( cudnnHandle, x364, filt_desc_2, x37, out_desc_2, x401, conv_desc_2, CUDNN_CONVOLUTION_BWD_DATA_ALGO_0, ws_data_1, ws_size_1, x364, in_desc_2, x378)); CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x364, in_desc_2, x369, out_desc_2, x401, conv_desc_2, CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3, ws_data_1, ws_size_1, x364, filt_desc_2, x39)); hardTanh_grad<<<28, 512>>>(x369, x378, x378, 0.0, 20.0, x357, true); CUDNN_CALL(cudnnBatchNormalizationBackward( cudnnHandle, CUDNN_BATCHNORM_SPATIAL, x364, x364, x364, x364, in_desc_1, x361, out_desc_1, x378, in_desc_1, x368, sbmv_desc_1, x20, x22,x24, 1.0E-5, x370, x371)); // conv2D back-propagate CUDNN_CALL(cudnnConvolutionBackwardFilter( cudnnHandle, x364, in_desc_0, x339, out_desc_0, x368, conv_desc_0, CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3, ws_data_1, ws_size_1, x364, filt_desc_0, x19)); // Tensor 'toCPU' invocation. 
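// Copy the scalar batch loss back to the host, add it to the running epoch total
// (x324), and update every parameter tensor with momentum_update_1D_1D.  Judging by
// the call sites, its arguments are the parameter, its gradient, a persistent momentum
// buffer, then what appear to be the learning rate (3e-8), momentum (0.9), a
// gradient-clipping bound (400), a flag, and the element count (the kernel body is not
// shown in this excerpt, so these meanings are inferred).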
float* x619 = (float*)myMalloc(1 * sizeof(float));; CUDA_CALL(cudaMemcpyAsync(x619, x345, 1 * sizeof(float), cudaMemcpyDeviceToHost)); float x621 = x619[0]; x324 += x621; momentum_update_1D_1D<<<28, 512>>>(x17, x19, x205, 3.0E-8, 0.9, 400.0, true, 14432); momentum_update_1D_1D<<<28, 512>>>(x37, x39, x206, 3.0E-8, 0.9, 400.0, true, 236544); momentum_update_1D_1D<<<28, 512>>>(x40, x42, x207, 3.0E-8, 0.9, 400.0, true, 32); momentum_update_1D_1D<<<28, 512>>>(x43, x44, x208, 3.0E-8, 0.9, 400.0, true, 32); momentum_update_1D_1D<<<28, 512>>>(x23, x24, x209, 3.0E-8, 0.9, 400.0, true, 32); momentum_update_1D_1D<<<28, 512>>>(x20, x22, x210, 3.0E-8, 0.9, 400.0, true, 32); momentum_update_1D_1D<<<28, 512>>>(x185, x187, x211, 3.0E-8, 0.9, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x188, x189, x212, 3.0E-8, 0.9, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x202, x204, x213, 3.0E-8, 0.9, 400.0, true, 29696); momentum_update_1D_1D<<<28, 512>>>(x174, x175, x214, 3.0E-8, 0.9, 400.0, true, 1048576); momentum_update_1D_1D<<<28, 512>>>(x170, x171, x215, 3.0E-8, 0.9, 400.0, true, 1048576); momentum_update_1D_1D<<<28, 512>>>(x182, x183, x216, 3.0E-8, 0.9, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x178, x179, x217, 3.0E-8, 0.9, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x154, x155, x218, 3.0E-8, 0.9, 400.0, true, 1048576); momentum_update_1D_1D<<<28, 512>>>(x158, x159, x219, 3.0E-8, 0.9, 400.0, true, 1048576); momentum_update_1D_1D<<<28, 512>>>(x166, x167, x220, 3.0E-8, 0.9, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x162, x163, x221, 3.0E-8, 0.9, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x129, x130, x222, 3.0E-8, 0.9, 400.0, true, 1048576); momentum_update_1D_1D<<<28, 512>>>(x125, x126, x223, 3.0E-8, 0.9, 400.0, true, 1048576); momentum_update_1D_1D<<<28, 512>>>(x137, x138, x224, 3.0E-8, 0.9, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x109, x110, x225, 3.0E-8, 0.9, 400.0, true, 1048576); momentum_update_1D_1D<<<28, 512>>>(x133, x134, x226, 3.0E-8, 0.9, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x113, x114, x227, 3.0E-8, 0.9, 400.0, true, 1048576); momentum_update_1D_1D<<<28, 512>>>(x117, x118, x228, 3.0E-8, 0.9, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x121, x122, x229, 3.0E-8, 0.9, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x91, x92, x230, 3.0E-8, 0.9, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x79, x80, x231, 3.0E-8, 0.9, 400.0, true, 688128); momentum_update_1D_1D<<<28, 512>>>(x63, x64, x232, 3.0E-8, 0.9, 400.0, true, 688128); momentum_update_1D_1D<<<28, 512>>>(x87, x88, x233, 3.0E-8, 0.9, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x67, x68, x234, 3.0E-8, 0.9, 400.0, true, 1048576); momentum_update_1D_1D<<<28, 512>>>(x71, x72, x235, 3.0E-8, 0.9, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x75, x76, x236, 3.0E-8, 0.9, 400.0, true, 1024); momentum_update_1D_1D<<<28, 512>>>(x83, x84, x237, 3.0E-8, 0.9, 400.0, true, 1048576); int32_t x656 = x321; int32_t x658 = x656 % x657; bool x659 = x658 == 0; if (x659) { float x664 = x324; double x660 = (double)x656; double x661 = 100.0 * x660; double x663 = x661 / x662; float x665 = (float)x656; float x666 = x664 / x665; printf("Train epoch %d: [%d/%d (%.0f%%)]\tAverage Loss: %.6f\n",x317,x656,x253,x663,x666); fflush(stdout); } else { } int64_t x671 = (long)mallocAddr; int64_t x672 = x671 - x313; memset((void*)x313, 0, x672); mallocAddr = (void*)x313; int64_t x675 = (long)gpuMallocAddr; int64_t x676 = x675 - x314; 
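// Mirror the host-arena reset just performed: zero the GPU region used by this batch
// and rewind the GPU bump allocator (gpuMallocAddr) to the mark x314 captured before
// the batch loop, so the next iteration reuses the same arena.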
cudaMemset((void*)x314, 0, x676); gpuMallocAddr = (void*)x314; } gettimeofday(&end_1, NULL); timeval_subtract(&diff_1, &end_1, &begin_1);; int64_t x683 = ((diff_1.tv_sec * 1000000L) + (diff_1.tv_usec)); int64_t x684 = x683 / 1000LL; int64_t x686 = x683 / x685; printf("Training completed in %ldms (%ld us/images)\n",x684,x686); double x688 = (double)x683; double x689 = x688 / 1000000.0; x312[x317] = x689; float x691 = x324; float x693 = x691 / x692; double x694 = (double)x693; x311[x317] = x694; } gettimeofday(&end_0, NULL); timeval_subtract(&diff_0, &end_0, &begin_0);; // int64_t x700 = ((diff_0.tv_sec * 1000000L) + (diff_0.tv_usec)); sort(x312, x312 + 1); double x706 = x312[0]; int64_t x707 = (long)fopen(x0, "w"); fprintf((FILE *)x707, "unit: %s\n", "1 epoch"); for(int x709=0; x709 < 1; x709++) { double x710 = x311[x709]; fprintf((FILE *)x707, "%lf\n", x710); } fprintf((FILE *)x707, "run time: %lf %lf\n", x309, x706); fclose((FILE*)x707); // Backend cleanup. CUBLAS_CALL(cublasDestroy(cublasHandle)); CUDA_CALL(cudaFree(gpuMallocBase)); CUDNN_CALL(cudnnDestroy(cudnnHandle)); } /***************************************** End of C Generated Code *******************************************/
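/* The file above is machine-generated (the "backend is lantern.TensorDslCudnn" marker
 * identifies the Lantern code generator), so helper kernels such as sum_optimization,
 * sum_grad, hardTanh and momentum_update_1D_1D are only invoked here and their bodies
 * are not shown in this excerpt.  As a readability aid, a minimal hand-written sketch
 * of the direction-sum step follows: it collapses a bidirectional RNN output laid out
 * as [T, N, 2, H] (the layout implied by the strides passed to sum_optimization above)
 * into [T, N, H].  The kernel name and launch shape are illustrative assumptions, not
 * the generated implementation. */
__global__ void sum_directions(const float* in, float* out, int total /* = T*N*H */, int H)
{
    int stride = gridDim.x * blockDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += stride)
    {
        int h    = i % H;          // hidden index
        int tn   = i / H;          // fused time*batch index
        int base = tn * 2 * H;     // start of this step's [2, H] slice
        out[i] = in[base + h] + in[base + H + h];   // forward + backward direction
    }
}
// Example launch (hypothetical sizes): sum_directions<<<28, 512>>>(y_bidir, y_sum, T*N*1024, 1024);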
/******************************************************************************
* Evaluates different tuning configurations of DeviceReduce. * * The best way to use this program: * (1) Find the best all-around single-block tune for a given arch. * For example, 100 samples [1 ..512], 100 timing iterations per config per sample: * ./bin/tune_device_reduce_sm200_nvvm_5.0_abi_i386 --i=100 --s=100 --n=512 --single --device=0 * (2) Update the single tune in device_reduce.cuh * (3) Find the best all-around multi-block tune for a given arch. * For example, 100 samples [single-block tile-size .. 50,331,648], 100 timing iterations per config per sample: * ./bin/tune_device_reduce_sm200_nvvm_5.0_abi_i386 --i=100 --s=100 --device=0 * (4) Update the multi-block tune in device_reduce.cuh * ******************************************************************************/ // Ensure printing of CUDA runtime errors to console #define CUB_STDERR #include <vector> #include <algorithm> #include <stdio.h> #include <cub/cub.cuh> #include "../test/test_util.h" using namespace cub; using namespace std; //--------------------------------------------------------------------- // Globals, constants and typedefs //--------------------------------------------------------------------- #ifndef TUNE_ARCH #define TUNE_ARCH 100 #endif int g_max_items = 48 * 1024 * 1024; int g_samples = 100; int g_timing_iterations = 2; bool g_verbose = false; bool g_single = false; bool g_verify = true; CachingDeviceAllocator g_allocator; //--------------------------------------------------------------------- // Host utility subroutines //--------------------------------------------------------------------- /** * Initialize problem */ template <typename T> void Initialize( GenMode gen_mode, T *h_in, int num_items) { for (int i = 0; i < num_items; ++i) { InitValue(gen_mode, h_in[i], i); } } /** * Sequential reduction */ template <typename T, typename ReductionOp> T Reduce( T *h_in, ReductionOp reduction_op, int num_items) { T retval = h_in[0]; for (int i = 1; i < num_items; ++i) retval = reduction_op(retval, h_in[i]); return retval; } //--------------------------------------------------------------------- // Full tile test generation //--------------------------------------------------------------------- /** * Wrapper structure for generating and running different tuning configurations */ template < typename T, typename OffsetT, typename ReductionOp> struct Schmoo { //--------------------------------------------------------------------- // Types //--------------------------------------------------------------------- /// Pairing of kernel function pointer and corresponding dispatch params template <typename KernelPtr> struct DispatchTuple { KernelPtr kernel_ptr; DeviceReduce::KernelDispachParams params; float avg_throughput; float best_avg_throughput; OffsetT best_size; float hmean_speedup; DispatchTuple() : kernel_ptr(0), params(DeviceReduce::KernelDispachParams()), avg_throughput(0.0), best_avg_throughput(0.0), hmean_speedup(0.0), best_size(0) {} }; /** * Comparison operator for DispatchTuple.avg_throughput */ template <typename Tuple> static bool MinSpeedup(const Tuple &a, const Tuple &b) { float delta = a.hmean_speedup - b.hmean_speedup; return ((delta < 0.02) && (delta > -0.02)) ? 
(a.best_avg_throughput < b.best_avg_throughput) : // Negligible average performance differences: defer to best performance (a.hmean_speedup < b.hmean_speedup); } /// Multi-block reduction kernel type and dispatch tuple type typedef void (*MultiBlockDeviceReduceKernelPtr)(T*, T*, OffsetT, GridEvenShare<OffsetT>, GridQueue<OffsetT>, ReductionOp); typedef DispatchTuple<MultiBlockDeviceReduceKernelPtr> MultiDispatchTuple; /// Single-block reduction kernel type and dispatch tuple type typedef void (*SingleBlockDeviceReduceKernelPtr)(T*, T*, OffsetT, ReductionOp); typedef DispatchTuple<SingleBlockDeviceReduceKernelPtr> SingleDispatchTuple; //--------------------------------------------------------------------- // Fields //--------------------------------------------------------------------- vector<MultiDispatchTuple> multi_kernels; // List of generated multi-block kernels vector<SingleDispatchTuple> single_kernels; // List of generated single-block kernels //--------------------------------------------------------------------- // Kernel enumeration methods //--------------------------------------------------------------------- /** * Must have smem that fits in the SM * Must have vector load length that divides items per thread */ template <typename TilesReducePolicy, typename ReductionOp> struct SmemSize { enum { BYTES = sizeof(typename BlockReduceTiles<TilesReducePolicy, T*, OffsetT, ReductionOp>::TempStorage), IS_OK = ((BYTES < ArchProps<TUNE_ARCH>::SMEM_BYTES) && (TilesReducePolicy::ITEMS_PER_THREAD % TilesReducePolicy::VECTOR_LOAD_LENGTH == 0)) }; }; /** * Specialization that allows kernel generation with the specified TilesReducePolicy */ template < typename TilesReducePolicy, bool IsOk = SmemSize<TilesReducePolicy, ReductionOp>::IS_OK> struct Ok { /// Enumerate multi-block kernel and add to the list template <typename KernelsVector> static void GenerateMulti( KernelsVector &multi_kernels, int subscription_factor) { MultiDispatchTuple tuple; tuple.params.template Init<TilesReducePolicy>(subscription_factor); tuple.kernel_ptr = ReducePrivatizedKernel<TilesReducePolicy, T*, T*, OffsetT, ReductionOp>; multi_kernels.push_back(tuple); } /// Enumerate single-block kernel and add to the list template <typename KernelsVector> static void GenerateSingle(KernelsVector &single_kernels) { SingleDispatchTuple tuple; tuple.params.template Init<TilesReducePolicy>(); tuple.kernel_ptr = ReduceSingleKernel<TilesReducePolicy, T*, T*, OffsetT, ReductionOp>; single_kernels.push_back(tuple); } }; /** * Specialization that rejects kernel generation with the specified TilesReducePolicy */ template <typename TilesReducePolicy> struct Ok<TilesReducePolicy, false> { template <typename KernelsVector> static void GenerateMulti(KernelsVector &multi_kernels, int subscription_factor) {} template <typename KernelsVector> static void GenerateSingle(KernelsVector &single_kernels) {} }; /// Enumerate block-scheduling variations template < int BLOCK_THREADS, int ITEMS_PER_THREAD, int VECTOR_LOAD_LENGTH, BlockReduceAlgorithm BLOCK_ALGORITHM, CacheLoadModifier LOAD_MODIFIER> void Enumerate() { // Multi-block kernels Ok<BlockReduceTilesPolicy<BLOCK_THREADS, ITEMS_PER_THREAD, VECTOR_LOAD_LENGTH, BLOCK_ALGORITHM, LOAD_MODIFIER, GRID_MAPPING_RAKE> >::GenerateMulti(multi_kernels, 1); Ok<BlockReduceTilesPolicy<BLOCK_THREADS, ITEMS_PER_THREAD, VECTOR_LOAD_LENGTH, BLOCK_ALGORITHM, LOAD_MODIFIER, GRID_MAPPING_RAKE> >::GenerateMulti(multi_kernels, 2); Ok<BlockReduceTilesPolicy<BLOCK_THREADS, ITEMS_PER_THREAD, VECTOR_LOAD_LENGTH, 
BLOCK_ALGORITHM, LOAD_MODIFIER, GRID_MAPPING_RAKE> >::GenerateMulti(multi_kernels, 4); Ok<BlockReduceTilesPolicy<BLOCK_THREADS, ITEMS_PER_THREAD, VECTOR_LOAD_LENGTH, BLOCK_ALGORITHM, LOAD_MODIFIER, GRID_MAPPING_RAKE> >::GenerateMulti(multi_kernels, 8); #if TUNE_ARCH >= 200 Ok<BlockReduceTilesPolicy<BLOCK_THREADS, ITEMS_PER_THREAD, VECTOR_LOAD_LENGTH, BLOCK_ALGORITHM, LOAD_MODIFIER, GRID_MAPPING_DYNAMIC> >::GenerateMulti(multi_kernels, 1); #endif // Single-block kernels Ok<BlockReduceTilesPolicy<BLOCK_THREADS, ITEMS_PER_THREAD, VECTOR_LOAD_LENGTH, BLOCK_ALGORITHM, LOAD_MODIFIER, GRID_MAPPING_RAKE> >::GenerateSingle(single_kernels); } /// Enumerate load modifier variations template < int BLOCK_THREADS, int ITEMS_PER_THREAD, int VECTOR_LOAD_LENGTH, BlockReduceAlgorithm BLOCK_ALGORITHM> void Enumerate() { Enumerate<BLOCK_THREADS, ITEMS_PER_THREAD, VECTOR_LOAD_LENGTH, BLOCK_ALGORITHM, LOAD_DEFAULT>(); #if TUNE_ARCH >= 350 Enumerate<BLOCK_THREADS, ITEMS_PER_THREAD, VECTOR_LOAD_LENGTH, BLOCK_ALGORITHM, LOAD_LDG>(); #endif } /// Enumerate block algorithms template < int BLOCK_THREADS, int ITEMS_PER_THREAD, int VECTOR_LOAD_LENGTH> void Enumerate() { Enumerate<BLOCK_THREADS, ITEMS_PER_THREAD, VECTOR_LOAD_LENGTH, BLOCK_REDUCE_RAKING>(); Enumerate<BLOCK_THREADS, ITEMS_PER_THREAD, VECTOR_LOAD_LENGTH, BLOCK_REDUCE_WARP_REDUCTIONS>(); } /// Enumerate vectorization variations template < int BLOCK_THREADS, int ITEMS_PER_THREAD> void Enumerate() { Enumerate<BLOCK_THREADS, ITEMS_PER_THREAD, 1>(); Enumerate<BLOCK_THREADS, ITEMS_PER_THREAD, 2>(); Enumerate<BLOCK_THREADS, ITEMS_PER_THREAD, 4>(); } /// Enumerate thread-granularity variations template <int BLOCK_THREADS> void Enumerate() { Enumerate<BLOCK_THREADS, 7>(); Enumerate<BLOCK_THREADS, 8>(); Enumerate<BLOCK_THREADS, 9>(); Enumerate<BLOCK_THREADS, 11>(); Enumerate<BLOCK_THREADS, 12>(); Enumerate<BLOCK_THREADS, 13>(); Enumerate<BLOCK_THREADS, 15>(); Enumerate<BLOCK_THREADS, 16>(); Enumerate<BLOCK_THREADS, 17>(); Enumerate<BLOCK_THREADS, 19>(); Enumerate<BLOCK_THREADS, 20>(); Enumerate<BLOCK_THREADS, 21>(); Enumerate<BLOCK_THREADS, 23>(); Enumerate<BLOCK_THREADS, 24>(); Enumerate<BLOCK_THREADS, 25>(); } /// Enumerate block size variations void Enumerate() { printf("\nEnumerating kernels\n"); fflush(stdout); Enumerate<32>(); Enumerate<64>(); Enumerate<96>(); Enumerate<128>(); Enumerate<160>(); Enumerate<192>(); Enumerate<256>(); Enumerate<512>(); } //--------------------------------------------------------------------- // Test methods //--------------------------------------------------------------------- /** * Test a configuration */ void TestConfiguration( MultiDispatchTuple &multi_dispatch, SingleDispatchTuple &single_dispatch, T* d_in, T* d_out, T* h_reference, OffsetT num_items, ReductionOp reduction_op) { // Clear output if (g_verify) CubDebugExit(cudaMemset(d_out, 0, sizeof(T))); // Allocate temporary storage void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; CubDebugExit(DeviceReduce::Dispatch( d_temp_storage, temp_storage_bytes, multi_dispatch.kernel_ptr, single_dispatch.kernel_ptr, FillAndResetDrainKernel<OffsetT>, multi_dispatch.params, single_dispatch.params, d_in, d_out, num_items, reduction_op)); CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes)); // Warmup/correctness iteration CubDebugExit(DeviceReduce::Dispatch( d_temp_storage, temp_storage_bytes, multi_dispatch.kernel_ptr, single_dispatch.kernel_ptr, FillAndResetDrainKernel<OffsetT>, multi_dispatch.params, single_dispatch.params, d_in, d_out, num_items, 
reduction_op)); if (g_verify) CubDebugExit(cudaDeviceSynchronize()); // Copy out and display results int compare = (g_verify) ? CompareDeviceResults(h_reference, d_out, 1, true, false) : 0; // Performance GpuTimer gpu_timer; float elapsed_millis = 0.0; for (int i = 0; i < g_timing_iterations; i++) { gpu_timer.Start(); CubDebugExit(DeviceReduce::Dispatch( d_temp_storage, temp_storage_bytes, multi_dispatch.kernel_ptr, single_dispatch.kernel_ptr, FillAndResetDrainKernel<OffsetT>, multi_dispatch.params, single_dispatch.params, d_in, d_out, num_items, reduction_op)); gpu_timer.Stop(); elapsed_millis += gpu_timer.ElapsedMillis(); } // Mooch CubDebugExit(cudaDeviceSynchronize()); float avg_elapsed = elapsed_millis / g_timing_iterations; float avg_throughput = float(num_items) / avg_elapsed / 1000.0 / 1000.0; float avg_bandwidth = avg_throughput * sizeof(T); multi_dispatch.avg_throughput = CUB_MAX(avg_throughput, multi_dispatch.avg_throughput); if (avg_throughput > multi_dispatch.best_avg_throughput) { multi_dispatch.best_avg_throughput = avg_throughput; multi_dispatch.best_size = num_items; } single_dispatch.avg_throughput = CUB_MAX(avg_throughput, single_dispatch.avg_throughput); if (avg_throughput > single_dispatch.best_avg_throughput) { single_dispatch.best_avg_throughput = avg_throughput; single_dispatch.best_size = num_items; } if (g_verbose) { printf("\t%.2f GB/s, multi_dispatch( ", avg_bandwidth); multi_dispatch.params.Print(); printf(" ), single_dispatch( "); single_dispatch.params.Print(); printf(" )\n"); fflush(stdout); } AssertEquals(0, compare); // Cleanup temporaries if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); } /** * Evaluate multi-block configurations */ void TestMulti( T* h_in, T* d_in, T* d_out, ReductionOp reduction_op) { // Simple single kernel tuple for use with multi kernel sweep typedef typename DeviceReduce::TunedPolicies<T, OffsetT, TUNE_ARCH>::SinglePolicy SimpleSinglePolicy; SingleDispatchTuple simple_single_tuple; simple_single_tuple.params.template Init<SimpleSinglePolicy>(); simple_single_tuple.kernel_ptr = ReduceSingleKernel<SimpleSinglePolicy, T*, T*, OffsetT, ReductionOp>; double max_exponent = log2(double(g_max_items)); double min_exponent = log2(double(simple_single_tuple.params.tile_size)); unsigned int max_int = (unsigned int) -1; for (int sample = 0; sample < g_samples; ++sample) { printf("\nMulti-block sample %d, ", sample); int num_items; if (sample == 0) { // First sample: use max items num_items = g_max_items; printf("num_items: %d", num_items); fflush(stdout); } else { // Sample a problem size from [2^g_min_exponent, g_max_items]. First 2/3 of the samples are log-distributed, the other 1/3 are uniformly-distributed. 
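// Concretely: 'scale' below is a uniform random value in [0, 1].  For the log-biased
// samples, num_items = 2^(min_exponent + scale * (max_exponent - min_exponent)); for
// the uniform-biased samples, num_items = scale * g_max_items, floored at the
// single-block tile size.  Both are capped at g_max_items.  Note that the branch tests
// (sample < g_samples / 2), so half of the samples take each path.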
unsigned int bits; RandomBits(bits); double scale = double(bits) / max_int; if (sample < g_samples / 2) { // log bias double exponent = ((max_exponent - min_exponent) * scale) + min_exponent; num_items = pow(2.0, exponent); num_items = CUB_MIN(num_items, g_max_items); printf("num_items: %d (2^%.2f)", num_items, exponent); fflush(stdout); } else { // uniform bias num_items = CUB_MAX(pow(2.0, min_exponent), scale * g_max_items); num_items = CUB_MIN(num_items, g_max_items); printf("num_items: %d (%.2f * %d)", num_items, scale, g_max_items); fflush(stdout); } } if (g_verbose) printf("\n"); else printf(", "); // Compute reference T h_reference = Reduce(h_in, reduction_op, num_items); // Run test on each multi-kernel configuration float best_avg_throughput = 0.0; for (int j = 0; j < multi_kernels.size(); ++j) { multi_kernels[j].avg_throughput = 0.0; TestConfiguration(multi_kernels[j], simple_single_tuple, d_in, d_out, &h_reference, num_items, reduction_op); best_avg_throughput = CUB_MAX(best_avg_throughput, multi_kernels[j].avg_throughput); } // Print best throughput for this problem size printf("Best: %.2fe9 items/s (%.2f GB/s)\n", best_avg_throughput, best_avg_throughput * sizeof(T)); // Accumulate speedup (inverse for harmonic mean) for (int j = 0; j < multi_kernels.size(); ++j) multi_kernels[j].hmean_speedup += best_avg_throughput / multi_kernels[j].avg_throughput; } // Find max overall throughput and compute hmean speedups float overall_max_throughput = 0.0; for (int j = 0; j < multi_kernels.size(); ++j) { overall_max_throughput = CUB_MAX(overall_max_throughput, multi_kernels[j].best_avg_throughput); multi_kernels[j].hmean_speedup = float(g_samples) / multi_kernels[j].hmean_speedup; } // Sort by cumulative speedup sort(multi_kernels.begin(), multi_kernels.end(), MinSpeedup<MultiDispatchTuple>); // Print ranked multi configurations printf("\nRanked multi_kernels:\n"); for (int j = 0; j < multi_kernels.size(); ++j) { printf("\t (%d) params( ", multi_kernels.size() - j); multi_kernels[j].params.Print(); printf(" ) hmean speedup: %.3f, best throughput %.2f @ %d elements (%.2f GB/s, %.2f%%)\n", multi_kernels[j].hmean_speedup, multi_kernels[j].best_avg_throughput, (int) multi_kernels[j].best_size, multi_kernels[j].best_avg_throughput * sizeof(T), multi_kernels[j].best_avg_throughput / overall_max_throughput); } printf("\nMax multi-block throughput %.2f (%.2f GB/s)\n", overall_max_throughput, overall_max_throughput * sizeof(T)); } /** * Evaluate single-block configurations */ void TestSingle( T* h_in, T* d_in, T* d_out, ReductionOp reduction_op) { // Construct a NULL-ptr multi-kernel tuple that forces a single-kernel pass MultiDispatchTuple multi_tuple; double max_exponent = log2(double(g_max_items)); unsigned int max_int = (unsigned int) -1; for (int sample = 0; sample < g_samples; ++sample) { printf("\nSingle-block sample %d, ", sample); int num_items; if (sample == 0) { // First sample: use max items num_items = g_max_items; printf("num_items: %d", num_items); fflush(stdout); } else { // Sample a problem size from [2, g_max_items], log-distributed unsigned int bits; RandomBits(bits); double scale = double(bits) / max_int; double exponent = ((max_exponent - 1) * scale) + 1; num_items = pow(2.0, exponent); printf("num_items: %d (2^%.2f)", num_items, exponent); fflush(stdout); } if (g_verbose) printf("\n"); else printf(", "); // Compute reference T h_reference = Reduce(h_in, reduction_op, num_items); // Run test on each single-kernel configuration (pick first multi-config to use, which shouldn't 
be float best_avg_throughput = 0.0; for (int j = 0; j < single_kernels.size(); ++j) { single_kernels[j].avg_throughput = 0.0; TestConfiguration(multi_tuple, single_kernels[j], d_in, d_out, &h_reference, num_items, reduction_op); best_avg_throughput = CUB_MAX(best_avg_throughput, single_kernels[j].avg_throughput); } // Print best throughput for this problem size printf("Best: %.2fe9 items/s (%.2f GB/s)\n", best_avg_throughput, best_avg_throughput * sizeof(T)); // Accumulate speedup (inverse for harmonic mean) for (int j = 0; j < single_kernels.size(); ++j) single_kernels[j].hmean_speedup += best_avg_throughput / single_kernels[j].avg_throughput; } // Find max overall throughput and compute hmean speedups float overall_max_throughput = 0.0; for (int j = 0; j < single_kernels.size(); ++j) { overall_max_throughput = CUB_MAX(overall_max_throughput, single_kernels[j].best_avg_throughput); single_kernels[j].hmean_speedup = float(g_samples) / single_kernels[j].hmean_speedup; } // Sort by cumulative speedup sort(single_kernels.begin(), single_kernels.end(), MinSpeedup<SingleDispatchTuple>); // Print ranked single configurations printf("\nRanked single_kernels:\n"); for (int j = 0; j < single_kernels.size(); ++j) { printf("\t (%d) params( ", single_kernels.size() - j); single_kernels[j].params.Print(); printf(" ) hmean speedup: %.3f, best throughput %.2f @ %d elements (%.2f GB/s, %.2f%%)\n", single_kernels[j].hmean_speedup, single_kernels[j].best_avg_throughput, (int) single_kernels[j].best_size, single_kernels[j].best_avg_throughput * sizeof(T), single_kernels[j].best_avg_throughput / overall_max_throughput); } printf("\nMax single-block throughput %.2f (%.2f GB/s)\n", overall_max_throughput, overall_max_throughput * sizeof(T)); } }; //--------------------------------------------------------------------- // Main //--------------------------------------------------------------------- /** * Main */ int main(int argc, char** argv) { // Initialize command line CommandLineArgs args(argc, argv); args.GetCmdLineArgument("n", g_max_items); args.GetCmdLineArgument("s", g_samples); args.GetCmdLineArgument("i", g_timing_iterations); g_verbose = args.CheckCmdLineFlag("v"); g_single = args.CheckCmdLineFlag("single"); g_verify = !args.CheckCmdLineFlag("noverify"); // Print usage if (args.CheckCmdLineFlag("help")) { printf("%s " "[--device=<device-id>] " "[--n=<max items>]" "[--s=<samples>]" "[--i=<timing iterations>]" "[--single]" "[--v]" "[--noverify]" "\n", argv[0]); exit(0); } // Initialize device CubDebugExit(args.DeviceInit()); #if (TUNE_SIZE == 1) typedef unsigned char T; #elif (TUNE_SIZE == 2) typedef unsigned short T; #elif (TUNE_SIZE == 4) typedef unsigned int T; #elif (TUNE_SIZE == 8) typedef unsigned long long T; #else // Default typedef unsigned int T; #endif typedef unsigned int OffsetT; Sum reduction_op; // Enumerate kernels Schmoo<T, OffsetT, Sum > schmoo; schmoo.Enumerate(); // Allocate host arrays T *h_in = new T[g_max_items]; // Initialize problem Initialize(UNIFORM, h_in, g_max_items); // Initialize device arrays T *d_in = NULL; T *d_out = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * g_max_items)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * 1)); CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * g_max_items, cudaMemcpyHostToDevice)); // Test kernels if (g_single) schmoo.TestSingle(h_in, d_in, d_out, reduction_op); else schmoo.TestMulti(h_in, d_in, d_out, reduction_op); // Cleanup if (h_in) delete[] h_in; if (d_in) 
CubDebugExit(g_allocator.DeviceFree(d_in)); if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); return 0; }
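/* A small self-contained illustration (toy numbers, not part of the tuner above) of the
 * harmonic-mean speedup metric used by Schmoo::TestMulti/TestSingle to rank
 * configurations: for every sample each configuration accumulates
 * best_throughput / its_throughput, and the final score is g_samples divided by that
 * sum, so 1.0 means "fastest on every sample". */
#include <cstddef>
#include <cstdio>
#include <vector>

int main()
{
    // Throughput (items/s) of one configuration over three samples, and the best
    // throughput observed for each sample by any configuration (made-up values).
    std::vector<float> config_tput = {8.0f, 5.0f, 10.0f};
    std::vector<float> best_tput   = {10.0f, 10.0f, 10.0f};

    float sum = 0.0f;
    for (std::size_t i = 0; i < config_tput.size(); ++i)
        sum += best_tput[i] / config_tput[i];

    float hmean_speedup = float(config_tput.size()) / sum;   // ~0.706 for these numbers
    std::printf("hmean speedup: %.3f\n", hmean_speedup);
    return 0;
}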
#include <linalg/batched/matrix.cuh> #include <raft/cudart_utils.h> #include <raft/linalg/add.cuh> #include <raft/mr/device/allocator.hpp> #include <gtest/gtest.h> #include <algorithm> #include <cmath> #include <cstddef> #include <random> #include <vector> namespace MLCommon { namespace LinAlg { namespace Batched { enum MatrixOperation { AB_op, // Matrix-matrix product (with GEMM) AZT_op, // Matrix-vector product (with GEMM) ZA_op, // Vector-matrix product (with GEMM) ApB_op, // Addition AmB_op, // Substraction AkB_op, // Kronecker product AsolveZ_op, // Linear equation solver Ax=b LaggedZ_op, // Lag matrix CopyA2D_op, // 2D copy DiffA_op, // Vector first difference Hessenberg_op, // Hessenberg decomposition A=UHU' Schur_op, // Schur decomposition A=USU' Lyapunov_op, // Lyapunov equation solver AXA'-X+B=0 }; template <typename T> struct MatrixInputs { MatrixOperation operation; int batch_size; int m; // Usually the dimensions of A and/or Z int n; int p; // Usually the dimensions of B or other parameters int q; int s; // Additional parameters for operations that need more than 4 int t; T tolerance; }; template <typename T> class MatrixTest : public ::testing::TestWithParam<MatrixInputs<T>> { protected: void SetUp() override { using std::vector; params = ::testing::TestWithParam<MatrixInputs<T>>::GetParam(); // Find out whether A, B and Z will be used (depending on the operation) bool use_A = (params.operation != LaggedZ_op); bool use_B = (params.operation == AB_op) || (params.operation == ApB_op) || (params.operation == AmB_op) || (params.operation == AkB_op) || (params.operation == Lyapunov_op); bool use_Z = (params.operation == AZT_op) || (params.operation == ZA_op) || (params.operation == AsolveZ_op) || (params.operation == LaggedZ_op); bool Z_col = (params.operation == AsolveZ_op); int r = params.operation == AZT_op ? 
params.n : params.m; // Check if the dimensions are valid and compute the output dimensions int m_r{}; int n_r{}; switch (params.operation) { case AB_op: ASSERT_TRUE(params.n == params.p); m_r = params.m; n_r = params.q; break; case ApB_op: case AmB_op: ASSERT_TRUE(params.m == params.p && params.n == params.q); m_r = params.m; n_r = params.n; break; case AkB_op: m_r = params.m * params.p; n_r = params.n * params.q; break; case AZT_op: m_r = params.m; n_r = 1; break; case ZA_op: m_r = 1; n_r = params.n; break; case AsolveZ_op: ASSERT_TRUE(params.n == params.m); // For this test we multiply A by the solution and check against Z m_r = params.m; n_r = 1; break; case LaggedZ_op: // For this operation params.n holds the number of lags m_r = params.m - params.n; n_r = params.n; break; case CopyA2D_op: // For this operation p and q are the dimensions of the copy window m_r = params.p; n_r = params.q; break; case DiffA_op: // Note: A can represent either a row or column vector ASSERT_TRUE(params.m == 1 || params.n == 1); m_r = std::max(1, params.m - 1); n_r = std::max(1, params.n - 1); break; case Hessenberg_op: case Schur_op: case Lyapunov_op: ASSERT_TRUE(params.m == params.n && params.m == params.p && params.m == params.q); m_r = params.m; n_r = params.m; break; } // Create test matrices and vector std::vector<T> A; std::vector<T> B; std::vector<T> Z; if (use_A) A.resize(params.batch_size * params.m * params.n); if (use_B) B.resize(params.batch_size * params.p * params.q); if (use_Z) Z.resize(params.batch_size * r); // Generate random data std::random_device rd; std::mt19937 gen(rd()); std::uniform_real_distribution<T> udis(-1.0, 3.0); for (std::size_t i = 0; i < A.size(); i++) A[i] = udis(gen); for (std::size_t i = 0; i < B.size(); i++) B[i] = udis(gen); for (std::size_t i = 0; i < Z.size(); i++) Z[i] = udis(gen); // Create handles, stream CUBLAS_CHECK(cublasCreate(&handle)); CUDA_CHECK(cudaStreamCreate(&stream)); // Created batched matrices Matrix<T> AbM(params.m, params.n, params.batch_size, handle, stream); Matrix<T> BbM(params.p, params.q, params.batch_size, handle, stream); Matrix<T> ZbM(Z_col ? r : 1, Z_col ? 1 : r, params.batch_size, handle, stream); // Copy the data to the device if (use_A) raft::update_device(AbM.raw_data(), A.data(), A.size(), stream); if (use_B) raft::update_device(BbM.raw_data(), B.data(), B.size(), stream); if (use_Z) raft::update_device(ZbM.raw_data(), Z.data(), Z.size(), stream); // Create fake batched matrices to be overwritten by results res_bM = new Matrix<T>(1, 1, 1, handle, stream); // Compute the tested results switch (params.operation) { case AB_op: *res_bM = AbM * BbM; break; case ApB_op: *res_bM = AbM + BbM; break; case AmB_op: *res_bM = AbM - BbM; break; case AkB_op: *res_bM = b_kron(AbM, BbM); break; case AZT_op: *res_bM = b_gemm(AbM, ZbM, false, true); break; case ZA_op: *res_bM = ZbM * AbM; break; case AsolveZ_op: // A * A\Z -> should be Z *res_bM = AbM * b_solve(AbM, ZbM); break; case LaggedZ_op: *res_bM = b_lagged_mat(ZbM, params.n); break; case CopyA2D_op: *res_bM = b_2dcopy(AbM, params.s, params.t, params.p, params.q); break; case DiffA_op: *res_bM = AbM.difference(); break; case Hessenberg_op: { constexpr T zero_tolerance = std::is_same<T, double>::value ? 
1e-7 : 1e-3f; int n = params.m; Matrix<T> HbM(n, n, params.batch_size, handle, stream); Matrix<T> UbM(n, n, params.batch_size, handle, stream); b_hessenberg(AbM, UbM, HbM); // Check that H is in Hessenberg form std::vector<T> H = std::vector<T>(n * n * params.batch_size); raft::update_host(H.data(), HbM.raw_data(), H.size(), stream); CUDA_CHECK(cudaStreamSynchronize(stream)); for (int ib = 0; ib < params.batch_size; ib++) { for (int j = 0; j < n - 2; j++) { for (int i = j + 2; i < n; i++) { ASSERT_TRUE(raft::abs(H[n * n * ib + n * j + i]) < zero_tolerance); } } } // Check that U is unitary (UU'=I) std::vector<T> UUt = std::vector<T>(n * n * params.batch_size); raft::update_host(UUt.data(), b_gemm(UbM, UbM, false, true).raw_data(), UUt.size(), stream); CUDA_CHECK(cudaStreamSynchronize(stream)); for (int ib = 0; ib < params.batch_size; ib++) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { ASSERT_TRUE(raft::abs(UUt[n * n * ib + n * j + i] - (i == j ? (T)1 : (T)0)) < zero_tolerance); } } } // Write UHU' in the result (will be compared against A) *res_bM = UbM * b_gemm(HbM, UbM, false, true); break; } case Schur_op: { constexpr T zero_tolerance = std::is_same<T, double>::value ? 1e-7 : 1e-3f; int n = params.m; Matrix<T> SbM(n, n, params.batch_size, handle, stream); Matrix<T> UbM(n, n, params.batch_size, handle, stream); b_schur(AbM, UbM, SbM); // Check that S is in Schur form std::vector<T> S = std::vector<T>(n * n * params.batch_size); raft::update_host(S.data(), SbM.raw_data(), S.size(), stream); CUDA_CHECK(cudaStreamSynchronize(stream)); for (int ib = 0; ib < params.batch_size; ib++) { for (int j = 0; j < n - 2; j++) { for (int i = j + 2; i < n; i++) { ASSERT_TRUE(raft::abs(S[n * n * ib + n * j + i]) < zero_tolerance); } } } for (int ib = 0; ib < params.batch_size; ib++) { for (int k = 0; k < n - 3; k++) { ASSERT_FALSE(raft::abs(S[n * n * ib + n * k + k + 1]) > zero_tolerance && raft::abs(S[n * n * ib + n * (k + 1) + k + 2]) > zero_tolerance && raft::abs(S[n * n * ib + n * (k + 2) + k + 3]) > zero_tolerance); } } // Check that U is unitary (UU'=I) std::vector<T> UUt = std::vector<T>(n * n * params.batch_size); raft::update_host(UUt.data(), b_gemm(UbM, UbM, false, true).raw_data(), UUt.size(), stream); CUDA_CHECK(cudaStreamSynchronize(stream)); for (int ib = 0; ib < params.batch_size; ib++) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { ASSERT_TRUE(raft::abs(UUt[n * n * ib + n * j + i] - (i == j ? 
(T)1 : (T)0)) < zero_tolerance); } } } // Write USU' in the result (will be compared against A) *res_bM = UbM * b_gemm(SbM, UbM, false, true); break; } case Lyapunov_op: { Matrix<T> XbM = b_lyapunov(AbM, BbM); // Write AXA'-X in the result (will be compared against -B) *res_bM = AbM * b_gemm(XbM, AbM, false, true) - XbM; break; } } // Compute the expected results res_h.resize(params.batch_size * m_r * n_r); switch (params.operation) { case AB_op: for (int bid = 0; bid < params.batch_size; bid++) { Naive::matMul(res_h.data() + bid * m_r * n_r, A.data() + bid * params.m * params.n, B.data() + bid * params.p * params.q, params.m, params.n, params.q); } break; case ApB_op: Naive::add(res_h.data(), A.data(), B.data(), A.size()); break; case AmB_op: Naive::add(res_h.data(), A.data(), B.data(), A.size(), T(-1.0)); break; case AkB_op: for (int bid = 0; bid < params.batch_size; bid++) { Naive::kronecker(res_h.data() + bid * m_r * n_r, A.data() + bid * params.m * params.n, B.data() + bid * params.p * params.q, params.m, params.n, params.p, params.q); } break; case AZT_op: for (int bid = 0; bid < params.batch_size; bid++) { Naive::matMul(res_h.data() + bid * m_r * n_r, A.data() + bid * params.m * params.n, Z.data() + bid * r, params.m, params.n, 1); } break; case ZA_op: for (int bid = 0; bid < params.batch_size; bid++) { Naive::matMul(res_h.data() + bid * m_r * n_r, Z.data() + bid * r, A.data() + bid * params.m * params.n, 1, params.m, params.n); } break; case AsolveZ_op: // Simply copy Z in the result memcpy(res_h.data(), Z.data(), r * params.batch_size * sizeof(T)); break; case LaggedZ_op: for (int bid = 0; bid < params.batch_size; bid++) { Naive::laggedMat( res_h.data() + bid * m_r * n_r, Z.data() + bid * params.m, params.m, params.n); } break; case CopyA2D_op: for (int bid = 0; bid < params.batch_size; bid++) { Naive::copy2D(res_h.data() + bid * m_r * n_r, A.data() + bid * params.m * params.n, params.s, params.t, params.m, m_r, n_r); } break; case DiffA_op: { int len = params.m * params.n; for (int bid = 0; bid < params.batch_size; bid++) { Naive::diff(res_h.data() + bid * (len - 1), A.data() + bid * len, len); } break; } case Hessenberg_op: case Schur_op: // Simply copy A (will be compared against UHU') memcpy(res_h.data(), A.data(), params.m * params.m * params.batch_size * sizeof(T)); break; case Lyapunov_op: // Simply copy -B (will be compared against AXA'-X) for (int i = 0; i < params.m * params.m * params.batch_size; i++) { res_h[i] = -B[i]; } break; } CUDA_CHECK(cudaStreamSynchronize(stream)); } void TearDown() override { delete res_bM; CUBLAS_CHECK(cublasDestroy(handle)); CUDA_CHECK(cudaStreamDestroy(stream)); } protected: MatrixInputs<T> params; Matrix<T>* res_bM; std::vector<T> res_h; cublasHandle_t handle; cudaStream_t stream = 0; }; // Test parameters (op, batch_size, m, n, p, q, s, t, tolerance) const std::vector<MatrixInputs<double>> inputsd = { {AB_op, 7, 15, 37, 37, 11, 0, 0, 1e-6}, {AZT_op, 5, 33, 65, 1, 1, 0, 0, 1e-6}, {ZA_op, 8, 12, 41, 1, 1, 0, 0, 1e-6}, {ApB_op, 4, 16, 48, 16, 48, 0, 0, 1e-6}, {AmB_op, 17, 9, 3, 9, 3, 0, 0, 1e-6}, {AkB_op, 5, 3, 13, 31, 8, 0, 0, 1e-6}, {AkB_op, 3, 7, 12, 31, 15, 0, 0, 1e-6}, {AkB_op, 2, 11, 2, 8, 46, 0, 0, 1e-6}, {AsolveZ_op, 6, 17, 17, 1, 1, 0, 0, 1e-6}, {LaggedZ_op, 5, 31, 9, 1, 1, 0, 0, 1e-6}, {LaggedZ_op, 7, 129, 3, 1, 1, 0, 0, 1e-6}, {CopyA2D_op, 11, 31, 63, 17, 14, 5, 9, 1e-6}, {CopyA2D_op, 4, 33, 7, 30, 4, 3, 0, 1e-6}, {DiffA_op, 5, 11, 1, 1, 1, 0, 0, 1e-6}, {DiffA_op, 15, 1, 37, 1, 1, 0, 0, 1e-6}, {Hessenberg_op, 10, 15, 15, 15, 15, 
0, 0, 1e-6}, {Hessenberg_op, 30, 61, 61, 61, 61, 0, 0, 1e-6}, // {Schur_op, 7, 12, 12, 12, 12, 0, 0, 1e-3}, // {Schur_op, 17, 77, 77, 77, 77, 0, 0, 1e-3}, // {Lyapunov_op, 5, 14, 14, 14, 14, 0, 0, 1e-2}, // {Lyapunov_op, 13, 100, 100, 100, 100, 0, 0, 1e-2} }; // Note: Schur and Lyapunov tests have had stability issues on CI so // they are disabled temporarily. See issue: // https://github.com/rapidsai/cuml/issues/1949 // Test parameters (op, batch_size, m, n, p, q, s, t, tolerance) const std::vector<MatrixInputs<float>> inputsf = { {AB_op, 7, 15, 37, 37, 11, 0, 0, 1e-2}, {AZT_op, 5, 33, 65, 1, 1, 0, 0, 1e-2}, {ZA_op, 8, 12, 41, 1, 1, 0, 0, 1e-2}, {ApB_op, 4, 16, 48, 16, 48, 0, 0, 1e-2}, {AmB_op, 17, 9, 3, 9, 3, 0, 0, 1e-2}, {AkB_op, 5, 3, 13, 31, 8, 0, 0, 1e-2}, {AkB_op, 3, 7, 12, 31, 15, 0, 0, 1e-2}, {AkB_op, 2, 11, 2, 8, 46, 0, 0, 1e-2}, {AsolveZ_op, 6, 17, 17, 1, 1, 0, 0, 1e-2}, {LaggedZ_op, 5, 31, 9, 1, 1, 0, 0, 1e-5}, {LaggedZ_op, 7, 129, 3, 1, 1, 0, 0, 1e-5}, {CopyA2D_op, 11, 31, 63, 17, 14, 5, 9, 1e-5}, {CopyA2D_op, 4, 33, 7, 30, 4, 3, 0, 1e-5}, {DiffA_op, 5, 11, 1, 1, 1, 0, 0, 1e-2}, {DiffA_op, 15, 1, 37, 1, 1, 0, 0, 1e-2}, {Hessenberg_op, 10, 15, 15, 15, 15, 0, 0, 1e-2}, {Hessenberg_op, 30, 61, 61, 61, 61, 0, 0, 1e-2}, // {Schur_op, 7, 12, 12, 12, 12, 0, 0, 1e-2}, // {Schur_op, 17, 77, 77, 77, 77, 0, 0, 1e-2}, // {Lyapunov_op, 5, 14, 14, 14, 14, 0, 0, 1e-2}, // {Lyapunov_op, 13, 100, 100, 100, 100, 0, 0, 1e-2} }; // Note: Schur and Lyapunov operations don't give good precision for // single-precision floating-point numbers yet... using BatchedMatrixTestD = MatrixTest<double>; using BatchedMatrixTestF = MatrixTest<float>; TEST_P(BatchedMatrixTestD, Result) { ASSERT_TRUE(raft::devArrMatchHost(res_h.data(), res_bM->raw_data(), res_h.size(), raft::CompareApprox<double>(params.tolerance), stream)); } TEST_P(BatchedMatrixTestF, Result) { ASSERT_TRUE(raft::devArrMatchHost(res_h.data(), res_bM->raw_data(), res_h.size(), raft::CompareApprox<float>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(BatchedMatrixTests, BatchedMatrixTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(BatchedMatrixTests, BatchedMatrixTestF, ::testing::ValuesIn(inputsf)); } // namespace Batched } // namespace LinAlg } // namespace MLCommon
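The fixture above validates every batched operation against a naive host reference (Naive::matMul, Naive::kronecker, ...). As an illustration of what such a reference computes for the AkB_op case, here is a minimal host-side Kronecker product for a single batch member, assuming column-major storage; the function name and signature are illustrative sketches, not the actual Naive helper used by the test.

#include <cstddef>
#include <vector>

// Illustrative host reference for one batch member of A (x) B, assuming
// column-major storage; the result has m_r = m*p rows and n_r = n*q columns,
// matching the AkB_op dimension rule in the fixture above.
template <typename T>
void naive_kronecker_sketch(std::vector<T>& K, const std::vector<T>& A, const std::vector<T>& B,
                            int m, int n, int p, int q)
{
  const int mr = m * p;
  K.assign(static_cast<std::size_t>(mr) * (n * q), T(0));
  for (int j = 0; j < n; j++)
    for (int i = 0; i < m; i++) {
      const T a_ij = A[j * m + i];  // A(i,j), column-major
      for (int l = 0; l < q; l++)
        for (int k = 0; k < p; k++)
          // K(i*p + k, j*q + l) = A(i,j) * B(k,l)
          K[(j * q + l) * mr + (i * p + k)] = a_ij * B[l * p + k];
    }
}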
#include <nvidia/helper_cuda.h> #define PI 3.141592653589793f #define BLOCK_SIZE 256 // int tidk = 0*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 1*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 2*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 3*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 4*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 5*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 6*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 7*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 8*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 9*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 10*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 11*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 12*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 13*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 14*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 15*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 16*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 17*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 18*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 19*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 20*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 21*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 22*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; // tidk = 23*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; template<typename T> __device__ inline T atomicAdd_(T* address, T val) {}; template<> __device__ inline double atomicAdd_<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); }; template<> __device__ inline float atomicAdd_<float>(float* address, float val) { return atomicAdd(address,val); }; template <uint16_t blockSize, class T> __global__ void reduction_oldOptimizedMemLayout(T *mu_karch,T *dbg) //, T *N) { __shared__ T mu[BLOCK_SIZE*4*6]; const int tid = threadIdx.x + blockDim.x * threadIdx.y; int tidk = tid*6*4; #pragma unroll for(int s=0; s<6*4; ++s) { // this is almost certainly bad ordering mu[tid+BLOCK_SIZE*s] = 1.0f; } // old reduction..... __syncthreads(); //sync the threads #pragma unroll for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) { int ss = s*6*4; if(tid < s) { #pragma unroll for( int k=0; k<6*4; ++k) { mu[tidk+k] += mu[tidk+k + ss]; } } __syncthreads(); } //dbg[tid] = mu[tid+BLOCK_SIZE]; if(tid<6*4) {// && Ni[(k/3)*BLOCK_SIZE]>0 ) { atomicAdd_<T>(&mu_karch[tid],mu[tid]); } } template <uint16_t blockSize, class T> __global__ void reduction_oldOptimized(T *mu_karch,T *dbg) //, T *N) { __shared__ T mu[BLOCK_SIZE*4*6]; const int tid = threadIdx.x + blockDim.x * threadIdx.y; #pragma unroll for(int s=0; s<6*4; ++s) { // this is almost certainly bad ordering mu[tid+BLOCK_SIZE*s] = 1.0f; } // old reduction..... 
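// Layout reminder: mu holds 6*4 = 24 channels of BLOCK_SIZE partial sums,
// stored channel-major as mu[k*BLOCK_SIZE + tid]. The cascade below is a
// manually unrolled binary-tree reduction applied to all 24 channels per
// step; the launchers at the bottom of this file instantiate the kernel with
// blockSize == BLOCK_SIZE == 256, so the blockSize >= 512 branch is never
// taken and the first active step is the tid < 128 one.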
__syncthreads(); //sync the threads if(blockSize >= 512) if(tid<256){ #pragma unroll for( int k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + 256]; } __syncthreads(); } if(blockSize >= 256) if(tid<128){ #pragma unroll for( int k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + 128]; } __syncthreads(); } if(blockSize >= 128) if(tid<64){ #pragma unroll for( int k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + 64]; } __syncthreads(); } if(blockSize >= 64) if(tid<32){ #pragma unroll for( int k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + 32]; } } __syncthreads(); if(blockSize >= 32) if(tid<16){ #pragma unroll for( int k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + 16]; } __syncthreads(); } if(blockSize >= 16) { if(tid<8){ #pragma unroll for( int k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + 8]; } } __syncthreads(); } if(blockSize >= 8) { if(tid<4){ #pragma unroll for( int k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + 4]; } } __syncthreads(); } if(blockSize >= 4) { if(tid<2*6*4){ int tidk = (tid/2)*BLOCK_SIZE + tid%2; mu[tidk] += mu[tidk+2]; dbg[tid] = tidk; // if(tid<2){ //#pragma unroll // for( int k=0; k<6*4; ++k) { // int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + 2]; // dbg[k*2+tid] = tidk; // dbg[tid+k*2] = tidk; // } } } __syncthreads(); // if(blockSize >= 2) // { // if(tid<6*4) // { // int tidk = tid*BLOCK_SIZE; // mu[tidk] += mu[tidk+1]; // } // __syncthreads(); // } //dbg[tid] = mu[tid+BLOCK_SIZE*19]; if(tid<6*4) {// && Ni[(k/3)*BLOCK_SIZE]>0 ) { int tidk = tid*BLOCK_SIZE; //mu[tidk] += mu[tidk+1]; atomicAdd_<T>(&mu_karch[tid],mu[tidk]+mu[tidk+1]); //atomicAdd_<T>(&mu_karch[tid],mu[tid*BLOCK_SIZE]); } } template<class T> __global__ void reduction_old(T *mu_karch, T *dbg) //, T *N) { __shared__ T mu[BLOCK_SIZE*4*6]; const int tid = threadIdx.x + blockDim.x * threadIdx.y; #pragma unroll for(int s=0; s<6*4; ++s) { // this is almost certainly bad ordering mu[tid+BLOCK_SIZE*s] = 1.0f; } // old reduction..... __syncthreads(); //sync the threads #pragma unroll for(int s=(BLOCK_SIZE)/2; s>1; s>>=1) { if(tid < s) { #pragma unroll for( int k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; mu[tidk] += mu[tidk + s]; } } __syncthreads(); } if(tid<6*4) {// && Ni[(k/3)*BLOCK_SIZE]>0 ) { atomicAdd_<T>(&mu_karch[tid],mu[tid*BLOCK_SIZE]+mu[tid*BLOCK_SIZE+1]); } } template<class T> __global__ void reduction_newNew(T *mu_karch,T *dbg) //, T *N) { __shared__ T mu[BLOCK_SIZE*4*6]; const int tid = threadIdx.x + blockDim.x * threadIdx.y; #pragma unroll for(int s=0; s<6*4; ++s) { // this is almost certainly bad ordering mu[tid+BLOCK_SIZE*s] = 1.0f; } // old reduction..... 
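// The staged scheme below starts like a plain tree reduction (s = BLOCK_SIZE/2
// over all 24 channels), but as the stride s shrinks it remaps thread ids so
// that several channels share one step: e.g. at s = BLOCK_SIZE/4, threads
// [0,s) reduce channel 2k while threads [s,2s) reduce channel 2k+1. This keeps
// more of the block busy in the late stages, at the cost of the tidk/ss index
// arithmetic below.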
__syncthreads(); //sync the threads int s = (BLOCK_SIZE)/2; //128 #pragma unroll for (uint32_t k=0; k<6*4; ++k) { int tidk = k*BLOCK_SIZE+tid; if(tid<s) mu[tidk] += mu[tidk + s]; } s = (BLOCK_SIZE)/4;//64 #pragma unroll for (uint32_t k=0; k<6*2; ++k) { #pragma unroll for (uint32_t j=0; j<2; ++j) { int ss = j*s; int tidk = (2*k+j)*BLOCK_SIZE+tid-ss; if(ss<=tid && tid<ss+s) mu[tidk] += mu[tidk + s]; } // if(tid<s) // { // int tidk = 2*k*BLOCK_SIZE+tid; // mu[tidk] += mu[tidk + s]; // }else{ // int tidk = (2*k+1)*BLOCK_SIZE+tid-s; // mu[tidk] += mu[tidk + s]; // } } __syncthreads(); //sync the threads s = (BLOCK_SIZE)/8; //32 #pragma unroll for (uint32_t k=0; k<6; ++k) { #pragma unroll for (uint32_t j=0; j<4; ++j) { int ss = j*s; int tidk = (4*k+j)*BLOCK_SIZE+tid-ss; if(ss<=tid && tid<ss+s) mu[tidk] += mu[tidk + s]; } } __syncthreads(); //sync the threads s = (BLOCK_SIZE)/16; //16 #pragma unroll for (uint32_t k=0; k<3; ++k) { #pragma unroll for (uint32_t j=0; j<8; ++j) { int ss = j*s; int tidk = (8*k+j)*BLOCK_SIZE+tid-ss; if(ss<=tid && tid<ss+s) mu[tidk] += mu[tidk + s]; } } __syncthreads(); //sync the threads s = (BLOCK_SIZE)/32; //8 uint32_t k = 0; #pragma unroll for (uint32_t j=0; j<16; ++j) { int ss = j*s; int tidk = (16*k+j)*BLOCK_SIZE+tid-ss; if(ss<=tid && tid<ss+s) mu[tidk] += mu[tidk + s]; } k = 1; #pragma unroll for (uint32_t j=0; j<8; ++j) { int ss = j*s; int tidk = (16*k+j)*BLOCK_SIZE+tid-ss; if(ss<=tid && tid<ss+s) mu[tidk] += mu[tidk + s]; } __syncthreads(); //sync the threads s = (BLOCK_SIZE)/64; //4 k = 0; #pragma unroll for (uint32_t j=0; j<24; ++j) { int ss = j*s; int tidk = (24*k+j)*BLOCK_SIZE+tid-ss; if(ss<=tid && tid<ss+s) mu[tidk] += mu[tidk + s]; } //__syncthreads(); //sync the threads s = (BLOCK_SIZE)/128; //2 k = 0; #pragma unroll for (uint32_t j=0; j<24; ++j) { int ss = j*s; int tidk = (24*k+j)*BLOCK_SIZE+tid-ss; if(ss<=tid && tid<ss+s) mu[tidk] += mu[tidk + s]; } //__syncthreads(); //sync the threads s = (BLOCK_SIZE)/256; //1 k = 0; #pragma unroll for (uint32_t j=0; j<24; ++j) { int tidk = (24*k+j)*BLOCK_SIZE+tid-j; if(tid==j) mu[tidk] += mu[tidk + s]; } if(tid<6*4) {// && Ni[(k/3)*BLOCK_SIZE]>0 ) { atomicAdd_<T>(&mu_karch[tid],mu[tid*BLOCK_SIZE]); } } template<class T> __global__ void reduction_new(T *mu_karch,T *dbg) //, T *N) { __shared__ T mu[BLOCK_SIZE*4*6]; const int tid = threadIdx.x + blockDim.x * threadIdx.y; #pragma unroll for(int s=0; s<6*4; ++s) { // this is almost certainly bad ordering mu[tid+BLOCK_SIZE*s] = 1.0f; } bool exit=false; int tpr = BLOCK_SIZE/(4*6); // threads per row //reduction..... 
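// reduction_new instead gives each of the 24 channels ("rows") its own group
// of tpr = BLOCK_SIZE/24 consecutive threads. For a stride-s step, every
// thread in the group performs s/tpr strided additions and the first s%tpr
// threads of the group handle the remainder, so each group walks the
// reduction for its own channel; note that the stride loop below breaks out
// early once s reaches BLOCK_SIZE/4.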
__syncthreads(); //sync the threads #pragma unroll for(int r=0; r<4*6; ++r) { if (r*tpr <= tid && tid < (r+1)*tpr) { int tidr = tid - r*tpr; // id in row int offset = r*BLOCK_SIZE+tidr; //dbg[id] = offset; #pragma unroll for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) { int expr = s/tpr; // executions per row //dbg[id] = expr; for (int ex=0; ex<expr; ++ex) mu[offset+ex*tpr] += mu[offset+ex*tpr+s]; int exprem = s%tpr; // remaining executions if (tidr <exprem) mu[offset+expr*tpr] += mu[offset+expr*tpr+s]; __syncthreads(); if(s==BLOCK_SIZE/4) { exit=true; break; } } } if(exit) break; } //dbg[id] = mu[id+BLOCK_SIZE*3]; //dbg[id] =tid; if(tid<6*4) {// && Ni[(k/3)*BLOCK_SIZE]>0 ) { atomicAdd_<T>(&mu_karch[tid],mu[tid*BLOCK_SIZE]); } } extern void reduction(float *h_mu, float *d_mu, float *h_dbg, float *d_dbg, int selection) { for(uint32_t i=0; i<4*6; ++i) h_mu[i] =float(0.0f); for(uint32_t i=0; i<256; ++i) h_dbg[i] =float(0.0f); checkCudaErrors(cudaMemcpy(d_mu, h_mu, 6*4* sizeof(float), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_dbg, h_dbg, 256* sizeof(float), cudaMemcpyHostToDevice)); dim3 threads(16,16,1); //dim3 blocks(w/16+(w%16>0?1:0), h/16+(h%16>0?1:0),1); dim3 blocks(1,1,1); if(selection == 0){ reduction_old<float><<<blocks,threads>>>(d_mu,d_dbg); }else if(selection == 1) { reduction_new<float><<<blocks,threads>>>(d_mu,d_dbg); }else if (selection ==2) reduction_newNew<float><<<blocks,threads>>>(d_mu,d_dbg); else if (selection ==3) reduction_oldOptimized<256,float><<<blocks,threads>>>(d_mu,d_dbg); else if (selection ==4) reduction_oldOptimizedMemLayout<256,float><<<blocks,threads>>>(d_mu,d_dbg); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaMemcpy(h_mu, d_mu, 6*4*sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_dbg, d_dbg, 256*sizeof(float), cudaMemcpyDeviceToHost)); } extern void reduction(double *h_mu, double *d_mu, double *h_dbg, double *d_dbg, int selection) { for(uint32_t i=0; i<4*6; ++i) h_mu[i] =double(0.0f); for(uint32_t i=0; i<256; ++i) h_dbg[i] =double(0.0f); checkCudaErrors(cudaMemcpy(d_mu, h_mu, 6*4* sizeof(double), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_dbg, h_dbg, 256* sizeof(double), cudaMemcpyHostToDevice)); dim3 threads(16,16,1); //dim3 blocks(w/16+(w%16>0?1:0), h/16+(h%16>0?1:0),1); dim3 blocks(1,1,1); if(selection == 0){ reduction_old<double><<<blocks,threads>>>(d_mu,d_dbg); }else if(selection == 1) { reduction_new<double><<<blocks,threads>>>(d_mu,d_dbg); }else if (selection ==2) reduction_newNew<double><<<blocks,threads>>>(d_mu,d_dbg); else if (selection ==3) reduction_oldOptimized<256,double><<<blocks,threads>>>(d_mu,d_dbg); else if (selection ==4) reduction_oldOptimizedMemLayout<256,double><<<blocks,threads>>>(d_mu,d_dbg); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaMemcpy(h_mu, d_mu, 6*4*sizeof(double), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_dbg, d_dbg, 256*sizeof(double), cudaMemcpyDeviceToHost)); }
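The extern reduction() wrappers above launch a single 16x16 block, so with every shared entry initialised to 1.0f each of the 6*4 accumulators should come back as BLOCK_SIZE = 256 for the reference kernel (selection 0). A minimal, illustrative host driver along those lines (error handling omitted; in a real build the declaration would come from a shared header):

#include <cstdio>
#include <cuda_runtime.h>

// Declaration of the float overload defined above (illustrative driver only).
extern void reduction(float* h_mu, float* d_mu, float* h_dbg, float* d_dbg, int selection);

int main()
{
  float h_mu[6 * 4];   // 24 per-channel sums, filled by reduction()
  float h_dbg[256];    // debug buffer
  float *d_mu = nullptr, *d_dbg = nullptr;
  cudaMalloc(&d_mu, sizeof(h_mu));
  cudaMalloc(&d_dbg, sizeof(h_dbg));

  // selection: 0=old, 1=new, 2=newNew, 3=oldOptimized, 4=oldOptimizedMemLayout
  reduction(h_mu, d_mu, h_dbg, d_dbg, /*selection=*/0);

  for (int i = 0; i < 6 * 4; i++)
    printf("mu[%d] = %f\n", i, h_mu[i]);  // expect ~256 per entry for selection 0

  cudaFree(d_mu);
  cudaFree(d_dbg);
  return 0;
}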
#include <async_event.h> #include <thrust/count.h> //count #include <thrust/sort.h> //sort #include <thrust/binary_search.h> //lower_bound #include <thrust/unique.h> //unique #include <thrust/host_vector.h> #include <cusp/detail/format_utils.h> //offsets_to_indices #include <determinism_checker.h> #include <solvers/solver.h> #include <aggregation/coarseAgenerators/thrust_coarse_A_generator.h> #include <aggregation/coarseAgenerators/low_deg_coarse_A_generator.h> #include <logger.h> #include <omp.h> #define EXPERIMENTAL_ITERATIVE_MATCHING namespace amgx { namespace aggregation { namespace multi_pairwise { // include common routines for all selectors #include <aggregation/selectors/common_selector.h> // ------------------------ // Kernels // ------------------------ __device__ float random_weight2(int i, int j) { #define RAND_MULTIPLIER 1145637293 unsigned long i_min = (min(i, j) * RAND_MULTIPLIER); unsigned long i_max = (max(i, j) * RAND_MULTIPLIER); return ((float)i_min / i_max); } __device__ unsigned long random_weight3(int i, int j) { unsigned long a; a = (i + j) ^ 8; a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) + (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a ^ 0xd3a2646c) + (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) + (a >> 16); return a; } // findStrongestNeighbour kernel for block_dia_csr_matrix format // Reads the weight from edge_weights array template <typename IndexType, typename MatrixValueType> __global__ void findStrongestNeighbourBlockDiaCsr_V2(const IndexType *row_offsets, const IndexType *column_indices, MatrixValueType *edge_weights, IndexType num_block_rows, IndexType *aggregates, IndexType *strongest_neighbour_1phase, IndexType *strongest_neighbour, const size_t bsize, int phase, int merge_singletons) { int tid = threadIdx.x + blockDim.x * blockIdx.x; MatrixValueType weight; int jcol; while (tid < num_block_rows) { int strongest_unaggregated = -1; int strongest_aggregated = -1; MatrixValueType max_weight_unaggregated = 0.; MatrixValueType max_weight_aggregated = 0.; if (aggregates[tid] == -1) // Unaggregated row { for (int j = row_offsets[tid]; j < row_offsets[tid + 1]; j++) { jcol = column_indices[j]; if (phase == 1) { weight = edge_weights[j]; } else { weight = random_weight2(tid, jcol); } if (tid == jcol || jcol >= num_block_rows) { continue; } // skip diagonal and halo if (phase == 2 && strongest_neighbour_1phase[jcol] != tid) { continue; } // if 2nd phase only accept those who gave a hand on the 1st phase // Identify strongest aggregated and unaggregated neighbours (method by multi_pairwise) if (aggregates[jcol] == -1 && weight > 0.0 && (weight > max_weight_unaggregated || (weight == max_weight_unaggregated && random_weight3(tid, jcol) > random_weight3(tid, strongest_unaggregated)))) // unaggregated { max_weight_unaggregated = weight; strongest_unaggregated = jcol; } else if (aggregates[jcol] != -1 && weight > 0.0 && (weight > max_weight_aggregated || (weight == max_weight_aggregated && random_weight3(tid, jcol) > random_weight3(tid, strongest_aggregated)))) // aggregated { max_weight_aggregated = weight; strongest_aggregated = jcol; } } if (strongest_unaggregated == -1 && strongest_aggregated != -1) // All neighbours are aggregated { if ( merge_singletons == 1 ) // Put in same aggregate as strongest neighbour { aggregates[tid] = aggregates[strongest_aggregated]; } else // create singleton { aggregates[tid] = tid; } } else if (strongest_unaggregated != -1) { if (phase == 2) { MatrixValueType rand_w1 = random_weight2(tid, 
strongest_neighbour_1phase[tid]); strongest_neighbour[tid] = max_weight_unaggregated > rand_w1 ? strongest_unaggregated : strongest_neighbour_1phase[tid]; } else { strongest_neighbour_1phase[tid] = strongest_unaggregated; } } else { if (phase == 2) { strongest_neighbour[tid] = strongest_neighbour_1phase[tid]; } else { strongest_neighbour_1phase[tid] = tid; } } } tid += gridDim.x * blockDim.x; } } template <typename IndexType> __device__ bool atomicJoin( IndexType node, IndexType aggregate, IndexType *aggregates, int *sizes, int allowed ) { int mySize = sizes[node]; int theirSize = sizes[aggregate]; int theirSizeOld = theirSize; do { int newSize = mySize + theirSize; if ( newSize > allowed ) { return false; } theirSizeOld = theirSize; theirSize = atomicCAS( &sizes[aggregate], theirSize, newSize ); } while ( theirSize != theirSizeOld ); aggregates[node] = aggregate; return true; } template <typename IndexType, typename MatrixValueType, bool use_degree> __global__ void findStrongestNeighbourBlockDiaCsr_V3(const IndexType *row_offsets, const IndexType *column_indices, MatrixValueType *edge_weights, IndexType num_block_rows, IndexType *aggregates, IndexType *strongest_neighbour, int *sizes, int *degree, const size_t bsize, int max_aggregate_size, int merge_singletons) { int tid = threadIdx.x + blockDim.x * blockIdx.x; MatrixValueType weight; int jcol; while (tid < num_block_rows) { int strongest_unaggregated = -1; int strongest_aggregated = -1; int lowest_degree; if ( use_degree ) { lowest_degree = degree[tid]; //only interested in finding lower degree than self } else { lowest_degree = 0; //if we decide to not use degree than just propose to the strongest edge } int lowest_degree_neighbor = tid; MatrixValueType lowest_degree_weight = 1e100; //high start value, so that same degree neighbor won't become lowest degree neighbor MatrixValueType max_weight_unaggregated = 0.; MatrixValueType max_weight_aggregated = 0.; int mySize; if ( merge_singletons == 2 ) { mySize = sizes[tid]; } else { mySize = 0; } if ( merge_singletons != 2 ) { max_aggregate_size = 100000; } //this aggregate is already full if (mySize == max_aggregate_size) { aggregates[tid] = tid; } if (aggregates[tid] == -1) // Unaggregated row { for (int j = row_offsets[tid]; j < row_offsets[tid + 1]; j++) { jcol = column_indices[j]; if (tid == jcol || jcol >= num_block_rows) { continue; } // skip diagonal and halo weight = edge_weights[j]; if (!(weight > 0.0)) { continue; } if ( aggregates[jcol] != -1 ) //aggregated neighbor { int theirSize; if ( merge_singletons == 2 ) { theirSize = aggregates[sizes[jcol]]; } else { theirSize = 0; } //if all neighbors are aggregated, find the strongest edge to neighbor aggregate that is not full yet if (mySize + theirSize <= max_aggregate_size && (weight > max_weight_aggregated)) // aggregated { max_weight_aggregated = weight; strongest_aggregated = jcol; } } else //unaggregated neighbor { if ( use_degree && merge_singletons == 2 ) { int theirSize = sizes[jcol]; //get lowest degree neighbor or find out that there is no lower degree neighbor int current_degree = degree[jcol]; if ( mySize + theirSize <= max_aggregate_size && (current_degree < lowest_degree || current_degree == lowest_degree && weight > lowest_degree_weight) ) { lowest_degree = current_degree; lowest_degree_weight = weight; lowest_degree_neighbor = jcol; } //get highest weight neighbor if ( mySize + theirSize <= max_aggregate_size && (weight > max_weight_unaggregated) ) { strongest_unaggregated = jcol; max_weight_unaggregated = weight; } } 
if ( use_degree && merge_singletons != 2 ) //same as above but ignore sizes { //get lowest degree neighbor or find out that there is no lower degree neighbor int current_degree = degree[jcol]; if ( current_degree < lowest_degree || current_degree == lowest_degree && weight > lowest_degree_weight) { lowest_degree = current_degree; lowest_degree_weight = weight; lowest_degree_neighbor = jcol; } //get highest weight neighbor if (weight > max_weight_unaggregated) { strongest_unaggregated = jcol; max_weight_unaggregated = weight; } } if ( !use_degree && merge_singletons == 2 ) { //get highest weight neighbor only but pay attention to the aggregate sizes int theirSize = sizes[jcol]; //get highest weight neighbor if ( mySize + theirSize <= max_aggregate_size && (weight > max_weight_unaggregated) ) { strongest_unaggregated = jcol; max_weight_unaggregated = weight; } } if ( !use_degree && merge_singletons != 2 ) { //just highest weight if (weight > max_weight_unaggregated) { strongest_unaggregated = jcol; max_weight_unaggregated = weight; } } } //prefer lowest degree neighbor if ( lowest_degree_neighbor != tid ) { strongest_unaggregated = lowest_degree_neighbor; } if (strongest_unaggregated != -1) //Unaggregated neighbor exists { strongest_neighbour[tid] = strongest_unaggregated; //propose the strongest unaggregated neighbour } if (strongest_unaggregated == -1 && strongest_aggregated != -1) // All neighbours are aggregated but small enough aggregated neighbors exist { if ( merge_singletons == 0 ) { aggregates[tid] = tid; } if ( merge_singletons == 1 ) { aggregates[tid] = aggregates[strongest_aggregated]; } if ( merge_singletons == 2) { atomicJoin( tid, aggregates[strongest_aggregated], aggregates, sizes, max_aggregate_size ); //try to join, can fail. maybe it works next round.
} } if (strongest_unaggregated == -1 && strongest_aggregated == -1) //no feasable neighbor at all, become singleton { strongest_neighbour[tid] = tid; //become singleton } } tid += gridDim.x * blockDim.x; } } template <typename IndexType, typename ValueType> __global__ void computeDegree( const IndexType *ia, const IndexType *ja, const ValueType *weights, IndexType *aggregates, IndexType *sizes, IndexType *degree, IndexType numRows, IndexType max_aggregate_size) { int i = threadIdx.x + blockDim.x * blockIdx.x; while ( i < numRows ) { int myDegree = 0; IndexType ia_ip1 = ia[i + 1]; for ( IndexType ii = ia[i]; ii < ia_ip1; ii++ ) { IndexType j = ja[ii]; if ( j == i ) { continue; } int mySize, theirSize; if ( sizes != NULL ) { mySize = sizes[i]; theirSize = sizes[j]; } else { mySize = theirSize = 0; } if ( weights[ii] > 0.0 && aggregates[j] == -1 && mySize + theirSize <= max_aggregate_size ) { myDegree++; } } degree[i] = myDegree; i += gridDim.x * blockDim.x; } } template <typename IndexType, typename ValueType> __global__ void mergeSingletonsSmart(const IndexType *ia, const IndexType *ja, const ValueType *weights, IndexType *aggregates, IndexType *sizes, IndexType numRows, int max_aggregate_size) { int tid = threadIdx.x + blockDim.x * blockIdx.x; while ( tid < numRows ) { //unaggregated nodes try to join or create their own aggregate if ( aggregates[tid] == -1 ) { bool joined = false; while ( !joined ) { int neighbor_aggregate = -1; ValueType max_weight = 0.0; IndexType mySize = sizes[tid]; for (IndexType ii = ia[tid]; ii < ia[tid + 1]; ii++) { IndexType j = ja[ii]; if (j == tid || j >= numRows) { continue; } if ( aggregates[j] != -1 && sizes[aggregates[j]] + mySize <= max_aggregate_size && weights[ii] > max_weight ) { neighbor_aggregate = aggregates[j]; max_weight = weights[ii]; } } //no possible neighbor found if ( neighbor_aggregate == -1 ) { //create own aggregate aggregates[tid] = tid; joined = true; } else { //try to join joined = atomicJoin( tid, neighbor_aggregate, aggregates, sizes, max_aggregate_size ); } } } tid += gridDim.x * blockDim.x; } } template <typename IndexType> __global__ void updateAggregateSizes( IndexType *sizesSource, IndexType *sizes, IndexType *aggregates, IndexType numRows ) { int tid = threadIdx.x + blockDim.x * blockIdx.x; while ( tid < numRows ) { IndexType agg = aggregates[tid]; IndexType aggregateSize = sizes[agg]; IndexType mySize = sizesSource[tid]; while ( mySize > aggregateSize ) { aggregateSize = atomicCAS( &sizes[agg], aggregateSize, mySize ); } tid += gridDim.x * blockDim.x; } } // Kernel that checks if perfect matchs exist template <typename IndexType> __global__ void matchEdges(const IndexType num_rows, IndexType *aggregates, int *strongest_neighbour, IndexType *sizes) { int tid = threadIdx.x + blockDim.x * blockIdx.x; int potential_match, potential_match_neighbour; while (tid < num_rows) { if (aggregates[tid] == -1) // Unaggregated row { potential_match = strongest_neighbour[tid]; potential_match_neighbour = strongest_neighbour[potential_match]; if ( potential_match == tid ) { aggregates[tid] = tid; } else if (potential_match != -1 && potential_match_neighbour == tid && tid < potential_match) // we have a match { aggregates[tid] = tid; aggregates[potential_match] = tid; if ( sizes != NULL) { sizes[tid] += sizes[potential_match]; } } } tid += gridDim.x * blockDim.x; } } template <typename IndexType, int block_size> __global__ void countAggregates(const IndexType num_rows, IndexType *aggregates, int *num_unaggregated) { int tid = threadIdx.x + 
blockDim.x * blockIdx.x; int c = 0; int i = tid; while ( i < num_rows ) { c += ( aggregates[i] == -1 ); i += gridDim.x * blockDim.x; } __shared__ volatile int smem[block_size]; smem[threadIdx.x] = c; __syncthreads(); for ( int off = blockDim.x / 2; off >= 32; off = off / 2 ) { if ( threadIdx.x < off ) { smem[threadIdx.x] += smem[threadIdx.x + off]; } __syncthreads(); } // warp reduce if ( threadIdx.x < 32 ) { smem[threadIdx.x] += smem[threadIdx.x + 16]; smem[threadIdx.x] += smem[threadIdx.x + 8]; smem[threadIdx.x] += smem[threadIdx.x + 4]; smem[threadIdx.x] += smem[threadIdx.x + 2]; smem[threadIdx.x] += smem[threadIdx.x + 1]; } if ( threadIdx.x == 0 ) { atomicAdd(num_unaggregated, smem[0]); } } template <typename IndexType> __global__ void joinExistingAggregates(IndexType num_rows, IndexType *aggregates, IndexType *aggregates_candidate) { int tid = threadIdx.x + blockDim.x * blockIdx.x; while (tid < num_rows) { if (aggregates[tid] == -1 && aggregates_candidate[tid] != -1) // Unaggregated row { aggregates[tid] = aggregates_candidate[tid]; } tid += gridDim.x * blockDim.x; } } // Kernel that merges unaggregated vertices its strongest aggregated neighbour // Weights are read from edge_weights array // For block_dia_csr_matrix_format template <typename IndexType, typename MatrixValueType> __global__ void mergeWithExistingAggregatesBlockDiaCsr_V2(const IndexType *row_offsets, const IndexType *column_indices, const MatrixValueType *edge_weights, const int num_block_rows, IndexType *aggregates, int bsize, const int deterministic, IndexType *aggregates_candidate) { int tid = threadIdx.x + blockDim.x * blockIdx.x; int jcol; MatrixValueType weight; while (tid < num_block_rows) { MatrixValueType max_weight_aggregated = 0.; int strongest_aggregated = -1; if (aggregates[tid] == -1) // Unaggregated row { for (int j = row_offsets[tid]; j < row_offsets[tid + 1]; j++) { // Compute edge weight weight = edge_weights[j]; jcol = column_indices[j]; if (jcol == tid || jcol >= num_block_rows) { continue; } // skip diagonal if ( aggregates[jcol] == num_block_rows ) { continue; } // skip dd rows // Identify strongest aggregated neighbour if (aggregates[jcol] != -1 && weight > 0 && (weight > max_weight_aggregated || (weight == max_weight_aggregated && random_weight3( tid, jcol ) > random_weight3( tid, strongest_aggregated )))) // { max_weight_aggregated = weight; strongest_aggregated = jcol; } } if (strongest_aggregated != -1) // Found a neighbour to aggregate to { if (deterministic == 1) { aggregates_candidate[tid] = aggregates[strongest_aggregated]; } else { // Put in same aggregate as strongest neighbour aggregates[tid] = aggregates[strongest_aggregated]; } } else // All neighbours are unaggregated, leave alone { if (deterministic == 1) { aggregates_candidate[tid] = tid; } else { aggregates[tid] = tid; } } } tid += gridDim.x * blockDim.x; } } // Kernel to extract diagonal for csr_matrix format template <typename IndexType, typename ValueType> __global__ void getDiagonalKernel(const IndexType *offsets, const IndexType *column_indices, const ValueType *values, const IndexType numRows, ValueType *diagonal) { int tIdx = threadIdx.x + blockDim.x * blockIdx.x; while (tIdx < numRows) { const int offset = offsets[tIdx]; const int numj = offsets[tIdx + 1] - offset; for (int j = offset; j < offset + numj; j++) { int jcol = column_indices[j]; if (tIdx == jcol) { diagonal[tIdx] = values[j]; } } tIdx += gridDim.x * blockDim.x; } } // Kernel to extract diagonal for csr_matrix format template <typename IndexType, typename 
ValueType> __global__ void getDiagonalKernelNoDiaProp(const IndexType *dia_idx, const ValueType *values, const IndexType numRows, ValueType *diagonal) { int tIdx = threadIdx.x + blockDim.x * blockIdx.x; while (tIdx < numRows) { diagonal[tIdx] = values[dia_idx[tIdx]]; tIdx += gridDim.x * blockDim.x; } } // filter edge weights like this: // set w_ij = 0 iff // w_ij < alpha * sqrt( max_k{w_ik} * max_l{w_jl} ) // alpha is some constant, 0.25 or 0.5 should work fine template<typename IndexType, typename ValueType> __global__ void filterWeights( const IndexType *row_offsets, const IndexType *row_indices, const IndexType *col_indices, const IndexType *diag, const ValueType *old_weights, ValueType *new_weights, IndexType num_nonzero_blocks, IndexType num_owned, ValueType alpha ) { int tid = threadIdx.x + blockDim.x * blockIdx.x; int i, j, kmin, kmax; ValueType max_ik, max_jl; while ( tid < num_nonzero_blocks ) { i = row_indices[tid]; j = col_indices[tid]; if ( i != j && j < num_owned ) { //find max_k{w_ik} kmin = row_offsets[i]; kmax = row_offsets[i + 1]; max_ik = 0.0; for (int k = kmin; k < kmax; k++) { if ( col_indices[k] != i && old_weights[k] > max_ik ) { max_ik = old_weights[k]; } } //find max_l{w_jl} kmin = row_offsets[j]; kmax = row_offsets[j + 1]; max_jl = 0.0; for (int l = kmin; l < kmax; l++) { if ( col_indices[l] != j && old_weights[l] > max_jl ) { max_jl = old_weights[l]; } } //test squared inequality if ( old_weights[tid] * old_weights[tid] < alpha * alpha * max_ik * max_jl ) { new_weights[tid] = 0.0; } else //rescale to relative importance. this should also increase the chance of a handshake { new_weights[tid] = old_weights[tid]; } // new_weights[tid] = old_weights[tid] / sqrt(max_ik*max_jl); } tid += gridDim.x * blockDim.x; } } template<typename IndexType, typename ValueType> __global__ void gatherValuesInterleaved( const ValueType *inValues, ValueType *outValues, IndexType nnz, int sq_blocksize, int index_offset ) { int tid = threadIdx.x + blockDim.x * blockIdx.x; while ( tid < nnz ) { //at least the write is coalesced outValues[tid] = inValues[tid * sq_blocksize + index_offset]; tid += gridDim.x * blockDim.x; } } template<typename IndexType, typename ValueTypeV, typename ValueTypeM> __global__ void addToWeights( ValueTypeM *edge_weights, const ValueTypeV *x, const IndexType *row_indices, IndexType *col_indices, IndexType nnz, double scale ) { int tid = threadIdx.x + blockDim.x * blockIdx.x; while ( tid < nnz ) { int i = row_indices[tid]; int j = col_indices[tid]; edge_weights[tid] -= static_cast<ValueTypeM>( scale * fabs( x[i] - x[j] ) ); tid += gridDim.x * blockDim.x; } } template <typename ValueType, typename IndexType> __global__ void rescaleVector( ValueType *x, IndexType numRows ) { int tid = threadIdx.x + blockDim.x * blockIdx.x; while ( tid < numRows ) { x[tid] = 2 * x[tid] - 1; tid += gridDim.x * blockDim.x; } } // ----------------- // Methods // ---------------- // Constructor template<class T_Config> MultiPairwiseSelectorBase<T_Config>::MultiPairwiseSelectorBase(AMG_Config &cfg, const std::string &cfg_scope) { deterministic = cfg.AMG_Config::getParameter<IndexType>("determinism_flag", "default"); max_iterations = cfg.AMG_Config::getParameter<IndexType>("max_matching_iterations", cfg_scope); numUnassigned_tol = cfg.AMG_Config::getParameter<double>("max_unassigned_percentage", cfg_scope); two_phase = cfg.AMG_Config::getParameter<int>("handshaking_phases", cfg_scope) == 2; m_aggregation_edge_weight_component = 
cfg.AMG_Config::getParameter<int>("aggregation_edge_weight_component", cfg_scope); aggregation_passes = cfg.AMG_Config::getParameter<int>("aggregation_passes", cfg_scope); //default to size 8 aggregates. maybe its more convenient to have that as a config parameter filter_weights = cfg.AMG_Config::getParameter<int>("filter_weights", cfg_scope); //by default: no filtering filter_weights_alpha = cfg.AMG_Config::getParameter<double>( "filter_weights_alpha", cfg_scope ); //default to 0.25 full_ghost_level = cfg.AMG_Config::getParameter<int>( "full_ghost_level", cfg_scope ); //defaults to 0 notay_weights = cfg.AMG_Config::getParameter<int>( "notay_weights", cfg_scope ); //defaults to 0 ghost_offdiag_limit = cfg.AMG_Config::getParameter<int>( "ghost_offdiag_limit", cfg_scope ); //defaults to 0 merge_singletons = cfg.AMG_Config::getParameter<int>( "merge_singletons", cfg_scope ); //defaults to 1 weight_formula = cfg.AMG_Config::getParameter<int>( "weight_formula", cfg_scope ); //wheight formula defaults to 0 serial_matching = cfg.AMG_Config::getParameter<int>( "serial_matching", cfg_scope ) != 0; //will use a serial matching algorithm instead of handshake modified_handshake = cfg.AMG_Config::getParameter<int>("modified_handshake", cfg_scope ) == 1; //passes = 1 -> max = 3 //passes = 2 -> max = 5 //passes = 3 -> max = 10 //passes = 4 -> max = 18 max_aggregate_size = 2; for (int i = 1; i < aggregation_passes; i ++) { max_aggregate_size *= 2; } max_aggregate_size += aggregation_passes - (aggregation_passes / 2); mCfg = cfg; mCfg_scope = cfg_scope; } // setAggregates for block_dia_csr_matrix_h format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void MultiPairwiseSelector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::setAggregates_common_sqblocks(Matrix_h &A, typename Matrix_h::IVector &aggregates, typename Matrix_h::IVector &aggregates_global, int &num_aggregates, MVector &edge_weights, IVector &sizes) { FatalError("MultiPairwise selector: setAggregates not implemented on CPU, exiting", AMGX_ERR_NOT_SUPPORTED_TARGET); } // device specialization //edge_weights is an in/out parameter: //if its size is zero, the edge_weights will be computed from A and stored into edge_weights //else the edge_weights will not be computed and assumed to be valid for the given A. the value array of A is not used in this case template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void MultiPairwiseSelector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::setAggregates_common_sqblocks(Matrix_d &A, typename Matrix_d::IVector &aggregates, typename Matrix_d::IVector &aggregates_global, int &num_aggregates, MVector &edge_weights, IVector &sizes) { IndexType num_block_rows = (int) A.get_num_rows(); IndexType num_nonzero_blocks = (int) A.get_num_nz(); // both ways are supported IndexType total_nz = (A.is_matrix_singleGPU()) ? num_nonzero_blocks : A.manager->num_nz_all(); typename Matrix_d::IVector &row_indices = A.row_indices; row_indices.resize( total_nz); cusp::detail::offsets_to_indices(A.row_offsets, row_indices); IndexType total_rows = (A.is_matrix_singleGPU()) ? 
A.get_num_rows() : A.manager->num_rows_all(); aggregates.resize(total_rows); thrust::fill(aggregates.begin(), aggregates.end(), -1); cudaCheckError(); if ( this->merge_singletons == 2 && sizes.size() == 0 ) { sizes.resize( total_rows, 1 ); //init with all ones } const IndexType *A_row_offsets_ptr = A.row_offsets.raw(); const IndexType *A_row_indices_ptr = row_indices.raw(); const IndexType *A_column_indices_ptr = A.col_indices.raw(); const IndexType *A_dia_idx_ptr = A.diag.raw(); const ValueType *A_nonzero_values_ptr = A.values.raw(); typename Matrix_d::IVector strongest_neighbour(num_block_rows, -1); typename Matrix_d::IVector strongest_neighbour_1phase(num_block_rows, -1); Vector<TemplateConfig<AMGX_device, AMGX_vecUInt, t_matPrec, t_indPrec> > aggregated(num_block_rows, 0); IndexType *strongest_neighbour_ptr = strongest_neighbour.raw(); IndexType *strongest_neighbour_1phase_ptr = strongest_neighbour_1phase.raw(); IndexType *aggregates_ptr = aggregates.raw(); const int threads_per_block = 256; const int num_blocks = min( AMGX_GRID_MAX_SIZE, (num_block_rows - 1) / threads_per_block + 1 ); int numUnassigned = num_block_rows; int numUnassigned_previous = numUnassigned; bool computeWeights = ( edge_weights.size() == 0 ); if (computeWeights) { if ( A.hasProps( DIAG ) ) { edge_weights.resize( num_nonzero_blocks + num_block_rows, 0.0 ); } else { edge_weights.resize( num_nonzero_blocks + 1, -1 ); //+1 is important to some algorithms } } ValueType *edge_weights_ptr = edge_weights.raw(); ValueType *rand_edge_weights_ptr = NULL; cudaStream_t str = thrust::global_thread_handle::get_stream(); // Compute the edge weights if ( computeWeights ) { const int num_blocks_V2 = min( AMGX_GRID_MAX_SIZE, (num_nonzero_blocks - 1) / threads_per_block + 1); //compute with std formula cudaFuncSetCacheConfig(computeEdgeWeightsBlockDiaCsr_V2<IndexType, ValueType, ValueType>, cudaFuncCachePreferL1); computeEdgeWeightsBlockDiaCsr_V2 <<< num_blocks_V2, threads_per_block, 0, str>>>(A_row_offsets_ptr, A_row_indices_ptr, A_column_indices_ptr, A_dia_idx_ptr, A_nonzero_values_ptr, num_nonzero_blocks, edge_weights_ptr, rand_edge_weights_ptr, num_block_rows, A.get_block_dimy(), this->m_aggregation_edge_weight_component, this->weight_formula); cudaCheckError(); } //filter weights if desired if ( this->filter_weights == 1 ) { MVector tmp( edge_weights.size() ); const int num_blocks_filter = min( AMGX_GRID_MAX_SIZE, (num_nonzero_blocks - 1) / threads_per_block + 1); cudaStreamSynchronize(str); cudaCheckError(); filterWeights <<< num_blocks_filter, threads_per_block, 0, str>>>( A_row_offsets_ptr, A_row_indices_ptr, A_column_indices_ptr, A_dia_idx_ptr, edge_weights_ptr, tmp.raw(), num_nonzero_blocks, num_block_rows, this->filter_weights_alpha); cudaStreamSynchronize(str); cudaCheckError(); tmp.swap( edge_weights ); edge_weights_ptr = edge_weights.raw(); } // compute matching if ( !this->serial_matching ) { IVector degree; if ( this->modified_handshake ) { degree.resize( num_block_rows ); } #ifdef EXPERIMENTAL_ITERATIVE_MATCHING // TODO: allocate host pinned memory AsyncEvent *throttle_event = new AsyncEvent; throttle_event->create(); typename Matrix_h::IVector h_unagg_vec(1); typename Matrix_d::IVector d_unagg_vec(1); int *unaggregated = h_unagg_vec.raw(); int *d_unaggregated = d_unagg_vec.raw(); #endif int icount, s = 1; { icount = 0; ValueType *weights_ptr = edge_weights_ptr; do { if ( !this->two_phase ) { if ( this->modified_handshake ) computeDegree <<< num_blocks, threads_per_block, 0, str>>>(A_row_offsets_ptr, 
A_column_indices_ptr, weights_ptr, aggregates_ptr, sizes.raw(), degree.raw(), num_block_rows, this->max_aggregate_size ); // 1-phase handshaking if ( this->modified_handshake ) findStrongestNeighbourBlockDiaCsr_V3<IndexType, ValueType, true> <<< num_blocks, threads_per_block, 0, str>>>(A_row_offsets_ptr, A_column_indices_ptr, weights_ptr, num_block_rows, aggregates_ptr, strongest_neighbour_ptr, sizes.raw(), degree.raw(), A.get_block_dimy(), this->max_aggregate_size, this->merge_singletons); else findStrongestNeighbourBlockDiaCsr_V3<IndexType, ValueType, false> <<< num_blocks, threads_per_block, 0, str>>>(A_row_offsets_ptr, A_column_indices_ptr, weights_ptr, num_block_rows, aggregates_ptr, strongest_neighbour_ptr, sizes.raw(), degree.raw(), A.get_block_dimy(), this->max_aggregate_size, this->merge_singletons); cudaCheckError(); } else { // 2-phase handshaking findStrongestNeighbourBlockDiaCsr_V2 <<< num_blocks, threads_per_block, 0, str>>>(A_row_offsets_ptr, A_column_indices_ptr, weights_ptr, num_block_rows, aggregates_ptr, strongest_neighbour_1phase_ptr, strongest_neighbour_ptr, A.get_block_dimy(), 1, this->merge_singletons); cudaCheckError(); // 2nd phase: for each block_row, find the strongest neighbour among those who gave hand on 1st phase findStrongestNeighbourBlockDiaCsr_V2 <<< num_blocks, threads_per_block, 0, str>>>(A_row_offsets_ptr, A_column_indices_ptr, weights_ptr, num_block_rows, aggregates_ptr, strongest_neighbour_1phase_ptr, strongest_neighbour_ptr, A.get_block_dimy(), 2, this->merge_singletons); cudaCheckError(); } // Look for perfect matches. Also, for nodes without unaggregated neighbours, merge with aggregate containing strongest neighbour if ( this->merge_singletons == 2 ) { matchEdges <<< num_blocks, threads_per_block, 0, str>>>(num_block_rows, aggregates_ptr, strongest_neighbour_ptr, sizes.raw()); } else { matchEdges <<< num_blocks, threads_per_block, 0, str>>>(num_block_rows, aggregates_ptr, strongest_neighbour_ptr, (int *)NULL); } cudaCheckError(); #ifdef EXPERIMENTAL_ITERATIVE_MATCHING s = (icount & 1); if ( s == 0 ) { // count unaggregated vertices cudaMemsetAsync(d_unaggregated, 0, sizeof(int), str); countAggregates<IndexType, threads_per_block> <<< num_blocks, threads_per_block, 0, str>>>(num_block_rows, aggregates_ptr, d_unaggregated); cudaCheckError(); cudaMemcpyAsync(unaggregated, d_unaggregated, sizeof(int), cudaMemcpyDeviceToHost, str); throttle_event->record(str); } else { throttle_event->sync(); numUnassigned_previous = numUnassigned; numUnassigned = *unaggregated; } #else cudaStreamSynchronize(str); numUnassigned_previous = numUnassigned; numUnassigned = (int)thrust::count(aggregates.begin(), aggregates.begin() + num_block_rows, -1); cudaCheckError(); #endif icount++; } while ( (s == 0) || !(numUnassigned == 0 || icount > this->max_iterations || 1.0 * numUnassigned / num_block_rows < this->numUnassigned_tol || numUnassigned == numUnassigned_previous)); } // printf("%i,\n", icount); #ifdef EXPERIMENTAL_ITERATIVE_MATCHING delete throttle_event; #endif } else { computeMatchingSerialGreedy( A, aggregates, num_aggregates, edge_weights ); } if ( this->merge_singletons == 1 ) { // Merge remaining vertices with current aggregates if (this->deterministic != 1) { while (numUnassigned != 0) { mergeWithExistingAggregatesBlockDiaCsr_V2 <<< num_blocks, threads_per_block, 0, str>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, aggregates_ptr, A.get_block_dimy(), this->deterministic, (IndexType *) NULL); cudaCheckError(); numUnassigned = 
(int)thrust::count(aggregates.begin(), aggregates.begin() + num_block_rows, -1); cudaCheckError(); } } else { typename Matrix_d::IVector aggregates_candidate(num_block_rows, -1); while (numUnassigned != 0) { mergeWithExistingAggregatesBlockDiaCsr_V2 <<< num_blocks, threads_per_block, 0, str>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, aggregates_ptr, A.get_block_dimy(), this->deterministic, aggregates_candidate.raw()); cudaCheckError(); joinExistingAggregates <<< num_blocks, threads_per_block, 0, str>>>(num_block_rows, aggregates_ptr, aggregates_candidate.raw()); cudaCheckError(); numUnassigned = (int)thrust::count(aggregates.begin(), aggregates.begin() + num_block_rows, -1); cudaCheckError(); } aggregates_candidate.resize(0); } } else if (this->merge_singletons == 0 ) { //make singletons aggregateSingletons <<< num_blocks, threads_per_block, 0, str>>>( aggregates_ptr, num_block_rows ); cudaCheckError(); } else if ( this->merge_singletons == 2 ) { //merges all remaining singletons into adequate neighbors if possible mergeSingletonsSmart <<< num_blocks, threads_per_block, 0, str>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, aggregates_ptr, sizes.raw(), num_block_rows, this->max_aggregate_size); cudaCheckError(); } //This will assign num_aggregates to the pseudo aggregate without counting it. Perfect! this->renumberAndCountAggregates(aggregates, aggregates_global, num_block_rows, num_aggregates); if ( this->merge_singletons == 2 ) { //update the sizes vector so that its size matches the renumbered aggregates IVector sizesSource; sizesSource.swap( sizes ); sizes.resize( num_aggregates, 1 ); updateAggregateSizes <<< num_blocks, threads_per_block, 0, str>>>( sizesSource.raw(), sizes.raw(), aggregates_ptr, num_block_rows ); cudaCheckError(); } } //instead of a handshake, we use a serial greedy algorithm to compute a better matching //the algorithm: // 1. compute degree of every node and sort nodes by degree into a doubly linked list // 2.
while non-isolated nodes left: // take node with minimum degree > 0 // find strongest edge to unaggregated node and assign to new aggregate // remove both nodes from linked list // decrease degree of each neighbor by one for each of the two nodes // update list template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void MultiPairwiseSelector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeMatchingSerialGreedy( const Matrix_d &A, IVector &aggregates, int &numAggregates, MVector &edge_weights) { IndexType numRows = A.row_offsets.size() - 1; IndexType nnz = A.col_indices.size(); //allocate memory on host IndexType *ia = new IndexType[numRows + 1]; IndexType *ja = new IndexType[nnz]; ValueType *w = new ValueType[nnz]; IndexType *agg = new IndexType[numRows]; IndexType *deg = new IndexType[numRows]; //copy cudaMemcpy( ia, A.row_offsets.raw(), sizeof(IndexType) * (numRows + 1), cudaMemcpyDeviceToHost ); cudaMemcpy( ja, A.col_indices.raw(), sizeof(IndexType)*nnz, cudaMemcpyDeviceToHost ); cudaMemcpy( w, edge_weights.raw(), sizeof(ValueType)*nnz, cudaMemcpyDeviceToHost ); //init agg and compute the degree of each aggregate int max_degree = 0; for (IndexType i = 0; i < numRows; i++) { agg[i] = -1; int degree = 0; for (IndexType ii = ia[i]; ii < ia[i + 1]; ii++) //only care for positive weights { if ( ja[ii] != i && w[ii] > 0.0 ) { degree++; } } if ( degree > max_degree ) { max_degree = degree; } deg[i] = degree; } if ( max_degree >= numRows ) { FatalError( "max degree is greater than numRows.", AMGX_ERR_UNKNOWN ); } //init double linked list IndexType *fwd = new IndexType[numRows + max_degree + 1]; IndexType *bwd = new IndexType[numRows + max_degree + 1]; for (IndexType i = 0; i < numRows + max_degree + 1; i++) { fwd[i] = i; bwd[i] = i; } IndexType nodesLeft = numRows; numAggregates = 0; //insert nodes into list for (IndexType i = numRows - 1; i >= 0; i--) //inserting in backward order the nodes will be sorted by index in case of same degree { //insert forward following root fwd[i] = fwd[numRows + deg[i]]; fwd[numRows + deg[i]] = i; //insert backward bwd[i] = numRows + deg[i]; bwd[fwd[i]] = i; //isolated nodes cannot be aggregated if ( deg[i] == 0 ) { nodesLeft--; } } while ( nodesLeft > 0 ) { IndexType node = numRows; int degree; for (degree = 1; degree <= max_degree; degree++) { //list not empty -> select node if ( fwd[numRows + degree] < numRows ) //selecting the first node will select the most recently inserted one or the one with lowest index. both is preferable { node = fwd[numRows + degree]; } if ( node < numRows ) { break; } } //no node with degree > 1 found even though nodesLeft > 0 if ( node == numRows ) { FatalError("nodeLeft counting or list invalid", AMGX_ERR_UNKNOWN ); } if ( agg[node] != -1 ) { FatalError("node is already aggregated", AMGX_ERR_UNKNOWN ); } //find strongest edge ValueType max_weight = 0.0; IndexType max_node = numRows; //use this as gatekeeper, so if weight == 0 the node index will not be greater than this for (IndexType ii = ia[node]; ii < ia[node + 1]; ii++) { IndexType j = ja[ii]; if ( agg[j] != -1 || j == node) { continue; } if ( w[ii] > 0.0 ) { degree--; } //deterministic, doesn't selects 0 weight. if ( w[ii] > max_weight || (w[ii] == max_weight && j > max_node) ) //always taking the edge pointing to the max node can give good alignment if numbering is structured { max_node = j; max_weight = w[ii]; } } //Note that there has to be at least one neighbor node because degree of node is at least 1. 
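// `degree` was read from the bucket this node was taken from and has been
// decremented once per remaining positive-weight, unaggregated neighbour in
// the scan above, so with consistent list bookkeeping it reaches exactly zero
// here; the two FatalError checks below guard against corruption of that
// bookkeeping.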
if ( max_node == numRows ) { FatalError( "node has no neighbor although degree of node is at least 1", AMGX_ERR_UNKNOWN ); } if ( degree != 0 ) { FatalError( "node degree corrupted", AMGX_ERR_UNKNOWN ); } //aggregate agg[node] = node; agg[max_node] = node; numAggregates++; nodesLeft -= 2; //remove from list fwd[bwd[node]] = fwd[node]; bwd[fwd[node]] = bwd[node]; fwd[bwd[max_node]] = fwd[max_node]; bwd[fwd[max_node]] = bwd[max_node]; //update neighbors and list //max_node first for (IndexType ii = ia[max_node]; ii < ia[max_node + 1]; ii++) { IndexType j = ja[ii]; if ( agg[j] != -1 || w[ii] <= 0.0) { continue; } //remove j from list fwd[bwd[j]] = fwd[j]; bwd[fwd[j]] = bwd[j]; //update degree of j deg[j]--; //add j back to start of the list fwd[j] = fwd[numRows + deg[j]]; bwd[j] = numRows + deg[j]; bwd[fwd[j]] = j; fwd[bwd[j]] = j; if (deg[j] == 0) { nodesLeft--; } } //node second, this will prefer nodes neighbors over max_nodes neighbors when choosing the next node for (IndexType ii = ia[node]; ii < ia[node + 1]; ii++) { IndexType j = ja[ii]; if ( agg[j] != -1 || w[ii] <= 0.0) { continue; } //remove j from list fwd[bwd[j]] = fwd[j]; bwd[fwd[j]] = bwd[j]; //update degree of j deg[j]--; //add j back to start of the list fwd[j] = fwd[numRows + deg[j]]; bwd[j] = numRows + deg[j]; bwd[fwd[j]] = j; fwd[bwd[j]] = j; if (deg[j] == 0) { nodesLeft--; } } } //copy result back to device cudaMemcpy( aggregates.raw(), agg, sizeof(IndexType)*numRows, cudaMemcpyHostToDevice ); //assert matching for (IndexType node = 0; node < numRows; node++) { if ( agg[node] == -1 ) { continue; } for ( IndexType partner = 0; partner < numRows; partner++) { if ( agg[partner] == agg[node] ) { if ( partner == node ) { continue; } bool neighbor = false; for (IndexType ii = ia[node]; ii < ia[node + 1]; ii++) if ( ja[ii] == partner ) { neighbor = true; break; } if ( !neighbor ) { for (IndexType ii = ia[partner]; ii < ia[partner + 1]; ii++) if ( ja[ii] == node ) { neighbor = true; break; } } if ( !neighbor ) { FatalError("Internal error in aggregation selector", AMGX_ERR_INTERNAL); } } } } //you shall not leak memory delete[] ia; delete[] ja; delete[] w; delete[] agg; delete[] deg; delete[] fwd; delete[] bwd; } //this kernel merges aggregate2 into aggregate1 template<typename IndexType> __global__ void mergeAggregates(IndexType *aggregate1, const IndexType *aggregate2, IndexType sizeAggregate1, IndexType sizeAggregate2, IndexType sizeAggregate3) { int tid = threadIdx.x + blockDim.x * blockIdx.x; while ( tid < sizeAggregate1 ) { if ( aggregate1[tid] == sizeAggregate2 ) { aggregate1[tid] = sizeAggregate3; } else { aggregate1[tid] = aggregate2[aggregate1[tid]]; } tid += gridDim.x * blockDim.x; } } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void MultiPairwiseSelector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeIncompleteGalerkin( const Matrix_h &A, Matrix_h &Ac, const typename Matrix_h::IVector &aggregates, const typename Matrix_h::IVector &R_row_offsets, const typename Matrix_h::IVector &R_column_indices, const int num_aggregates ) { FatalError("computeIncomlpetegalerkin is not supported on host. 
Run with ghost_offdiag_limit=0 instead.", AMGX_ERR_NOT_SUPPORTED_TARGET); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void MultiPairwiseSelector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeIncompleteGalerkin( const Matrix_d &A, Matrix_d &Ac, const typename Matrix_d::IVector &aggregates, const typename Matrix_d::IVector &R_row_offsets, const typename Matrix_d::IVector &R_column_indices, const int num_aggregates ) { FatalError("computeIncomlpetegalerkin is not implemented yet. run with ghost_offdiag_limit=0 instead.", AMGX_ERR_NOT_IMPLEMENTED); } template<class TConfig> void MultiPairwiseSelectorBase<TConfig>::assertRestriction( const IVector &R_row_offsets, const IVector &R_col_indices, const IVector &aggregates ) { int *r_ia = new int[R_row_offsets.size()]; int *r_ja = new int[R_col_indices.size()]; int *agg = new int[aggregates.size()]; int *used_col = new int[aggregates.size()]; for ( int i = 0; i < aggregates.size(); i++ ) { used_col[i] = 0; } cudaMemcpy( r_ia, R_row_offsets.raw(), sizeof(int)*R_row_offsets.size(), cudaMemcpyDeviceToHost ); cudaMemcpy( r_ja, R_col_indices.raw(), sizeof(int)*R_col_indices.size(), cudaMemcpyDeviceToHost ); cudaMemcpy( agg, aggregates.raw(), sizeof(int)*aggregates.size(), cudaMemcpyDeviceToHost ); for ( int i = 0; i < R_row_offsets.size() - 1; i++ ) { for ( int ii = r_ia[i]; ii < r_ia[i + 1]; ii++ ) { int j = r_ja[ii]; used_col[j]++; if ( used_col[j] > 1 ) { std::cout << "column " << j << " is present at least " << used_col[j] << " times" << std::endl; } if ( j < 0 || j >= aggregates.size() ) { std::cout << "Error: j out of bounds, j = " << j << " and numRows = " << aggregates.size() << std::endl; } else if ( agg[j] != i ) { std::cout << "Error: agg[" << j << "] = " << agg[j] << " != " << i << std::endl; } } } std::cout << "assert restriction done" << std::endl; } template<class T_Config> void MultiPairwiseSelectorBase<T_Config>::setAggregates(Matrix<T_Config> &A, IVector &aggregates, IVector &aggregates_global, int &num_aggregates) { if (A.get_block_dimx() == A.get_block_dimy()) { //ghost level matrix. this is a probably a weight matrix Matrix<TConfig> ghostA; ghostA.values.resize(0); //prolongation and restriction operator. 
this is only needed in when LowDegCoarseAGenerator is used IVector R_row_offsets; IVector R_col_indices; //holds the size of each aggregate IVector sizes; sizes.resize(0); //aggregates for ghost level IVector aggregates_current; IVector aggregates_global_current; bool aggregates_initialized = true; if (aggregates.size() == 0) { aggregates_initialized = false; if (!A.is_matrix_singleGPU()) { aggregates.resize(A.manager->halo_offset(A.manager->num_neighbors())); } else { aggregates.resize(A.get_num_rows()); } } //for mergeAggregates kernel const int threads_per_block = 256; const int num_blocks = min( AMGX_GRID_MAX_SIZE, (A.get_num_rows() - 1) / threads_per_block + 1 ); cudaStream_t stream = thrust::global_thread_handle::get_stream(); //initialize and prepare weight matrix Matrix<TConfig> w; w.set_initialized(0); w.addProps(CSR); w.delProps(COO); w.setColsReorderedByColor(false); w.resize( 0, 0, 0, 1, 1, true ); //empty scalar 0x0 matrix w.values.resize(0); //matrix resize sets the values array to nnz+1 for no apparent reason IndexType targetSize = 1; //initialize coarse A generator CoarseAGenerator<TConfig> *cag; const bool use_restriction = true; const bool shrink_ghost_level = false; cag = new LowDegCoarseAGenerator<TConfig>(mCfg, mCfg_scope); // This will make coarseAGenerator to allocate more memory inside of galerkin ghostA.manager = new DistributedManager<TConfig>(); w.manager = new DistributedManager<TConfig>(); Matrix<TConfig> *curA = &A; //foreach pass do: // 1. build aggregates and weights // 2. create weight matrix (in full_ghost_level mode this is the input matrix or the last ghostlevel matrix) // 3. if in full ghost level mode, build R // 4. compute next level for (int current_pass = 1; true; current_pass++) { const IndexType numRows = curA->get_num_rows(); const IndexType nnz = curA->get_num_nz(); targetSize *= 2; if ( full_ghost_level ) { w.values.resize(0); //compute weights from curA } else { w.values.swap( ghostA.values ); //use the weights computed with the galerkin operator (this will do nothing in the first pass, both values have size 0) } // create aggregates from correct input matrix setAggregates_common_sqblocks( *curA, aggregates_current, aggregates_global_current, num_aggregates, w.values, sizes ); if ( current_pass > 1 ) { //merge original aggregate with the newly created ones mergeAggregates <<< num_blocks, threads_per_block, 0, stream >>>( aggregates.raw(), aggregates_current.raw(), A.get_num_rows(), numRows, num_aggregates ); cudaCheckError(); //mergeAggregates<<< num_blocks, threads_per_block, 0, stream >>>( aggregates_global.raw(), aggregates_global_current.raw(), A.get_num_rows() ); //cudaCheckError(); } //try to free memory if ( full_ghost_level ) { //then we don't need to save the weights, only for original level to do post processing w.values.resize(0); } else { //save edge weights for original level later //in that case we can throw away the values of ghostA as we will use the values to compute the next ghost level ghostA.values.resize(0); } // this is the break condition for the loop if ( current_pass >= aggregation_passes || num_aggregates <= 1 || num_aggregates == numRows) { //this means, aggregates has not been initialized yet if ( !aggregates_initialized ) { aggregates.swap( aggregates_current ); } if ( !aggregates_initialized ) { aggregates_global.swap( aggregates_global_current ); } cudaStreamSynchronize( stream ); cudaCheckError(); break; } //prepare A to be corrupted curA->set_initialized(0); //swap in ia, ja from curA w.row_offsets.swap( 
curA->row_offsets ); w.col_indices.swap( curA->col_indices ); if ( full_ghost_level ) { if ( shrink_ghost_level && curA->get_block_dimx() > 1) { //set w to correct size w.values.resize( nnz ); //define grid and offsets const int num_blocks_inter = min( (int)AMGX_GRID_MAX_SIZE, (int)(nnz - 1) / threads_per_block + 1 ); const int sq_blocksize = A.get_block_dimx() * A.get_block_dimy(); const int index_offset = A.get_block_dimy() * m_aggregation_edge_weight_component + m_aggregation_edge_weight_component; //do the interleaved copy gatherValuesInterleaved <<< num_blocks_inter, threads_per_block, 0, stream>>>( A.values.raw(), w.values.raw(), nnz, sq_blocksize, index_offset ); cudaStreamSynchronize( stream ); cudaCheckError(); } else { w.values.swap( curA->values ); } } w.diag.swap( curA->diag ); //resize to inform the matrix of its new size if ( full_ghost_level && !shrink_ghost_level ) { w.set_block_dimx( A.get_block_dimx() ); w.set_block_dimy( A.get_block_dimy() ); } else { w.set_block_dimx( 1 ); w.set_block_dimy( 1 ); } w.set_num_rows( numRows ); w.set_num_cols( numRows ); w.set_num_nz( nnz ); w.set_allow_recompute_diag( false ); if ( curA->hasProps( DIAG ) ) { w.addProps( DIAG ); } //ready to use w.set_initialized(1); //compute restriction operator if ( use_restriction ) { IVector R_row_indices(aggregates_current); R_row_offsets.resize(num_aggregates + 2); R_col_indices.resize(numRows); thrust::sequence(R_col_indices.begin(), R_col_indices.end()); cudaCheckError(); thrust::sort_by_key(R_row_indices.begin(), R_row_indices.end(), R_col_indices.begin()); cudaCheckError(); cusp::detail::indices_to_offsets(R_row_indices, R_row_offsets); cudaCheckError(); //delete last row, which holds the pseudo aggregate R_row_offsets.resize( num_aggregates + 1); R_col_indices.resize( R_row_offsets[num_aggregates] ); } // 3. compute galerkin ghost level if ( ghost_offdiag_limit == 0 ) { //compute full galerkin cag->computeAOperator(w, ghostA, aggregates_current, R_row_offsets, R_col_indices, num_aggregates); } else { //compute incomplete galerkin computeIncompleteGalerkin(w, ghostA, aggregates_current, R_row_offsets, R_col_indices, num_aggregates); } //from now on w will be destroyed again. w.set_initialized(0); //repair the original A matrix. its ia and ja are in w if ( current_pass == 1 ) { //swap back w.row_offsets.swap( A.row_offsets ); w.col_indices.swap( A.col_indices ); //only in that case we have swapped the values if ( full_ghost_level && !shrink_ghost_level ) { w.values.swap( A.values ); } //save the edge weights of the original level A.diag.swap( w.diag ); A.set_initialized(1); //A is repaired now //save the first aggregates into the original aggregate vector so we can merge them later aggregates.swap( aggregates_current ); aggregates_global.swap( aggregates_global_current ); aggregates_initialized = true; curA = &ghostA; } } delete cag; } else { FatalError("Unsupported block size for MultiPairwise", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } } // ------------------------- // Explict instantiations // ------------------------- #define AMGX_CASE_LINE(CASE) template class MultiPairwiseSelectorBase<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class MultiPairwiseSelector<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } } }
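// A minimal host-side sketch of the index composition that the mergeAggregates kernel above
// performs: the fine-to-intermediate map agg1 is rewritten in place into a fine-to-coarse map
// by looking each entry up in the intermediate-to-coarse map agg2; entries equal to the
// intermediate row count (used as the pseudo/halo aggregate id) are remapped to the coarse
// pseudo-aggregate id. Function and parameter names below are illustrative, not AMGX API.
static inline void merge_aggregates_host(int *agg1, const int *agg2,
                                         int num_fine, int num_intermediate, int num_coarse)
{
    for (int i = 0; i < num_fine; ++i)
    {
        if (agg1[i] == num_intermediate) { agg1[i] = num_coarse; }    // pseudo aggregate stays pseudo
        else                             { agg1[i] = agg2[agg1[i]]; } // compose the two maps
    }
}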
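// The dQuantize routine below maps a float in [-1, 1] to an index into a sorted 256-entry
// quantization code held in shared memory. It binary-searches the code (pivot starts at 127,
// step sizes 64, 32, ..., 1) and then rounds to one of the two bracketing code values.
// Worked example (code values made up for illustration): if the search brackets x = 0.17
// between code values 0.10 and 0.20, deterministic mode (STOCHASTIC = 0) compares x against
// the midpoint 0.15 and returns the index of 0.20; stochastic mode (STOCHASTIC = 1) returns
// the upper index with probability 1 - (0.20 - 0.17)/(0.20 - 0.10) = 0.7, i.e. proportional
// to how close x is to that code value, which makes the rounding unbiased in expectation.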
#include <kernels.cuh> #include <cub/block/block_radix_sort.cuh> #include <cub/warp/warp_reduce.cuh> #include <cub/block/block_load.cuh> #include <cub/block/block_discontinuity.cuh> #include <cub/block/block_store.cuh> #include <cub/block/block_reduce.cuh> #include <cub/cub.cuh> #include <math_constants.h> #define HLF_MAX 65504 #define TH 1024 #define NUM 4 #define NUM_BLOCK 4096 // source: https://stackoverflow.com/questions/17399119/how-do-i-use-atomicmax-on-floating-point-values-in-cuda __device__ float atomicMax(float* address, float val) { int* address_as_i = reinterpret_cast<int*>(address); int old = *address_as_i, assumed; do { assumed = old; old = atomicCAS( reinterpret_cast<int*>(address), assumed, __float_as_int(fmaxf(val, __int_as_float(assumed)))); } while (assumed != old); return __int_as_float(old); } __device__ float atomicMin(float* address, float val) { int* address_as_i = reinterpret_cast<int*>(address); int old = *address_as_i, assumed; do { assumed = old; old = atomicCAS( reinterpret_cast<int*>(address), assumed, __float_as_int(fminf(val, __int_as_float(assumed)))); } while (assumed != old); return __int_as_float(old); } template <int STOCHASTIC> __device__ unsigned char dQuantize(float* smem_code, const float rand, float x) { int pivot = 127; int upper_pivot = 255; int lower_pivot = 0; float lower = -1.0f; float upper = 1.0f; float val = smem_code[pivot]; // i>>=1 = {32, 16, 8, 4, 2, 1} for(int i = 64; i > 0; i>>=1) { if(x > val) { lower_pivot = pivot; lower = val; pivot+=i; } else { upper_pivot = pivot; upper = val; pivot-=i; } val = smem_code[pivot]; } if(upper_pivot == 255) upper = smem_code[upper_pivot]; if(lower_pivot == 0) lower = smem_code[lower_pivot]; if(!STOCHASTIC) { if(x > val) { float midpoint = (upper+val)*0.5f; if(x > midpoint) { return upper_pivot; } else return pivot; } else { float midpoint = (lower+val)*0.5f; if(x < midpoint) return lower_pivot; else return pivot; } } else { if(x > val) { float dist_to_upper = fabsf(upper-x); float dist_full = upper-val; if(rand >= dist_to_upper/dist_full) return upper_pivot; else return pivot; } else { float dist_to_lower = fabsf(lower-x); float dist_full = val-lower; if(rand >= dist_to_lower/dist_full) return lower_pivot; else return pivot; } } } template <int SIGNED> __device__ __forceinline__ unsigned char quantize_2D(float *__restrict__ quadrants, float *__restrict__ const smem_code, float x) { int pivot = 127; int upper_pivot = 255; int lower_pivot = 0; float lower = SIGNED ? -1.0f : 0.0f; float upper = 1.0f; float midpoint; float val = quadrants[1]; int local_pivot = 1; int offset = 1; // i>>=1 = {32, 16, 8, 4, 2, 1} for(int i = 64; i > 0; i>>=1) { if(x > val) { lower_pivot = pivot; lower = val; pivot+=i; //val = i == 64 ? quadrants[2] : smem_code[pivot]; local_pivot += offset; } else { upper_pivot = pivot; upper = val; pivot-=i; //val = i == 64 ? quadrants[0] : smem_code[pivot]; local_pivot -= offset; } val = i >= 64 ? 
quadrants[local_pivot] : smem_code[pivot]; offset -= 1; } if(x > val) { midpoint = (upper+val)*0.5f; if(x > midpoint) return upper_pivot; else return pivot; } else { midpoint = (lower+val)*0.5f; if(x < midpoint) return lower_pivot; else return pivot; } } template <int SIGNED> __device__ __forceinline__ unsigned char quantize_quadrant(int QUADRANT, float *__restrict__ const smem_code, float x, float lower, float midpoint, float upper) { int lower_pivot = QUADRANT*16-1 - 0; int pivot = QUADRANT*16-1 + 16; int upper_pivot = QUADRANT*16-1 + 31; float val = midpoint; // i>>=1 = {32, 16, 8, 4, 2, 1} for(int i = 16; i > 0; i>>=1) { if(x > val) { lower_pivot = pivot; lower = val; pivot+=i; } else { upper_pivot = pivot; upper = val; pivot-=i; } val = smem_code[pivot]; } if(x > val) { midpoint = (upper+val)*0.5f; if(x > midpoint) return upper_pivot; else return pivot; } else { midpoint = (lower+val)*0.5f; if(x < midpoint) return lower_pivot; else return pivot; } } __global__ void kHistogramScatterAdd2D(float* histogram, int *index1, int *index2, float *src, const int maxidx1, const int n) { const int tid = threadIdx.x + (blockDim.x*blockIdx.x); const int numThreads = blockDim.x*gridDim.x; for(int i = tid; i < n; i+=numThreads) { int idx = (index1[i]*maxidx1) + index2[i]; atomicAdd(&histogram[idx], src[i]); } } template<typename T, int BLOCK_SIZE, int NUM_MAX> __global__ void kCompressMax(T * __restrict__ const A, T* out, unsigned char* out_idx, const int n) { typedef cub::WarpReduce<T> WarpReduce; __shared__ typename WarpReduce::TempStorage temp_storage; typedef cub::BlockLoad<T, BLOCK_SIZE/8 , 8, cub::BLOCK_LOAD_WARP_TRANSPOSE> LoadT; __shared__ typename LoadT::TempStorage loadt; const int warp_idx = threadIdx.x/32; const int valid_items = n - (blockIdx.x*BLOCK_SIZE) > BLOCK_SIZE ? BLOCK_SIZE : n - (blockIdx.x*BLOCK_SIZE); // BLOCK_SIZE/32 == number of warps __shared__ int smem_max_indices[8*BLOCK_SIZE/32]; __shared__ float smem_max_values[8*BLOCK_SIZE/32]; T values[8]; T max1 = -64000.0f; T max2 = -64000.0f; int max_idx1 = -1; int max_idx2 = -1; int sign1 = -1; int sign2 = -1; // 1. load 8 values per thread // 2. compute 2-max in registers (64 max per warp) // 3. do warp reduction + broadcast back // 4. Up-shift maxed value, write index into shared memory, replace with 2nd largest // 5. Repeat (3) 8 times for top 8 values in 256 // 6. store with byte index LoadT(loadt).Load(&(A[(blockIdx.x*BLOCK_SIZE)]), values, valid_items, (T)0.0f); #pragma unroll 8 for(int i = 0; i < 8; i++) { T absval = fabsf(values[i]); if(absval > max1) { max1 = values[i]; sign1 = signbit(values[i]); max_idx1 = 8*threadIdx.x + i; } else if(absval > max2) { max2 = values[i]; sign2 = signbit(values[i]); max_idx2 = 8*threadIdx.x + i; } } float warp_max; for(int i = 0; i < 8; i++) { // 3. do warp reduction + broadcast back warp_max = WarpReduce(temp_storage).Reduce(max1, cub::Max()); warp_max = cub::ShuffleIndex<32>(warp_max, 0, 0xffffffff); // 4. Up-shift maxed value, write index into shared memory, replace with 2nd largest if(warp_max == max1) { smem_max_values[warp_idx*8 + i] = sign1 != 0 ? 
-max1 : max1; smem_max_indices[warp_idx*8 + i] = max_idx1; sign1 = sign2; max1 = max2; max_idx1 = max_idx2; max2 = -64000.0f; } __syncwarp(); } if(threadIdx.x % 32 < 8) { // offset: 8 values per 256 input values // int offset = BLOCK_SIZE*blockIdx.x*BLOCK_SIZE/32*8; } } #define THREADS_ESTIMATE 512 #define NUM_ESTIMATE 8 #define BLOCK_ESTIMATE 4096 template<typename T> __launch_bounds__(THREADS_ESTIMATE, 1) __global__ void kEstimateQuantiles(T *__restrict__ const A, float *code, const float offset, const T max_val, const int n) { const int n_full = (BLOCK_ESTIMATE*(n/BLOCK_ESTIMATE)) + (n % BLOCK_ESTIMATE == 0 ? 0 : BLOCK_ESTIMATE); int valid_items = (blockIdx.x+1 == gridDim.x) ? n - (blockIdx.x*BLOCK_ESTIMATE) : BLOCK_ESTIMATE; const int base_idx = (blockIdx.x * BLOCK_ESTIMATE); const float reciprocal_num_blocks = 1.0f/(n < 4096 ? 1.0f : (n/BLOCK_ESTIMATE)); T vals[NUM_ESTIMATE]; typedef cub::BlockRadixSort<T, THREADS_ESTIMATE, NUM_ESTIMATE, cub::NullType, 4, true, cub::BLOCK_SCAN_RAKING> BlockRadixSort; typedef cub::BlockLoad<T, THREADS_ESTIMATE, NUM_ESTIMATE, cub::BLOCK_LOAD_WARP_TRANSPOSE> LoadFloat; __shared__ union { typename LoadFloat::TempStorage loadf; typename BlockRadixSort::TempStorage sort; int smem_qidx[BLOCK_ESTIMATE]; } temp_storage; for (unsigned int i = base_idx; i < n_full; i += gridDim.x*BLOCK_ESTIMATE) { valid_items = n - i > BLOCK_ESTIMATE ? BLOCK_ESTIMATE : n - i; // do not process half-blocks if(valid_items < BLOCK_ESTIMATE && n > BLOCK_ESTIMATE){ continue; } #pragma unroll 4 for(int j = 0; j < NUM_ESTIMATE; j++) vals[j] = max_val; __syncthreads(); LoadFloat(temp_storage.loadf).Load(&(A[i]), vals, valid_items); #pragma unroll 4 for(int j = 0; j < NUM_ESTIMATE; j++) vals[j] = ((float)vals[j]) * reciprocal_num_blocks; __syncthreads(); // sort into striped pattern to mitigate bank conflicts // striped pattern index for thread 0 [0, 1024, 2048, 3096] // striped pattern index for thread 1 [1, 1025, 2049, 3097] BlockRadixSort(temp_storage.sort).SortBlockedToStriped(vals); __syncthreads(); for(int j = threadIdx.x; j < BLOCK_ESTIMATE; j+=blockDim.x) temp_storage.smem_qidx[j] = -1; if(threadIdx.x < 256) { float q_interval = (1.0f-(2.0f*offset))/255.0f; int local_idx = round(((offset+(threadIdx.x*q_interval))*(valid_items-1))); temp_storage.smem_qidx[local_idx] = threadIdx.x; } __syncthreads(); for(int i = threadIdx.x; i < BLOCK_ESTIMATE; i+=blockDim.x) { if(temp_storage.smem_qidx[i] != -1) atomicAdd(&code[temp_storage.smem_qidx[i]], vals[i/THREADS_ESTIMATE]); } } } __launch_bounds__(TH, 4) __global__ void kQuantize(float * code, float * __restrict__ const A, unsigned char *out, const int n) { const int n_full = (NUM_BLOCK*(n/NUM_BLOCK)) + (n % NUM_BLOCK == 0 ? 0 : NUM_BLOCK); int valid_items = (blockIdx.x+1 == gridDim.x) ? 
n - (blockIdx.x*NUM_BLOCK) : NUM_BLOCK; const int base_idx = (blockIdx.x * NUM_BLOCK); float vals[NUM]; unsigned char qvals[NUM]; //const int lane_id = threadIdx.x % 2; typedef cub::BlockLoad<float, TH, NUM, cub::BLOCK_LOAD_WARP_TRANSPOSE> LoadFloat; typedef cub::BlockStore<unsigned char, TH, NUM, cub::BLOCK_STORE_WARP_TRANSPOSE> StoreChar; __shared__ typename LoadFloat::TempStorage loadf; __shared__ typename StoreChar::TempStorage storec; __shared__ float smem_code[256]; //__shared__ float smem_code[2][257]; if(threadIdx.x < 256) { smem_code[threadIdx.x] = code[threadIdx.x]; //smem_code[0][threadIdx.x] = code[threadIdx.x]; //smem_code[1][threadIdx.x] = smem_code[0][threadIdx.x]; } for (unsigned int i = base_idx; i < n_full; i += gridDim.x*NUM_BLOCK) { // number of values already processed in blocks + // number of values already processed in this block + // rand_offset % mod value valid_items = n - i > NUM_BLOCK ? NUM_BLOCK : n - i; __syncthreads(); LoadFloat(loadf).Load(&(A[i]), vals, valid_items); #pragma unroll 4 for(int j = 0; j < NUM; j++) qvals[j] = dQuantize<0>(smem_code, 0.0f, vals[j]); __syncthreads(); StoreChar(storec).Store(&(out[i]), qvals, valid_items); } } template<typename T, int BLOCK_SIZE, int NUM_PER_TH, int STOCHASTIC> __launch_bounds__(TH, 4) __global__ void kQuantizeBlockwise(float * code, T * __restrict__ const A, float *absmax, unsigned char *out, float * __restrict__ const rand, const int rand_offset, const int n) { const int n_full = gridDim.x * BLOCK_SIZE; int valid_items = 0; const int base_idx = (blockIdx.x * BLOCK_SIZE); T vals[NUM]; float rand_vals[NUM]; unsigned char qvals[NUM]; //float local_abs_max = -FLT_MAX; float local_abs_max = 0.0f; int local_rand_idx = 0; typedef cub::BlockLoad<T, BLOCK_SIZE/NUM_PER_TH, NUM_PER_TH, cub::BLOCK_LOAD_WARP_TRANSPOSE> LoadT; typedef cub::BlockStore<unsigned char, BLOCK_SIZE/NUM_PER_TH, NUM_PER_TH, cub::BLOCK_STORE_WARP_TRANSPOSE> StoreChar; typedef cub::BlockReduce<float, BLOCK_SIZE/NUM_PER_TH> BlockReduce; typedef cub::BlockLoad<float, BLOCK_SIZE/NUM_PER_TH, NUM_PER_TH, cub::BLOCK_LOAD_WARP_TRANSPOSE> LoadFloat; __shared__ typename LoadT::TempStorage loadt; __shared__ typename LoadFloat::TempStorage loadf; __shared__ typename StoreChar::TempStorage storec; __shared__ typename BlockReduce::TempStorage reduce; __shared__ float smem_code[256]; __shared__ float smem_absmax_value[1]; if(threadIdx.x < 256) smem_code[threadIdx.x] = code[threadIdx.x]; for (unsigned int i = base_idx; i < n_full; i += gridDim.x*BLOCK_SIZE) { valid_items = n - i > BLOCK_SIZE ? BLOCK_SIZE : n - i; local_abs_max = -FLT_MAX; __syncthreads(); LoadT(loadt).Load(&(A[i]), vals, valid_items, (T)0.0f); // 1. compute local max // 2. broadcast local max // 3. 
normalize inputs and quantize #pragma unroll NUM_PER_TH for(int j = 0; j < NUM_PER_TH; j++) local_abs_max = fmaxf(local_abs_max, fabsf((float)vals[j])); local_abs_max = BlockReduce(reduce).Reduce(local_abs_max, cub::Max(), valid_items); if(threadIdx.x == 0) smem_absmax_value[0] = local_abs_max; __syncthreads(); if(threadIdx.x == 0) absmax[i/BLOCK_SIZE] = local_abs_max; else local_abs_max = smem_absmax_value[0]; __syncwarp(); local_abs_max = 1.0f/local_abs_max; if(STOCHASTIC) { local_rand_idx = ((blockIdx.x*NUM_BLOCK) + (threadIdx.x*NUM) + rand_offset) % (1024-4); LoadFloat(loadf).Load(&rand[local_rand_idx], rand_vals, BLOCK_SIZE, 0); } #pragma unroll NUM_PER_TH for(int j = 0; j < NUM_PER_TH; j++) { if(!STOCHASTIC) qvals[j] = dQuantize<0>(smem_code, 0.0f, ((float)vals[j])*local_abs_max); else qvals[j] = dQuantize<1>(smem_code, rand_vals[j], ((float)vals[j])*local_abs_max); } __syncthreads(); StoreChar(storec).Store(&(out[i]), qvals, valid_items); } } template<typename T, int BLOCK_SIZE, int THREADS, int NUM_PER_TH> __global__ void kDequantizeBlockwise(float *code, unsigned char * __restrict__ const A, float * __restrict__ const absmax, T *out, const int n) { const int n_full = gridDim.x * BLOCK_SIZE; int valid_items = 0; const int base_idx = (blockIdx.x * BLOCK_SIZE); T vals[NUM]; unsigned char qvals[NUM]; float local_abs_max = -FLT_MAX; typedef cub::BlockLoad<unsigned char, THREADS, NUM_PER_TH, cub::BLOCK_LOAD_WARP_TRANSPOSE> LoadChar; typedef cub::BlockStore<T, THREADS, NUM_PER_TH, cub::BLOCK_STORE_WARP_TRANSPOSE> StoreT; __shared__ typename LoadChar::TempStorage loadchar; __shared__ typename StoreT::TempStorage storet; __shared__ float smem_code[256]; if(threadIdx.x < 256) smem_code[threadIdx.x] = code[threadIdx.x]; for (unsigned int i = base_idx; i < n_full; i += gridDim.x*BLOCK_SIZE) { valid_items = n - i > BLOCK_SIZE ? BLOCK_SIZE : n - i; local_abs_max = absmax[i/BLOCK_SIZE]; __syncthreads(); LoadChar(loadchar).Load(&(A[i]), qvals, valid_items, 128); #pragma unroll NUM_PER_TH for(int j = 0; j < NUM_PER_TH; j++) vals[j] = smem_code[qvals[j]]*local_abs_max; __syncthreads(); StoreT(storet).Store(&(out[i]), vals, valid_items); } } __global__ void kDequantize(float *code, unsigned char *A, float *out, const int n) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; __shared__ float smem_code[256]; if(threadIdx.x < 256) { smem_code[threadIdx.x] = code[threadIdx.x]; } __syncthreads(); for (int i = idx;i < n; i += numThreads) { out[i] = smem_code[A[i]]; } } template<typename T, int OPTIMIZER, int BLOCK_SIZE, int NUM_VALS> __launch_bounds__(BLOCK_SIZE/NUM_VALS, 1) __global__ void kPreconditionOptimizer32bit2State(T* g, T* p, float* state1, float* state2, float *unorm, const float beta1, const float beta2, const float eps, const float weight_decay, const int step, const float lr, const float gnorm_scale, const int n) { const int n_full = (BLOCK_SIZE*(n/BLOCK_SIZE)) + (n % BLOCK_SIZE == 0 ? 
0 : BLOCK_SIZE); const int base_idx = (blockIdx.x * blockDim.x * NUM_VALS); int valid_items = 0; T g_vals[NUM_VALS]; float s1_vals[NUM_VALS]; float s2_vals[NUM_VALS]; const float correction1 = 1.0f/(1.0f - powf(beta1, step)); const float correction2 = 1.0f/(1.0f - powf(beta2, step)); typedef cub::BlockLoad<T, BLOCK_SIZE/NUM_VALS, NUM_VALS, cub::BLOCK_LOAD_WARP_TRANSPOSE> Load; typedef cub::BlockLoad<float, BLOCK_SIZE/NUM_VALS, NUM_VALS, cub::BLOCK_LOAD_WARP_TRANSPOSE> LoadFloat; typedef cub::BlockReduce<float, BLOCK_SIZE/NUM_VALS> BlockReduce; __shared__ union { typename Load::TempStorage load; typename LoadFloat::TempStorage loadf; typename BlockReduce::TempStorage reduce; } temp_storage; for (unsigned int i = base_idx; i < n_full; i += gridDim.x*BLOCK_SIZE) { valid_items = n - i >= (BLOCK_SIZE) ? (BLOCK_SIZE) : n - i; __syncthreads(); Load(temp_storage.load).Load(&(g[i]), g_vals, valid_items, 0.0f); __syncthreads(); LoadFloat(temp_storage.loadf).Load(&(state1[i]), s1_vals, valid_items, 0.0f); __syncthreads(); LoadFloat(temp_storage.loadf).Load(&(state2[i]), s2_vals, valid_items, 0.0f); # pragma unroll NUM_VALS for(unsigned int j = 0; j < NUM_VALS; j++) g_vals[j] = gnorm_scale*((float)g_vals[j]); # pragma unroll NUM_VALS for(unsigned int j = 0; j < NUM_VALS; j++) { switch(OPTIMIZER) { case ADAM: s1_vals[j] = s1_vals[j]*beta1 + ((1.0f -beta1)*((float)g_vals[j])); s2_vals[j] = s2_vals[j]*beta2 + ((1.0f -beta2)*(((float)g_vals[j])*((float)g_vals[j]))); s1_vals[j] *= correction1; s2_vals[j] *= correction2; s1_vals[j] = s1_vals[j]/(sqrtf(s2_vals[j])+eps); // update s1_vals[j] *= s1_vals[j]; // update l2 norm (update*update) break; } } # pragma unroll NUM_VALS-1 for(unsigned int j = 1; j < NUM_VALS; j++) s1_vals[0] += s1_vals[j]; __syncthreads(); s1_vals[0] = BlockReduce(temp_storage.reduce).Sum(s1_vals[0]); if(threadIdx.x == 0) atomicAdd(&unorm[0], s1_vals[0]); __syncwarp(); } } #define NUM_PER_THREAD 4 template<typename T, int OPTIMIZER> __launch_bounds__(TH, 1) __global__ void kOptimizer32bit2State(T* g, T* p, float* state1, float* state2, float *unorm, const float max_unorm, const float param_norm, const float beta1, const float beta2, const float eps, const float weight_decay, const int step, const float lr, const float gnorm_scale, const bool skip_zeros, const int n) { const int n_full = ((TH*NUM_PER_THREAD)*(n/(TH*NUM_PER_THREAD))) + (n % (TH*NUM_PER_THREAD) == 0 ? 0 : (TH*NUM_PER_THREAD)); const int base_idx = (blockIdx.x * blockDim.x * NUM_PER_THREAD); int valid_items = 0; float update_scale = 0.0f; T g_vals[NUM_PER_THREAD]; T p_vals[NUM_PER_THREAD]; float s1_vals[NUM_PER_THREAD]; float s2_vals[NUM_PER_THREAD]; const float correction1 = 1.0f - powf(beta1, step); const float correction2 = sqrtf(1.0f - powf(beta2, step)); const float step_size = -lr*correction2/correction1; if(max_unorm > 0.0f) { update_scale = max_unorm > 0.0f ? 
sqrtf(unorm[0]) : 1.0f; if(update_scale > max_unorm*param_norm){ update_scale = (max_unorm*param_norm)/update_scale; } else{ update_scale = 1.0f; } } else{ update_scale = 1.0f; } typedef cub::BlockLoad<T, TH, NUM_PER_THREAD, cub::BLOCK_LOAD_WARP_TRANSPOSE> Load; typedef cub::BlockStore<T, TH, NUM_PER_THREAD, cub::BLOCK_STORE_WARP_TRANSPOSE> Store; typedef cub::BlockLoad<float, TH, NUM_PER_THREAD, cub::BLOCK_LOAD_WARP_TRANSPOSE> LoadFloat; typedef cub::BlockStore<float, TH, NUM_PER_THREAD, cub::BLOCK_STORE_WARP_TRANSPOSE> StoreFloat; __shared__ union { typename Load::TempStorage load; typename Store::TempStorage store; typename LoadFloat::TempStorage loadf; typename StoreFloat::TempStorage storef; } temp_storage; for (unsigned int i = base_idx; i < n_full; i += gridDim.x*TH*NUM_PER_THREAD) { valid_items = n - i >= (TH*NUM_PER_THREAD) ? (TH*NUM_PER_THREAD) : n - i; __syncthreads(); Load(temp_storage.load).Load(&(g[i]), g_vals, valid_items); __syncthreads(); LoadFloat(temp_storage.loadf).Load(&(state1[i]), s1_vals, valid_items); __syncthreads(); LoadFloat(temp_storage.loadf).Load(&(state2[i]), s2_vals, valid_items); __syncthreads(); Load(temp_storage.load).Load(&(p[i]), p_vals, valid_items); # pragma unroll 4 for(unsigned int j = 0; j < NUM_PER_THREAD; j++) g_vals[j] = gnorm_scale*((float)g_vals[j]); # pragma unroll 4 for(unsigned int j = 0; j < NUM_PER_THREAD; j++) { switch(OPTIMIZER) { case ADAM: if(!skip_zeros || (skip_zeros && ((float)g_vals[j] != 0.0f))) { s1_vals[j] = s1_vals[j]*beta1 + ((1.0f -beta1)*((float)g_vals[j])); s2_vals[j] = s2_vals[j]*beta2 + ((1.0f -beta2)*(((float)g_vals[j])*((float)g_vals[j]))); p_vals[j] = ((float)p_vals[j]) + (update_scale*step_size*(s1_vals[j]/(sqrtf(s2_vals[j])+(eps*correction2)))); if(weight_decay > 0.0f) p_vals[j] = ((float)p_vals[j])*(1.0f-(lr*weight_decay)); } break; } } __syncthreads(); Store(temp_storage.store).Store(&(p[i]), p_vals, valid_items); __syncthreads(); StoreFloat(temp_storage.storef).Store(&(state1[i]), s1_vals, valid_items); __syncthreads(); StoreFloat(temp_storage.storef).Store(&(state2[i]), s2_vals, valid_items); } } template<typename T, int OPTIMIZER, int BLOCK_SIZE, int NUM_VALS> __launch_bounds__(BLOCK_SIZE/NUM_VALS, 1) __global__ void kPreconditionOptimizer32bit1State(T* g, T* p, float* state1, float *unorm, const float beta1, const float eps, const float weight_decay, const int step, const float lr, const float gnorm_scale, const int n) { const int n_full = (BLOCK_SIZE*(n/BLOCK_SIZE)) + (n % BLOCK_SIZE == 0 ? 0 : BLOCK_SIZE); const int base_idx = (blockIdx.x * blockDim.x * NUM_VALS); int valid_items = 0; T g_vals[NUM_VALS]; float s1_vals[NUM_VALS]; typedef cub::BlockLoad<T, BLOCK_SIZE/NUM_VALS, NUM_VALS, cub::BLOCK_LOAD_WARP_TRANSPOSE> Load; typedef cub::BlockLoad<float, BLOCK_SIZE/NUM_VALS, NUM_VALS, cub::BLOCK_LOAD_WARP_TRANSPOSE> LoadFloat; typedef cub::BlockReduce<float, BLOCK_SIZE/NUM_VALS> BlockReduce; __shared__ union { typename Load::TempStorage load; typename LoadFloat::TempStorage loadf; typename BlockReduce::TempStorage reduce; } temp_storage; for (unsigned int i = base_idx; i < n_full; i += gridDim.x*BLOCK_SIZE) { valid_items = n - i >= (BLOCK_SIZE) ? 
(BLOCK_SIZE) : n - i; __syncthreads(); Load(temp_storage.load).Load(&(g[i]), g_vals, valid_items, 0.0f); __syncthreads(); LoadFloat(temp_storage.loadf).Load(&(state1[i]), s1_vals, valid_items, 0.0f); # pragma unroll NUM_VALS for(unsigned int j = 0; j < NUM_VALS; j++) g_vals[j] = gnorm_scale*((float)g_vals[j]); # pragma unroll NUM_VALS for(unsigned int j = 0; j < NUM_VALS; j++) { switch(OPTIMIZER) { case MOMENTUM: if(step == 1) s1_vals[j] = (float)g_vals[j]; // state update else s1_vals[j] = s1_vals[j]*beta1 + ((float)g_vals[j]); // state update s1_vals[j] = s1_vals[j]*s1_vals[j]; // update norm break; case RMSPROP: s1_vals[j] = s1_vals[j]*beta1 + ((1.0f-beta1)*((float)g_vals[j])*((float)g_vals[j])); // state update s1_vals[j] = __fdividef((float)g_vals[j],sqrtf(s1_vals[j])+eps); // update value s1_vals[j] = s1_vals[j]*s1_vals[j]; // update norm break; case ADAGRAD: s1_vals[j] = s1_vals[j] + ((float)g_vals[j])*((float)g_vals[j]); // state update s1_vals[j] = __fdividef((float)g_vals[j],sqrtf(s1_vals[j])+eps); // update value s1_vals[j] = s1_vals[j]*s1_vals[j]; // update norm break; } } # pragma unroll for(unsigned int j = 1; j < NUM_VALS; j++) s1_vals[0] += s1_vals[j]; __syncthreads(); s1_vals[0] = BlockReduce(temp_storage.reduce).Sum(s1_vals[0], valid_items); if(threadIdx.x == 0) atomicAdd(&unorm[0], s1_vals[0]); __syncwarp(); } } template<typename T, int OPTIMIZER> __launch_bounds__(TH, 1) __global__ void kOptimizer32bit1State(T *g, T *p, float *state1, float *unorm, const float max_unorm, const float param_norm, const float beta1, const float eps, const float weight_decay, const int step, const float lr, const float gnorm_scale, const bool skip_zeros, const int n) { const int n_full = ((TH*NUM_PER_THREAD)*(n/(TH*NUM_PER_THREAD))) + (n % (TH*NUM_PER_THREAD) == 0 ? 0 : (TH*NUM_PER_THREAD)); const int base_idx = (blockIdx.x * blockDim.x * NUM_PER_THREAD); int valid_items = 0; float update_scale = 0.0f; if(max_unorm > 0.0f) { update_scale = max_unorm > 0.0f ? sqrtf(unorm[0]) : 1.0f; if(update_scale > max_unorm*param_norm+eps){ update_scale = (max_unorm*param_norm+eps)/update_scale; } else{ update_scale = 1.0f; } } else{ update_scale = 1.0f; } T g_vals[NUM_PER_THREAD]; T p_vals[NUM_PER_THREAD]; float s1_vals[NUM_PER_THREAD]; typedef cub::BlockLoad<T, TH, NUM_PER_THREAD, cub::BLOCK_LOAD_WARP_TRANSPOSE> Load; typedef cub::BlockStore<T, TH, NUM_PER_THREAD, cub::BLOCK_STORE_WARP_TRANSPOSE> Store; typedef cub::BlockLoad<float, TH, NUM_PER_THREAD, cub::BLOCK_LOAD_WARP_TRANSPOSE> LoadFloat; typedef cub::BlockStore<float, TH, NUM_PER_THREAD, cub::BLOCK_STORE_WARP_TRANSPOSE> StoreFloat; __shared__ union { typename Load::TempStorage load; typename Store::TempStorage store; typename LoadFloat::TempStorage loadf; typename StoreFloat::TempStorage storef; } temp_storage; for (unsigned int i = base_idx; i < n_full; i += gridDim.x*TH*NUM_PER_THREAD) { valid_items = n - i >= (TH*NUM_PER_THREAD) ? 
(TH*NUM_PER_THREAD) : n - i; __syncthreads(); Load(temp_storage.load).Load(&(g[i]), g_vals, valid_items); __syncthreads(); LoadFloat(temp_storage.loadf).Load(&(state1[i]), s1_vals, valid_items); __syncthreads(); Load(temp_storage.load).Load(&(p[i]), p_vals, valid_items); # pragma unroll 4 for(unsigned int j = 0; j < NUM_PER_THREAD; j++) { g_vals[j] = gnorm_scale*((float)g_vals[j]); if(weight_decay > 0.0f) g_vals[j] = (float)g_vals[j] + (((float)p_vals[j])*weight_decay); } # pragma unroll 4 for(unsigned int j = 0; j < NUM_PER_THREAD; j++) { if(!skip_zeros || (skip_zeros && ((float)g_vals[j] != 0.0f))) { switch(OPTIMIZER) { case MOMENTUM: if(step == 1) s1_vals[j] = (float)g_vals[j]; else s1_vals[j] = s1_vals[j]*beta1 + ((float)g_vals[j]); p_vals[j] = ((float)p_vals[j]) + update_scale*(-lr*(s1_vals[j])); break; case RMSPROP: s1_vals[j] = s1_vals[j]*beta1 + ((1.0f-beta1)*((float)g_vals[j])*((float)g_vals[j])); p_vals[j] = ((float)p_vals[j]) - update_scale*(lr*__fdividef((float)g_vals[j],sqrtf((float)s1_vals[j])+eps)); break; case ADAGRAD: s1_vals[j] = s1_vals[j] + ((float)g_vals[j])*((float)g_vals[j]); p_vals[j] = ((float)p_vals[j]) - lr*__fdividef((float)g_vals[j],sqrtf((float)s1_vals[j])+eps); break; } } } __syncthreads(); Store(temp_storage.store).Store(&(p[i]), p_vals, valid_items); __syncthreads(); StoreFloat(temp_storage.storef).Store(&(state1[i]), s1_vals, valid_items); } } #define NUM8BIT 16 #define NUM_THREADS 256 #define NUM_PER_BLOCK 4096 template<typename T, int OPTIMIZER> __global__ void __launch_bounds__(NUM_THREADS, 2) kPreconditionOptimizerStatic8bit2State(T* p, T* __restrict__ const g, unsigned char*__restrict__ const state1, unsigned char* __restrict__ const state2, float *unorm, const float beta1, const float beta2, const float eps, const int step, float* __restrict__ const quantiles1, float* __restrict__ const quantiles2, float* max1, float* max2, float* new_max1, float* new_max2, const float gnorm_scale, const int n) { const int n_full = gridDim.x * NUM_PER_BLOCK; const int base_idx = (blockIdx.x * blockDim.x * NUM_PER_THREAD); int valid_items = n - (blockIdx.x*NUM_PER_BLOCK) > NUM_PER_BLOCK ? NUM_PER_BLOCK : n - (blockIdx.x*NUM_PER_BLOCK); float g_val = 0.0f; float local_max_s1 = -FLT_MAX; float local_max_s2 = -FLT_MAX; float local_unorm = 0.0f; float s2_vals[NUM8BIT]; float s1_vals[NUM8BIT]; T g_vals[NUM8BIT]; unsigned char m_c1[NUM8BIT]; unsigned char r_c2[NUM8BIT]; typedef cub::BlockLoad<T, NUM_THREADS, NUM8BIT, cub::BLOCK_LOAD_WARP_TRANSPOSE> LoadT; typedef cub::BlockLoad<unsigned char, NUM_THREADS, NUM8BIT, cub::BLOCK_LOAD_WARP_TRANSPOSE> LoadUInt8; typedef cub::BlockReduce<float, NUM_THREADS> BlockReduce; __shared__ union { typename LoadT::TempStorage loadh; typename LoadUInt8::TempStorage loadc; typename BlockReduce::TempStorage reduce; } temp_storage; __shared__ float smem_quantiles1[256]; __shared__ float smem_quantiles2[256]; if(threadIdx.x < 256) { smem_quantiles1[threadIdx.x] = quantiles1[threadIdx.x]; smem_quantiles2[threadIdx.x] = quantiles2[threadIdx.x]; } __syncthreads(); for (unsigned int i = base_idx; i < n_full; i += NUM_THREADS*gridDim.x*NUM8BIT) { valid_items = n - i >= (TH*NUM_PER_THREAD) ? 
(TH*NUM_PER_THREAD) : n - i; LoadT(temp_storage.loadh).Load(&(g[i]), g_vals, valid_items, (T)0.0f); __syncthreads(); LoadUInt8(temp_storage.loadc).Load(&(state1[i]), m_c1, valid_items, 128); __syncthreads(); LoadUInt8(temp_storage.loadc).Load(&(state2[i]), r_c2, valid_items, 128); __syncthreads(); #pragma unroll 16 for(int j = 0; j < NUM8BIT; j++) { g_val = g_vals[j]; g_val *= gnorm_scale; s1_vals[j] = smem_quantiles1[m_c1[j]]*max1[0]*beta1; s1_vals[j] += (1.0f-beta1)*g_val; local_max_s1 = fmaxf(local_max_s1, fabsf(s1_vals[j])); } #pragma unroll 16 for(int j = 0; j < NUM8BIT; j++) { g_val = g_vals[j]; g_val *= gnorm_scale; s2_vals[j] = smem_quantiles2[r_c2[j]]*max2[0]*beta2; s2_vals[j] += (1.0f-beta2)*g_val*g_val; local_max_s2 = fmaxf(local_max_s2, fabsf(s2_vals[j])); } if(unorm != NULL) { #pragma unroll 16 for(int j = 0; j < NUM8BIT; j++) { float correction1 = __fdividef(1.0f, 1.0f - powf(beta1, step)); float correction2 = __fdividef(1.0f, 1.0f - powf(beta2, step)); s1_vals[j] *= correction1; s2_vals[j] *= correction2; float update_val = s1_vals[j]/(sqrtf(s2_vals[j])+eps); // update local_unorm += update_val*update_val; } } } __syncthreads(); local_max_s1 = BlockReduce(temp_storage.reduce).Reduce(local_max_s1, cub::Max(), valid_items); __syncthreads(); local_max_s2 = BlockReduce(temp_storage.reduce).Reduce(local_max_s2, cub::Max(), valid_items); if(unorm != NULL) { __syncthreads(); local_unorm = BlockReduce(temp_storage.reduce).Reduce(local_unorm, cub::Sum(), valid_items); } if(threadIdx.x == 0) { atomicMax(&new_max1[0], local_max_s1); atomicMax(&new_max2[0], local_max_s2); if(unorm != NULL){ atomicAdd(&unorm[0], local_unorm); } } } #define NUM_PER_THREAD2 4 #define NUM_THREADS2 1024 #define NUM_PER_BLOCK2 4096 template<typename T, int OPTIMIZER> __global__ void __launch_bounds__(NUM_THREADS2, 1) kOptimizerStatic8bit2State(T* p, T* const g, unsigned char* state1, unsigned char* state2, const float *unorm, const float max_unorm, const float param_norm, \ const float beta1, const float beta2, const float eps, const int step, const float lr, float* __restrict__ const quantiles1, float* __restrict__ const quantiles2, float* max1, float* max2, float* new_max1, float* new_max2, float weight_decay, const float gnorm_scale, const int n) { const int n_full = (blockDim.x * gridDim.x)*NUM_PER_THREAD2; const int base_idx = (blockIdx.x * blockDim.x * NUM_PER_THREAD2); int valid_items = 0; float g_val = 0.0f; float s1_vals[NUM_PER_THREAD2]; float s2_vals[NUM_PER_THREAD2]; const float correction1 = 1.0f - powf(beta1, step); const float correction2 = sqrtf(1.0f - powf(beta2, step)); const float step_size = -lr*correction2/correction1; //const float step_size = -lr*correction2/correction1; float new_max_val1 = 1.0f/new_max1[0]; float new_max_val2 = 1.0f/new_max2[0]; float update_scale = 1.0f; if(max_unorm > 0.0f) { update_scale = max_unorm > 0.0f ? 
sqrtf(unorm[0]) : 1.0f; if(update_scale > max_unorm*param_norm){ update_scale = (max_unorm*param_norm)/update_scale; } else{ update_scale = 1.0f; } } else{ update_scale = 1.0f; } unsigned char c1s[NUM_PER_THREAD2]; unsigned char c2s[NUM_PER_THREAD2]; T p_vals[NUM_PER_THREAD2]; T g_vals[NUM_PER_THREAD2]; typedef cub::BlockLoad<T, NUM_THREADS2, NUM_PER_THREAD2, cub::BLOCK_LOAD_WARP_TRANSPOSE> LoadT; typedef cub::BlockLoad<unsigned char, NUM_THREADS2, NUM_PER_THREAD2, cub::BLOCK_LOAD_WARP_TRANSPOSE> LoadChar; typedef cub::BlockStore<unsigned char, NUM_THREADS2, NUM_PER_THREAD2, cub::BLOCK_STORE_WARP_TRANSPOSE> StoreChar; typedef cub::BlockStore<T, NUM_THREADS2, NUM_PER_THREAD2, cub::BLOCK_STORE_WARP_TRANSPOSE> StoreT; __shared__ float smem_quantiles1[256]; __shared__ float smem_quantiles2[256]; __shared__ union { typename LoadT::TempStorage loadh; typename LoadChar::TempStorage loadc; typename StoreChar::TempStorage storec; typename StoreT::TempStorage storeh; } temp_storage; if(threadIdx.x < 512) { if(threadIdx.x < 256) smem_quantiles1[threadIdx.x] = quantiles1[threadIdx.x]; else smem_quantiles2[threadIdx.x-256] = quantiles2[threadIdx.x-256]; } __syncthreads(); for (unsigned int i = base_idx; i < n_full; i += gridDim.x*NUM_THREADS2*NUM_PER_THREAD2) { valid_items = n - i >= (TH*NUM_PER_THREAD) ? (TH*NUM_PER_THREAD) : n - i; LoadT(temp_storage.loadh).Load(&(g[i]), g_vals, valid_items, (T)0.0f); __syncthreads(); LoadChar(temp_storage.loadc).Load(&(state1[i]), c1s, valid_items, 128); __syncthreads(); LoadChar(temp_storage.loadc).Load(&(state2[i]), c2s, valid_items, 0); __syncthreads(); LoadT(temp_storage.loadh).Load(&(p[i]), p_vals, valid_items); if((i + (threadIdx.x*NUM_PER_THREAD2) + NUM_PER_THREAD2) > n){ continue; } # pragma unroll 4 for(unsigned int j = 0; j < NUM_PER_THREAD2; j++) { g_val = float(g_vals[j]); g_val *= gnorm_scale; s1_vals[j] = smem_quantiles1[c1s[j]]; s1_vals[j] = s1_vals[j]*max1[0]; s1_vals[j] = (s1_vals[j]*beta1) + (((1.0f-beta1)*g_val)); c1s[j] = dQuantize<0>(smem_quantiles1, 0.0f, s1_vals[j]*new_max_val1); // make sure state1 term has still the same sign after quantization // (not needed for state2 term which has only positive values) if(signbit(smem_quantiles1[c1s[j]]) != signbit(s1_vals[j])) { if(s1_vals[j] > 0.0f) c1s[j] += 1; else c1s[j] -= 1; } s2_vals[j] = smem_quantiles2[c2s[j]]; s2_vals[j] = s2_vals[j]*max2[0]; s2_vals[j] = (s2_vals[j]*beta2) + (((1.0f-beta2)*g_val*g_val)); c2s[j] = dQuantize<0>(smem_quantiles2, 0.0f, s2_vals[j]*new_max_val2); } # pragma unroll 4 for(unsigned int j = 0; j < NUM_PER_THREAD2; j++) { p_vals[j] = (T)(((float)p_vals[j]) + ((update_scale*step_size*(s1_vals[j]/(sqrtf(s2_vals[j])+(correction2*eps)))))); if(weight_decay > 0.0f) p_vals[j] = update_scale*((float)p_vals[j])*(1.0f-(lr*weight_decay)); } StoreT(temp_storage.storeh).Store(&(p[i]), p_vals, valid_items); __syncthreads(); StoreChar(temp_storage.storec).Store(&(state1[i]), c1s, valid_items); __syncthreads(); StoreChar(temp_storage.storec).Store(&(state2[i]), c2s, valid_items); __syncthreads(); } } template<typename T, int OPTIMIZER> __global__ void __launch_bounds__(NUM_THREADS, 2) kPreconditionOptimizerStatic8bit1State(T* p, T* __restrict__ const g, unsigned char*__restrict__ const state1, float *unorm, const float beta1, const float eps, const int step, float* __restrict__ const quantiles1, float* max1, float* new_max1, const float weight_decay, const float gnorm_scale, const int n) { const int n_full = gridDim.x * NUM_PER_BLOCK; const int base_idx = (blockIdx.x * blockDim.x * 
NUM_PER_THREAD); int valid_items = n - (blockIdx.x*NUM_PER_BLOCK) > NUM_PER_BLOCK ? NUM_PER_BLOCK : n - (blockIdx.x*NUM_PER_BLOCK); float g_val = 0.0f; float local_max_s1 = -FLT_MAX; float local_unorm = 0.0f; float s1_vals[NUM8BIT]; T g_vals[NUM8BIT]; unsigned char m_c1[NUM8BIT]; typedef cub::BlockLoad<T, NUM_THREADS, NUM8BIT, cub::BLOCK_LOAD_WARP_TRANSPOSE> LoadT; typedef cub::BlockLoad<unsigned char, NUM_THREADS, NUM8BIT, cub::BLOCK_LOAD_WARP_TRANSPOSE> LoadUInt8; typedef cub::BlockReduce<float, NUM_THREADS> BlockReduce; __shared__ union { typename LoadT::TempStorage loadh; typename LoadUInt8::TempStorage loadc; typename BlockReduce::TempStorage reduce; } temp_storage; __shared__ float smem_quantiles1[256]; if(threadIdx.x < 256) smem_quantiles1[threadIdx.x] = quantiles1[threadIdx.x]; __syncthreads(); for (unsigned int i = base_idx; i < n_full; i += gridDim.x*NUM_THREADS*NUM8BIT) { valid_items = n - i >= (TH*NUM_PER_THREAD) ? (TH*NUM_PER_THREAD) : n - i; __syncthreads(); LoadT(temp_storage.loadh).Load(&(g[i]), g_vals, valid_items, (T)0.0f); __syncthreads(); LoadUInt8(temp_storage.loadc).Load(&(state1[i]), m_c1, valid_items, 128); #pragma unroll 16 for(int j = 0; j < NUM8BIT; j++) { g_val = g_vals[j]; g_val *= gnorm_scale; s1_vals[j] = smem_quantiles1[m_c1[j]]*max1[0]; switch(OPTIMIZER) { case MOMENTUM: if(step == 1) s1_vals[j] = (float)g_vals[j]; else s1_vals[j] = s1_vals[j]*beta1 + ((float)g_vals[j]); if(unorm != NULL) local_unorm += s1_vals[j]*s1_vals[j]; break; case RMSPROP: s1_vals[j] = s1_vals[j]*beta1 + ((1.0f-beta1)*(g_val*g_val)); break; } local_max_s1 = fmaxf(local_max_s1, fabsf(s1_vals[j])); } } __syncthreads(); local_max_s1 = BlockReduce(temp_storage.reduce).Reduce(local_max_s1, cub::Max(), valid_items); if(threadIdx.x == 0){ atomicMax(&new_max1[0], local_max_s1); } if(unorm != NULL) { __syncthreads(); local_unorm = BlockReduce(temp_storage.reduce).Reduce(local_unorm, cub::Sum(), valid_items); if(threadIdx.x == 0){ atomicAdd(&unorm[0], local_unorm); } } } template<typename T, int OPTIMIZER> __global__ void kOptimizerStatic8bit1State(T* p, T* const g, unsigned char* state1, const float *unorm, const float max_unorm, const float param_norm, const float beta1, const float eps, const int step, const float lr, float* __restrict__ const quantiles1, float* max1, float* new_max1, float weight_decay, const float gnorm_scale, const int n) { const int n_full = (blockDim.x * gridDim.x)*NUM_PER_THREAD2; const int base_idx = (blockIdx.x * blockDim.x * NUM_PER_THREAD2); int valid_items = 0; float g_val = 0.0f; float s1_vals[NUM_PER_THREAD2]; float new_max_val1 = 1.0f/new_max1[0]; float update_scale = 1.0f; if(max_unorm > 0.0f) { update_scale = max_unorm > 0.0f ? 
sqrtf(unorm[0]) : 1.0f; if(update_scale > max_unorm*param_norm){ update_scale = (max_unorm*param_norm)/update_scale; } else{ update_scale = 1.0f; } } else{ update_scale = 1.0f; } unsigned char c1s[NUM_PER_THREAD2]; T p_vals[NUM_PER_THREAD2]; T g_vals[NUM_PER_THREAD2]; typedef cub::BlockLoad<T, NUM_THREADS2, NUM_PER_THREAD2, cub::BLOCK_LOAD_WARP_TRANSPOSE> LoadT; typedef cub::BlockLoad<unsigned char, NUM_THREADS2, NUM_PER_THREAD2, cub::BLOCK_LOAD_WARP_TRANSPOSE> LoadChar; typedef cub::BlockStore<unsigned char, NUM_THREADS2, NUM_PER_THREAD2, cub::BLOCK_STORE_WARP_TRANSPOSE> StoreChar; typedef cub::BlockStore<T, NUM_THREADS2, NUM_PER_THREAD2, cub::BLOCK_STORE_WARP_TRANSPOSE> StoreT; __shared__ float smem_quantiles1[256]; __shared__ union { typename LoadT::TempStorage loadh; typename LoadChar::TempStorage loadc; typename StoreChar::TempStorage storec; typename StoreT::TempStorage storeh; } temp_storage; if(threadIdx.x < 256) smem_quantiles1[threadIdx.x] = quantiles1[threadIdx.x]; __syncthreads(); for (unsigned int i = base_idx; i < n_full; i += gridDim.x*NUM_THREADS2*NUM_PER_THREAD2) { valid_items = n - i >= (TH*NUM_PER_THREAD) ? (TH*NUM_PER_THREAD) : n - i; LoadT(temp_storage.loadh).Load(&(g[i]), g_vals, valid_items, (T)0.0f); __syncthreads(); LoadChar(temp_storage.loadc).Load(&(state1[i]), c1s, valid_items, 128); __syncthreads(); LoadT(temp_storage.loadh).Load(&(p[i]), p_vals, valid_items); if((i + (threadIdx.x*NUM_PER_THREAD2) + NUM_PER_THREAD2) > n){ continue; } # pragma unroll 4 for(unsigned int j = 0; j < NUM_PER_THREAD2; j++) { g_val = float(g_vals[j]); g_val *= gnorm_scale; if(weight_decay > 0.0f) g_val += ((float)p_vals[j])*weight_decay; s1_vals[j] = smem_quantiles1[c1s[j]]*max1[0]; switch(OPTIMIZER) { case MOMENTUM: if(step == 1) s1_vals[j] = g_vals[j]; else s1_vals[j] = s1_vals[j]*beta1 + ((float)g_vals[j]); p_vals[j] = ((float)p_vals[j]) + (-lr*update_scale*(s1_vals[j])); break; case RMSPROP: s1_vals[j] = s1_vals[j]*beta1 + ((1.0f-beta1)*(g_val*g_val)); p_vals[j] = ((float)p_vals[j]) - (lr*__fdividef(g_val,sqrtf(s1_vals[j])+eps)); break; } c1s[j] = dQuantize<0>(smem_quantiles1, 0.0f, s1_vals[j]*new_max_val1); // make sure state1 term has still the same sign after quantization if(signbit(smem_quantiles1[c1s[j]]) != signbit(s1_vals[j])) { if(s1_vals[j] > 0.0f) c1s[j] += 1; else c1s[j] -= 1; } } StoreT(temp_storage.storeh).Store(&(p[i]), p_vals, valid_items); __syncthreads(); StoreChar(temp_storage.storec).Store(&(state1[i]), c1s, valid_items); __syncthreads(); } } template<typename T, int BLOCK_SIZE, int NUM_VALS> __global__ void kPercentileClipping(T * __restrict__ g, float *gnorm_vec, int step, const int n) { const int n_full = (BLOCK_SIZE*(n/BLOCK_SIZE)) + (n % BLOCK_SIZE == 0 ? 0 : BLOCK_SIZE); int valid_items = 0; typedef cub::BlockReduce<float, BLOCK_SIZE/NUM_VALS> BlockReduce; typedef cub::BlockLoad<T, BLOCK_SIZE/NUM_VALS, NUM_VALS, cub::BLOCK_LOAD_WARP_TRANSPOSE> LoadT; __shared__ typename BlockReduce::TempStorage reduce; __shared__ typename LoadT::TempStorage loadT; T vals[NUM_VALS]; float local_sum = 0.0f; for (unsigned int i = (blockIdx.x * BLOCK_SIZE); i < n_full; i += gridDim.x*BLOCK_SIZE) { valid_items = n - i > BLOCK_SIZE ? 
BLOCK_SIZE : n - i; local_sum = 0.0f; __syncthreads(); LoadT(loadT).Load(&(g[i]), vals, valid_items, (T)0.0f); #pragma unroll NUM_VALS for(int j = 0; j < NUM_VALS; j++) local_sum += ((float)vals[j])*((float)vals[j]); local_sum = BlockReduce(reduce).Sum(local_sum, valid_items); if(threadIdx.x == 0) { if(step == 1) { // initialize with the same norm for all positions //#pragma unroll 10 for(int j = 0; j < 100; j++) atomicAdd(&gnorm_vec[j], local_sum); } else atomicAdd(&gnorm_vec[step % 100], local_sum); } } } #define LANES 2 #define QUAD 3 template<typename T, int OPTIMIZER, int BLOCK_SIZE, int N_PER_TH> __launch_bounds__(256, 3) __global__ void kOptimizerStatic8bit2StateBlockwise(T* p, T* __restrict__ const g, unsigned char* state1, unsigned char* state2, const float beta1, const float beta2, const float eps, const int step, const float lr, float* __restrict__ const quantiles1, float* __restrict__ const quantiles2, float* absmax1, float* absmax2, float weight_decay, const float gnorm_scale, const bool skip_zeros, const int n) { //const int n_full = n + (n%BLOCK_SIZE); const int n_full = gridDim.x * BLOCK_SIZE; const int base_idx = (blockIdx.x * BLOCK_SIZE); int valid_items = 0; float g_val = 0.0f; float s1_vals[N_PER_TH]; float s2_vals[N_PER_TH]; // 2-5% const float correction1 = 1.0f - __powf(beta1, step); const float correction2 = sqrtf(1.0f -__powf(beta2, step)); const float step_size = __fdividef(-lr*correction2,correction1); const int lane_id = threadIdx.x % LANES; float new_local_abs_max1 = -FLT_MAX; float new_local_abs_max2 = -FLT_MAX; float quadrants1[QUAD]; float quadrants2[QUAD]; unsigned char c1s[N_PER_TH]; unsigned char c2s[N_PER_TH]; T g_vals[N_PER_TH]; typedef cub::BlockLoad<T, BLOCK_SIZE/N_PER_TH, N_PER_TH, cub::BLOCK_LOAD_WARP_TRANSPOSE> LoadT; typedef cub::BlockLoad<unsigned char, BLOCK_SIZE/N_PER_TH, N_PER_TH, cub::BLOCK_LOAD_WARP_TRANSPOSE> LoadChar; typedef cub::BlockStore<unsigned char, BLOCK_SIZE/N_PER_TH, N_PER_TH, cub::BLOCK_STORE_WARP_TRANSPOSE> StoreChar; typedef cub::BlockStore<T, BLOCK_SIZE/N_PER_TH, N_PER_TH, cub::BLOCK_STORE_WARP_TRANSPOSE> StoreT; __shared__ float smem_quantiles1[LANES][257]; __shared__ float smem_quantiles2[LANES][257]; typedef cub::BlockReduce<float, BLOCK_SIZE/N_PER_TH> BlockReduce1; typedef cub::BlockReduce<float, BLOCK_SIZE/N_PER_TH> BlockReduce2; __shared__ typename BlockReduce1::TempStorage reduce1; __shared__ typename BlockReduce2::TempStorage reduce2; __shared__ float smem_exchange1[1]; __shared__ float smem_exchange2[1]; __shared__ union { typename LoadT::TempStorage loadh; typename LoadChar::TempStorage loadc; typename StoreChar::TempStorage storec; typename StoreT::TempStorage storeh; } temp_storage; // init: 0.2 -> 0.23 // 0.23 -> 0.23 smem_quantiles1[0][threadIdx.x] = quantiles1[threadIdx.x]; smem_quantiles2[0][threadIdx.x] = quantiles2[threadIdx.x]; # pragma unroll for(unsigned int j = 1; j < LANES; j++) { smem_quantiles1[j][threadIdx.x] = smem_quantiles1[0][threadIdx.x]; smem_quantiles2[j][threadIdx.x] = smem_quantiles2[0][threadIdx.x]; } __syncthreads(); #pragma unroll for(int k = 0; k < QUAD; k++) { quadrants1[k] = smem_quantiles1[lane_id][(k*256/(QUAD+1)) + (256/(QUAD+1)-1)]; quadrants2[k] = smem_quantiles2[lane_id][(k*256/(QUAD+1)) + (256/(QUAD+1)-1)]; } for (unsigned int i = base_idx; i < n_full; i += gridDim.x*BLOCK_SIZE) { // loads: 0.23 -> 0.85/1.44 valid_items = n - i >= BLOCK_SIZE ? 
BLOCK_SIZE : n - i; __syncthreads(); LoadT(temp_storage.loadh).Load(&(g[i]), g_vals, valid_items, (T)0.0f); __syncthreads(); LoadChar(temp_storage.loadc).Load(&(state1[i]), c1s, valid_items, 128); __syncthreads(); LoadChar(temp_storage.loadc).Load(&(state2[i]), c2s, valid_items, 0); new_local_abs_max1 = -FLT_MAX; new_local_abs_max2 = -FLT_MAX; // update: 2.48/1.57 -> 2.51/1.60 # pragma unroll N_PER_TH for(unsigned int j = 0; j < N_PER_TH; j++) { g_val = float(g_vals[j]); g_val *= gnorm_scale; if(!skip_zeros || (skip_zeros && ((float)g_vals[j] != 0.0f))) { s1_vals[j] = smem_quantiles1[lane_id][c1s[j]]*absmax1[i/BLOCK_SIZE]; s1_vals[j] = (s1_vals[j]*beta1) + (((1.0f-beta1)*g_val)); s2_vals[j] = smem_quantiles2[lane_id][c2s[j]]*absmax2[i/BLOCK_SIZE]; s2_vals[j] = (s2_vals[j]*beta2) + (((1.0f-beta2)*g_val*g_val)); } new_local_abs_max1 = fmaxf(new_local_abs_max1, fabsf(s1_vals[j])); new_local_abs_max2 = fmaxf(new_local_abs_max2, fabsf(s2_vals[j])); } // reduce: 2.51/1.60 -> 2.67/1.69 new_local_abs_max1 = BlockReduce1(reduce1).Reduce(new_local_abs_max1, cub::Max()); new_local_abs_max2 = BlockReduce2(reduce2).Reduce(new_local_abs_max2, cub::Max()); if(threadIdx.x == 0) { smem_exchange1[0] = new_local_abs_max1; smem_exchange2[0] = new_local_abs_max2; } __syncthreads(); if(threadIdx.x == 0) { absmax1[i/BLOCK_SIZE] = new_local_abs_max1; absmax2[i/BLOCK_SIZE] = new_local_abs_max2; } else { new_local_abs_max1 = smem_exchange1[0]; new_local_abs_max2 = smem_exchange2[0]; } __syncthreads(); LoadT(temp_storage.loadh).Load(&(p[i]), g_vals, valid_items, (T)0.0f); // reduce: 2.67/1.69 -> 2.67/1.70 # pragma unroll N_PER_TH for(unsigned int j = 0; j < N_PER_TH; j++) { if(!skip_zeros || (skip_zeros && ((float)g_vals[j] != 0.0f))) { g_vals[j] = (T)(((float)g_vals[j]) + ((step_size*(__fdividef(s1_vals[j],(sqrtf(s2_vals[j])+(correction2*eps))))))); if(weight_decay > 0.0f) g_vals[j] = ((float)g_vals[j])*(1.0f-(lr*weight_decay)); } } // store: 0.85/1.44 -> 2.48/1.57 __syncthreads(); StoreT(temp_storage.storeh).Store(&(p[i]), g_vals, valid_items); // quantizaztion: 2.67/1.70 -> 3.4/3.3 # pragma unroll N_PER_TH for(unsigned int j = 0; j < N_PER_TH; j++) { c1s[j] = quantize_2D<1>(quadrants1, smem_quantiles1[lane_id], __fdividef(s1_vals[j],new_local_abs_max1)); c2s[j] = quantize_2D<0>(quadrants2, smem_quantiles2[lane_id], __fdividef(s2_vals[j],new_local_abs_max2)); // make sure state1 term has still the same sign after quantization // (not needed for state2 term which has only positive values) if(signbit(smem_quantiles1[lane_id][c1s[j]]) != signbit(s1_vals[j])) { if(s1_vals[j] > 0.0f) c1s[j] += 1; else c1s[j] -= 1; } } __syncthreads(); StoreChar(temp_storage.storec).Store(&(state1[i]), c1s, valid_items); __syncthreads(); StoreChar(temp_storage.storec).Store(&(state2[i]), c2s, valid_items); } } #define LANES 2 #define QUAD 3 template<typename T, int OPTIMIZER, int BLOCK_SIZE, int N_PER_TH> __launch_bounds__(256, 3) __global__ void kOptimizerStatic8bit1StateBlockwise(T* p, T* __restrict__ const g, unsigned char* state1, const float beta1, const float beta2, const float eps, const int step, const float lr, float* __restrict__ const quantiles1, float* absmax1, float weight_decay, const float gnorm_scale, const bool skip_zeros, const int n) { //const int n_full = n + (n%BLOCK_SIZE); const int n_full = gridDim.x * BLOCK_SIZE; const int base_idx = (blockIdx.x * BLOCK_SIZE); int valid_items = 0; float g_val = 0.0f; float s1_vals[N_PER_TH]; // 2-5% const int lane_id = threadIdx.x % LANES; float new_local_abs_max1 = -FLT_MAX; 
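// quadrants1 caches, per lane, the code values at pivots 63, 127 and 191 (k*64 + 63 for
// k = 0..QUAD-1, see the initialisation loop below). quantize_2D starts its binary search
// from quadrants[1] and takes the next comparison value from this register array as well,
// so the first two halving steps avoid shared-memory reads into smem_quantiles1.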
float quadrants1[QUAD]; unsigned char c1s[N_PER_TH]; T g_vals[N_PER_TH]; T p_vals[N_PER_TH]; typedef cub::BlockLoad<T, BLOCK_SIZE/N_PER_TH, N_PER_TH, cub::BLOCK_LOAD_WARP_TRANSPOSE> LoadT; typedef cub::BlockLoad<unsigned char, BLOCK_SIZE/N_PER_TH, N_PER_TH, cub::BLOCK_LOAD_WARP_TRANSPOSE> LoadChar; typedef cub::BlockStore<unsigned char, BLOCK_SIZE/N_PER_TH, N_PER_TH, cub::BLOCK_STORE_WARP_TRANSPOSE> StoreChar; typedef cub::BlockStore<T, BLOCK_SIZE/N_PER_TH, N_PER_TH, cub::BLOCK_STORE_WARP_TRANSPOSE> StoreT; __shared__ float smem_quantiles1[LANES][257]; typedef cub::BlockReduce<float, BLOCK_SIZE/N_PER_TH> BlockReduce1; __shared__ typename BlockReduce1::TempStorage reduce1; __shared__ float smem_exchange1[1]; __shared__ union { typename LoadT::TempStorage loadh; typename LoadChar::TempStorage loadc; typename StoreChar::TempStorage storec; typename StoreT::TempStorage storeh; } temp_storage; // init: 0.2 -> 0.23 // 0.23 -> 0.23 smem_quantiles1[0][threadIdx.x] = quantiles1[threadIdx.x]; # pragma unroll for(unsigned int j = 1; j < LANES; j++) smem_quantiles1[j][threadIdx.x] = smem_quantiles1[0][threadIdx.x]; __syncthreads(); #pragma unroll for(int k = 0; k < QUAD; k++) quadrants1[k] = smem_quantiles1[lane_id][(k*256/(QUAD+1)) + (256/(QUAD+1)-1)]; for (unsigned int i = base_idx; i < n_full; i += gridDim.x*BLOCK_SIZE) { // loads: 0.23 -> 0.85/1.44 valid_items = n - i >= BLOCK_SIZE ? BLOCK_SIZE : n - i; __syncthreads(); LoadT(temp_storage.loadh).Load(&(g[i]), g_vals, valid_items, (T)0.0f); __syncthreads(); LoadChar(temp_storage.loadc).Load(&(state1[i]), c1s, valid_items, 128); __syncthreads(); LoadT(temp_storage.loadh).Load(&(p[i]), p_vals, valid_items, (T)0.0f); new_local_abs_max1 = -FLT_MAX; // update: 2.48/1.57 -> 2.51/1.60 # pragma unroll N_PER_TH for(unsigned int j = 0; j < N_PER_TH; j++) { g_val = float(g_vals[j]); g_val *= gnorm_scale; if(!skip_zeros || (skip_zeros && ((float)g_vals[j] != 0.0f))) { if(weight_decay > 0.0f) g_val += ((float)p_vals[j])*weight_decay; s1_vals[j] = smem_quantiles1[lane_id][c1s[j]]*absmax1[i/BLOCK_SIZE]; switch(OPTIMIZER) { case MOMENTUM: if(step == 1) s1_vals[j] = g_val; else s1_vals[j] = (s1_vals[j]*beta1) + g_val; break; case RMSPROP: s1_vals[j] = s1_vals[j]*beta1 + ((1.0f-beta1)*(g_val*g_val)); break; case ADAGRAD: s1_vals[j] = s1_vals[j] + (g_val*g_val); break; } } new_local_abs_max1 = fmaxf(new_local_abs_max1, fabsf(s1_vals[j])); } // reduce: 2.51/1.60 -> 2.67/1.69 new_local_abs_max1 = BlockReduce1(reduce1).Reduce(new_local_abs_max1, cub::Max()); if(threadIdx.x == 0) smem_exchange1[0] = new_local_abs_max1; __syncthreads(); if(threadIdx.x == 0) absmax1[i/BLOCK_SIZE] = new_local_abs_max1; else new_local_abs_max1 = smem_exchange1[0]; // reduce: 2.67/1.69 -> 2.67/1.70 # pragma unroll N_PER_TH for(unsigned int j = 0; j < N_PER_TH; j++) { if(!skip_zeros || (skip_zeros && ((float)g_vals[j] != 0.0f))) { switch(OPTIMIZER) { case MOMENTUM: p_vals[j] = ((float)p_vals[j]) - lr*(s1_vals[j]); break; case RMSPROP: g_val = g_vals[j]; p_vals[j] = ((float)p_vals[j]) - lr*(__fdividef(g_val, sqrtf(s1_vals[j])+eps)); break; case ADAGRAD: g_val = g_vals[j]; p_vals[j] = ((float)p_vals[j]) - lr*(__fdividef(g_val, sqrtf(s1_vals[j])+eps)); break; } } } // store: 0.85/1.44 -> 2.48/1.57 __syncthreads(); StoreT(temp_storage.storeh).Store(&(p[i]), p_vals, valid_items); // quantizaztion: 2.67/1.70 -> 3.4/3.3 # pragma unroll N_PER_TH for(unsigned int j = 0; j < N_PER_TH; j++) { c1s[j] = quantize_2D<1>(quadrants1, smem_quantiles1[lane_id], __fdividef(s1_vals[j],new_local_abs_max1)); // make 
sure state1 term has still the same sign after quantization // (not needed for state2 term which has only positive values) if(signbit(smem_quantiles1[lane_id][c1s[j]]) != signbit(s1_vals[j])) { if(s1_vals[j] > 0.0f) c1s[j] += 1; else c1s[j] -= 1; } } __syncthreads(); StoreChar(temp_storage.storec).Store(&(state1[i]), c1s, valid_items); } } //============================================================== // TEMPLATE DEFINITIONS //============================================================== template __device__ unsigned char dQuantize<0>(float* smem_code, const float rand, float x); template __device__ unsigned char dQuantize<1>(float* smem_code, const float rand, float x); template __global__ void kEstimateQuantiles(float *__restrict__ const A, float *code, const float offset, const float max_val, const int n); template __global__ void kEstimateQuantiles(half *__restrict__ const A, float *code, const float offset, const half max_val, const int n); #define MAKE_PreconditionOptimizer32bit1State(oname, gtype) \ template __global__ void kPreconditionOptimizer32bit1State<gtype, oname, 4096, 8>(gtype* g, gtype* p, \ float* state1, float *unorm, \ const float beta1, const float eps, const float weight_decay, \ const int step, const float lr, const float gnorm_scale, const int n); \ MAKE_PreconditionOptimizer32bit1State(MOMENTUM, half) MAKE_PreconditionOptimizer32bit1State(MOMENTUM, float) MAKE_PreconditionOptimizer32bit1State(RMSPROP, half) MAKE_PreconditionOptimizer32bit1State(RMSPROP, float) MAKE_PreconditionOptimizer32bit1State(ADAGRAD, half) MAKE_PreconditionOptimizer32bit1State(ADAGRAD, float) #define MAKE_Optimizer32bit1State(oname, gtype) \ template __global__ void kOptimizer32bit1State<gtype, oname>(gtype* g, gtype* p, float* state1, float *unorm, const float max_unorm, const float param_norm, \ const float beta1, const float eps, const float weight_decay,const int step, const float lr, const float gnorm_scale, const bool skip_zeros, const int n); \ MAKE_Optimizer32bit1State(MOMENTUM, half) MAKE_Optimizer32bit1State(MOMENTUM, float) MAKE_Optimizer32bit1State(RMSPROP, half) MAKE_Optimizer32bit1State(RMSPROP, float) MAKE_Optimizer32bit1State(ADAGRAD, half) MAKE_Optimizer32bit1State(ADAGRAD, float) #define MAKE_PreconditionOptimizer32bit2State(oname, gtype) \ template __global__ void kPreconditionOptimizer32bit2State<gtype, oname, 4096, 8>(gtype* g, gtype* p, \ float* state1, float* state2, float *unorm, \ const float beta1, const float beta2, const float eps, const float weight_decay, \ const int step, const float lr, const float gnorm_scale, const int n); \ MAKE_PreconditionOptimizer32bit2State(ADAM, half) MAKE_PreconditionOptimizer32bit2State(ADAM, float) template __global__ void kOptimizer32bit2State<half, ADAM>(half* g, half* p, float* state1, float* state2, float *unorm, const float max_unorm, const float param_norm, const float beta1, const float beta2, const float eps, const float weight_decay,const int step, const float lr, const float gnorm_scale, const bool skip_zeros, const int n); template __global__ void kOptimizer32bit2State<float, ADAM>(float* g, float* p, float* state1, float* state2, float *unorm, const float max_unorm, const float param_norm, const float beta1, const float beta2, const float eps, const float weight_decay,const int step, const float lr, const float gnorm_scale, const bool skip_zeros, const int n); #define MAKE_PreconditionStatic8bit1State(oname, gtype) \ template __global__ void kPreconditionOptimizerStatic8bit1State<gtype, oname>(gtype* p, gtype* 
__restrict__ const g, unsigned char*__restrict__ const state1, \ float *unorm, \ const float beta1, \ const float eps, const int step, \ float* __restrict__ const quantiles1, \ float* max1, float* new_max1, \ const float weight_decay, \ const float gnorm_scale, \ const int n); \ MAKE_PreconditionStatic8bit1State(MOMENTUM, half) MAKE_PreconditionStatic8bit1State(MOMENTUM, float) MAKE_PreconditionStatic8bit1State(RMSPROP, half) MAKE_PreconditionStatic8bit1State(RMSPROP, float) #define MAKE_optimizerStatic8bit1State(oname, gtype) \ template __global__ void kOptimizerStatic8bit1State<gtype, oname>(gtype* p, gtype* const g, unsigned char* state1, \ const float *unorm, const float max_unorm, const float param_norm, \ const float beta1, \ const float eps, const int step, const float lr, \ float* __restrict__ const quantiles1, \ float* max1, float* new_max1, \ float weight_decay, \ const float gnorm_scale, \ const int n); \ MAKE_optimizerStatic8bit1State(MOMENTUM, half) MAKE_optimizerStatic8bit1State(MOMENTUM, float) MAKE_optimizerStatic8bit1State(RMSPROP, half) MAKE_optimizerStatic8bit1State(RMSPROP, float) #define MAKE_PreconditionStatic8bit2State(oname, gtype) \ template __global__ void kPreconditionOptimizerStatic8bit2State<gtype, oname>(gtype* p, gtype* __restrict__ const g, unsigned char*__restrict__ const state1, unsigned char* __restrict__ const state2, \ float *unorm, \ const float beta1, const float beta2, \ const float eps, const int step, \ float* __restrict__ const quantiles1, float* __restrict__ const quantiles2, \ float* max1, float* max2, float* new_max1, float* new_max2, \ const float gnorm_scale, \ const int n); \ MAKE_PreconditionStatic8bit2State(ADAM, half) MAKE_PreconditionStatic8bit2State(ADAM, float) #define MAKE_optimizerStatic8bit2State(oname, gtype) \ template __global__ void kOptimizerStatic8bit2State<gtype, oname>(gtype* p, gtype* const g, unsigned char* state1, unsigned char* state2, \ const float *unorm, const float max_unorm, const float param_norm, \ const float beta1, const float beta2, \ const float eps, const int step, const float lr, \ float* __restrict__ const quantiles1, float* __restrict__ const quantiles2, \ float* max1, float* max2, float* new_max1, float* new_max2, \ float weight_decay, \ const float gnorm_scale, \ const int n); \ MAKE_optimizerStatic8bit2State(ADAM, half) MAKE_optimizerStatic8bit2State(ADAM, float) template __global__ void kPercentileClipping<float, 2048, 4>(float * __restrict__ g, float *gnorm_vec, int step, const int n); template __global__ void kPercentileClipping<half, 2048, 4>(half * __restrict__ g, float *gnorm_vec, int step, const int n); template __global__ void kQuantizeBlockwise<half, 4096, 4, 0>(float * code, half * __restrict__ const A, float *absmax, unsigned char *out, float * __restrict__ const rand, const int rand_offset, const int n); template __global__ void kQuantizeBlockwise<float, 4096, 4, 0>(float * code, float * __restrict__ const A, float *absmax, unsigned char *out, float * __restrict__ const rand, const int rand_offset, const int n); template __global__ void kQuantizeBlockwise<half, 4096, 4, 1>(float * code, half * __restrict__ const A, float *absmax, unsigned char *out, float * __restrict__ const rand, const int rand_offset, const int n); template __global__ void kQuantizeBlockwise<float, 4096, 4, 1>(float * code, float * __restrict__ const A, float *absmax, unsigned char *out, float * __restrict__ const rand, const int rand_offset, const int n); template __global__ void kDequantizeBlockwise<half, 4096, 1024, 
4>(float *code, unsigned char * __restrict__ const A, float * __restrict__ const absmax, half *out, const int n); template __global__ void kDequantizeBlockwise<float, 4096, 1024, 4>(float *code, unsigned char * __restrict__ const A, float * __restrict__ const absmax, float *out, const int n); template __global__ void kDequantizeBlockwise<half, 2048, 512, 4>(float *code, unsigned char * __restrict__ const A, float * __restrict__ const absmax, half *out, const int n); template __global__ void kDequantizeBlockwise<float, 2048, 512, 4>(float *code, unsigned char * __restrict__ const A, float * __restrict__ const absmax, float *out, const int n); #define MAKE_OptimizerStatic8bit2StateBlockwise(oname, gtype, block_size, num_per_thread) \ template __global__ void kOptimizerStatic8bit2StateBlockwise<gtype, oname, block_size, num_per_thread>(gtype* p, gtype* __restrict__ const g, unsigned char* state1, unsigned char* state2, \ const float beta1, const float beta2, \ const float eps, const int step, const float lr, \ float* __restrict__ const quantiles1, float* __restrict__ const quantiles2, \ float* absmax1, float* absmax2, \ float weight_decay, \ const float gnorm_scale, const bool skip_zeros, const int n); \ MAKE_OptimizerStatic8bit2StateBlockwise(ADAM, float, 2048, 8) MAKE_OptimizerStatic8bit2StateBlockwise(ADAM, half, 2048, 8) #define MAKE_OptimizerStatic8bit1StateBlockwise(oname, gtype, block_size, num_per_thread) \ template __global__ void kOptimizerStatic8bit1StateBlockwise<gtype, oname, block_size, num_per_thread>( \ gtype* p, gtype* __restrict__ const g, unsigned char* state1, \ const float beta1, const float beta2, \ const float eps, const int step, const float lr, \ float* __restrict__ const quantiles1, \ float* absmax1, \ float weight_decay, \ const float gnorm_scale, const bool skip_zeros, const int n); \ MAKE_OptimizerStatic8bit1StateBlockwise(MOMENTUM, float, 2048, 8) MAKE_OptimizerStatic8bit1StateBlockwise(MOMENTUM, half, 2048, 8) MAKE_OptimizerStatic8bit1StateBlockwise(RMSPROP, float, 2048, 8) MAKE_OptimizerStatic8bit1StateBlockwise(RMSPROP, half, 2048, 8) MAKE_OptimizerStatic8bit1StateBlockwise(ADAGRAD, float, 2048, 8) MAKE_OptimizerStatic8bit1StateBlockwise(ADAGRAD, half, 2048, 8)
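// A minimal host-side C++ sketch (not part of the kernels above) of the blockwise
// absmax quantization scheme that kOptimizerStatic8bit1StateBlockwise implements:
// each BLOCK_SIZE chunk of the optimizer state is normalized by its own absolute
// maximum, and each normalized value is mapped to the index of the nearest entry of
// a 256-entry code table. The nearest-neighbor scan below is a simplification of the
// kernel's quantize_2D quadrant search, and the code table would be the learned
// quantiles1 array; function names here are illustrative, not library API.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <vector>

static uint8_t nearest_code_index(const std::vector<float>& code, float x)
{
    // linear scan is fine for a sketch; the kernel narrows the search via quadrants1
    int best = 0;
    float best_dist = std::fabs(code[0] - x);
    for (int i = 1; i < (int)code.size(); ++i) {
        float d = std::fabs(code[i] - x);
        if (d < best_dist) { best_dist = d; best = i; }
    }
    return (uint8_t)best;
}

void blockwise_quantize(const std::vector<float>& state,
                        const std::vector<float>& code,      // 256 entries in [-1, 1]
                        size_t block_size,
                        std::vector<uint8_t>& out_idx,
                        std::vector<float>& out_absmax)
{
    out_idx.resize(state.size());
    out_absmax.assign((state.size() + block_size - 1) / block_size, 0.0f);
    for (size_t b = 0; b * block_size < state.size(); ++b) {
        size_t begin = b * block_size;
        size_t end = std::min(begin + block_size, state.size());
        float absmax = 0.0f;
        for (size_t i = begin; i < end; ++i)
            absmax = std::max(absmax, std::fabs(state[i]));
        out_absmax[b] = absmax;
        for (size_t i = begin; i < end; ++i)
            out_idx[i] = nearest_code_index(code, absmax > 0.0f ? state[i] / absmax : 0.0f);
    }
}

// Dequantization mirrors the kernel's
//   s1_vals[j] = smem_quantiles1[lane_id][c1s[j]] * absmax1[i/BLOCK_SIZE];
float blockwise_dequantize(const std::vector<float>& code,
                           const std::vector<uint8_t>& idx,
                           const std::vector<float>& absmax,
                           size_t block_size, size_t i)
{
    return code[idx[i]] * absmax[i / block_size];
}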
void tsnecuda::util::GaussianNormalizeDeviceVector(cublasHandle_t &handle, thrust::device_vector<float> &d_points, const int num_points, const int num_dims) { // Compute the means auto d_means = tsnecuda::util::ReduceMean(handle, d_points, num_points, num_dims, 0); // Zero-Center tsnecuda::util::BroadcastMatrixVector(d_points, d_means, num_points, num_dims, thrust::minus<float>(), 1, 1.f); // Compute the standard deviation thrust::device_vector<float> squared_vals(d_points.size()); tsnecuda::util::SquareDeviceVector(squared_vals, d_points); auto norm_sum_of_squares = tsnecuda::util::ReduceAlpha(handle, squared_vals, num_points, num_dims, 1.f / (num_points - 1), 0); thrust::device_vector<float> standard_deviation(norm_sum_of_squares.size()); tsnecuda::util::SqrtDeviceVector(standard_deviation, norm_sum_of_squares); // Normalize the values tsnecuda::util::BroadcastMatrixVector(d_points, standard_deviation, num_points, num_dims, thrust::divides<float>(), 1, 1.f); } void tsnecuda::util::SquareDeviceVector(thrust::device_vector<float> &d_out, const thrust::device_vector<float> &d_input) { thrust::transform(d_input.begin(), d_input.end(), d_out.begin(), tsnecuda::util::FunctionalSquare()); } void tsnecuda::util::SqrtDeviceVector(thrust::device_vector<float> &d_out, const thrust::device_vector<float> &d_input) { thrust::transform(d_input.begin(), d_input.end(), d_out.begin(), tsnecuda::util::FunctionalSqrt()); } float tsnecuda::util::L2NormDeviceVector( const thrust::device_vector<float> &d_vector) { return std::sqrt(thrust::transform_reduce(d_vector.begin(), d_vector.end(), tsnecuda::util::FunctionalSquare(), 0.0f, thrust::plus<float>())); } bool tsnecuda::util::AnyNanOrInfDeviceVector( const thrust::device_vector<float> &d_vector) { return thrust::transform_reduce(d_vector.begin(), d_vector.end(), tsnecuda::util::FunctionalNanOrInf(), 0, thrust::plus<bool>()); } void tsnecuda::util::MaxNormalizeDeviceVector( thrust::device_vector<float> &d_vector) { float max_val = thrust::transform_reduce(d_vector.begin(), d_vector.end(), tsnecuda::util::FunctionalAbs(), 0.0f, thrust::maximum<float>()); thrust::constant_iterator<float> division_iterator(max_val); thrust::transform(d_vector.begin(), d_vector.end(), division_iterator, d_vector.begin(), thrust::divides<float>()); } // Needs to compute pij = pj|i + pi|j / 2n // void tsnecuda::util::SymmetrizeMatrix(cusparseHandle_t &handle, // thrust::device_vector<float> &d_symmetrized_values, // thrust::device_vector<int32_t> &d_symmetrized_rowptr, // thrust::device_vector<int32_t> &d_symmetrized_colind, // thrust::device_vector<float> &d_values, // thrust::device_vector<int32_t> &d_indices, // const float magnitude_factor, // const int num_points, // const int num_neighbors) // { // // Allocate memory // int32_t *csr_row_ptr_a = nullptr; // cudaMalloc(reinterpret_cast<void **>(&csr_row_ptr_a), // (num_points + 1) * sizeof(int32_t)); // int32_t *csr_column_ptr_a = thrust::raw_pointer_cast(d_indices.data()); // float *csr_values_a = thrust::raw_pointer_cast(d_values.data()); // // Copy the data // thrust::device_vector<int> d_vector_memory(csr_row_ptr_a, // csr_row_ptr_a + num_points + 1); // thrust::sequence(d_vector_memory.begin(), d_vector_memory.end(), // 0, static_cast<int32_t>(num_neighbors)); // thrust::copy(d_vector_memory.begin(), d_vector_memory.end(), csr_row_ptr_a); // cudaDeviceSynchronize(); // // Initialize the matrix descriptor // cusparseMatDescr_t matrix_descriptor; // cusparseCreateMatDescr(&matrix_descriptor); // 
cusparseSetMatType(matrix_descriptor, CUSPARSE_MATRIX_TYPE_GENERAL); // cusparseSetMatIndexBase(matrix_descriptor, CUSPARSE_INDEX_BASE_ZERO); // // Sort the matrix properly // size_t permutation_buffer_byte_size = 0; // void *permutation_buffer = NULL; // int32_t *permutation = NULL; // // step 1: Allocate memory buffer // cusparseXcsrsort_bufferSizeExt(handle, num_points, num_points, // num_points * num_neighbors, csr_row_ptr_a, // csr_column_ptr_a, &permutation_buffer_byte_size); // cudaDeviceSynchronize(); // cudaMalloc(&permutation_buffer, // sizeof(char) * permutation_buffer_byte_size); // // step 2: Setup permutation vector permutation to be the identity // cudaMalloc(reinterpret_cast<void **>(&permutation), // sizeof(int32_t) * num_points * num_neighbors); // cusparseCreateIdentityPermutation(handle, num_points * num_neighbors, // permutation); // cudaDeviceSynchronize(); // // step 3: Sort CSR format // cusparseXcsrsort(handle, num_points, num_points, // num_points * num_neighbors, matrix_descriptor, csr_row_ptr_a, // csr_column_ptr_a, permutation, permutation_buffer); // cudaDeviceSynchronize(); // // step 4: Gather sorted csr_values // float *csr_values_a_sorted = nullptr; // cudaMalloc(reinterpret_cast<void **>(&csr_values_a_sorted), // (num_points * num_neighbors) * sizeof(float)); // cusparseSgthr(handle, num_points * num_neighbors, csr_values_a, // csr_values_a_sorted, permutation, CUSPARSE_INDEX_BASE_ZERO); // cudaDeviceSynchronize(); // // Free some memory // cudaFree(permutation_buffer); // cudaFree(permutation); // csr_values_a = csr_values_a_sorted; // // We need A^T, so we do a csr2csc() call // int32_t *csc_row_ptr_at = nullptr; // cudaMalloc(reinterpret_cast<void **>(&csc_row_ptr_at), // (num_points * num_neighbors) * sizeof(int32_t)); // int32_t *csc_column_ptr_at = nullptr; // cudaMalloc(reinterpret_cast<void **>(&csc_column_ptr_at), // (num_points + 1) * sizeof(int32_t)); // float *csc_values_at = nullptr; // cudaMalloc(reinterpret_cast<void **>(&csc_values_at), // (num_points * num_neighbors) * sizeof(float)); // // TODO: Compute the CSR2CSC buffer // // Do the transpose operation // cusparseScsr2csc(handle, num_points, num_points, // num_neighbors * num_points, csr_values_a, csr_row_ptr_a, // csr_column_ptr_a, csc_values_at, csc_row_ptr_at, // csc_column_ptr_at, CUSPARSE_ACTION_NUMERIC, // CUSPARSE_INDEX_BASEa_ZERO); // cudaDeviceSynchronize(); // // Now compute the output size of the matrix // int32_t base_C, num_nonzeros_C; // int32_t symmetrized_num_nonzeros = -1; // cusparseSetPointerMode(handle, CUSPARSE_POINTER_MODE_HOST); // d_symmetrized_rowptr.resize(num_points + 1); // cusparseXcsrgeamNnz(handle, num_points, num_points, // matrix_descriptor, num_points * num_neighbors, csr_row_ptr_a, // csr_column_ptr_a, // matrix_descriptor, num_points * num_neighbors, csc_column_ptr_at, // csc_row_ptr_at, // matrix_descriptor, // thrust::raw_pointer_cast(d_symmetrized_rowptr.data()), // &symmetrized_num_nonzeros); // cudaDeviceSynchronize(); // // Do some useful checking... 
// if (-1 != symmetrized_num_nonzeros) // { // num_nonzeros_C = symmetrized_num_nonzeros; // } // else // { // cudaMemcpy(&num_nonzeros_C, // thrust::raw_pointer_cast(d_symmetrized_rowptr.data()) + // num_points, // sizeof(int32_t), cudaMemcpyDeviceToHost); // cudaMemcpy(&base_C, // thrust::raw_pointer_cast(d_symmetrized_rowptr.data()), // sizeof(int), cudaMemcpyDeviceToHost); // } // // Allocate memory for the new summed array // d_symmetrized_colind.resize(num_nonzeros_C); // d_symmetrized_values.resize(num_nonzeros_C); // // Sum the arrays // float kAlpha = 1.0f / (2.0f * num_points); // float kBeta = 1.0f / (2.0f * num_points); // cusparseScsrgeam(handle, num_points, num_points, // &kAlpha, matrix_descriptor, num_points * num_neighbors, // csr_values_a, csr_row_ptr_a, csr_column_ptr_a, // &kBeta, matrix_descriptor, num_points * num_neighbors, // csc_values_at, csc_column_ptr_at, csc_row_ptr_at, // matrix_descriptor, // thrust::raw_pointer_cast(d_symmetrized_values.data()), // thrust::raw_pointer_cast(d_symmetrized_rowptr.data()), // thrust::raw_pointer_cast(d_symmetrized_colind.data())); // cudaDeviceSynchronize(); // // Free the memory we were using... // cudaFree(csr_values_a); // cudaFree(csc_values_at); // cudaFree(csr_row_ptr_a); // cudaFree(csc_column_ptr_at); // cudaFree(csc_row_ptr_at); // } __global__ void tsnecuda::util::Csr2CooKernel(volatile int *__restrict__ coo_indices, const int *__restrict__ pij_row_ptr, const int *__restrict__ pij_col_ind, const int num_points, const int num_nonzero) { register int TID, i, j, start, end; TID = threadIdx.x + blockIdx.x * blockDim.x; if (TID >= num_nonzero) return; start = 0; end = num_points + 1; i = (num_points + 1) >> 1; while (end - start > 1) { j = pij_row_ptr[i]; end = (j > TID) ? i : end; start = (j <= TID) ? i : start; i = (start + end) >> 1; } j = pij_col_ind[TID]; coo_indices[2 * TID] = i; coo_indices[2 * TID + 1] = j; } void tsnecuda::util::Csr2Coo(tsnecuda::GpuOptions &gpu_opt, thrust::device_vector<int> &coo_indices, thrust::device_vector<int> &pij_row_ptr, thrust::device_vector<int> &pij_col_ind, const int num_points, const int num_nonzero) { const int num_threads = 1024; const int num_blocks = iDivUp(num_nonzero, num_threads); tsnecuda::util::Csr2CooKernel<<<num_blocks, num_threads>>>(thrust::raw_pointer_cast(coo_indices.data()), thrust::raw_pointer_cast(pij_row_ptr.data()), thrust::raw_pointer_cast(pij_col_ind.data()), num_points, num_nonzero); GpuErrorCheck(cudaDeviceSynchronize()); } __global__ void syv2k( float *const __restrict__ pij, const float *const __restrict__ pij_non_sym, const int *const __restrict__ pij_indices, const int num_points, const int num_neighbors) { register int TID, i, j, jend; register float pij_acc; TID = threadIdx.x + blockIdx.x * blockDim.x; if (TID >= num_points * num_neighbors) return; i = TID / num_neighbors; j = pij_indices[TID]; pij_acc = pij_non_sym[TID]; jend = (j + 1) * num_neighbors; for (register int jidx = j * num_neighbors; jidx < jend; jidx++) pij_acc += pij_indices[jidx] == i ? 
pij_non_sym[jidx] : 0.0f; pij[TID] = pij_acc / (2.0 * num_points); } void tsnecuda::util::SymmetrizeMatrixV2(thrust::device_vector<float> &pij_symmetrized, thrust::device_vector<float> &pij_unsymmetrized, thrust::device_vector<int32_t> &pij_indices, const int num_points, const int num_neighbors) { const int num_threads = 1024; const int num_blocks = iDivUp(num_points * num_neighbors, num_threads); syv2k<<<num_blocks, num_threads>>>(thrust::raw_pointer_cast(pij_symmetrized.data()), thrust::raw_pointer_cast(pij_unsymmetrized.data()), thrust::raw_pointer_cast(pij_indices.data()), num_points, num_neighbors); GpuErrorCheck(cudaDeviceSynchronize()); }
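// CPU reference for the symmetrization performed by the syv2k kernel above (a sketch,
// not part of the tsnecuda API): given a dense k-nearest-neighbor table where row i
// holds the conditional probabilities p_{j|i} for its num_neighbors neighbors, the
// output is p_ij = (p_{j|i} + p_{i|j}) / (2 * num_points), where p_{i|j} contributes
// only if i actually appears in j's neighbor list -- exactly the jidx scan in syv2k.
#include <vector>

std::vector<float> symmetrize_reference(const std::vector<float>& pij_non_sym, // num_points * num_neighbors
                                        const std::vector<int>& pij_indices,   // neighbor ids, same layout
                                        int num_points, int num_neighbors)
{
    std::vector<float> pij(pij_non_sym.size(), 0.0f);
    for (int tid = 0; tid < num_points * num_neighbors; ++tid) {
        int i = tid / num_neighbors;       // owning row
        int j = pij_indices[tid];          // neighbor id
        float acc = pij_non_sym[tid];      // p_{j|i}
        // add p_{i|j} whenever i appears among j's neighbors
        for (int jidx = j * num_neighbors; jidx < (j + 1) * num_neighbors; ++jidx)
            if (pij_indices[jidx] == i)
                acc += pij_non_sym[jidx];
        pij[tid] = acc / (2.0f * num_points);
    }
    return pij;
}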
template<typename IndexType, typename ValueType> void __global__ neighbor_count_kernel(IndexType* tri0, IndexType* tri1, IndexType* tri2, IndexType ne, IndexType* nbcount) { for(int eidx = threadIdx.x; eidx < ne; eidx += gridDim.x * blockDim.x) { IndexType i = tri0[eidx]; IndexType j = tri1[eidx]; IndexType k = tri2[eidx]; atomicInc((unsigned *)nbcount + i, INT_MAX); atomicInc((unsigned *)nbcount + j, INT_MAX); atomicInc((unsigned *)nbcount + k, INT_MAX); } } template<typename IndexType, typename ValueType> void __global__ compute_nb_indices_kernel(IndexType* rowoffsets, IndexType* ele_indices, IndexType *tri0, IndexType* tri1, IndexType* tri2, IndexType nv, IndexType* column_indices, size_t num_cols, size_t pitch) { for(int nidx = threadIdx.x; nidx < nv; nidx += gridDim.x * blockDim.x) { for(int i = 0; i < num_cols; i++) { column_indices[pitch * i + nidx] = -1; } int nedges = 0; for(int j = rowoffsets[nidx]; j < rowoffsets[nidx + 1]; j++) { IndexType jj = ele_indices[j]; IndexType node0 = tri0[jj]; IndexType node1 = tri1[jj]; IndexType node2 = tri2[jj]; if(node0 != nidx) { column_indices[pitch * nedges + nidx] = node0; nedges++; } } } } template<typename IndexType, typename ValueType> void __global__ compute_ele_indices_kernel(IndexType* tri0, IndexType* tri1, IndexType* tri2, IndexType ne, IndexType* rowoffsets, IndexType* ele_indices) { for(int eidx = threadIdx.x; eidx < ne; eidx += gridDim.x * blockDim.x) { IndexType i = tri0[eidx]; IndexType j = tri1[eidx]; IndexType k = tri2[eidx]; IndexType starti = rowoffsets[i]; IndexType startj = rowoffsets[j]; IndexType startk = rowoffsets[k]; IndexType endi = rowoffsets[i + 1]; IndexType endj = rowoffsets[j + 1]; IndexType endk = rowoffsets[k + 1]; for(int n = starti; n < endi; n++) { atomicCAS(ele_indices + n, -1, eidx); break; } for(int n = startj; n < endj; n++) { atomicCAS(ele_indices + n, -1, eidx); break; } for(int n = startk; n < endk; n++) { atomicCAS(ele_indices + n, -1, eidx); } } } //template<> //void trimesh2ell<Matrix_ell_d>(TriMesh* meshPtr, Matrix_ell_d &A) //{ // typedef typename Matrix_ell_d::value_type ValueType; // typedef typename Matrix_ell_d::index_type IndexType; // int nv = meshPtr->vertices.size(); // int ne = meshPtr->faces.size(); // ValueType* x = new ValueType[nv]; // ValueType* y = new ValueType[nv]; // IndexType* tri0 = new IndexType[ne]; // IndexType* tri1 = new IndexType[ne]; // IndexType* tri2 = new IndexType[ne]; // // for(int i = 0; i < nv; i++) // { // x[i] = meshPtr->vertices[i][0]; // y[i] = meshPtr->vertices[i][1]; // } // // for(int i = 0; i < ne; i++) // { // tri0[i] = meshPtr->faces[i][0]; // tri1[i] = meshPtr->faces[i][1]; // tri2[i] = meshPtr->faces[i][2]; // } // // IndexType* d_tri0; // IndexType* d_tri1; // IndexType* d_tri2; // IndexType* d_nbcount; // // IdxVector_d d_rowoffsets(nv+1, 0); // IndexType* d_rowoffsets; // cudaSafeCall(cudaMalloc(&d_tri0, ne * sizeof(IndexType))); // cudaSafeCall(cudaMalloc(&d_tri1, ne * sizeof(IndexType))); // cudaSafeCall(cudaMalloc(&d_tri2, ne * sizeof(IndexType))); // cudaSafeCall(cudaMalloc(&d_nbcount, nv * sizeof(IndexType))); // cudaSafeCall(cudaMalloc(&d_rowoffsets, (nv + 1) * sizeof(IndexType))); // // cudaSafeCall(cudaMemset(d_nbcount, 0, nv)); // // size_t threads = 256; // size_t blocks = min((int)ceil(ne / threads), (int)65535); // neighbor_count_kernel<IndexType, ValueType> << <blocks, threads >> >(d_tri0, d_tri1, d_tri2, ne, d_nbcount); // cudaCheckError(); // // cudaSafeCall(cudaMemset(d_rowoffsets, 0, 1)); // thrust::inclusive_scan(d_nbcount, 
d_nbcount + nv, d_rowoffsets + 1); // out-place scan // // IndexType total; // cudaSafeCall(cudaMemcpy(&total, d_rowoffsets + nv, 1, cudaMemcpyDeviceToHost)); // // // IndexType* d_ele_indices; //stores adjacent elements for each node // IdxVector_d d_ele_indices(total, -1); // // cudaSafeCall(cudaMalloc(&ele_indices, total * sizeof(IndexType))); // // cudaSafeCall(cudaMemset(ele_indices, -1, total * sizeof(IndexType))); // compute_ele_indices_kernel<IndexType, ValueType> << <blocks, threads >> >(d_tri0, d_tri1, d_tri2, ne, d_rowoffsets, thrust::raw_pointer_cast(&d_ele_indices[0])); // cudaCheckError(); // // IndexType* tmpstd::vector = thrust::raw_pointer_cast(&d_ele_indices[0]); // // IndexType maxnumnb = thrust::reduce(d_nbcount, d_nbcount + nv, -1, thrust::maximum<IndexType > ()) * 2; // // A.column_indices.resize(nv, maxnumnb, nv); // A.values.resize(nv, maxnumnb, nv); // A.num_cols = nv; // A.num_rows = nv; // // threads = 256; // blocks = min((int)ceil(nv / threads), (int)65535); // // compute_nb_indices_kernel<IndexType, ValueType> << <blocks, threads >> >(thrust::raw_pointer_cast(&d_rowoffsets[0]), thrust::raw_pointer_cast(&d_ele_indices[0]), d_tri0, d_tri1, d_tri2, nv, thrust::raw_pointer_cast(&A.column_indices.values[0]), A.column_indices.num_cols, A.column_indices.pitch); // // int meiyongde = 0; // int a = meiyongde + 1; //} template<> void tetmesh2ell<Matrix_ell_d_CG>(TetMesh* meshPtr, Matrix_ell_d_CG &A_d, bool verbose) { typedef typename Matrix_ell_d_CG::value_type ValueType; typedef typename Matrix_ell_d_CG::index_type IndexType; const int X = Matrix_ell_d_CG::invalid_index; int nv = meshPtr->vertices.size(); int ne = meshPtr->tets.size(); meshPtr->need_neighbors(); for(int i = 0; i < nv; i++) { std::sort(meshPtr->neighbors[i].begin(), meshPtr->neighbors[i].end()); } int maxsize = 0; int num_entries = 0; for(int i = 0; i < nv; i++) { num_entries += (int)meshPtr->neighbors[i].size(); maxsize = std::max(maxsize, (int)meshPtr->neighbors[i].size()); } num_entries += nv; maxsize += 1; // should include itself if( verbose ) std::cout << "Constructing Matrix_ell_h_CG A... "; Matrix_ell_h_CG A(nv, nv, num_entries, maxsize, 32); if( verbose ) std::cout << "done." << std::endl; //A.resize(nv, nv, num_entries, maxsize, 32); if( verbose ) std::cout << "Adding values to matrix A... "; for(int i = 0; i < nv; i++) { A.column_indices(i, 0) = i; for(int j = 1; j < maxsize; j++) { A.values(i, j) = 0.0; if(j < meshPtr->neighbors[i].size() + 1) { A.column_indices(i, j) = meshPtr->neighbors[i][j - 1]; } else { A.column_indices(i, j) = X; } } } if( verbose ) std::cout << "done." << std::endl; if( verbose ) std::cout << "Copying A to device... "; // A_d = Matrix_ell_d_CG(A); A_d = A; if( verbose ) std::cout << "done." 
<< std::endl; // Matrix_ell_d_CG A_tmp = A; } template<> void trimesh2csr<Matrix_d_CG>(TriMesh* meshPtr, Matrix_d_CG &A_d) { typedef typename Matrix_d_CG::value_type ValueType; typedef typename Matrix_d_CG::index_type IndexType; int nv = meshPtr->vertices.size(); int ne = meshPtr->faces.size(); meshPtr->need_neighbors(); for(int i = 0; i < nv; i++) { std::sort(meshPtr->neighbors[i].begin(), meshPtr->neighbors[i].end()); } int maxsize = 0; int num_entries = 0; for(int i = 0; i < nv; i++) { num_entries += (int)meshPtr->neighbors[i].size(); maxsize = std::max(maxsize, (int)meshPtr->neighbors[i].size()); } num_entries = num_entries / 2 + nv; maxsize += 1; // should include itself std::vector<IndexType> rowoffsets; std::vector<IndexType> idxj; rowoffsets.reserve(nv + 1); idxj.reserve(num_entries); rowoffsets.push_back(0); int count; for(int i = 0; i < nv; i++) { count = 0; idxj.push_back(i); for(int j = 0; j < meshPtr->neighbors[i].size(); j++) { if(meshPtr->neighbors[i][j] > i) { count++; idxj.push_back(meshPtr->neighbors[i][j]); } } rowoffsets.push_back(rowoffsets[i] + count + 1); } int realsz = idxj.size(); std::vector<ValueType> values(realsz, 0.0); Matrix_h_CG A(nv, nv, realsz); A.row_offsets = rowoffsets; A.column_indices = idxj; A.values = values; A_d = A; A.resize(0, 0, 0); } template<> void trimesh2ell<Matrix_ell_d_CG>(TriMesh* meshPtr, Matrix_ell_d_CG &A_d) { typedef typename Matrix_ell_d_CG::value_type ValueType; typedef typename Matrix_ell_d_CG::index_type IndexType; const int X = Matrix_ell_d_CG::invalid_index; int nv = meshPtr->vertices.size(); int ne = meshPtr->faces.size(); meshPtr->need_neighbors(); for(int i = 0; i < nv; i++) { std::sort(meshPtr->neighbors[i].begin(), meshPtr->neighbors[i].end()); } int maxsize = 0; int num_entries = 0; for(int i = 0; i < nv; i++) { num_entries += (int)meshPtr->neighbors[i].size(); maxsize = std::max(maxsize, (int)meshPtr->neighbors[i].size()); } num_entries += nv; maxsize += 1; // should include itself Matrix_ell_h_CG A; A.resize(nv, nv, num_entries, maxsize, 32); for(int i = 0; i < nv; i++) { A.column_indices(i, 0) = i; for(int j = 1; j < maxsize; j++) { A.values(i, j) = 0.0; if(j < meshPtr->neighbors[i].size() + 1) { A.column_indices(i, j) = meshPtr->neighbors[i][j - 1]; } else { A.column_indices(i, j) = X; } } } A_d = A; A.resize(0, 0, 0, 0); // meshPtr->neighbors.clear(); } template<typename IndexType, typename ValueType> __global__ void convert_kernel(IndexType* rowoff1, IndexType* colidx1, ValueType* values1, IndexType* rowidx2, IndexType* colidx2, ValueType* values2, int num_rows) { for(int ridx = blockIdx.x * blockDim.x + threadIdx.x; ridx < num_rows; ridx++) { IndexType start1 = rowoff1[ridx]; IndexType end1 = rowoff1[ridx + 1]; IndexType start2 = start1 * 2 - ridx; rowidx2[start2] = ridx; colidx2[start2] = ridx; values2[start2] = values1[start1]; for(int i = start1 + 1; i < end1; i++) { ValueType v = values1[i]; IndexType col = colidx1[i]; IndexType loc = start2 + 1 + 2 * (i - start1 - 1); rowidx2[loc] = ridx; colidx2[loc] = col; values2[loc] = v; rowidx2[loc + 1] = col; colidx2[loc + 1] = ridx; values2[loc + 1] = v; } } } void convertSym2gen(Matrix_d_CG &Acsr, Matrix_coo_d_CG &Aout) { typedef typename Matrix_d_CG::value_type ValueType; typedef typename Matrix_d_CG::index_type IndexType; int num_entries = Acsr.num_entries; int num_rows = Acsr.num_rows; int num_cols = Acsr.num_cols; Aout.resize(num_rows, num_cols, 2 * num_entries - num_rows); int threads = 256; int blocks = std::min((int)ceil((double)num_rows / threads), 65535); 
IndexType* rowoff1 = thrust::raw_pointer_cast(&Acsr.row_offsets[0]); IndexType* colidx1 = thrust::raw_pointer_cast(&Acsr.column_indices[0]); ValueType* values1 = thrust::raw_pointer_cast(&Acsr.values[0]); IndexType* rowidx2 = thrust::raw_pointer_cast(&Aout.row_indices[0]); IndexType* colidx2 = thrust::raw_pointer_cast(&Aout.column_indices[0]); ValueType* values2 = thrust::raw_pointer_cast(&Aout.values[0]); convert_kernel<IndexType, ValueType> << <blocks, threads >> >(rowoff1, colidx1, values1, rowidx2, colidx2, values2, num_rows); } template <class Matrix, class Vector> void computeResidual(const Matrix& A, const Vector& x, const Vector& b, Vector& r) { cusp::multiply(A, x, r); cusp::blas::axpby(r, b, r, -1, 1); } //template void trimesh2csr<int,float,cusp::device_memory>(const TriMesh* meshPtr, struct cudaCSRGraph& csrgraph); //template void tetmesh2csr<int,float,cusp::device_memory>(const TetMesh* meshPtr, struct cudaCSRGraph& csrgraph); //template void trimesh2csr<int,float,cusp::host_memory>(const TriMesh* meshPtr, struct cudaCSRGraph& csrgraph); //template void tetmesh2csr<int,float,cusp::host_memory>(const TetMesh* meshPtr, struct cudaCSRGraph& csrgraph); template void computeResidual<Matrix_ell_h, Vector_h>( const Matrix_ell_h& A, const Vector_h& x, const Vector_h& b, Vector_h& r); template void computeResidual<Matrix_hyb_d, Vector_d>( const Matrix_hyb_d& A, const Vector_d& x, const Vector_d& b, Vector_d& r);
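// Host-side reference for convert_kernel / convertSym2gen above (a sketch under the
// same layout assumptions): the input CSR stores, per row, the diagonal entry first
// followed by the strictly-upper-triangular entries of a symmetric matrix (as built
// by trimesh2csr). The general COO output therefore has 2*nnz - num_rows entries: one
// diagonal plus a mirrored (i,j)/(j,i) pair per off-diagonal value, with row r's
// block starting at 2*row_offsets[r] - r. Function name is illustrative.
#include <vector>

void sym_csr_to_general_coo(const std::vector<int>& row_offsets,   // size num_rows + 1
                            const std::vector<int>& col_indices,
                            const std::vector<float>& values,
                            int num_rows,
                            std::vector<int>& coo_rows,
                            std::vector<int>& coo_cols,
                            std::vector<float>& coo_vals)
{
    int nnz = row_offsets[num_rows];
    coo_rows.assign(2 * nnz - num_rows, 0);
    coo_cols.assign(2 * nnz - num_rows, 0);
    coo_vals.assign(2 * nnz - num_rows, 0.0f);
    for (int r = 0; r < num_rows; ++r) {
        int start1 = row_offsets[r], end1 = row_offsets[r + 1];
        int start2 = 2 * start1 - r;                     // destination of the diagonal entry
        coo_rows[start2] = r; coo_cols[start2] = r; coo_vals[start2] = values[start1];
        for (int i = start1 + 1; i < end1; ++i) {        // mirror each upper-triangular entry
            int col = col_indices[i];
            int loc = start2 + 1 + 2 * (i - start1 - 1);
            coo_rows[loc]     = r;   coo_cols[loc]     = col; coo_vals[loc]     = values[i];
            coo_rows[loc + 1] = col; coo_cols[loc + 1] = r;   coo_vals[loc + 1] = values[i];
        }
    }
}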
// GaussianSmoothImage.cu // 图像高斯平滑操作,包括普通高斯平滑和带mask的高斯平滑 #include "GaussianSmoothImage.h" #include "ErrorCode.h" // 宏定义,定义了五个高斯平滑尺度对应的权重总和 #define GAUSS_THREE 16 #define GAUSS_FIVE 256 #define GAUSS_SEVEN 4096 #define GAUSS_NINE 65536 #define GAUSS_ELEVEN 1048576 // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块的尺寸。 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // 下列五个核函数为普通高斯平滑核函数 // 1.平滑窗口大小为3*3的高斯平滑函数 static __global__ void gauss3SmoothImage( ImageCuda origiImageGPU, // 原始图像 ImageCuda gaussSmImageGPU, // 平滑后图像 int smLocatX, // 平滑起始横坐标 int smLocatY, // 平滑起始纵坐标 int smWidth, // 平滑窗口宽度 int smHeight // 平滑窗口高度 ); // 2.平滑窗口大小为5*5的高斯平滑函数 static __global__ void gauss5SmoothImage( ImageCuda origiImageGPU, // 原始图像 ImageCuda gaussSmImageGPU, // 平滑后图像 int smLocatX, // 平滑起始横坐标 int smLocatY, // 平滑起始纵坐标 int smWidth, // 平滑窗口宽度 int smHeight // 平滑窗口高度 ); // 3.平滑窗口大小为7*7的高斯平滑函数 static __global__ void gauss7SmoothImage( ImageCuda origiImageGPU, // 原始图像 ImageCuda gaussSmImageGPU, // 平滑后图像 int smLocatX, // 平滑起始横坐标 int smLocatY, // 平滑起始纵坐标 int smWidth, // 平滑窗口宽度 int smHeight // 平滑窗口高度 ); // 4.平滑窗口大小为9*9的高斯平滑函数 static __global__ void gauss9SmoothImage( ImageCuda origiImageGPU, // 原始图像 ImageCuda gaussSmImageGPU, // 平滑后图像 int smLocatX, // 平滑起始横坐标 int smLocatY, // 平滑起始纵坐标 int smWidth, // 平滑窗口宽度 int smHeight // 平滑窗口高度 ); // 5.平滑窗口大小为11*11的高斯平滑函数 static __global__ void gauss11SmoothImage( ImageCuda origiImageGPU, // 原始图像 ImageCuda gaussSmImageGPU, // 平滑后图像 int smLocatX, // 平滑起始横坐标 int smLocatY, // 平滑起始纵坐标 int smWidth, // 平滑窗口宽度 int smHeight // 平滑窗口高度 ); // 下列五个核函数为带mask的高斯平滑函数 // 1.平滑窗口大小为3*3,带mask的高斯平滑函数 static __global__ void gauss3SmoothImage( ImageCuda origiImageGPU, // 原始图像 ImageCuda gaussSmImageGPU, // 平滑后图像 int smLocatX, // 平滑起始横坐标 int smLocatY, // 平滑起始纵坐标 int smWidth, // 平滑窗口宽度 int smHeight, // 平滑窗口高度 ImageCuda maskImageGPU, // mask图像 unsigned char mask // mask值 ); // 2.平滑窗口大小为5*5,带mask的高斯平滑函数 static __global__ void gauss5SmoothImage( ImageCuda origiImageGPU, // 原始图像 ImageCuda gaussSmImageGPU, // 平滑后图像 int smLocatX, // 平滑起始横坐标 int smLocatY, // 平滑起始纵坐标 int smWidth, // 平滑窗口宽度 int smHeight, // 平滑窗口高度 ImageCuda maskImageGPU, // mask图像 unsigned char mask // mask值 ); // 3.平滑窗口大小为7*7,带mask的高斯平滑函数 static __global__ void gauss7SmoothImage( ImageCuda origiImageGPU, // 原始图像 ImageCuda gaussSmImageGPU, // 平滑后图像 int smLocatX, // 平滑起始横坐标 int smLocatY, // 平滑起始纵坐标 int smWidth, // 平滑窗口宽度 int smHeight, // 平滑窗口高度 ImageCuda maskImageGPU, // mask图像 unsigned char mask // mask值 ); // 4.平滑窗口大小为9*9,带mask的高斯平滑函数 static __global__ void gauss9SmoothImage( ImageCuda const origiImageGPU , // 原始图像 ImageCuda gaussSmImageGPU, // 平滑后图像 int smLocatX, // 平滑起始横坐标 int smLocatY, // 平滑起始纵坐标 int smWidth, // 平滑窗口宽度 int smHeight, // 平滑窗口高度 ImageCuda maskImageGPU, // mask图像 unsigned char mask // mask值 ); // 5.平滑窗口大小为11*11,带mask的高斯平滑函数 static __global__ void gauss11SmoothImage( ImageCuda origiImageGPU, // 原始图像 ImageCuda gaussSmImageGPU, // 平滑后图像 int smLocatX, // 平滑起始横坐标 int smLocatY, // 平滑起始纵坐标 int smWidth, // 平滑窗口宽度 int smHeight, // 平滑窗口高度 ImageCuda maskImageGPU, // mask图像 unsigned char mask // mask值 ); // 平滑窗口大小为7*7的高斯平滑函数实现 static __global__ void gauss7SmoothImage(ImageCuda origiImageGPU, ImageCuda gaussSmImageGPU, int smLocatX, int smLocatY, int smWidth, int smHeight) { // 获取pixel在原图像中的位置 int w = origiImageGPU.pitchBytes; int x = blockIdx.x * blockDim.x + threadIdx.x + smLocatX; int y = blockIdx.y * blockDim.y + threadIdx.y + smLocatY; // 检查像素点是否越界,如果越界,则不进行处理,一方面节省计算资 // 源,一方面防止由于段错误导致的程序崩溃。 if(x >= smLocatX + smWidth || y >= smLocatY + smHeight) return 
; // 高斯平滑系数数组 int GF[7] = {1, 6, 15, 20, 15, 6, 1}; // 高斯卷积累加和 int c = 0; // 编译预处理,在编译阶段将循环展开,节约循环跳转时间 #pragma unroll for(int i = 0; i < 7;i++) { // 编译预处理,在编译阶段将循环展开,节约循环跳转时间 #pragma unroll for(int j = 0; j < 7;j++) c += GF[i] * GF[j] * origiImageGPU.imgMeta.imgData[(y + i - 3) * w + (x + j - 3)]; } // 计算平滑后像素值,结果四舍五入 gaussSmImageGPU.imgMeta.imgData[y * w + x] = 1.0 * c / GAUSS_SEVEN + 0.5f; } // 平滑窗口大小为5*5的高斯平滑函数实现 static __global__ void gauss5SmoothImage(ImageCuda origiImageGPU, ImageCuda gaussSmImageGPU, int smLocatX, int smLocatY, int smWidth, int smHeight) { // 获取pixel在原图像中的位置 int w = origiImageGPU.pitchBytes; int x = blockIdx.x * blockDim.x + threadIdx.x + smLocatX; int y = blockIdx.y * blockDim.y + threadIdx.y + smLocatY; // 检查像素点是否越界,如果越界,则不进行处理,一方面节省计算资 // 源,一方面防止由于段错误导致的程序崩溃。 if(x >= smLocatX + smWidth || y >= smLocatY + smHeight) return ; // 高斯平滑系数数组 int GF[5] = {1, 4, 6, 4, 1}; // 高斯卷积累加和 int c = 0; // 编译预处理,在编译阶段将循环展开,节约循环跳转时间 #pragma unroll for(int i = 0; i < 5;i++) { // 编译预处理,在编译阶段将循环展开,节约循环跳转时间 #pragma unroll for(int j = 0; j < 5;j++) c += GF[i] * GF[j] * origiImageGPU.imgMeta.imgData[(y + i - 2) * w + (x + j - 2)]; } // 计算平滑后像素值,结果四舍五入 gaussSmImageGPU.imgMeta.imgData[y * w + x] = 1.0 * c / GAUSS_FIVE + 0.5f; } // 平滑窗口大小为9*9的高斯平滑函数实现 static __global__ void gauss9SmoothImage(ImageCuda origiImageGPU, ImageCuda gaussSmImageGPU, int smLocatX, int smLocatY, int smWidth, int smHeight) { // 获取pixel在原图像中的位置 int w = origiImageGPU.pitchBytes; int x = blockIdx.x * blockDim.x + threadIdx.x + smLocatX; int y = blockIdx.y * blockDim.y + threadIdx.y + smLocatY; // 检查像素点是否越界,如果越界,则不进行处理,一方面节省计算资 // 源,一方面防止由于段错误导致的程序崩溃。 if(x >= smLocatX + smWidth || y >= smLocatY + smHeight) return ; // 高斯平滑系数数组 const int GF[9] = {1, 8, 28, 56, 70, 56, 28, 8, 1}; // 高斯卷积累加和 int c = 0; // 编译预处理,在编译阶段将循环展开,节约循环跳转时间 #pragma unroll for(int i = 0; i < 9;i++) { // 编译预处理,在编译阶段将循环展开,节约循环跳转时间 #pragma unroll for(int j = 0; j < 9; j++) c += GF[i] * GF[j] * origiImageGPU.imgMeta.imgData[(y + i - 4) * w + (x + j - 4)]; } // 计算平滑后像素值,结果四舍五入 gaussSmImageGPU.imgMeta.imgData[y * w + x] = 1.0 * c / GAUSS_NINE + 0.5f; } // 平滑窗口大小为11*11的高斯平滑函数实现 static __global__ void gauss11SmoothImage(ImageCuda origiImageGPU, ImageCuda gaussSmImageGPU, int smLocatX, int smLocatY, int smWidth, int smHeight) { // 获取pixel在原图像中的位置 int w = origiImageGPU.pitchBytes; int x = blockIdx.x * blockDim.x + threadIdx.x + smLocatX; int y = blockIdx.y * blockDim.y + threadIdx.y + smLocatY; // 检查像素点是否越界,如果越界,则不进行处理,一方面节省计算资 // 源,一方面防止由于段错误导致的程序崩溃。 if(x >= smLocatX + smWidth || y >= smLocatY + smHeight) return ; // 高斯平滑系数数组 int GF[11] = {1, 10, 45, 120, 210, 252, 210, 120, 45, 10, 1}; // 高斯卷积累加和 int c = 0; // 编译预处理,在编译阶段将循环展开,节约循环跳转时间 #pragma unroll for(int i = 0; i < 11;i++) { // 编译预处理,在编译阶段将循环展开,节约循环跳转时间 #pragma unroll for(int j = 0; j < 11;j++) c += GF[i] * GF[j] * origiImageGPU.imgMeta.imgData[(y + i - 5) * w + (x + j - 5)]; } // 计算平滑后像素值,结果四舍五入 gaussSmImageGPU.imgMeta.imgData[y * w + x] = 1.0 * c / GAUSS_ELEVEN + 0.5f; } // 平滑窗口大小为3*3的高斯平滑函数实现 static __global__ void gauss3SmoothImage(ImageCuda origiImageGPU, ImageCuda gaussSmImageGPU, int smLocatX, int smLocatY, int smWidth, int smHeight) { // 获取pixel在原图像中的位置 int w = origiImageGPU.pitchBytes; int x = blockIdx.x * blockDim.x + threadIdx.x + smLocatX; int y = blockIdx.y * blockDim.y + threadIdx.y + smLocatY; // 检查像素点是否越界,如果越界,则不进行处理,一方面节省计算资 // 源,一方面防止由于段错误导致的程序崩溃。 if(x >= smLocatX + smWidth || y >= smLocatY + smHeight) return ; // 高斯平滑系数数组 int GF[3] = {1, 2, 1}; // 高斯卷积累加和 int c = 0; // 
编译预处理,在编译阶段将循环展开,节约循环跳转时间 #pragma unroll for(int i = 0; i < 3;i++) { // 编译预处理,在编译阶段将循环展开,节约循环跳转时间 #pragma unroll for(int j = 0; j < 3; j++) c += GF[i] * GF[j] * origiImageGPU.imgMeta.imgData[(y + i - 1) * w + (x + j - 1)]; } // 计算平滑后像素值,结果四舍五入 gaussSmImageGPU.imgMeta.imgData[y * w + x] = 1.0 * c / GAUSS_THREE + 0.5f ; } // 平滑窗口大小为7*7的,带mask高斯平滑函数实现 static __global__ void gauss7SmoothImage(ImageCuda origiImageGPU, ImageCuda gaussSmImageGPU, int smLocatX, int smLocatY, int smWidth, int smHeight, ImageCuda maskImageGPU, unsigned char mask) { // 获取pixel在原图像中的位置 int w = origiImageGPU.pitchBytes; int x = blockIdx.x * blockDim.x + threadIdx.x + smLocatX; int y = blockIdx.y * blockDim.y + threadIdx.y + smLocatY; // 如果mask图像像素值不等于mask则不处理 if (maskImageGPU.imgMeta.imgData[y * w + x] != mask) return ; // 检查像素点是否越界,如果越界,则不进行处理,一方面节省计算资 // 源,一方面防止由于段错误导致的程序崩溃。 if(x >= smLocatX + smWidth || y >= smLocatY + smHeight) return ; // 获取mask图像数据 unsigned char * maskImg = maskImageGPU.imgMeta.imgData; // 高斯平滑系数数组 int gf[7] = {1, 6, 15, 20, 15, 6, 1}; // 高斯卷积累加和 int c = 0; // 参加计算的像素点权重总和wsum,当前权重wgh int wsum = 0, wgh; // 图像像素索引 int mIdx; // 编译预处理,在编译阶段将循环展开,节约循环跳转时间 #pragma unroll for(int i = 0; i < 7; i++){ // 编译预处理,在编译阶段将循环展开,节约循环跳转时间 #pragma unroll for(int j = 0; j < 7; j++) { // 获取图像像素索引 mIdx=(y + i - 3) * w + (x + j - 3); // 只处理mask图像像素值等于mask值的像素点 if (maskImg[mIdx] == mask) { // 计算当前像素点的权重 wgh = gf[i] * gf[j]; // 当前像素点的权重累加到总权重中 wsum += wgh ; // 计算像素值加权累加和 c += wgh * origiImageGPU.imgMeta.imgData[mIdx]; } } } // 计算平滑后像素值,结果四舍五入 gaussSmImageGPU.imgMeta.imgData[y * w + x] = 1.0 * c / wsum + 0.5f; } // 平滑窗口大小为5*5的,带mask高斯平滑函数实现 static __global__ void gauss5SmoothImage(ImageCuda origiImageGPU, ImageCuda gaussSmImageGPU, int smLocatX, int smLocatY, int smWidth, int smHeight, ImageCuda maskImageGPU, unsigned char mask) { // 获取pixel在原图像中的位置 int w = origiImageGPU.pitchBytes; int x = blockIdx.x * blockDim.x + threadIdx.x + smLocatX; int y = blockIdx.y * blockDim.y + threadIdx.y + smLocatY; // 如果mask图像像素值不等于mask则不处理 if (maskImageGPU.imgMeta.imgData[y * w + x] != mask) return ; // 检查像素点是否越界,如果越界,则不进行处理,一方面节省计算资 // 源,一方面防止由于段错误导致的程序崩溃。 if(x >= smLocatX + smWidth || y >= smLocatY + smHeight) return ; // 获取mask图像数据 unsigned char * maskImg = maskImageGPU.imgMeta.imgData; // 高斯平滑系数数组 int gf[5] = {1, 4, 6, 4, 1}; // 高斯卷积累加和 int c = 0; // 参加计算的像素点权重总和wsum,当前权重wgh int wsum = 0, wgh; // 图像像素索引 int mIdx; // 编译预处理,在编译阶段将循环展开,节约循环跳转时间 #pragma unroll for(int i = 0; i < 5; i++){ // 编译预处理,在编译阶段将循环展开,节约循环跳转时间 #pragma unroll for(int j = 0; j < 5; j++) { // 获取图像像素索引 mIdx=(y + i - 2) * w + (x + j - 2); // 只处理mask图像像素值等于mask值的像素点 if (maskImg[mIdx] == mask) { // 计算当前像素点的权重 wgh = gf[i] * gf[j]; // 当前像素点的权重累加到总权重中 wsum += wgh ; // 计算像素值加权累加和 c += wgh * origiImageGPU.imgMeta.imgData[mIdx]; } } } // 计算平滑后像素值,结果四舍五入 gaussSmImageGPU.imgMeta.imgData[y * w + x] = 1.0 * c / wsum + 0.5f; } // 平滑窗口大小为9*9的,带mask高斯平滑函数实现 static __global__ void gauss9SmoothImage(ImageCuda origiImageGPU, ImageCuda gaussSmImageGPU, int smLocatX, int smLocatY, int smWidth, int smHeight, ImageCuda maskImageGPU, unsigned char mask) { // 获取pixel在原图像中的位置 int w = origiImageGPU.pitchBytes; int x = blockIdx.x * blockDim.x + threadIdx.x + smLocatX; int y = blockIdx.y * blockDim.y + threadIdx.y + smLocatY; // 如果mask图像像素值不等于mask则不处理 if (maskImageGPU.imgMeta.imgData[y * w + x] != mask) return ; // 检查像素点是否越界,如果越界,则不进行处理,一方面节省计算资 // 源,一方面防止由于段错误导致的程序崩溃。 if(x >= smLocatX + smWidth || y >= smLocatY + smHeight) return ; // 获取mask图像数据 unsigned char * maskImg = 
maskImageGPU.imgMeta.imgData; // 高斯平滑系数数组 int gf[9] = {1, 8, 28, 56, 70, 56, 28, 8, 1}; // 高斯卷积累加和 int c = 0; // 参加计算的像素点权重总和wsum,当前权重wgh int wsum = 0, wgh; // 图像像素索引 int mIdx; // 编译预处理,在编译阶段将循环展开,节约循环跳转时间 #pragma unroll for(int i = 0; i < 9; i++){ // 编译预处理,在编译阶段将循环展开,节约循环跳转时间 #pragma unroll for(int j = 0; j < 9; j++) { // 获取图像像素索引 mIdx=(y + i - 4) * w + (x + j - 4); // 只处理mask图像像素值等于mask值的像素点 if (maskImg[mIdx] == mask) { // 计算当前像素点的权重 wgh = gf[i] * gf[j]; // 当前像素点的权重累加到总权重中 wsum += wgh ; // 计算像素值加权累加和 c += wgh * origiImageGPU.imgMeta.imgData[mIdx]; } } } // 计算平滑后像素值,结果四舍五入 gaussSmImageGPU.imgMeta.imgData[y * w + x] = 1.0 * c / wsum + 0.5f; } // 平滑窗口大小为11*11的,带mask高斯平滑函数实现 static __global__ void gauss11SmoothImage(ImageCuda origiImageGPU, ImageCuda gaussSmImageGPU, int smLocatX, int smLocatY, int smWidth, int smHeight, ImageCuda maskImageGPU, unsigned char mask) { // 获取pixel在原图像中的位置 int w = origiImageGPU.pitchBytes; int x = blockIdx.x * blockDim.x + threadIdx.x + smLocatX; int y = blockIdx.y * blockDim.y + threadIdx.y + smLocatY; // 如果mask图像像素值不等于mask则不处理 if (maskImageGPU.imgMeta.imgData[y * w + x] != mask) return ; // 检查像素点是否越界,如果越界,则不进行处理,一方面节省计算资 // 源,一方面防止由于段错误导致的程序崩溃。 if(x >= smLocatX + smWidth || y >= smLocatY + smHeight) return ; // 获取mask图像数据 unsigned char * maskImg = maskImageGPU.imgMeta.imgData; // 高斯平滑系数数组 int gf[11] = {1, 10, 45, 120, 210, 252, 210, 120, 45, 10, 1}; // 高斯卷积累加和 int c = 0; // 参加计算的像素点权重总和wsum,当前权重wgh int wsum = 0, wgh; // 图像像素索引 int mIdx; // 编译预处理,在编译阶段将循环展开,节约循环跳转时间 #pragma unroll for(int i = 0; i < 11; i++) { // 编译预处理,在编译阶段将循环展开,节约循环跳转时间 #pragma unroll for(int j = 0; j < 11; j++) { // 获取图像像素索引 mIdx=(y + i - 5) * w + (x + j - 5); // 只处理mask图像像素值等于mask值的像素点 if (maskImg[mIdx] == mask) { // 计算当前像素点的权重 wgh = gf[i] * gf[j]; // 当前像素点的权重累加到总权重中 wsum += wgh ; // 计算像素值加权累加和 c += wgh * origiImageGPU.imgMeta.imgData[mIdx]; } } } // 计算平滑后像素值,结果四舍五入 gaussSmImageGPU.imgMeta.imgData[y * w + x] = 1.0 * c / wsum + 0.5f; } // 平滑窗口大小为3*3的,带mask高斯平滑函数实现 static __global__ void gauss3SmoothImage(ImageCuda origiImageGPU, ImageCuda gaussSmImageGPU, int smLocatX, int smLocatY, int smWidth, int smHeight, ImageCuda maskImageGPU, unsigned char mask) { // 获取pixel在原图像中的位置 int w = origiImageGPU.pitchBytes; int x = blockIdx.x * blockDim.x + threadIdx.x + smLocatX; int y = blockIdx.y * blockDim.y + threadIdx.y + smLocatY; // 如果mask图像像素值不等于mask则不处理 if (maskImageGPU.imgMeta.imgData[y * w + x] != mask) return ; // 检查像素点是否越界,如果越界,则不进行处理,一方面节省计算资 // 源,一方面防止由于段错误导致的程序崩溃。 if(x >= smLocatX + smWidth || y >= smLocatY + smHeight) return ; // 获取mask图像数据 unsigned char * maskImg = maskImageGPU.imgMeta.imgData; // 高斯平滑系数数组 int gf[3] = {1, 2, 1}; // 高斯卷积累加和 int c = 0; // 参加计算的像素点权重总和wsum,当前权重wgh int wsum = 0, wgh; // 图像像素索引 int mIdx; // 编译预处理,在编译阶段将循环展开,节约循环跳转时间 #pragma unroll for(int i = 0; i < 3; i++) { // 编译预处理,在编译阶段将循环展开,节约循环跳转时间 #pragma unroll for(int j = 0; j < 3; j++) { // 获取图像像素索引 mIdx=(y + i - 1) * w + (x + j - 1); // 只处理mask图像像素值等于mask值的像素点 if (maskImg[mIdx] == mask) { // 计算当前像素点的权重 wgh = gf[i] * gf[j]; // 当前像素点的权重累加到总权重中 wsum += wgh ; // 计算像素值加权累加和 c += wgh * origiImageGPU.imgMeta.imgData[mIdx]; } } } // 计算平滑后像素值,结果四舍五入 gaussSmImageGPU.imgMeta.imgData[y * w + x] = 1.0 * c / wsum + 0.5f; } // 普通高斯平滑函数 __host__ int GaussSmoothImage::gaussSmoothImage(Image* origiImage, int smWidth, int smHeight, int smLocatX, int smLocatY, int smWindowSize, Image* gaussSmImage) { // 局部变量,错误码。 int errcode; // 输入输出图像指针不能为空 if (origiImage == NULL || gaussSmImage == NULL) return NULL_POINTER; // 获取图像尺寸信息 int imgWidth = 
origiImage->width; int imgHeight = origiImage->height; // 图像小于平滑范围 if (imgWidth < smWidth || imgHeight < smHeight) return -11; // 平滑范围小于最大平滑窗口大小 if (smWidth < 11 || smHeight < 11) return -12; // 输入的平滑窗口大小不在处理范围之内 if (smWindowSize < 3 || smWindowSize > 11) return -13; // 平滑计算所涉及data位置或范围不能超出原始图像的物理范围, // 故应根据smWindowSize作适当调整。 int marginOff = (smWindowSize + 1) >> 1; int leftMargin = smLocatX - marginOff; int rightMargin = imgWidth - smLocatX - smWidth - marginOff; int topMargin = smLocatY - marginOff; int bottomMargin = imgHeight - smLocatY - smHeight - marginOff; // 平滑时将发生左侧出界 if (leftMargin < 0) { smLocatX -= leftMargin; smWidth += leftMargin; } // 平滑时将发生右侧出界 if (rightMargin < 0) { smWidth += rightMargin; } // 平滑宽度小于1 if (smWidth < 1) return -14; // 平滑时将发生上方出界 if (topMargin < 0) { smLocatY -= topMargin; smHeight += topMargin; } // 平滑时将发生下方出界 if (bottomMargin < 0) { smHeight += bottomMargin; } // 平滑高度小于1 if (smHeight < 1) return -15; // 将输入图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(origiImage); if (errcode != NO_ERROR) { return errcode; } // 将输出图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(gaussSmImage); if (errcode != NO_ERROR) { return errcode; } // 提取输入图像。 ImageCuda origiImageGPU; errcode = ImageBasicOp::roiSubImage(origiImage, &origiImageGPU); if (errcode != NO_ERROR) { return errcode; } // 提取输出图像。 ImageCuda gaussSmImageGPU; errcode = ImageBasicOp::roiSubImage(gaussSmImage, &gaussSmImageGPU); if (errcode != NO_ERROR) { return errcode; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 gridSize,blockSize; blockSize.x = DEF_BLOCK_X; blockSize.y = DEF_BLOCK_Y; gridSize.x = (smWidth + blockSize.x - 1) / blockSize.x; gridSize.y = (smHeight + blockSize.y - 1) / blockSize.y; // 根据平滑窗口大小选择对应的核函数 // 按照委托方要求,顺序为7、5、9、11、3 switch (smWindowSize) { case 7: // 启动平滑窗口大小为7的核函数 gauss7SmoothImage<<<gridSize, blockSize>>>(origiImageGPU, gaussSmImageGPU, smLocatX, smLocatY, smWidth, smHeight); // 核函数出错 if (cudaGetLastError() != cudaSuccess) { return CUDA_ERROR; } break; case 5: // 启动平滑窗口大小为5的核函数 gauss5SmoothImage<<<gridSize, blockSize>>>(origiImageGPU, gaussSmImageGPU, smLocatX, smLocatY, smWidth, smHeight); // 核函数出错 if (cudaGetLastError() != cudaSuccess) { return CUDA_ERROR; } break; case 9: // 启动平滑窗口大小为9的核函数 gauss9SmoothImage<<<gridSize, blockSize>>>(origiImageGPU, gaussSmImageGPU, smLocatX, smLocatY, smWidth, smHeight); // 核函数出错 if (cudaGetLastError() != cudaSuccess) { return CUDA_ERROR; } break; case 11: // 启动平滑窗口大小为11的核函数 gauss11SmoothImage<<<gridSize, blockSize>>>(origiImageGPU, gaussSmImageGPU, smLocatX, smLocatY, smWidth, smHeight); // 核函数出错 if (cudaGetLastError() != cudaSuccess) { return CUDA_ERROR; } break; default: // 启动平滑窗口大小为3的核函数 gauss3SmoothImage<<<gridSize,blockSize>>>(origiImageGPU, gaussSmImageGPU, smLocatX, smLocatY, smWidth, smHeight); // 核函数出错 if (cudaGetLastError() != cudaSuccess) { return CUDA_ERROR; } break; } return NO_ERROR; } // 带mask的高斯平滑函数 __host__ int GaussSmoothImage::gaussSmoothImage(Image* origiImage, int smWidth, int smHeight, int smLocatX, int smLocatY, int smWindowSize, Image* gaussSmImage, Image* maskImage, unsigned char mask) { // 局部变量,错误码。 int errcode; // 获取图像尺寸信息 int imgWidth = origiImage->width; int imgHeight = origiImage->height; // 图像小于平滑范围 if (imgWidth < smWidth || imgHeight < smHeight) return -11; // 平滑范围小于最大平滑窗口 if (smWidth < 11 || smHeight < 11) return -12; // 输入的平滑窗口大小不在可处理范围之内 if (smWindowSize < 3 || smWindowSize > 11) return -13; // 平滑计算所涉及data位置或范围不能超出原始图像的物理范围, // 故应根据smWindowSize作适当调整。 int marginOff = (smWindowSize + 1) >> 1; 
int leftMargin = smLocatX - marginOff; int rightMargin = imgWidth - smLocatX - smWidth - marginOff; int topMargin = smLocatY - marginOff; int bottomMargin = imgHeight - smLocatY - smHeight - marginOff; // 平滑时将发生左侧出界 if (leftMargin < 0) { smLocatX -= leftMargin; smWidth += leftMargin; } // 平滑时将发生右侧出界 if (rightMargin < 0) { smWidth += rightMargin; } // 平滑宽度小于1 if (smWidth < 1) return -14; // 平滑时将发生上方出界 if (topMargin < 0) { smLocatY -= topMargin; smHeight += topMargin; } // 平滑时将发生下方出界 if (bottomMargin < 0) { smHeight += bottomMargin; } // 平滑高度小于1 if (smHeight < 1) return -15; // 将输入图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(origiImage); if (errcode != NO_ERROR) { return errcode; } // 将输出图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(gaussSmImage); if (errcode != NO_ERROR) { return errcode; } // 将mask图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(maskImage); if (errcode != NO_ERROR) { return errcode; } // 提取输入图像。 ImageCuda origiImageGPU; errcode = ImageBasicOp::roiSubImage(origiImage, &origiImageGPU); if (errcode != NO_ERROR) { return errcode; } // 提取输出图像。 ImageCuda gaussSmImageGPU; errcode = ImageBasicOp::roiSubImage(gaussSmImage, &gaussSmImageGPU); if (errcode != NO_ERROR) { return errcode; } // 提取mask图像。 ImageCuda maskImageGPU; errcode = ImageBasicOp::roiSubImage(maskImage, &maskImageGPU); if (errcode != NO_ERROR) { return errcode; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 gridSize,blockSize; blockSize.x = DEF_BLOCK_X; blockSize.y = DEF_BLOCK_Y; gridSize.x = (smWidth + blockSize.x - 1) / blockSize.x; gridSize.y = (smHeight + blockSize.y - 1) / blockSize.y; // 根据平滑窗口大小选择对应的核函数 // 按照委托方要求,顺序为7、5、9、11、3 switch (smWindowSize) { case 7: // 启动平滑窗口大小为7的核函数 gauss7SmoothImage<<<gridSize, blockSize>>>(origiImageGPU, gaussSmImageGPU, smLocatX, smLocatY, smWidth, smHeight, maskImageGPU, mask); // 核函数出错 if (cudaGetLastError() != cudaSuccess) { return CUDA_ERROR; } break; case 5: // 启动平滑窗口大小为5的核函数 gauss5SmoothImage<<<gridSize, blockSize>>>(origiImageGPU, gaussSmImageGPU, smLocatX, smLocatY, smWidth, smHeight, maskImageGPU, mask); // 核函数出错 if (cudaGetLastError() != cudaSuccess) { return CUDA_ERROR; } break; case 9: // 每个窗口纵向线程块数目 gauss9SmoothImage<<<gridSize, blockSize>>>(origiImageGPU, gaussSmImageGPU, smLocatX, smLocatY, smWidth, smHeight, maskImageGPU, mask); // 核函数出错 if (cudaGetLastError() != cudaSuccess) { return CUDA_ERROR; } break; case 11: // 启动平滑窗口大小为11的核函数 gauss11SmoothImage<<<gridSize, blockSize>>>(origiImageGPU, gaussSmImageGPU, smLocatX, smLocatY, smWidth, smHeight, maskImageGPU, mask); // 核函数出错 if (cudaGetLastError() != cudaSuccess) { return CUDA_ERROR; } break; default: // 启动平滑窗口大小为3的核函数 gauss3SmoothImage<<<gridSize, blockSize>>>(origiImageGPU, gaussSmImageGPU, smLocatX, smLocatY, smWidth, smHeight, maskImageGPU, mask); // 核函数出错 if (cudaGetLastError() != cudaSuccess) { return CUDA_ERROR; } break; } return NO_ERROR; }
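// The smoothing kernels above use rows of Pascal's triangle as separable Gaussian
// weights: the 2D weight at (i, j) is GF[i] * GF[j], and the normalizers GAUSS_THREE
// ... GAUSS_ELEVEN equal (sum of the row)^2 = 4^(n-1), i.e. 16, 256, 4096, 65536,
// 1048576 for window sizes 3, 5, 7, 9, 11. A minimal host-side check of that
// relationship (illustrative helper names, not part of the module above):
#include <cassert>
#include <vector>

std::vector<int> binomial_row(int n)            // n-tap row, e.g. n = 5 -> {1, 4, 6, 4, 1}
{
    std::vector<int> row(n, 1);
    for (int i = 1; i < n; ++i)
        for (int j = i - 1; j > 0; --j)
            row[j] += row[j - 1];
    return row;
}

int separable_weight_sum(int n)                 // sum over i, j of GF[i] * GF[j]
{
    std::vector<int> gf = binomial_row(n);
    int s = 0;
    for (int v : gf) s += v;
    return s * s;                               // == (2^(n-1))^2 == 4^(n-1)
}

void check_gauss_normalizers()
{
    assert(separable_weight_sum(3)  == 16);      // GAUSS_THREE
    assert(separable_weight_sum(5)  == 256);     // GAUSS_FIVE
    assert(separable_weight_sum(7)  == 4096);    // GAUSS_SEVEN
    assert(separable_weight_sum(9)  == 65536);   // GAUSS_NINE
    assert(separable_weight_sum(11) == 1048576); // GAUSS_ELEVEN
}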
#include "util.cuh" #include "operator.cuh" #include "rspmm.h" namespace at { // Memory & time efficient implementation of generalized spmm // Much of the code is inspired by GE-SpMM // https://github.com/hgyhungry/ge-spmm namespace { const int kCoarseningFactor = 2; const int kThreadPerBlock = 256; } // namespace anonymous template <class scalar_t, class NaryOp, class BinaryOp> __global__ void rspmm_forward_out_cuda(const int64_t *row_ptr, const int64_t *col_ind, const int64_t *layer_ind, const scalar_t *value, const scalar_t *relation, const scalar_t *input, scalar_t *output, int64_t num_row, int64_t nnz, int64_t dim) { // for best optimization, the following code is compiled with constant warpSize assert(blockDim.x == warpSize); extern __shared__ int64_t buffer[]; int64_t *col_ind_buf = buffer; int64_t *layer_ind_buf = buffer + blockDim.y * warpSize; scalar_t *value_buf = reinterpret_cast<scalar_t *>(layer_ind_buf + blockDim.y * warpSize); col_ind_buf += threadIdx.y * warpSize; layer_ind_buf += threadIdx.y * warpSize; value_buf += threadIdx.y * warpSize; int64_t row = blockIdx.x * blockDim.y + threadIdx.y; if (row >= num_row) return; int64_t d_start = blockIdx.y * warpSize * kCoarseningFactor + threadIdx.x; int64_t ptr_start = row_ptr[row]; int64_t ptr_end = row + 1 < num_row ? row_ptr[row + 1] : nnz; scalar_t out[kCoarseningFactor]; #pragma unroll for (int64_t i = 0; i < kCoarseningFactor; i++) out[i] = NaryOp::zero; for (int64_t block_ptr = ptr_start; block_ptr < ptr_end; block_ptr += warpSize) { int64_t ptr = block_ptr + threadIdx.x; if (ptr < ptr_end) { col_ind_buf[threadIdx.x] = col_ind[ptr]; layer_ind_buf[threadIdx.x] = layer_ind[ptr]; value_buf[threadIdx.x] = value[ptr]; } __syncwarp(); int64_t max_offset = warpSize < ptr_end - block_ptr ? warpSize : ptr_end - block_ptr; for (int64_t offset_ptr = 0; offset_ptr < max_offset; offset_ptr++) { int64_t col = col_ind_buf[offset_ptr]; int64_t layer = layer_ind_buf[offset_ptr]; scalar_t val = value_buf[offset_ptr]; #pragma unroll for (int64_t i = 0; i < kCoarseningFactor; i++) { int64_t d = d_start + i * warpSize; if (d >= dim) break; scalar_t x = BinaryOp::forward(relation[layer * dim + d], input[col * dim + d]); scalar_t y = val * x; out[i] = NaryOp::forward(out[i], y); } } __syncwarp(); } #pragma unroll for (int64_t i = 0; i < kCoarseningFactor; i++) { int64_t d = d_start + i * warpSize; if (d >= dim) break; output[row * dim + d] = out[i]; } } template <class scalar_t, class NaryOp, class BinaryOp> __global__ void rspmm_backward_out_cuda(const int64_t *row_ptr, const int64_t *col_ind, const int64_t *layer_ind, const scalar_t *value, const scalar_t *relation, const scalar_t *input, const scalar_t *output, const scalar_t *output_grad, scalar_t *value_grad, scalar_t *relation_grad, scalar_t *input_grad, int64_t num_row, int64_t nnz, int64_t dim) { // for best optimization, the following code is compiled with constant warpSize assert(blockDim.x == warpSize); extern __shared__ int64_t buffer[]; int64_t *col_ind_buf = buffer; int64_t *layer_ind_buf = col_ind_buf + blockDim.y * warpSize; scalar_t *value_buf = reinterpret_cast<scalar_t *>(layer_ind_buf + blockDim.y * warpSize); col_ind_buf += threadIdx.y * warpSize; layer_ind_buf += threadIdx.y * warpSize; value_buf += threadIdx.y * warpSize; int64_t row = blockIdx.x * blockDim.y + threadIdx.y; if (row >= num_row) return; int64_t d_start = blockIdx.y * warpSize * kCoarseningFactor + threadIdx.x; int64_t ptr_start = row_ptr[row]; int64_t ptr_end = row + 1 < num_row ? 
row_ptr[row + 1] : nnz; for (int64_t block_ptr = ptr_start; block_ptr < ptr_end; block_ptr += warpSize) { int64_t ptr = block_ptr + threadIdx.x; if (ptr < ptr_end) { col_ind_buf[threadIdx.x] = col_ind[ptr]; layer_ind_buf[threadIdx.x] = layer_ind[ptr]; value_buf[threadIdx.x] = value[ptr]; } __syncwarp(); int64_t max_offset = warpSize < ptr_end - block_ptr ? warpSize : ptr_end - block_ptr; for (int64_t offset_ptr = 0; offset_ptr < max_offset; offset_ptr++) { int64_t col = col_ind_buf[offset_ptr]; int64_t layer = layer_ind_buf[offset_ptr]; scalar_t val = value_buf[offset_ptr]; scalar_t val_grad = 0; #pragma unroll for (int64_t i = 0; i < kCoarseningFactor; i++) { int64_t d = d_start + i * warpSize; if (d >= dim) break; scalar_t rel = relation[layer * dim + d]; scalar_t in = input[col * dim + d]; scalar_t out = output[row * dim + d]; scalar_t out_grad = output_grad[row * dim + d]; scalar_t x = BinaryOp::forward(rel, in); scalar_t y = val * x; scalar_t dx_drel = BinaryOp::backward_lhs(rel, in); scalar_t dx_din = BinaryOp::backward_rhs(rel, in); scalar_t dout_dy = NaryOp::backward(out, y); scalar_t dy_dval = x; scalar_t dy_dx = val; val_grad += out_grad * dout_dy * dy_dval; atomicAdd(&relation_grad[layer * dim + d], out_grad * dout_dy * dy_dx * dx_drel); atomicAdd(&input_grad[col * dim + d], out_grad * dout_dy * dy_dx * dx_din); } val_grad = warp_reduce(val_grad); if (threadIdx.x == 0) atomicAdd(&value_grad[block_ptr + offset_ptr], val_grad); } __syncwarp(); } } // only relation & input require gradients template <class scalar_t, class NaryOp, class BinaryOp> __global__ void rspmm_backward_out_cuda(const int64_t *row_ptr, const int64_t *col_ind, const int64_t *layer_ind, const scalar_t *value, const scalar_t *relation, const scalar_t *input, const scalar_t *output, const scalar_t *output_grad, scalar_t *relation_grad, scalar_t *input_grad, int64_t num_row, int64_t nnz, int64_t dim) { // for best optimization, the following code is compiled with constant warpSize assert(blockDim.x == warpSize); extern __shared__ int64_t buffer[]; int64_t *col_ind_buf = buffer; int64_t *layer_ind_buf = col_ind_buf + blockDim.y * warpSize; scalar_t *value_buf = reinterpret_cast<scalar_t *>(layer_ind_buf + blockDim.y * warpSize); col_ind_buf += threadIdx.y * warpSize; layer_ind_buf += threadIdx.y * warpSize; value_buf += threadIdx.y * warpSize; int64_t row = blockIdx.x * blockDim.y + threadIdx.y; if (row >= num_row) return; int64_t d_start = blockIdx.y * warpSize * kCoarseningFactor + threadIdx.x; int64_t ptr_start = row_ptr[row]; int64_t ptr_end = row + 1 < num_row ? row_ptr[row + 1] : nnz; for (int64_t block_ptr = ptr_start; block_ptr < ptr_end; block_ptr += warpSize) { int64_t ptr = block_ptr + threadIdx.x; if (ptr < ptr_end) { col_ind_buf[threadIdx.x] = col_ind[ptr]; layer_ind_buf[threadIdx.x] = layer_ind[ptr]; value_buf[threadIdx.x] = value[ptr]; } __syncwarp(); int64_t max_offset = warpSize < ptr_end - block_ptr ? 
warpSize : ptr_end - block_ptr; for (int64_t offset_ptr = 0; offset_ptr < max_offset; offset_ptr++) { int64_t col = col_ind_buf[offset_ptr]; int64_t layer = layer_ind_buf[offset_ptr]; scalar_t val = value_buf[offset_ptr]; #pragma unroll for (int64_t i = 0; i < kCoarseningFactor; i++) { int64_t d = d_start + i * warpSize; if (d >= dim) break; scalar_t rel = relation[layer * dim + d]; scalar_t in = input[col * dim + d]; scalar_t out = output[row * dim + d]; scalar_t out_grad = output_grad[row * dim + d]; scalar_t x = BinaryOp::forward(rel, in); scalar_t y = val * x; scalar_t dx_drel = BinaryOp::backward_lhs(rel, in); scalar_t dx_din = BinaryOp::backward_rhs(rel, in); scalar_t dout_dy = NaryOp::backward(out, y); scalar_t dy_dx = val; atomicAdd(&relation_grad[layer * dim + d], out_grad * dout_dy * dy_dx * dx_drel); atomicAdd(&input_grad[col * dim + d], out_grad * dout_dy * dy_dx * dx_din); } } __syncwarp(); } } template <template<class> class NaryOp, template<class> class BinaryOp> Tensor rspmm_forward_cuda(const SparseTensor &sparse, const Tensor &relation_, const Tensor &input_) { constexpr const char *fn_name = "rspmm_forward_cuda"; TensorArg sparse_arg(sparse, "sparse", 1), relation_arg(relation_, "relation", 2), input_arg(input_, "input", 3); rspmm_forward_check(fn_name, sparse_arg, relation_arg, input_arg); checkAllSameGPU(fn_name, {sparse_arg, relation_arg, input_arg}); const Tensor relation = relation_.contiguous(); const Tensor input = input_.contiguous(); int64_t nnz = sparse._nnz(); int64_t dim = input.size(1); int64_t num_row = sparse.size(0); Tensor output = at::empty({num_row, dim}, input.options()); auto csr = coo2csr3d(sparse); Tensor row_ptr = std::get<0>(csr); Tensor col_ind = std::get<1>(csr); Tensor layer_ind = std::get<2>(csr); Tensor value = std::get<3>(csr); cudaSetDevice(input.get_device()); auto stream = at::cuda::getCurrentCUDAStream(); const int dim_per_block = 32; // warpSize const int num_dim_block = (dim + dim_per_block * kCoarseningFactor - 1) / (dim_per_block * kCoarseningFactor); const int row_per_block = kThreadPerBlock / dim_per_block; const int num_row_block = (num_row + row_per_block - 1) / row_per_block; AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "rspmm_forward_cuda", [&] { const int memory_size = kThreadPerBlock * (sizeof(int64_t) * 2 + sizeof(scalar_t)); rspmm_forward_out_cuda<scalar_t, NaryOp<scalar_t>, BinaryOp<scalar_t>> <<<dim3(num_row_block, num_dim_block), dim3(dim_per_block, row_per_block), memory_size, stream>>>( row_ptr.data_ptr<int64_t>(), col_ind.data_ptr<int64_t>(), layer_ind.data_ptr<int64_t>(), value.data_ptr<scalar_t>(), relation.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), num_row, nnz, dim ); }); return output; } template <template<class> class NaryOp, template<class> class BinaryOp> std::tuple<SparseTensor, Tensor, Tensor> rspmm_backward_cuda( const SparseTensor &sparse, const Tensor &relation_, const Tensor &input_, const Tensor &output_, const Tensor &output_grad_) { constexpr const char *fn_name = "rspmm_backward_cuda"; TensorArg sparse_arg(sparse, "sparse", 1), relation_arg(relation_, "relation", 2), input_arg(input_, "input", 3), output_arg(output_, "output", 4), output_grad_arg(output_grad_, "output_grad", 5); rspmm_backward_check(fn_name, sparse_arg, relation_arg, input_arg, output_arg, output_grad_arg); checkAllSameGPU(fn_name, {sparse_arg, relation_arg, input_arg, output_arg, output_grad_arg}); const Tensor relation = relation_.contiguous(); const Tensor input = input_.contiguous(); const 
Tensor output = output_.contiguous(); const Tensor output_grad = output_grad_.contiguous(); int64_t nnz = sparse._nnz(); int64_t dim = input.size(1); int64_t num_row = sparse.size(0); Tensor value_grad = at::zeros_like(sparse.values()); Tensor relation_grad = at::zeros_like(relation); Tensor input_grad = at::zeros_like(input); SparseTensor sparse_grad = at::_sparse_coo_tensor_unsafe(sparse.indices(), value_grad, sparse.sizes()); auto csr = coo2csr3d(sparse); Tensor row_ptr = std::get<0>(csr); Tensor col_ind = std::get<1>(csr); Tensor layer_ind = std::get<2>(csr); Tensor value = std::get<3>(csr); cudaSetDevice(input.get_device()); auto stream = at::cuda::getCurrentCUDAStream(); const int dim_per_block = 32; // warpSize const int num_dim_block = (dim + dim_per_block * kCoarseningFactor - 1) / (dim_per_block * kCoarseningFactor); const int row_per_block = kThreadPerBlock / dim_per_block; const int num_row_block = (num_row + row_per_block - 1) / row_per_block; if (sparse.requires_grad()) AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "rspmm_backward_cuda", [&] { const int memory_size = kThreadPerBlock * (sizeof(int64_t) * 2 + sizeof(scalar_t)); rspmm_backward_out_cuda<scalar_t, NaryOp<scalar_t>, BinaryOp<scalar_t>> <<<dim3(num_row_block, num_dim_block), dim3(dim_per_block, row_per_block), memory_size, stream>>>( row_ptr.data_ptr<int64_t>(), col_ind.data_ptr<int64_t>(), layer_ind.data_ptr<int64_t>(), value.data_ptr<scalar_t>(), relation.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), output_grad.data_ptr<scalar_t>(), value_grad.data_ptr<scalar_t>(), relation_grad.data_ptr<scalar_t>(), input_grad.data_ptr<scalar_t>(), num_row, nnz, dim ); }); else AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "rspmm_backward_cuda", [&] { const int memory_size = kThreadPerBlock * (sizeof(int64_t) * 2 + sizeof(scalar_t)); rspmm_backward_out_cuda<scalar_t, NaryOp<scalar_t>, BinaryOp<scalar_t>> <<<dim3(num_row_block, num_dim_block), dim3(dim_per_block, row_per_block), memory_size, stream>>>( row_ptr.data_ptr<int64_t>(), col_ind.data_ptr<int64_t>(), layer_ind.data_ptr<int64_t>(), value.data_ptr<scalar_t>(), relation.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), output_grad.data_ptr<scalar_t>(), relation_grad.data_ptr<scalar_t>(), input_grad.data_ptr<scalar_t>(), num_row, nnz, dim ); }); return std::make_tuple(sparse_grad, relation_grad, input_grad); } #define DECLARE_FORWARD_IMPL(ADD, MUL, NARYOP, BINARYOP) \ Tensor rspmm_##ADD##_##MUL##_forward_cuda( \ const SparseTensor &sparse, const Tensor &relation, const Tensor &input) { \ return rspmm_forward_cuda<NARYOP, BINARYOP>(sparse, relation, input); \ } #define DECLARE_BACKWARD_IMPL(ADD, MUL, NARYOP, BINARYOP) \ std::tuple<SparseTensor, Tensor, Tensor> rspmm_##ADD##_##MUL##_backward_cuda( \ const SparseTensor &sparse, const Tensor &relation, const Tensor &input, const Tensor &output, \ const Tensor &output_grad) { \ return rspmm_backward_cuda<NARYOP, BINARYOP>(sparse, relation, input, output, output_grad); \ } DECLARE_FORWARD_IMPL(add, mul, NaryAdd, BinaryMul) DECLARE_BACKWARD_IMPL(add, mul, NaryAdd, BinaryMul) DECLARE_FORWARD_IMPL(min, mul, NaryMin, BinaryMul) DECLARE_BACKWARD_IMPL(min, mul, NaryMin, BinaryMul) DECLARE_FORWARD_IMPL(max, mul, NaryMax, BinaryMul) DECLARE_BACKWARD_IMPL(max, mul, NaryMax, BinaryMul) DECLARE_FORWARD_IMPL(add, add, NaryAdd, BinaryAdd) DECLARE_BACKWARD_IMPL(add, add, NaryAdd, BinaryAdd) DECLARE_FORWARD_IMPL(min, add, NaryMin, BinaryAdd) DECLARE_BACKWARD_IMPL(min, add, 
NaryMin, BinaryAdd) DECLARE_FORWARD_IMPL(max, add, NaryMax, BinaryAdd) DECLARE_BACKWARD_IMPL(max, add, NaryMax, BinaryAdd) } // namespace at
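// The kernels above are templated on NaryOp / BinaryOp functor classes that are
// defined in a separate header not shown in this file. The member names below are
// taken directly from the calls in rspmm_backward_out_cuda; everything else about
// the real functors (qualifiers, extra members used by the forward kernel) is an
// assumption, so treat this as an illustrative sketch of the expected interface
// rather than the actual definitions.
template <class scalar_t>
struct BinaryMulSketch {
    // x = rel * in, as consumed by BinaryOp::forward(rel, in) above
    static __device__ __forceinline__ scalar_t forward(scalar_t rel, scalar_t in) {
        return rel * in;
    }
    // dx/drel, consumed as BinaryOp::backward_lhs(rel, in)
    static __device__ __forceinline__ scalar_t backward_lhs(scalar_t rel, scalar_t in) {
        return in;
    }
    // dx/din, consumed as BinaryOp::backward_rhs(rel, in)
    static __device__ __forceinline__ scalar_t backward_rhs(scalar_t rel, scalar_t in) {
        return rel;
    }
};

template <class scalar_t>
struct NaryAddSketch {
    // d(out)/dy for a sum reduction is 1, matching NaryOp::backward(out, y) above
    static __device__ __forceinline__ scalar_t backward(scalar_t out, scalar_t y) {
        return scalar_t(1);
    }
};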
#pragma once #include <math/vector.h> #include <math/matrix.h> #include "config.h" #include "instrumentation.cuh" #include "fragment_shader_stage.cuh" #include "viewport.cuh" #include "bitmask.cuh" #include "rasterization_stage.cuh" #include <ptx_primitives.cuh> #include <cub/cub.cuh> template<int NUM_WARPS, int RUNS, FRAMEBUFFER_SYNC_METHOD SyncType = FRAMEBUFFER_SYNC_METHOD::NO_SYNC> struct FramebufferAccess; template<int NUM_WARPS, int RUNS> struct FramebufferAccess<NUM_WARPS, RUNS, FRAMEBUFFER_SYNC_METHOD::NO_SYNC> { struct SharedMemT { }; __device__ FramebufferAccess(SharedMemT& shared, int2 tile, int wip, int numWarps) { } template<typename F> __device__ static void access(SharedMemT& shared, int2 tile, bool threadneedsaccess, int wip, int run, int numWarps, F f) { Instrumentation::BlockObserver<13, 3> observer; if (threadneedsaccess) f(); } }; template<int NUM_WARPS, int RUNS> struct FramebufferAccess<NUM_WARPS, RUNS, FRAMEBUFFER_SYNC_METHOD::SYNC_ALL> { //whenever there is a need for any syncing, sync all, otherwise dont sync at all :) struct SharedMemT { unsigned int activetiles[NUM_WARPS]; volatile bool needsync; }; __device__ FramebufferAccess(SharedMemT& shared, int2 tile, int wip, int numWarps) { Instrumentation::BlockObserver<13, 3> observer; unsigned int myTileId = tile.x | (tile.y << 16); shared.activetiles[wip] = myTileId; shared.needsync = false; syncthreads(1, numWarps*WARP_SIZE); // figure out if there are any conflicts warps works on the same tile if (laneid() < wip) if (shared.activetiles[laneid()] == myTileId) shared.needsync = true; } template<typename F> __device__ void access(SharedMemT& shared, int2 tile, bool threadneedsaccess, int wip, int run, int numWarps, F f) { Instrumentation::BlockObserver<13, 3> observer; syncthreads(1, numWarps*WARP_SIZE); if (shared.needsync) { #pragma unroll for (int i = 0; i < NUM_WARPS; ++i) { if (i == wip) if (threadneedsaccess) f(); syncthreads(1, numWarps*WARP_SIZE); } } else if (threadneedsaccess) f(); } }; template<int NUM_WARPS, int RUNS> struct FramebufferAccess<NUM_WARPS, RUNS, FRAMEBUFFER_SYNC_METHOD::SYNC_FRAGMENTS> { //when starting the tile, figure out between which tiles we need to sync and then just sync them as we go along static_assert(NUM_WARPS <= 15, "The block synced access to the framebuffer cannot handle more than 15 warps"); struct SharedMemT { cub::WarpScan<int>::TempStorage scan_storage; unsigned int activetiles[NUM_WARPS]; int barrier[NUM_WARPS]; int count[NUM_WARPS]; int offset[NUM_WARPS]; }; __device__ FramebufferAccess(SharedMemT& shared, int2 tile, int wip, int numWarps) { Instrumentation::BlockObserver<13, 3> observer; unsigned int myTileId = tile.x | (tile.y << 16); shared.activetiles[wip] = myTileId; syncthreads(1, numWarps*WARP_SIZE); // figure out which other warps works on the same tile int active = 0, offset, count; if (laneid() < numWarps) active = (shared.activetiles[laneid()] == myTileId); // get overall number and when it is my turn cub::WarpScan<int>(shared.scan_storage).ExclusiveSum(active, offset, count); offset = __shfl_sync(~0U, offset, wip); //compute barrier id (id of the first warp) int barrier = 1 + __ffs(__ballot_sync(~0U, active)); //note that the id will start at 2 //in case we really have 15 warps, we have to make sure not to use the 17th barrier //this could only happen if the last one is alone, instead we just use the 15th another time //the 15th cannot be used by anyone else but the 14th warp, and if the 15th warp does not access //the same tile as the 14th, the 14th will only 
use an arrive on the 15th barrier, and so will //the 15th warp and two arrives with 32 threads each are fine too. static_assert(NUM_WARPS < 16, "nope"); if (NUM_WARPS == 15) barrier = min(barrier, 15); shared.barrier[wip] = barrier; shared.offset[wip] = offset; shared.count[wip] = count; } template<typename F> __device__ void access(SharedMemT& shared, int2 tile, bool threadneedsaccess, int wip, int run, int numWarps, F f) { Instrumentation::BlockObserver<13, 3> observer; syncthreads(1, numWarps*WARP_SIZE); int count = shared.count[wip]; int barrier = shared.barrier[wip]; int offset = shared.offset[wip]; //sequential access for every warp that wants to access for (int i = 0; i < offset; ++i) syncthreads(barrier, (count - i) * WARP_SIZE); if (threadneedsaccess) f(); arrive(barrier, (count - offset) * WARP_SIZE); } }; template<int NUM_WARPS, int RUNS> struct FramebufferAccess<NUM_WARPS, RUNS, FRAMEBUFFER_SYNC_METHOD::SUBTILE_SYNC> { //figure out when to sync between warps (i.e. on a subtile level, depending on the tile size: 64 fragments -> 2 tests) // and incorporate the information if threads want to write anything static_assert(NUM_WARPS <= 15, "The block synced access to the framebuffer cannot handle more than 15 warps"); struct SharedMemT { unsigned int activetiles[NUM_WARPS]; cub::WarpScan<int>::TempStorage scan_storage; }; __device__ FramebufferAccess(SharedMemT& shared, int2 tile, int wip, int numWarps) { } template<typename F> __device__ void access(SharedMemT& shared, int2 tile, bool threadneedsaccess, int wip, int run, int numWarps, F f) { Instrumentation::BlockObserver<13, 3> observer; unsigned int myTileId = (!__any_sync(~0U, threadneedsaccess)) ? 0xFFFFFFFFU : tile.x | (tile.y << 16); shared.activetiles[wip] = myTileId; syncthreads(1, numWarps*WARP_SIZE); // figure out which other warps works on the same tile int active = 0, offset, count; if (laneid() < numWarps) active = (shared.activetiles[laneid()] == myTileId); syncthreads(1, numWarps*WARP_SIZE); if (myTileId == 0xFFFFFFFFU) return; // get overall number and when it is my turn cub::WarpScan<int>(shared.scan_storage).ExclusiveSum(active, offset, count); offset = __shfl_sync(~0U, offset, wip); //compute barrier id (id of the first warp) int barrier = 1 + __ffs(__ballot_sync(~0U, active)); //note that the id will start at 2 //in case we really have 15 warps, we have to make sure not to use the 17th barrier //this could only happen if the last one is alone, instead we just use the 15th another time //the 15th cannot be used by anyone else but the 14th warp, and if the 15th warp does not access //the same tile as the 14th, the 14th will only use an arrive on the 15th barrier, and so will //the 15th warp and two arrives with 32 threads each are fine too. 
static_assert(NUM_WARPS < 16, "nope"); if (NUM_WARPS == 15) barrier = min(barrier, 15); //sequential access for every warp that wants to access for (int i = 0; i < count; ++i) { if (i == offset) { //it's this warp's turn if (threadneedsaccess) f(); arrive(barrier, (count - i) * WARP_SIZE); break; } else syncthreads(barrier, (count - i) * WARP_SIZE); } } }; template<int NUM_WARPS, int RUNS> struct FramebufferAccess<NUM_WARPS, RUNS, FRAMEBUFFER_SYNC_METHOD::MASK_SYNC> { //for every shared tile, we keep a bitmask of active threads and sync as long as we need to serialize the individual accesses struct SharedMemT { union { unsigned int activetiles[NUM_WARPS]; unsigned int bitmasks[NUM_WARPS]; }; int mymask[NUM_WARPS]; }; __device__ FramebufferAccess(SharedMemT& shared, int2 tile, int wip, int numWarps) { Instrumentation::BlockObserver<13, 3> observer; unsigned int myTileId = tile.x | (tile.y << 16); shared.activetiles[wip] = myTileId; syncthreads(1, numWarps*WARP_SIZE); // figure out which other warps works on the same tile int active = 0; if (laneid() < numWarps) active = (shared.activetiles[laneid()] == myTileId); //compute id of used bitmask shared.mymask[wip] = __ffs(__ballot_sync(~0U, active)) - 1; } template<typename F> __device__ void access(SharedMemT& shared, int2 tile, bool threadneedsaccess, int wip, int run, int numWarps, F f) { Instrumentation::BlockObserver<13, 3> observer; unsigned int myBit = 1 << laneid(); syncthreads(1, numWarps*WARP_SIZE); shared.bitmasks[wip] = 0; unsigned int warp_mask = __ballot_sync(~0U, threadneedsaccess); while (syncthreads_or(warp_mask, 1, numWarps*WARP_SIZE)) { unsigned int free; if (laneid() == 0) free = atomicOr(shared.bitmasks + shared.mymask[wip], warp_mask); free = __shfl_sync(~0U, ~free, 0); //if ((warp_mask & free) == 0) if (warp_mask & myBit & free) { //if (threadneedsaccess) f(); //warp_mask = 0; threadneedsaccess = false; } warp_mask = __ballot_sync(~0U, threadneedsaccess); syncthreads(1, numWarps*WARP_SIZE); shared.bitmasks[wip] = 0; } } }; template<int NUM_WARPS, int RUNS> struct FramebufferAccess<NUM_WARPS, RUNS, FRAMEBUFFER_SYNC_METHOD::POLLED_MASK_SYNC> { //for every shared tile, we keep a bitmask of active threads and lock via polling on a per thread basis struct SharedMemT { union { unsigned int activetiles[NUM_WARPS]; unsigned int bitmasks[NUM_WARPS*RUNS]; }; int mymask[NUM_WARPS]; }; __device__ FramebufferAccess(SharedMemT& shared, int2 tile, int wip, int numWarps) { Instrumentation::BlockObserver<13, 3> observer; unsigned int myTileId = tile.x | (tile.y << 16); shared.activetiles[wip] = myTileId; syncthreads(1, numWarps*WARP_SIZE); // figure out which other warps works on the same tile int active = 0; if (laneid() < numWarps) active = (shared.activetiles[laneid()] == myTileId); //compute id of used bitmask shared.mymask[wip] = RUNS*(__ffs(__ballot_sync(~0U, active)) - 1); for (int i = 0; i < RUNS; ++i) shared.bitmasks[NUM_WARPS*wip + i] = 0; syncthreads(1, numWarps*WARP_SIZE); } template<typename F> __device__ void access(SharedMemT& shared, int2 tile, bool threadneedsaccess, int wip, int run, int numWarps, F f) { Instrumentation::BlockObserver<13, 3> observer; unsigned int myBit = 1 << laneid(); unsigned int warp_mask = __ballot_sync(~0U, threadneedsaccess); while (warp_mask) { unsigned int free; if (laneid() == 0) free = atomicOr(shared.bitmasks + shared.mymask[wip] + run, warp_mask); free = __shfl_sync(~0U, ~free, 0); if (warp_mask & myBit & free) { f(); threadneedsaccess = false; } __threadfence_block(); if (laneid() == 0) 
atomicAnd(shared.bitmasks + shared.mymask[wip] + run, free & warp_mask); warp_mask = __ballot_sync(~0U, threadneedsaccess); } } }; template <unsigned int NUM_WARPS, bool SYNC_ACCESS, class BinTileSpace, class CoverageShader, class FragmentShader, class FrameBuffer, class BlendOp> class TileRasterizer { private: using FragmentShaderInputs = typename ::FragmentShaderInfo<FragmentShader>::Inputs; public: typedef ::TileBitMask<BinTileSpace> TileBitMask; typedef FramebufferAccess<NUM_WARPS, (BinTileSpace::StampsPerTileX*BinTileSpace::StampsPerTileY + WARP_SIZE - 1) / WARP_SIZE, SYNC_ACCESS ? TILE_RASTER_EXCLUSIVE_ACCESS_METHOD : FRAMEBUFFER_SYNC_METHOD::NO_SYNC> FBAccess; struct Triangle { struct { __align__(16) math::float3x3 M; __align__(16) math::float3 uz; FragmentShaderInputStorage<FragmentShaderInputs> fs_input; }; }; struct SharedMemT { Triangle triangles[NUM_WARPS]; FBAccess::SharedMemT fbaccess_shared; }; __device__ static void run(SharedMemT& shared_memory, int tilebit, int triangleId, int binX, int binY, int numTiles) { int2 tile_coords, global_tile; int wip = threadIdx.x / WARP_SIZE; Triangle& mytriangle = shared_memory.triangles[wip]; { Instrumentation::BlockObserver<6, 2> observer; // find tile my tile //int2 tile_coords = binbitMask.getBitCoordsWarp(tile_offset); tile_coords = TileBitMask::bitToCoord(tilebit); global_tile = BinTileSpace::tileCoords(make_int2(binX, binY), tile_coords); //if (laneid() == 0 && (tile_coords_comp.x != tile_coords.x || tile_coords_comp.y != tile_coords.y)) // printf("not the same: %d %d vs %d %d\n", tile_coords.x, tile_coords.y, tile_coords_comp.x, tile_coords_comp.y); // load triangle to shared triangle_buffer.loadTriangleWarp(triangleId, &mytriangle.M, &mytriangle.uz); mytriangle.fs_input.loadWarp(triangle_buffer, triangleId); __threadfence_block(); } FBAccess fba(shared_memory.fbaccess_shared, global_tile, wip, numTiles); BinTileSpace::traverseStampsWarp(make_int2(binX, binY), tile_coords, [&](int stamp, int tile_start_x, int tile_start_y, int x, int y, int part) { math::float3 p; float f1, f2, f3; { Instrumentation::BlockObserver<6, 2> observer; p = clipcoordsFromRaster(x, y); f1 = dot(mytriangle.M.row1(), p); f2 = dot(mytriangle.M.row2(), p); f3 = dot(mytriangle.M.row3(), p); } float z = -1.0f; math::float4 color; bool write = false; { Instrumentation::BlockObserver<13, 2> observer; if (DRAW_BOUNDING_BOX) { if (f1 >= 0.0f && f2 >= 0.0f && f3 >= 0.0f) { z = abs(dot(mytriangle.uz, p));// *0.5f + 0.5f; color = math::float4(z, z, z , 1.0f); write = z >= -1.0f && z < 1.0f; } else { color = math::float4(0, 0, 0, 1.0f); write = true; } } else { if (f1 >= 0.0f && f2 >= 0.0f && f3 >= 0.0f) { z = dot(mytriangle.uz, p); if (z >= -1.0f && z <= 1.0f) // clipping! 
{ { math::float3 uw = math::float3(1.0f, 1.0f, 1.0f) * mytriangle.M; float rcpw = dot(uw, p); float w = 1.0f / rcpw; math::float3 u = math::float3(f1 * w, f2 * w, f3 * w); FragmentShader shader { { x, y }, { p.x, p.y, z, rcpw }, u, { f1 / length(mytriangle.M.row1().xy()), f2 / length(mytriangle.M.row2().xy()), f3 / length(mytriangle.M.row3().xy()) }, triangleId }; color = callFragmentShader(shader, mytriangle.fs_input, u); write = !shader.discarded(); } } } } } fba.access(shared_memory.fbaccess_shared, make_int2(tile_start_x, tile_start_y), write, wip, part, numTiles, [write, x, y, z, color]() { if (write) { float z_dest = FrameBuffer::readDepth(x, y); if (!DEPTH_TEST || z < z_dest) { if (DEPTH_WRITE) FrameBuffer::writeDepth(x, y, z); FrameBuffer::template writeColor<BlendOp>(x, y, color); } } }); }); } }; #endif // INCLUDED_CURE_TILE_RASTERIZER
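// The helpers syncthreads(name, count), arrive(name, count) and laneid() used by
// the FramebufferAccess variants above come from ptx_primitives.cuh, which is not
// part of this header. Their exact implementation is not shown here; a minimal
// stand-in (illustration only) would wrap the named-barrier PTX instructions,
// with 'count' always a multiple of the warp size, as in the callers above:
__device__ __forceinline__ void named_barrier_sync_sketch(unsigned name, unsigned count)
{
    // blocking barrier 'name': waits until 'count' threads have arrived
    asm volatile("bar.sync %0, %1;" : : "r"(name), "r"(count) : "memory");
}

__device__ __forceinline__ void named_barrier_arrive_sketch(unsigned name, unsigned count)
{
    // non-blocking arrival at barrier 'name'; waiters are released once the
    // arrival count for this barrier reaches 'count'
    asm volatile("bar.arrive %0, %1;" : : "r"(name), "r"(count) : "memory");
}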
//------------------------------------------------------------------------------ // block_apply_chunk macro //------------------------------------------------------------------------------ // A = A - V*T'*V'*A, for a single chunk of N columns of A, starting at column // j1 and ending at j1+N-1. // // This function uses fixed thread geometry and loop unrolling, which requires // the geometry to be known at compile time for best efficiency. It is then // #include'd by the block_apply_x function (block_apply.cu). The following // terms are #define'd by each specific version: // // ROW_PANELSIZE # of row tiles in V and A // COL_PANELSIZE # of column tiles in C and A // CBITTYROWS # of rows in the C bitty block // CBITTYCOLS # of cols in the C bitty block // ABITTYROWS # of rows in the A bitty block // ABITTYCOLS # of cols in the A bitty block // // The C bitty must cannot be larger than the A bitty block, since additional // registers are used to buffer the A matrix while the C bitty block is being // computed. These buffer registers are not used while computing with the A // bitty block, so for some variants of this kernel, they can be overlapped // with the A bitty block. // // The ROW_PANELSIZE, COL_PANELSIZE, ROW_EDGE_CASE, and COL_EDGE_CASE are // #define'd by the parent file(s) that include this file. The *_EDGE_CASE // macros are then #undefined here. The bitty block dimensions are defined // below. This file is #include'd into block_apply.cu. It is not a standalone // function. { //-------------------------------------------------------------------------- // bitty block sizes //-------------------------------------------------------------------------- #if (ROW_PANELSIZE == 3) #if (COL_PANELSIZE == 2) //------------------------------------------------------------------ // 3-by-2 block apply //------------------------------------------------------------------ // V is 3-by-1, C is 1-by-2, A is 3-by-2 (in # tiles) // 256 threads, each does a 4-by-2 block of C = T'*V'*A #define CBITTYROWS 4 #define CBITTYCOLS 2 // 384 threads, each does a 4-by-4 block of A = A-V*C #define ABITTYROWS 4 #define ABITTYCOLS 4 #else //------------------------------------------------------------------ // 3-by-1 block apply //------------------------------------------------------------------ // V is 3-by-1, C is 1-by-1, A is 3-by-1 (in # tiles) // 256 threads, each does a 2-by-2 block of C = T'*V'*A #define CBITTYROWS 2 #define CBITTYCOLS 2 // 384 threads, each does a 2-by-4 block of A = A-V*C #define ABITTYROWS 2 #define ABITTYCOLS 4 #endif #elif (ROW_PANELSIZE == 2) #if (COL_PANELSIZE == 2) //------------------------------------------------------------------ // block_apply_2_by_2 //------------------------------------------------------------------ // V is 2-by-1, C is 1-by-2, A is 2-by-2 (in # tiles) // 256 threads, each does a 4-by-2 block of C = T'*V'*A #define CBITTYROWS 4 #define CBITTYCOLS 2 // 256 threads, each does a 4-by-4 block of A = A-V*C #define ABITTYROWS 4 #define ABITTYCOLS 4 #else //------------------------------------------------------------------ // block_apply_2_by_1 //------------------------------------------------------------------ // V is 2-by-1, C is 1-by-1, A is 2-by-1 (in # tiles) // 256 threads, each does a 2-by-2 block of C = T'*V'*A #define CBITTYROWS 2 #define CBITTYCOLS 2 // 256 threads, each does a 2-by-4 block of A = A-V*C #define ABITTYROWS 2 #define ABITTYCOLS 4 #endif #else #if (COL_PANELSIZE == 2) //------------------------------------------------------------------ // 
block_apply_1_by_2 //------------------------------------------------------------------ // V is 1-by-1, C is 1-by-2, A is 1-by-2 (in # tiles) // 256 threads, each does a 4-by-2 block of C = T'*V'*A #define CBITTYROWS 2 #define CBITTYCOLS 4 // 256 threads, each does a 4-by-2 block of A = A-V*C #define ABITTYROWS 2 #define ABITTYCOLS 4 #else //------------------------------------------------------------------ // block_apply_1_by_1 //------------------------------------------------------------------ // V is 1-by-1, C is 1-by-1, A is 1-by-1 (in # tiles) // 256 threads, each does a 2-by-2 block of C = T'*V'*A #define CBITTYROWS 2 #define CBITTYCOLS 2 // 256 threads, each does a 2-by-2 block of A = A-V*C #define ABITTYROWS 2 #define ABITTYCOLS 2 #endif #endif //-------------------------------------------------------------------------- // matrix sizes and thread geometry //-------------------------------------------------------------------------- // For each outer iteration, C is M-by-N, V is (K+1)-by-M (with an extra // row for T), and A is K-by-N. #define K (ROW_PANELSIZE * M) #define N (COL_PANELSIZE * M) // threads to use for C=T'*(V'*A) #define CTHREADS ((M * N) / (CBITTYROWS * CBITTYCOLS)) // threads to use for A=A-V*C #define ATHREADS ((K * N) / (ABITTYROWS * ABITTYCOLS)) //-------------------------------------------------------------------------- // bitty blocks for the computation //-------------------------------------------------------------------------- // Each thread owns a bitty block of C for C=T'*V'*A. The top left entry // owned by a thread is C(ic,jc). Thread 0 does C(0,0), thread 1 does // C(1,0) ... #define ic (threadIdx.x % (M/CBITTYROWS)) #define jc (threadIdx.x / (M/CBITTYROWS)) #define MYCBITTYROW(ii) (ii * (M/CBITTYROWS) + ic) #define MYCBITTYCOL(jj) (jj * (N/CBITTYCOLS) + jc) // Each thread owns a bitty block of A for A=A-V*C, with top left entry // A(ia,ja). Thread 0 does A(0,0), thread 1 does A(0,1), thread 2 does // A(0,2), ... so that global memory loads/stores are coallesced across a // warp. #define ia (threadIdx.x / (N/ABITTYCOLS)) #define ja (threadIdx.x % (N/ABITTYCOLS)) #define MYABITTYROW(ii) (ii * (K/ABITTYROWS) + ia) #define MYABITTYCOL(jj) (jj * (N/ABITTYCOLS) + ja) //-------------------------------------------------------------------------- // loading the A matrix //-------------------------------------------------------------------------- // Each thread loads a set of entries of A defined by iaload and jaload. // The first entry loaded by a thread is A(iaload,jaload), and then it // loads entries every ACHUNKSIZE rows after that (in the same column // jaload). #define iaload (threadIdx.x / N) #define jaload (threadIdx.x % N) #define ACHUNKSIZE (NUMTHREADS / N) #define NACHUNKS CEIL (HALFTILE*N, NUMTHREADS) int fjload = j1 + jaload ; //-------------------------------------------------------------------------- // register allocation //-------------------------------------------------------------------------- // C bitty block is no larger than the A bitty block, in both dimensions. 
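    // Worked example (illustrative only; the actual M is #define'd by the parent
    // file, and M = 32 is the value implied by the thread counts quoted in the
    // comments above): for the 3-by-2 variant, N = 64, K = 96,
    // CTHREADS = (32*64)/(4*2) = 256 and ATHREADS = (96*64)/(4*4) = 384.
    // Thread t then owns the C entries C(ii*8 + t%8, jj*32 + t/8) for
    // ii = 0..3 and jj = 0..1, and its A bitty block rbit holds 4*4 = 16 doubles.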
double rbit [ABITTYROWS][ABITTYCOLS] ; double rrow [ABITTYROWS] ; double rcol [ABITTYCOLS] ; #if (CBITTYCOLS == ABITTYCOLS) // the A bitty block is too small to hold the A buffer double abuffer [NACHUNKS] ; #define rbitA(i) abuffer [i] #else // use the last column of the A bitty block for the A buffer #define rbitA(i) (rbit [i][ABITTYCOLS-1]) #endif //-------------------------------------------------------------------------- // edge case //-------------------------------------------------------------------------- #ifdef ROW_EDGE_CASE // check if a row is inside the front. #define INSIDE_ROW(test) (test) #else // the row is guaranteed to reside inside the frontal matrix. #define INSIDE_ROW(test) (1) #endif #ifdef COL_EDGE_CASE // check if a column is inside the front. #define INSIDE_COL(test) (test) #else // the column is guaranteed to reside inside the frontal matrix. #define INSIDE_COL(test) (1) #endif bool aloader = INSIDE_COL (fjload < fn) ; //-------------------------------------------------------------------------- // C = V'*A, where V is now in shared, and A is loaded from global //-------------------------------------------------------------------------- // prefetch the first halftile of A from global to register #pragma unroll for (int ii = 0 ; ii < NACHUNKS ; ii++) { rbitA (ii) = 0 ; } #pragma unroll for (int ii = 0 ; ii < NACHUNKS ; ii++) { int i = ii * ACHUNKSIZE + iaload ; if (ii < NACHUNKS-1 || i < HALFTILE) { int fi = IFRONT (0, i) ; if (aloader && INSIDE_ROW (fi < fm)) { rbitA (ii) = glF [fi * fn + fjload] ; } } } // The X=V*C computation in the prior iteration reads shC, but the same // space is used to load A from the frontal matrix in this iteration. __syncthreads ( ) ; // clear the C bitty block #pragma unroll for (int ii = 0 ; ii < CBITTYROWS ; ii++) { #pragma unroll for (int jj = 0 ; jj < CBITTYCOLS ; jj++) { rbit [ii][jj] = 0 ; } } // C=V'*A for the first tile of V, which is lower triangular #define FIRST_TILE #include "cevta_tile.cu" // Subsequent tiles of V are square. Result is in C bitty block. for (int t = 1 ; t < ROW_PANELSIZE ; t++) { #include "cevta_tile.cu" } //-------------------------------------------------------------------------- // write result of C=V'*A into shared, and clear the C bitty block //-------------------------------------------------------------------------- if (CTHREADS == NUMTHREADS || threadIdx.x < CTHREADS) { #pragma unroll for (int ii = 0 ; ii < CBITTYROWS ; ii++) { int i = MYCBITTYROW (ii) ; #pragma unroll for (int jj = 0 ; jj < CBITTYCOLS ; jj++) { int j = MYCBITTYCOL (jj) ; shC [i][j] = rbit [ii][jj] ; rbit [ii][jj] = 0 ; } } } // make sure all of shC is available to all threads __syncthreads ( ) ; //-------------------------------------------------------------------------- // C = triu(T)'*C, leaving the result in the C bitty block //-------------------------------------------------------------------------- if (CTHREADS == NUMTHREADS || threadIdx.x < CTHREADS) { #pragma unroll for (int i = 0 ; i < M ; i++) { #pragma unroll for (int ii = 0 ; ii < CBITTYROWS ; ii++) { int j = MYCBITTYROW (ii) ; if (i <= j) { rrow [ii] = ST (i,j) ; } } #pragma unroll for (int jj = 0 ; jj < CBITTYCOLS ; jj++) { int j = MYCBITTYCOL (jj) ; rcol [jj] = shC [i][j] ; } #pragma unroll for (int ii = 0 ; ii < CBITTYROWS ; ii++) { int j = MYCBITTYROW (ii) ; if (i <= j) { #pragma unroll for (int jj = 0 ; jj < CBITTYCOLS ; jj++) { rbit [ii][jj] += rrow [ii] * rcol [jj] ; } } } } } // We need syncthreads here because of the write-after-read hazard. 
Each // thread reads the old C, above, and then C is modified below with the new // C, where newC = triu(T)'*oldC. __syncthreads ( ) ; //-------------------------------------------------------------------------- // write the result of C = T'*C to shared memory //-------------------------------------------------------------------------- if (CTHREADS == NUMTHREADS || threadIdx.x < CTHREADS) { #pragma unroll for (int ii = 0 ; ii < CBITTYROWS ; ii++) { int i = MYCBITTYROW (ii) ; #pragma unroll for (int jj = 0 ; jj < CBITTYCOLS ; jj++) { int j = MYCBITTYCOL (jj) ; shC [i][j] = rbit [ii][jj] ; } } } // All threads come here. We need a syncthreads because // shC has been written above and must be read below in A=A-V*C. __syncthreads ( ) ; //-------------------------------------------------------------------------- // A = A - V*C //-------------------------------------------------------------------------- if (ATHREADS == NUMTHREADS || threadIdx.x < ATHREADS) { //---------------------------------------------------------------------- // clear the A bitty block //---------------------------------------------------------------------- #pragma unroll for (int ii = 0 ; ii < ABITTYROWS ; ii++) { #pragma unroll for (int jj = 0 ; jj < ABITTYCOLS ; jj++) { rbit [ii][jj] = 0 ; } } //---------------------------------------------------------------------- // X = tril(V)*C, store result into register (rbit) //---------------------------------------------------------------------- #pragma unroll for (int p = 0 ; p < M ; p++) { #pragma unroll for (int ii = 0 ; ii < ABITTYROWS ; ii++) { int i = MYABITTYROW (ii) ; if (i >= p) { rrow [ii] = shV [1+i][p] ; } } #pragma unroll for (int jj = 0 ; jj < ABITTYCOLS ; jj++) { int j = MYABITTYCOL (jj) ; rcol [jj] = shC [p][j] ; } #pragma unroll for (int ii = 0 ; ii < ABITTYROWS ; ii++) { int i = MYABITTYROW (ii) ; if (i >= p) { #pragma unroll for (int jj = 0 ; jj < ABITTYCOLS ; jj++) { rbit [ii][jj] += rrow [ii] * rcol [jj] ; } } } } //---------------------------------------------------------------------- // A = A - X, which finalizes the computation A = A - V*(T'*(V'*A)) //---------------------------------------------------------------------- #if (COL_PANELSIZE == 2) #pragma unroll for (int ii = 0 ; ii < ABITTYROWS ; ii++) { int i = MYABITTYROW (ii) ; int fi = IFRONT (i / M, i % M) ; #pragma unroll for (int jj = 0 ; jj < ABITTYCOLS ; jj++) { int fj = j1 + MYABITTYCOL (jj) ; if (INSIDE_ROW (fi < fm) && INSIDE_COL (fj < fn)) { glF [fi * fn + fj] -= rbit [ii][jj] ; } } } #else #pragma unroll for (int ii = 0 ; ii < ABITTYROWS ; ii++) { int i = MYABITTYROW (ii) ; int fi = IFRONT (i / M, i % M) ; #pragma unroll for (int jj = 0 ; jj < ABITTYCOLS ; jj++) { int fj = j1 + MYABITTYCOL (jj) ; if (INSIDE_ROW (fi < fm) && INSIDE_COL (fj < fn)) { shV[i][MYABITTYCOL(jj)] = glF[fi*fn+fj] - rbit[ii][jj]; } else { shV[i][MYABITTYCOL(jj)] = 0.0; } } } #endif } //-------------------------------------------------------------------------- // sync //-------------------------------------------------------------------------- // The X=V*C computation in this iteration reads shC, but the same space is // used to load A from the frontal matrix in C=V'*A in the next iteration. // This final sync also ensures that all threads finish the block_apply // at the same time. Thus, no syncthreads is needed at the start of a // subsequent function (the pipelined apply+factorize, for example). 
__syncthreads ( ) ; } //------------------------------------------------------------------------------ // undef's //------------------------------------------------------------------------------ // The following #define's appear above. Note that FIRST_TILE is not #undef'd // since that is done by cevta_tile.cu. #undef CBITTYROWS #undef CBITTYCOLS #undef ABITTYROWS #undef ABITTYCOLS #undef K #undef N #undef CTHREADS #undef ATHREADS #undef ic #undef jc #undef MYCBITTYROW #undef MYCBITTYCOL #undef ia #undef ja #undef MYABITTYROW #undef MYABITTYCOL #undef iaload #undef jaload #undef ACHUNKSIZE #undef NACHUNKS #undef rbitA #undef INSIDE_ROW #undef INSIDE_COL // Defined in the parent file that includes this one. Note that ROW_PANELSIZE // is not #undef'd, since that is done in the parent. #undef ROW_EDGE_CASE #undef COL_EDGE_CASE
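// A hypothetical instantiation from the parent file, sketched for illustration
// (the real driver is block_apply.cu; the chunk's filename below is assumed):
//
//      #define ROW_PANELSIZE 3
//      #define COL_PANELSIZE 2
//      #define ROW_EDGE_CASE       /* rows may fall outside the front */
//      #define COL_EDGE_CASE       /* columns may fall outside the front */
//      #include "block_apply_chunk.cu"
//
// ROW_EDGE_CASE and COL_EDGE_CASE are #undef'd by the chunk itself (see above);
// the parent remains responsible for #undef'ing ROW_PANELSIZE and COL_PANELSIZE.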
namespace cgbn { template<class env> __device__ __forceinline__ void core_t<env>::mont_mul(uint32_t r[LIMBS], const uint32_t a[LIMBS], const uint32_t b[LIMBS], const uint32_t n[LIMBS], const uint32_t np0) { uint32_t sync=sync_mask(), group_thread=threadIdx.x & TPI-1; uint32_t u1, ru[LIMBS], r1=0, ra[LIMBS], c=0, t, q; uint64_t sum; mpzero<LIMBS>(ra); mpzero<LIMBS>(ru); #pragma nounroll for(int32_t thread=0;thread<TPI;thread++) { #pragma unroll for(int32_t word=0;word<LIMBS;word++) { t=__shfl_sync(sync, b[word], thread, TPI); chain_t<LIMBS+1> chain1; #pragma unroll for(int32_t index=0;index<LIMBS;index++) ru[index]=chain1.xmadlh(a[index], t, ru[index]); chain_t<LIMBS+1> chain2; #pragma unroll for(int32_t index=0;index<LIMBS;index++) ru[index]=chain2.xmadhl(a[index], t, ru[index]); u1=chain2.add(0, 0); chain_t<LIMBS+1> chain3; #pragma unroll for(int32_t index=0;index<LIMBS;index++) ra[index]=chain3.xmadll(a[index], t, ra[index]); r1=chain3.add(r1, 0); chain_t<LIMBS> chain4; #pragma unroll for(int32_t index=0;index<LIMBS-1;index++) ra[index+1]=chain4.xmadhh(a[index], t, ra[index+1]); r1=chain4.xmadhh(a[LIMBS-1], t, r1); // split u[0] and add it into r t=ru[0]<<16; ra[0]=add_cc(ra[0], t); t=ru[0]>>16; c=addc(c, t); q=__shfl_sync(sync, ra[0], 0, TPI)*np0; // skip u[0] chain_t<LIMBS> chain5; #pragma unroll for(int32_t index=1;index<LIMBS;index++) ru[index]=chain5.xmadlh(n[index], q, ru[index]); u1=chain5.add(u1, 0); // skip u[0], shift chain_t<LIMBS> chain6; #pragma unroll for(int32_t index=1;index<LIMBS;index++) ru[index-1]=chain6.xmadhl(n[index], q, ru[index]); ru[LIMBS-1]=chain6.add(u1, 0); // push the carry along ra[1]=add_cc(ra[1], c); c=addc(0, 0); // handles four XMADs for the q * n0 terms ra[0]=madlo_cc(n[0], q, ra[0]); ra[1]=madhic_cc(n[0], q, ra[1]); c=addc(c, 0); t=__shfl_sync(sync, ra[0], threadIdx.x+1, TPI); chain_t<LIMBS> chain7; #pragma unroll for(int32_t index=1;index<LIMBS;index++) ra[index]=chain7.xmadll(n[index], q, ra[index]); r1=chain7.add(r1, 0); ra[0]=ra[1]; chain_t<LIMBS> chain8; // should be limbs-1 #pragma unroll for(int32_t index=1;index<LIMBS-1;index++) ra[index]=chain8.xmadhh(n[index], q, ra[index+1]); ra[LIMBS-1]=chain8.xmadhh(n[LIMBS-1], q, 0); sum=((uint64_t)ra[LIMBS-1]) + ((uint64_t)r1) + ((uint64_t)t); ra[LIMBS-1]=sum; r1=sum>>32; } } #pragma unroll for(int32_t index=LIMBS-1;index>0;index--) ru[index]=__byte_perm(ru[index-1], ru[index], 0x5432); ru[0]=__byte_perm(0, ru[0], 0x5432); chain_t<LIMBS+1> chain10; for(int32_t index=0;index<LIMBS;index++) r[index]=chain10.add(ra[index], ru[index]); r1=chain10.add(r1, 0); // r1:r0 <= 0x00000002 0xFFFFFFFD t=__shfl_up_sync(sync, r1, 1, TPI); // all but most significant thread clears r1 if(group_thread!=TPI-1) r1=0; if(group_thread==0) t=0; chain_t<LIMBS+1> chain11; r[0]=chain11.add(r[0], t); r[1]=chain11.add(r[1], c); #pragma unroll for(int32_t index=2;index<LIMBS;index++) r[index]=chain11.add(r[index], 0); c=chain11.add(r1, 0); c=-fast_propagate_add(c, r); // compute -n t=n[0]-(group_thread==0); // n must be odd, so there is no chance for a carry ripple chain_t<LIMBS+1> chain12; r[0]=chain12.add(r[0], ~t & c); #pragma unroll for(int32_t index=1;index<LIMBS;index++) r[index]=chain12.add(r[index], ~n[index] & c); c=chain12.add(0, 0); fast_propagate_add(c, r); clear_padding(r); } template<class env> __device__ __forceinline__ void core_t<env>::mont_reduce_wide(uint32_t r[LIMBS], const uint32_t lo[LIMBS], const uint32_t hi[LIMBS], const uint32_t n[LIMBS], const uint32_t np0, const bool zero) { uint32_t sync=sync_mask(), 
group_thread=threadIdx.x & TPI-1; uint32_t t0, t1, t, q, carry0, carry1, ru[LIMBS], ra[LIMBS], top; uint64_t sum; #pragma unroll for(int32_t index=0;index<LIMBS;index++) { ra[index]=lo[index]; ru[index]=0; } carry0=0; carry1=0; #pragma nounroll for(int32_t thread=0;thread<TPI;thread++) { #pragma unroll for(int32_t word=0;word<LIMBS;word++) { ra[0]=add_cc(ra[0], carry0); carry0=addc(0, 0); t=ra[0] + (ru[0]<<16); q=__shfl_sync(sync, t, 0, TPI)*np0; chain_t<LIMBS+1> chain1; #pragma unroll for(int32_t index=0;index<LIMBS;index++) ra[index]=chain1.xmadll(n[index], q, ra[index]); carry1=chain1.add(carry1, 0); chain_t<LIMBS> chain2; #pragma unroll for(int32_t index=0;index<LIMBS;index++) ru[index]=chain2.xmadhl(n[index], q, ru[index]); chain_t<LIMBS+1> chain3; t1=chain3.xmadlh(n[0], q, ru[0]); #pragma unroll for(int32_t index=1;index<LIMBS;index++) ru[index-1]=chain3.xmadlh(n[index], q, ru[index]); ru[LIMBS-1]=chain3.add(0, 0); chain_t<LIMBS> chain4; t0=ra[0]; #pragma unroll for(int32_t index=0;index<LIMBS-1;index++) ra[index]=chain4.xmadhh(n[index], q, ra[index+1]); ra[LIMBS-1]=chain4.xmadhh(n[LIMBS-1], q, 0); t=t1<<16; t0=add_cc(t0, t); t=t1>>16; carry0=addc(carry0, t); // shift right by 32 bits (top thread gets zero) t0=__shfl_sync(sync, t0, threadIdx.x+1, TPI); if(!zero) { top=__shfl_sync(sync, hi[word], thread, TPI); t0=(group_thread==TPI-1) ? top : t0; } sum=((uint64_t)ra[LIMBS-1]) + ((uint64_t)t0) + ((uint64_t)carry1); ra[LIMBS-1]=sum; carry1=sum>>32; } } #pragma unroll for(int32_t index=LIMBS-1;index>=1;index--) ru[index]=uleft_wrap(ru[index-1], ru[index], 16); ru[0]=ru[0]<<16; mpadd32<LIMBS>(ru, ru, carry0); chain_t<LIMBS+1> chain5; #pragma unroll for(int32_t index=0;index<LIMBS;index++) ra[index]=chain5.add(ra[index], ru[index]); carry1=chain5.add(carry1, 0); carry1=fast_propagate_add(carry1, ra); if(!zero && carry1!=0) { t=n[0]-(group_thread==0); // n must be odd, so there is no chance for a carry ripple chain_t<LIMBS+1> chain6; ra[0]=chain6.add(ra[0], ~t); #pragma unroll for(int32_t index=1;index<LIMBS;index++) ra[index]=chain6.add(ra[index], ~n[index]); carry1=chain6.add(0, 0); fast_propagate_add(carry1, ra); clear_padding(ra); } mpset<LIMBS>(r, ra); } /****************************************************************************************************] * FIX FIX FIX - figure out why this code doesn't work * * This code doesn't work, but it's more elegant than the other implementation. * Keep it around for now. 
Note -- Q values are correct, wrap arounds are 0 ****************************************************************************************************/ #if 0 template<class env> __device__ __forceinline__ void core_t<env>::mont_mul(uint32_t r[LIMBS], const uint32_t a[LIMBS], const uint32_t b[LIMBS], const uint32_t n[LIMBS], const uint32_t np0) { uint32_t sync=sync_mask(), group_thread=threadIdx.x & TPI-1; uint32_t u1, ru[LIMBS], r1=0, ra[LIMBS], c=0, t, q, t0, t1; uint64_t sum; #pragma unroll for(int32_t index=0;index<LIMBS;index++) { ra[index]=0; ru[index]=0; } #pragma nounroll for(int32_t thread=0;thread<TPI;thread++) { #pragma unroll for(int32_t word=0;word<LIMBS;word++) { t=__shfl_sync(sync, b[word], thread, TPI); chain_t<> chain1; #pragma unroll for(int32_t index=0;index<LIMBS;index++) ru[index]=chain1.xmadlh(a[index], t, ru[index]); chain_t<> chain2; #pragma unroll for(int32_t index=0;index<LIMBS;index++) ru[index]=chain2.xmadhl(a[index], t, ru[index]); u1=chain2.add(0, 0); chain_t<> chain3; #pragma unroll for(int32_t index=0;index<LIMBS;index++) ra[index]=chain3.xmadll(a[index], t, ra[index]); r1=chain3.add(r1, 0); chain_t<> chain4; #pragma unroll for(int32_t index=0;index<LIMBS-1;index++) ra[index+1]=chain4.xmadhh(a[index], t, ra[index+1]); r1=chain4.xmadhh(a[LIMBS-1], t, r1); ra[0]=add_cc(ra[0], c); c=addc(0, 0); t=ra[0]+(ru[0]<<16); q=__shfl_sync(sync, t, 0, TPI)*np0; //if(blockIdx.x==0 && threadIdx.x==0) printf("Q=%08X\n", q); chain_t<> chain01; #pragma unroll for(int32_t index=0;index<LIMBS;index++) ra[index]=chain01.xmadll(n[index], q, ra[index]); r1=chain01.add(r1, 0); chain_t<> chain02; #pragma unroll for(int32_t index=0;index<LIMBS;index++) ru[index]=chain02.xmadhl(n[index], q, ru[index]); u1=chain02.add(u1, 0); chain_t<> chain03; t1=chain03.xmadlh(n[0], q, ru[0]); #pragma unroll for(int32_t index=1;index<LIMBS;index++) ru[index-1]=chain03.xmadlh(n[index], q, ru[index]); ru[LIMBS-1]=chain03.add(u1, 0); t=t1<<16; t0=add_cc(ra[0], t); t=t1>>16; c=addc(c, t); chain_t<> chain04; #pragma unroll for(int32_t index=0;index<LIMBS-1;index++) ra[index]=chain04.xmadhh(n[index], q, ra[index+1]); ra[LIMBS-1]=chain04.xmadhh(n[LIMBS-1], q, 0); // shift right by 32 bits (top thread gets zero) t=__shfl_sync(sync, t0, threadIdx.x+1, TPI); sum=((uint64_t)ra[LIMBS-1]) + ((uint64_t)r1) + ((uint64_t)t); ra[LIMBS-1]=sum; r1=sum>>32; } } #pragma unroll for(int32_t index=LIMBS-1;index>0;index--) ru[index]=__byte_perm(ru[index-1], ru[index], 0x5432); ru[0]=__byte_perm(0, ru[0], 0x5432); mpadd32<LIMBS>(ru, ru, c); chain_t<LIMBS+1> chain10; for(int32_t index=0;index<LIMBS;index++) r[index]=chain10.add(ra[index], ru[index]); r1=chain10.add(r1, 0); // r1:r0 <= 0x00000002 0xFFFFFFFD t=__shfl_up_sync(sync, r1, 1, TPI); // all but most significant thread clears r1 if(group_thread!=TPI-1) r1=0; if(group_thread==0) t=0; chain_t<LIMBS+1> chain11; r[0]=chain11.add(r[0], t); r[1]=chain11.add(r[1], c); #pragma unroll for(int32_t index=2;index<LIMBS;index++) r[index]=chain11.add(r[index], 0); c=chain11.add(r1, 0); c=-fast_propagate_add(c, r); // compute -n t=n[0]-(group_thread==0); // n must be odd, so there is no chance for a carry ripple chain_t<LIMBS+1> chain12; r[0]=chain12.add(r[0], ~t & c); #pragma unroll for(int32_t index=1;index<LIMBS;index++) r[index]=chain12.add(r[index], ~n[index] & c); c=chain12.add(0, 0); fast_propagate_add(c, r); clear_padding(r); } #endif } /* namespace cgbn */
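// For reference, the word-serial CIOS Montgomery multiplication that the
// warp-cooperative mont_mul above distributes across TPI threads. This is a
// host-side sketch for illustration only (it is not part of cgbn); it assumes
// little-endian 32-bit limbs and np0 = -n^{-1} mod 2^32, matching the
// conventions used by the kernels above, and computes r = a*b*2^(-32*S) mod n.
template<int S>
void mont_mul_reference(uint32_t r[S], const uint32_t a[S], const uint32_t b[S],
                        const uint32_t n[S], uint32_t np0)
{
  uint32_t t[S + 2] = {0};
  for (int i = 0; i < S; i++) {
    // t += a * b[i]
    uint64_t c = 0;
    for (int j = 0; j < S; j++) {
      uint64_t p = (uint64_t)t[j] + (uint64_t)a[j] * b[i] + c;
      t[j] = (uint32_t)p;
      c = p >> 32;
    }
    uint64_t s = (uint64_t)t[S] + c;
    t[S]     = (uint32_t)s;
    t[S + 1] = (uint32_t)(s >> 32);

    // q clears the low word of t modulo 2^32, then t = (t + q*n) >> 32
    uint32_t q = t[0] * np0;
    c = ((uint64_t)t[0] + (uint64_t)q * n[0]) >> 32;
    for (int j = 1; j < S; j++) {
      s = (uint64_t)t[j] + (uint64_t)q * n[j] + c;
      t[j - 1] = (uint32_t)s;
      c = s >> 32;
    }
    s = (uint64_t)t[S] + c;
    t[S - 1] = (uint32_t)s;
    t[S]     = t[S + 1] + (uint32_t)(s >> 32);
    t[S + 1] = 0;
  }
  // conditional subtraction: the result is t if t < n, otherwise t - n
  uint32_t d[S];
  uint64_t borrow = 0;
  for (int j = 0; j < S; j++) {
    uint64_t diff = (uint64_t)t[j] - n[j] - borrow;
    d[j]   = (uint32_t)diff;
    borrow = diff >> 63;               // 1 if the subtraction wrapped
  }
  bool keep_t = (t[S] == 0) && (borrow != 0);
  for (int j = 0; j < S; j++) r[j] = keep_t ? t[j] : d[j];
}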
// CHECK: #include <hip/hip_runtime.h> #include <cuda_runtime.h> #include <stdint.h> // Random predefiend 32 and 64 bit values constexpr int32_t value32 = 0x70F0F0FF; constexpr int64_t value64 = 0x7FFF0000FFFF0000; constexpr unsigned int writeFlag = 0; __global__ void fn(float* px, float* py) { bool a[42]; __shared__ double b[69]; for (auto&& x : b) x = *py++; for (auto&& x : a) x = *px++ > 0.0; for (auto&& x : a) if (x) *--py = *--px; } void testWrite() { int64_t* signalPtr; // CHECK: hipStream_t stream; cudaStream_t stream; // CHECK: hipStreamCreate(&stream); cudaStreamCreate(&stream); int64_t* host_ptr64 = (int64_t*)malloc(sizeof(int64_t)); int32_t* host_ptr32 = (int32_t*)malloc(sizeof(int32_t)); // hipExtMallocWithFlags((void**)&signalPtr, 8, hipMallocSignalMemory); void* device_ptr64; void* device_ptr32; *host_ptr64 = 0x0; *host_ptr32 = 0x0; *signalPtr = 0x0; // CHECK: hipHostRegister(host_ptr64, sizeof(int64_t), 0); cudaHostRegister(host_ptr64, sizeof(int64_t), 0); // CHECK: hipHostRegister(host_ptr32, sizeof(int32_t), 0); cudaHostRegister(host_ptr32, sizeof(int32_t), 0); // CHECK: hipStreamWriteValue64(stream, hipDeviceptr_t(host_ptr64), int64_t(value64), writeFlag); cuStreamWriteValue64(stream, CUdeviceptr(host_ptr64), value64, writeFlag); // CHECK: hipStreamWriteValue32(stream, hipDeviceptr_t(host_ptr32), int32_t(value32), writeFlag); cuStreamWriteValue32(stream, CUdeviceptr(host_ptr32), value32, writeFlag); // CHECK: hipStreamSynchronize(stream); cudaStreamSynchronize(stream); // CHECK: hipHostGetDevicePointer((void**)&device_ptr64, host_ptr64, 0); cudaHostGetDevicePointer((void**)&device_ptr64, host_ptr64, 0); // CHECK: hipHostGetDevicePointer((void**)&device_ptr32, host_ptr32, 0); cudaHostGetDevicePointer((void**)&device_ptr32, host_ptr32, 0); // Reset values *host_ptr64 = 0x0; *host_ptr32 = 0x0; // CHECK: hipStreamWriteValue64(stream, hipDeviceptr_t(device_ptr64), int64_t(value64), writeFlag); cuStreamWriteValue64(stream, CUdeviceptr(device_ptr64), value64, writeFlag); // CHECK: hipStreamWriteValue32(stream, hipDeviceptr_t(device_ptr32), int32_t(value32), writeFlag); cuStreamWriteValue32(stream, CUdeviceptr(device_ptr32), value32, writeFlag); // CHECK: hipStreamSynchronize(stream); cudaStreamSynchronize(stream); // Test Writing to Signal Memory // CHECK: hipStreamWriteValue64(stream, hipDeviceptr_t(signalPtr), int64_t(value64), writeFlag); cuStreamWriteValue64(stream, CUdeviceptr(signalPtr), value64, writeFlag); // CHECK: hipStreamSynchronize(stream); cudaStreamSynchronize(stream); // Cleanup // CHECK: hipStreamDestroy(stream); cudaStreamDestroy(stream); // CHECK: hipHostUnregister(host_ptr64); cudaHostUnregister(host_ptr64); // CHECK: hipHostUnregister(host_ptr32); cudaHostUnregister(host_ptr32); // CHECK: hipFree(signalPtr); cudaFree(signalPtr); free(host_ptr32); free(host_ptr64); } void testWait() { int64_t* signalPtr; // random data values int32_t DATA_INIT = 0x1234; int32_t DATA_UPDATE = 0X4321; struct TEST_WAIT { int compareOp; uint64_t mask; int64_t waitValue; int64_t signalValueFail; int64_t signalValuePass; }; TEST_WAIT testCases[] = { { // mask will ignore few MSB bits // CHECK: hipStreamWaitValueGte, CU_STREAM_WAIT_VALUE_GEQ, 0x0000FFFFFFFFFFFF, 0x000000007FFF0001, 0x7FFF00007FFF0000, 0x000000007FFF0001 }, { // CHECK: hipStreamWaitValueGte, CU_STREAM_WAIT_VALUE_GEQ, 0xF, 0x4, 0x3, 0x6 }, { // mask will ignore few MSB bits // CHECK: hipStreamWaitValueEq, CU_STREAM_WAIT_VALUE_EQ, 0x0000FFFFFFFFFFFF, 0x000000000FFF0001, 0x7FFF00000FFF0000, 0x7F0000000FFF0001 }, { // 
CHECK: hipStreamWaitValueEq, CU_STREAM_WAIT_VALUE_EQ, 0xFF, 0x11, 0x25, 0x11 }, { // mask will discard bits 8 to 11 // CHECK: hipStreamWaitValueAnd, CU_STREAM_WAIT_VALUE_AND, 0xFF, 0xF4A, 0xF35, 0X02 }, { // mask is set to ignore the sign bit. // CHECK: hipStreamWaitValueNor, CU_STREAM_WAIT_VALUE_NOR, 0x7FFFFFFFFFFFFFFF, 0x7FFFFFFFFFFFF247, 0x7FFFFFFFFFFFFdbd, 0x7FFFFFFFFFFFFdb5 }, { // mask is set to apply NOR for bits 0 to 3. // CHECK: hipStreamWaitValueNor, CU_STREAM_WAIT_VALUE_NOR, 0xF, 0x7E, 0x7D, 0x76 } }; struct TEST_WAIT32_NO_MASK { int compareOp; int32_t waitValue; int32_t signalValueFail; int32_t signalValuePass; }; // default mask 0xFFFFFFFF will be used. TEST_WAIT32_NO_MASK testCasesNoMask32[] = { { // CHECK: hipStreamWaitValueGte, CU_STREAM_WAIT_VALUE_GEQ, 0x7FFF0001, 0x7FFF0000, 0x7FFF0010 }, { // CHECK: hipStreamWaitValueEq, CU_STREAM_WAIT_VALUE_EQ, 0x7FFFFFFF, 0x7FFF0000, 0x7FFFFFFF }, { // CHECK: hipStreamWaitValueAnd, CU_STREAM_WAIT_VALUE_AND, 0x70F0F0F0, 0x0F0F0F0F, 0X1F0F0F0F }, { // CHECK: hipStreamWaitValueNor, CU_STREAM_WAIT_VALUE_NOR, 0x7AAAAAAA, static_cast<int32_t>(0x85555555), static_cast<int32_t>(0x9AAAAAAA) } }; struct TEST_WAIT64_NO_MASK { int compareOp; int64_t waitValue; int64_t signalValueFail; int64_t signalValuePass; }; // default mask 0xFFFFFFFFFFFFFFFF will be used. TEST_WAIT64_NO_MASK testCasesNoMask64[] = { { // CHECK: hipStreamWaitValueGte, CU_STREAM_WAIT_VALUE_GEQ, 0x7FFFFFFFFFFF0001, 0x7FFFFFFFFFFF0000, 0x7FFFFFFFFFFF0001 }, { // CHECK: hipStreamWaitValueEq, CU_STREAM_WAIT_VALUE_EQ, 0x7FFFFFFFFFFFFFFF, 0x7FFFFFFF0FFF0000, 0x7FFFFFFFFFFFFFFF }, { // CHECK: hipStreamWaitValueAnd, CU_STREAM_WAIT_VALUE_AND, 0x70F0F0F0F0F0F0F0, 0x0F0F0F0F0F0F0F0F, 0X1F0F0F0F0F0F0F0F }, { // CHECK: hipStreamWaitValueNor, CU_STREAM_WAIT_VALUE_NOR, 0x4724724747247247, static_cast<int64_t>(0xbddbddbdbddbddbd), static_cast<int64_t>(0xbddbddbdbddbddb3) } }; // CHECK: hipStream_t stream; cudaStream_t stream; // CHECK: hipStreamCreate(&stream); cudaStreamCreate(&stream); // hipExtMallocWithFlags((void**)&signalPtr, 8, hipMallocSignalMemory); int64_t* dataPtr64 = (int64_t*)malloc(sizeof(int64_t)); int32_t* dataPtr32 = (int32_t*)malloc(sizeof(int32_t)); // hipHostRegister(dataPtr64, sizeof(int64_t), 0); cudaHostRegister(dataPtr64, sizeof(int64_t), 0); // CHECK: hipHostRegister(dataPtr32, sizeof(int32_t), 0); cudaHostRegister(dataPtr32, sizeof(int32_t), 0); // Run-1: streamWait is blocking (wait conditions is false) // Run-2: streamWait is non-blocking (wait condition is true) for (int run = 0; run < 2; run++) { bool isBlocking = run == 0; for (const auto & tc : testCases) { *signalPtr = isBlocking ? tc.signalValueFail : tc.signalValuePass; *dataPtr64 = DATA_INIT; // CHECK: hipStreamWaitValue64(stream, hipDeviceptr_t(signalPtr), int64_t(tc.waitValue), tc.compareOp); cuStreamWaitValue64(stream, CUdeviceptr(signalPtr), tc.waitValue, tc.compareOp); // CHECK: hipStreamWriteValue64(stream, hipDeviceptr_t(dataPtr64), int64_t(DATA_UPDATE), writeFlag); cuStreamWriteValue64(stream, CUdeviceptr(dataPtr64), DATA_UPDATE, writeFlag); if (isBlocking) { // Trigger an implict flush and verify stream has pending work. // CHECK: if (hipStreamQuery(stream) != hipErrorNotReady) {} if (cudaStreamQuery(stream) != cudaErrorNotReady) {} // update signal to unblock the wait. 
*signalPtr = tc.signalValuePass; } // CHECK: if (hipStreamQuery(stream) != hipSuccess) {} if (cudaStreamQuery(stream) != cudaSuccess) {} // CHECK: hipStreamSynchronize(stream); cudaStreamSynchronize(stream); if (*dataPtr64 != DATA_UPDATE) {} // 32-bit API *signalPtr = isBlocking ? tc.signalValueFail : tc.signalValuePass; *dataPtr32 = DATA_INIT; // CHECK: hipStreamWaitValue32(stream, hipDeviceptr_t(signalPtr), int32_t(tc.waitValue), tc.compareOp); cuStreamWaitValue32(stream, CUdeviceptr(signalPtr), tc.waitValue, tc.compareOp); // CHECK: hipStreamWriteValue32(stream, hipDeviceptr_t(dataPtr32), int32_t(DATA_UPDATE), writeFlag); cuStreamWriteValue32(stream, CUdeviceptr(dataPtr32), DATA_UPDATE, writeFlag); if (isBlocking) { // Trigger an implict flush and verify stream has pending work. // CHECK: if (hipStreamQuery(stream) != hipErrorNotReady) {} if (cudaStreamQuery(stream) != cudaErrorNotReady) {} // update signal to unblock the wait. *signalPtr = static_cast<int32_t>(tc.signalValuePass); } // CHECK: hipStreamSynchronize(stream); cudaStreamSynchronize(stream); if (*dataPtr32 != DATA_UPDATE) {} } } // Run-1: streamWait is blocking (wait conditions is false) // Run-2: streamWait is non-blocking (wait condition is true) for (int run = 0; run < 2; run++) { bool isBlocking = run == 0; for (const auto& tc : testCasesNoMask32) { *signalPtr = isBlocking ? tc.signalValueFail : tc.signalValuePass; *dataPtr32 = DATA_INIT; // CHECK: hipStreamWaitValue32(stream, hipDeviceptr_t(signalPtr), int32_t(tc.waitValue), tc.compareOp); cuStreamWaitValue32(stream, CUdeviceptr(signalPtr), tc.waitValue, tc.compareOp); // CHECK: hipStreamWriteValue32(stream, hipDeviceptr_t(dataPtr32), int32_t(DATA_UPDATE), writeFlag); cuStreamWriteValue32(stream, CUdeviceptr(dataPtr32), DATA_UPDATE, writeFlag); if (isBlocking) { // Trigger an implict flush and verify stream has pending work. // CHECK: if (hipStreamQuery(stream) != hipErrorNotReady) {} if (cudaStreamQuery(stream) != cudaErrorNotReady) {} // update signal to unblock the wait. *signalPtr = tc.signalValuePass; } // CHECK: hipStreamSynchronize(stream); cudaStreamSynchronize(stream); if (*dataPtr32 != DATA_UPDATE) {} } } // Run-1: streamWait is blocking (wait conditions is false) // Run-2: streamWait is non-blocking (wait condition is true) for (int run = 0; run < 2; run++) { bool isBlocking = run == 0; for (const auto& tc : testCasesNoMask64) { *signalPtr = isBlocking ? tc.signalValueFail : tc.signalValuePass; *dataPtr64 = DATA_INIT; // CHECK: hipStreamWaitValue64(stream, hipDeviceptr_t(signalPtr), int64_t(tc.waitValue), tc.compareOp); cuStreamWaitValue64(stream, CUdeviceptr(signalPtr), tc.waitValue, tc.compareOp); // CHECK: hipStreamWriteValue64(stream, hipDeviceptr_t(dataPtr64), int64_t(DATA_UPDATE), writeFlag); cuStreamWriteValue64(stream, CUdeviceptr(dataPtr64), DATA_UPDATE, writeFlag); if (isBlocking) { // Trigger an implict flush and verify stream has pending work. // CHECK: if (hipStreamQuery(stream) != hipErrorNotReady) {} if (cudaStreamQuery(stream) != cudaErrorNotReady) {} // update signal to unblock the wait. 
*signalPtr = tc.signalValuePass; } // CHECK: hipStreamSynchronize(stream); cudaStreamSynchronize(stream); if (*dataPtr64 != DATA_UPDATE) {} } } // Cleanup // CHECK: hipFree(signalPtr); cudaFree(signalPtr); // CHECK: hipHostUnregister(dataPtr64); cudaHostUnregister(dataPtr64); // CHECK: hipHostUnregister(dataPtr32); cudaHostUnregister(dataPtr32); free(dataPtr64); free(dataPtr32); // CHECK: hipStreamDestroy(stream); cudaStreamDestroy(stream); } int main() { // CHECK: hipFuncCache_t cacheConfig; cudaFuncCache cacheConfig; void* func; // CHECK: hipFuncSetCacheConfig(reinterpret_cast<const void*>(func), cacheConfig); cudaFuncSetCacheConfig(func, cacheConfig); // CHECK: hipFuncAttributes attr{}; cudaFuncAttributes attr{}; // CHECK: auto r = hipFuncGetAttributes(&attr, reinterpret_cast<const void*>(&fn)); auto r = cudaFuncGetAttributes(&attr, &fn); // CHECK: if (r != hipSuccess || attr.maxThreadsPerBlock == 0) { if (r != cudaSuccess || attr.maxThreadsPerBlock == 0) { return 1; } testWrite(); testWait(); return 0; }
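// Note on the test above: signalPtr is written through but its allocation
// (the hipExtMallocWithFlags call with hipMallocSignalMemory) is left commented
// out, so the file primarily exercises the CUDA->HIP source translation rather
// than runtime behaviour. To actually execute the translated code, the pointer
// would first need a real allocation along the lines of the commented call,
// e.g. (sketch only):
//
//   int64_t* signalPtr = nullptr;
//   hipExtMallocWithFlags((void**)&signalPtr, sizeof(int64_t), hipMallocSignalMemory);
//   *signalPtr = 0;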
#include <torch/extension.h> #include <vector> #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> #include <stdio.h> #define Idx3(n, c, d, N, C, D) (((n)*(C)*(D)) + ((c)*(D)) + (d)) #define Idx2(n, c, N, C) (((n)*(C)) + (c)) #define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor") #define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous") #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) /* * shared mem reduction within a warp */ __device__ void warp_reduce( volatile float *s_mem, const unsigned int t_id, const unsigned int d) { for (unsigned int ridx = 32; ridx > 0; ridx /= 2) { if (d > ridx) { if ((t_id < ridx) && ((t_id + ridx) < d)) { s_mem[t_id] += s_mem[t_id + ridx]; } __syncwarp(); } } } /* * OnlineNorm forward kernel implementation * The ON fwd algorithm is: * * scale = sqrt(s_var + eps) * out = (input - s_mu) / scale * mu, var = moments(input) * diff = mu - s_mu * s_var = afwd * s_var + (1 - afwd) * var + afwd * (1 - afwd) * diff * diff * s_mu = s_mu + (1 - afwd) * diff * * where out is the output of ON, scale is the std. dev. used to scale the data * in the fwd pass and is cached for the bwd pass, eps is used for numerical * stability, s_mu and s_var are the streaming mean and variance, * mu and var are the sample mean and variance of the input, diff is an * intermediate stored variable, and afwd is the forward decay factor. * * The ON algorithm loops over N samples. s_mem_mu and s_mem_var are * shared memory used in the reduction (reduction over D) needed to update * s_mu and s_var. Each thread block operates on an one of C features * (ie. channel when operating on spatial data). Each channel has a s_mu and * s_var streaming statistics which are updated per sample by the reductions * per thread block. * * The kernel assumes contiguous inputs inputs of shape (N, C, *) where D is * the product of *. 
*/ template <typename scalar_t> __global__ void norm_fwd_kernel( const scalar_t* __restrict__ input, float* s_mu, float* s_var, scalar_t* __restrict__ scale, scalar_t* __restrict__ out, const unsigned int C, const unsigned int N, const unsigned int D, const float afwd, const float eps) { const unsigned int t_id = threadIdx.x; const unsigned int c = blockIdx.x; const unsigned int d = blockDim.x; unsigned int idx3, idx; extern __shared__ float s_mem_mu[]; float *s_mem_var = &s_mem_mu[d]; float in_elem_f, sample_mu, sample_var, diff; scalar_t in_elem, m, s; for(int n = 0; n < N; ++n){ s_mem_mu[t_id] = 0; // reset sample mu shared mem s_mem_var[t_id] = 0; // reset sample var shared mem // propagate fwd activations and start reduction to compute input mu and var m = (scalar_t)(s_mu[c]); s = (scalar_t)(sqrt(s_var[c] + eps)); if (t_id == 0) { scale[Idx2(n, c, N, C)] = s; } // store scale used for (idx = t_id; idx < D; idx += d) { idx3 = Idx3(n, c, idx, N, C, D); // idx in global mem in_elem = input[idx3]; // get input element out[idx3] = (in_elem - m) / s; // compute output // start 1st and 2nd moment reductions in_elem_f = (float)(in_elem); s_mem_mu[t_id] += in_elem_f; // 1st moment reduction s_mem_var[t_id] += in_elem_f * in_elem_f; // 2nd moment reduction } __syncthreads(); // reduce within thread block % warp reduction for (idx = 512; idx > 32; idx /= 2) { if (d > idx) { if ((t_id < idx) && ((t_id + idx) < d)) { s_mem_mu[t_id] += s_mem_mu[t_id + idx]; // 1st moment reduction s_mem_var[t_id] += s_mem_var[t_id + idx]; // 2nd moment reduction } __syncthreads(); } } // reduce smem within warp if (t_id < 32) { warp_reduce(s_mem_mu, t_id, d); // 1st moment reduction warp_reduce(s_mem_var, t_id, d); // 2nd moment reduction } if (t_id == 0) { // compute sample mu and var to update streaming stats sample_mu = s_mem_mu[0] / D; sample_var = (s_mem_var[0] / D) - (sample_mu * sample_mu); // update streaming stats diff = sample_mu - s_mu[c]; s_var[c] = afwd * s_var[c] + (1. - afwd) * sample_var + afwd * (1. - afwd) * diff * diff; s_mu[c] = s_mu[c] + (1. - afwd) * diff; } __syncthreads(); } } std::vector<at::Tensor> norm_fwd_cuda( const at::Tensor input, at::Tensor s_mu, at::Tensor s_var, const float afwd, const float eps) { CHECK_INPUT(input); CHECK_INPUT(s_mu); CHECK_INPUT(s_var); // Assumes channel_first contiguous data const unsigned int N = input.size(0); const unsigned int C = input.size(1); const unsigned int D = input[0][0].numel(); auto scale = at::empty({N, C}, input.type()); auto out = at::empty_like(input); const unsigned int threads = min(int(D), 512); const dim3 blocks(C); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "norm_fwd", ([&] { norm_fwd_kernel<scalar_t><<<blocks, threads, 2 * threads * sizeof(float)>>>( input.data<scalar_t>(), s_mu.data<float>(), s_var.data<float>(), scale.data<scalar_t>(), out.data<scalar_t>(), C, N, D, afwd, eps); })); THCudaCheck(cudaGetLastError()); return {out, scale, s_mu, s_var}; } /* * OnlineNorm backward kernel implementation * The ON bwd algorithm is: * * grad_tmp = grad_out - (1 - abwd) v_ctrl * out * v_ctrl = v_ctrl + mean(grad_tmp * out) * grad_tmp = grad_tmp / scale * grad_in = grad_tmp - (1 - abwd) u_ctrl * u_ctrl = u_ctrl + mean(grad_in) * * where out is the output of ON, scale is the std. dev. used to scale the data * in the fwd pass, grad_out is the gradient of the output, grad_in is the * gradient of the input, v_ctrl is the v control variable, u_ctrl is the u * control variable, abwd is the backward decay factor, and mean(.) 
is the mean * operator. * * The ON algorithm loops over N samples. Each sample has an associated * grad_out, out, and scale. The v and u control variables are applied to the * the gradient to produce the gradient of the input. s_mem_v and s_mem_u are * shared memory used in the reduction (reduction over D) needed to update * v_ctrl and u_ctrl. Each thread block operates on an one of C features * (ie. channel when operating on spatial data). Each channel has a v and u * control variable which are updated per sample by the reductions per thread * block. * * The kernel assumes contiguous inputs inputs of shape (N, C, *) where D is * the product of *. */ template <typename scalar_t> __global__ void norm_bwd_kernel( const scalar_t* __restrict__ grad_out, float* v_ctrl, float* u_ctrl, const scalar_t* __restrict__ out, const scalar_t* __restrict__ scale, scalar_t* __restrict__ grad_in, const unsigned int C, const unsigned int N, const unsigned int D, const float abwd) { const unsigned int t_id = threadIdx.x; const unsigned int c = blockIdx.x; const unsigned int d = blockDim.x; unsigned int idx3, idx; extern __shared__ float s_mem_v[]; float *s_mem_u = &s_mem_v[d]; scalar_t grad_tmp; for(int n = 0; n < N; ++n){ s_mem_v[t_id] = 0; // reset v_ctrl shared mem s_mem_u[t_id] = 0; // reset u_ctrl shared mem for (idx = t_id; idx < D; idx += d) { idx3 = Idx3(n, c, idx, N, C, D); // idx in global mem // v_ctrl logic grad_tmp = grad_out[idx3] - \ (scalar_t)((1. - abwd)) * (scalar_t)(v_ctrl[c]) * out[idx3]; // start reduction for v_ctrl updt s_mem_v[t_id] += (float)(grad_tmp) * (float)(out[idx3]); // scale grad grad_tmp = grad_tmp / scale[Idx2(n, c, N, C)]; // u_ctrl logic grad_tmp = grad_tmp - (scalar_t)((1. - abwd)) * (scalar_t)(u_ctrl[c]); grad_in[idx3] = grad_tmp; // start reduction for u_ctrl updt s_mem_u[t_id] += (float)(grad_tmp); } __syncthreads(); // reduce within thread block % warp reduction for (idx = 512; idx > 32; idx /= 2) { if (d > idx) { if ((t_id < idx) && ((t_id + idx) < d)) { s_mem_v[t_id] += s_mem_v[t_id + idx]; s_mem_u[t_id] += s_mem_u[t_id + idx]; } __syncthreads(); } } // reduce smem within warp if (t_id < 32) { warp_reduce(s_mem_v, t_id, d); warp_reduce(s_mem_u, t_id, d); } // move reduction to global mem to updt ctrl variables if (t_id == 0) { v_ctrl[c] += (s_mem_v[0] / D); // update v_ctrl u_ctrl[c] += (s_mem_u[0] / D); // update u_ctrl } __syncthreads(); } __syncthreads(); } std::vector<at::Tensor> norm_bwd_cuda( const at::Tensor grad_out, at::Tensor u, at::Tensor v, const at::Tensor out, const at::Tensor scale, const float abwd) { CHECK_INPUT(grad_out); CHECK_INPUT(u); CHECK_INPUT(v); CHECK_INPUT(out); CHECK_INPUT(scale); // Assumes channel_first contiguous data const unsigned int N = grad_out.size(0); const unsigned int C = grad_out.size(1); const unsigned int D = grad_out[0][0].numel(); auto grad_in = at::empty_like(grad_out); const unsigned int threads = min(int(D), 512); const dim3 blocks(C); AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_out.scalar_type(), "norm_bwd", ([&] { norm_bwd_kernel<scalar_t><<<blocks, threads, 2 * threads * sizeof(float)>>>( grad_out.data<scalar_t>(), v.data<float>(), u.data<float>(), out.data<scalar_t>(), scale.data<scalar_t>(), grad_in.data<scalar_t>(), C, N, D, abwd); })); THCudaCheck(cudaGetLastError()); return {grad_in, u, v}; }
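/*
 * Illustrative addition (not part of the original file): a minimal host-side
 * C++ reference of the Online Normalization forward recurrence documented in
 * the kernel header above, useful for sanity-checking the streaming-statistics
 * update on a single channel. Function and variable names here are assumed for
 * illustration only; the sketch mirrors the math, not the CUDA memory layout.
 */
#include <cmath>
#include <vector>

// Single-channel reference: x holds N samples of D elements each; s_mu/s_var
// are the streaming statistics carried across samples, afwd the decay factor.
static void online_norm_fwd_ref(const std::vector<std::vector<float> >& x,
                                float& s_mu, float& s_var,
                                float afwd, float eps,
                                std::vector<std::vector<float> >& out,
                                std::vector<float>& scale) {
  const size_t N = x.size();
  out.assign(N, std::vector<float>());
  scale.assign(N, 0.0f);
  for (size_t n = 0; n < N; ++n) {
    const size_t D = x[n].size();
    const float s = std::sqrt(s_var + eps);   // scale cached for the bwd pass
    scale[n] = s;
    out[n].resize(D);
    float mu = 0.0f, var = 0.0f;
    for (size_t d = 0; d < D; ++d) {
      out[n][d] = (x[n][d] - s_mu) / s;       // normalize with the *streaming* stats
      mu += x[n][d];
    }
    mu /= (float)D;                           // biased sample moments, as in the kernel
    for (size_t d = 0; d < D; ++d) var += (x[n][d] - mu) * (x[n][d] - mu);
    var /= (float)D;
    const float diff = mu - s_mu;             // then update the streaming stats
    s_var = afwd * s_var + (1.0f - afwd) * var + afwd * (1.0f - afwd) * diff * diff;
    s_mu  = s_mu + (1.0f - afwd) * diff;
  }
}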
#include "lite/backends/cuda/cuda_utils.h" #include "lite/core/op_registry.h" #include "lite/kernels/cuda/softmax_compute.h" namespace paddle { namespace lite { namespace kernels { namespace cuda { using Tensor = lite::Tensor; extern __shared__ char tile[]; template <typename dtype> __global__ void sharemem_softmax_kernel(int total_size, const dtype* in_data, dtype* out_data, int inner_num, int outer_num, int axis_size) { dtype* data = reinterpret_cast<dtype*>(tile) + threadIdx.x; //! compute thread index and real data index int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < total_size) { int idx_inner = idx % inner_num; int idx_outer = (idx / inner_num) * axis_size; int blocksize = blockDim.x; int real_index = idx_outer * inner_num + idx_inner; int loop_idx = real_index; //! read all data to sharemem in softmax channel #pragma unroll for (int i = 0; i < axis_size; ++i) { data[i * blocksize] = in_data[loop_idx]; loop_idx += inner_num; } //! get maximum value in softmax channel dtype max_data = data[0]; #pragma unroll for (int i = 1; i < axis_size; ++i) { dtype dt = data[i * blocksize]; if (max_data < dt) { max_data = dt; } } //! subtract then summarize dtype sum = 0; #pragma unroll for (int i = 0; i < axis_size; ++i) { dtype* dt = data + i * blocksize; *dt = expf(*dt - max_data); sum += *dt; } //! write back result loop_idx = real_index; #pragma unroll for (int i = 0; i < axis_size; ++i) { out_data[loop_idx] = data[i * blocksize] / sum; loop_idx += inner_num; } } } template <> __global__ void sharemem_softmax_kernel(int total_size, const half* in_data, half* out_data, int inner_num, int outer_num, int axis_size) { half* data = reinterpret_cast<half*>(tile) + threadIdx.x; //! compute thread index and real data index int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < total_size) { int idx_inner = idx % inner_num; int idx_outer = (idx / inner_num) * axis_size; int blocksize = blockDim.x; int real_index = idx_outer * inner_num + idx_inner; int loop_idx = real_index; //! read all data to sharemem in softmax channel #pragma unroll for (int i = 0; i < axis_size; ++i) { data[i * blocksize] = in_data[loop_idx]; loop_idx += inner_num; } //! get maximum value in softmax channel half max_data = data[0]; #pragma unroll for (int i = 1; i < axis_size; ++i) { half dt = data[i * blocksize]; #if __CUDA_ARCH__ >= 530 if (__hlt(max_data, dt)) { #else if (__half2float(max_data) < __half2float(dt)) { #endif max_data = dt; } } //! subtract then summarize half sum = 0; #pragma unroll for (int i = 0; i < axis_size; ++i) { half* dt = data + i * blocksize; #if __CUDA_ARCH__ >= 530 *dt = hexp(__hsub(*dt, max_data)); sum = __hadd(sum, *dt); #else *dt = __float2half(expf(__half2float(*dt) - __half2float(max_data))); sum = __float2half(__half2float(sum) + __half2float(*dt)); #endif } //! write back result loop_idx = real_index; #pragma unroll for (int i = 0; i < axis_size; ++i) { #if __CUDA_ARCH__ >= 530 out_data[loop_idx] = __hdiv(data[i * blocksize], sum); #else out_data[loop_idx] = __float2half(__half2float(data[i * blocksize]) / __half2float(sum)); #endif loop_idx += inner_num; } } } //! general kernel for softmax template <typename dtype> __global__ void softmax_max_kernel(int total_size, const dtype* in_data, dtype* out_data, dtype min_data, int inner_num, int outer_num, int axis_size) { //! 
compute data index int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < total_size) { int idx_inner = idx % inner_num; int idx_outer = (idx / inner_num) * axis_size; int real_index = idx_outer * inner_num + idx_inner; //! get maximum data across softmax axis dtype max_data = min_data; for (int i = 0; i < axis_size; ++i) { max_data = in_data[real_index] > max_data ? in_data[real_index] : max_data; real_index += inner_num; } out_data[idx] = max_data; } } template <> __global__ void softmax_max_kernel(int total_size, const half* in_data, half* out_data, half min_data, int inner_num, int outer_num, int axis_size) { //! compute data index int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < total_size) { int idx_inner = idx % inner_num; int idx_outer = (idx / inner_num) * axis_size; int real_index = idx_outer * inner_num + idx_inner; //! get maximum data across softmax axis half max_data = min_data; for (int i = 0; i < axis_size; ++i) { #if __CUDA_ARCH__ >= 530 max_data = __hgt(in_data[real_index], max_data) ? in_data[real_index] : max_data; #else float a = __half2float(in_data[real_index]); float b = __half2float(max_data); float res = a > b ? a : b; max_data = __float2half(res); #endif real_index += inner_num; } out_data[idx] = max_data; } } template <typename dtype> __global__ void softmax_sub_exp_sum_kernel(int total_size, const dtype* in_data, dtype* out_data, const dtype* max_data, dtype* sum_data, int inner_num, int outer_num, int axis_size) { //! compute data index int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < total_size) { int idx_inner = idx % inner_num; int idx_outer = (idx / inner_num) * axis_size; dtype max_data_cur = max_data[idx]; dtype sum_data_cur = 0; int real_index = idx_outer * inner_num + idx_inner; //! compute exp and summarize across the softmax axis for (int i = 0; i < axis_size; ++i) { dtype sub_data = in_data[real_index] - max_data_cur; sub_data = expf(sub_data); sum_data_cur += sub_data; out_data[real_index] = sub_data; real_index += inner_num; } sum_data[idx] = sum_data_cur; } } template <> __global__ void softmax_sub_exp_sum_kernel(int total_size, const half* in_data, half* out_data, const half* max_data, half* sum_data, int inner_num, int outer_num, int axis_size) { //! compute data index int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < total_size) { int idx_inner = idx % inner_num; int idx_outer = (idx / inner_num) * axis_size; half max_data_cur = max_data[idx]; half sum_data_cur = 0; int real_index = idx_outer * inner_num + idx_inner; //! compute exp and summarize across the softmax axis for (int i = 0; i < axis_size; ++i) { #if __CUDA_ARCH__ >= 530 half sub_data = __hsub(in_data[real_index], max_data_cur); sub_data = hexp(sub_data); sum_data_cur = __hadd(sum_data_cur, sub_data); #else half sub_data = __float2half(__half2float(in_data[real_index]) - __half2float(max_data_cur)); sub_data = __float2half(expf(__half2float(sub_data))); sum_data_cur = __float2half(__half2float(sum_data_cur) + __half2float(sub_data)); #endif out_data[real_index] = sub_data; real_index += inner_num; } sum_data[idx] = sum_data_cur; } } template <typename dtype> __global__ void softmax_divid_output_kernel(int total_size, dtype* io_data, const dtype* sum_data, int inner_num, int outer_num, int axis_size) { //! 
compute data index int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < total_size) { int idx_inner = idx % inner_num; int idx_outer = (idx / inner_num) * axis_size; dtype sum_data_cur = 1.f / sum_data[idx]; int real_index = idx_outer * inner_num + idx_inner; //! compute final result for (int i = 0; i < axis_size; ++i) { io_data[real_index] = io_data[real_index] * sum_data_cur; real_index += inner_num; } } } template <> __global__ void softmax_divid_output_kernel(int total_size, half* io_data, const half* sum_data, int inner_num, int outer_num, int axis_size) { //! compute data index int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < total_size) { int idx_inner = idx % inner_num; int idx_outer = (idx / inner_num) * axis_size; #if __CUDA_ARCH__ >= 530 half sum_data_cur = __hdiv(__float2half(1.f), sum_data[idx]); #else half sum_data_cur = __float2half(1.f / __half2float(sum_data[idx])); #endif int real_index = idx_outer * inner_num + idx_inner; //! compute final result for (int i = 0; i < axis_size; ++i) { #if __CUDA_ARCH__ >= 530 io_data[real_index] = __hmul(io_data[real_index], sum_data_cur); #else io_data[real_index] = __float2half(__half2float(io_data[real_index]) * __half2float(sum_data_cur)); #endif real_index += inner_num; } } } template <typename Dtype, PrecisionType Ptype> void SoftmaxCompute<Dtype, Ptype>::PrepareForRun() { auto& param = this->template Param<param_t>(); auto& ctx = this->ctx_->template As<CUDAContext>(); int device_id; cudaGetDevice(&device_id); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device_id); sharedmem_size_ = deviceProp.sharedMemPerBlock; max_dimsize_ = sharedmem_size_ / sizeof(float) / CUDA_NUM_THREADS; if (param.use_cudnn) { cudnn_softmax_.Init(param, &ctx); } } template <typename Dtype, PrecisionType Ptype> void SoftmaxCompute<Dtype, Ptype>::Run() { auto& param = this->template Param<param_t>(); auto& ctx = this->ctx_->template As<CUDAContext>(); auto stream = ctx.exec_stream(); if (param.use_cudnn) { cudnn_softmax_.Create(param, &ctx); cudnn_softmax_.Run(param); } else { auto x_dims = param.x->dims(); auto x_rank = x_dims.size(); int axis = param.axis; if (axis < 0) { axis += x_rank; } int outer_num = x_dims.Slice(0, axis).production(); int inner_num = x_dims.Slice(axis + 1, x_rank).production(); int total_threads = inner_num * outer_num; axis_size_ = x_dims[axis]; const int threads = CUDA_NUM_THREADS; const int blocks = (total_threads + threads - 1) / threads; auto input_data = param.x->template data<Dtype>(); auto output_data = param.output->template mutable_data<Dtype>(TARGET(kCUDA)); if (axis_size_ <= max_dimsize_) { int use_sharemem_size = axis_size_ * threads * sizeof(Dtype); sharemem_softmax_kernel< Dtype><<<blocks, threads, use_sharemem_size, stream>>>(total_threads, input_data, output_data, inner_num, outer_num, axis_size_); } else { //! re_alloc device memory tmax_data_.Resize({1, 1, 1, outer_num * inner_num}); tsum_data_.Resize({1, 1, 1, outer_num * inner_num}); auto max_data = tmax_data_.mutable_data<Dtype>(TARGET(kCUDA)); auto sum_data = tsum_data_.mutable_data<Dtype>(TARGET(kCUDA)); //! firstly, get maximum data float min_data = std::numeric_limits<float>::lowest(); softmax_max_kernel<Dtype><<<blocks, threads, 0, stream>>>(total_threads, input_data, max_data, min_data, inner_num, outer_num, axis_size_); //! then, compute exp and sum data softmax_sub_exp_sum_kernel<Dtype><<<blocks, threads, 0, stream>>>( total_threads, input_data, output_data, max_data, sum_data, inner_num, outer_num, axis_size_); //! 
last, compute divided output softmax_divid_output_kernel<Dtype><<<blocks, threads, 0, stream>>>( total_threads, output_data, sum_data, inner_num, outer_num, axis_size_); } } CUDA_POST_KERNEL_CHECK; } } // namespace cuda } // namespace kernels } // namespace lite } // namespace paddle using SoftmaxFp32 = paddle::lite::kernels::cuda::SoftmaxCompute<float, PRECISION(kFloat)>; using SoftmaxFp16 = paddle::lite::kernels::cuda::SoftmaxCompute<half, PRECISION(kFP16)>; REGISTER_LITE_KERNEL(softmax, kCUDA, kFloat, kNCHW, SoftmaxFp32, def) .BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW))}) .BindInput("axis", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW))}) .Finalize(); REGISTER_LITE_KERNEL(softmax, kCUDA, kFP16, kNCHW, SoftmaxFp16, def) .BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16), DATALAYOUT(kNCHW))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16), DATALAYOUT(kNCHW))}) .BindOutput("Out_log", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))}) .Finalize(); REGISTER_LITE_KERNEL(search_seq_softmax, kCUDA, kFloat, kNCHW, SoftmaxFp32, def) .BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW))}) .BindOutput("Out_log", {LiteType::GetTensorTy(TARGET(kCUDA))}) .Finalize(); REGISTER_LITE_KERNEL(search_seq_softmax, kCUDA, kFP16, kNCHW, SoftmaxFp16, def) .BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16), DATALAYOUT(kNCHW))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16), DATALAYOUT(kNCHW))}) .BindOutput("Out_log", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))}) .Finalize();
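/*
 * Illustrative addition (not part of the original file): a CPU reference of
 * the index scheme the softmax kernels above share. A contiguous tensor of
 * shape (outer_num, axis_size, inner_num) is reduced along the middle (axis)
 * dimension; the start index is idx_outer * inner_num + idx_inner and each
 * step along the axis advances by inner_num. Names are assumed for
 * illustration only.
 */
#include <algorithm>
#include <cmath>
#include <vector>

static void softmax_axis_ref(const std::vector<float>& in, std::vector<float>& out,
                             int outer_num, int axis_size, int inner_num) {
  out.resize(in.size());
  for (int o = 0; o < outer_num; ++o) {
    for (int i = 0; i < inner_num; ++i) {
      const int base = o * axis_size * inner_num + i;  // same start index as the kernels
      float max_v = in[base];
      for (int a = 1; a < axis_size; ++a)              // max across the softmax axis
        max_v = std::max(max_v, in[base + a * inner_num]);
      float sum = 0.0f;
      for (int a = 0; a < axis_size; ++a) {            // exp(x - max) and running sum
        const float e = std::exp(in[base + a * inner_num] - max_v);
        out[base + a * inner_num] = e;
        sum += e;
      }
      for (int a = 0; a < axis_size; ++a)              // final division
        out[base + a * inner_num] /= sum;
    }
  }
}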
#if __CUDA_ARCH__ >= 300 #define edcellupdate(RR,RP1,RP2,RPP,WUN,TMP) \ asm("vmin4.s32.s32.s32.add" "%0, %1.b3210, %2.b4321, %3;": "=r" (RR) : "r" (RP1), "r" (RP2), "r" (WUN)); \ asm("vadd4.s32.s32.s32" "%0, %1, %2, %3;": "=r" (TMP) : "r" (MM), "r" (RZ), "r" (RR)); \ asm("vmin4.s32.s32.s32" "%0, %1, %2, %3;": "=r" (RR) : "r" (TMP), "r" (RR), "r" (RR)); __device__ void hammingcell(int &a0, int a1, int b0, int w0, int &c, int tmp, int zero) { asm("and.b32" "%0, %1, %2;": "=r" (tmp) : "r" (a0), "r" (b0)); asm("vset4.s32.s32.eq" "%0, %1, %2, %3;": "=r" (tmp) : "r" (tmp), "r" (zero), "r" (zero)); asm("vsub4.s32.s32.s32" "%0, %1, %2, %3;": "=r" (tmp) : "r" (zero), "r" (tmp), "r" (zero)); asm("vmin4.u32.u32.u32.add" "%0, %1, %2, %3;": "=r" (c) : "r" (w0), "r" (tmp), "r" (c)); asm("vmax4.u32.u32.u32" "%0, %1.b4321, %2.b4321, %3;": "=r" (a0) : "r" (a0), "r" (a1), "r" (a0)); } __device__ void rotate1(int &a0) { asm("shr.b32" "%0, %1, 8;": "=r" (a0) : "r" (a0)); } __device__ void editcell(unsigned int a0, unsigned int a1, unsigned int m, unsigned int &b0, unsigned int &b1) { unsigned int a, am, c, nd, ne, f0, f1; a = a0 & ~ a1; // a = 1 am = a & m; c = (a + am) ^ a ^ am; // carry bit nd = m | c | (a0 & a1); // complement of diagonal bit d ne = nd >> 1; // shifted diagonal bit f0 = nd ^ ne; // f = bits of e - d f1 = ne & ~ nd; b0 = a0 ^ f0; b1 = (a1 & ~ f0) | (f1 & ~ a0) ; } __device__ void shlc(unsigned int &a0, unsigned int &a1) { asm("add.cc.u32" "%0, %1, %2;": "=r" (a0) : "r" (a0), "r" (a0)); asm("addc.cc.u32" "%0, %1, %2;": "=r" (a1) : "r" (a1), "r" (a1)); } template<int VECLEN, int NVEC, int TLEN> __global__ void __hammingdists(int *a, int *b, int *w, int *op, int *ow, int n) { __shared__ int sa[TLEN]; __shared__ int sb[32][VECLEN*NVEC+1]; __shared__ int sw[32][VECLEN*NVEC+1]; __shared__ int sop[32]; __shared__ int sow[32]; register int aa[VECLEN+1]; register int bb[VECLEN]; register int ww[VECLEN]; int i, ioff, ioffmv, ip, tmp, tmp1, j, k, c, cmin, imin; int zero = 0; int sid = threadIdx.x + blockDim.x * threadIdx.y; if (threadIdx.y + blockDim.y * blockIdx.x < n) { // Load data into shared memory for (i = 0; i < TLEN/1024; i++) { sa[sid + i*1024] = a[sid + i*1024 + TLEN*blockIdx.x]; } for (i = 0; i < VECLEN*NVEC/32; i++) { sb[threadIdx.y][threadIdx.x + i*blockDim.x] = b[sid + i*1024 + VECLEN*NVEC*blockIdx.x]; sw[threadIdx.y][threadIdx.x + i*blockDim.x] = w[sid + i*1024 + VECLEN*NVEC*blockIdx.x]; } __syncthreads(); ip = threadIdx.x / NVEC; ioffmv = (threadIdx.x % NVEC) * VECLEN; ioff = ioffmv + ip * (TLEN*NVEC/32); cmin = 0x7fffffff; imin = -1; // Load data for this thread into registers #pragma unroll for (j = 0; j < VECLEN; j++) { tmp = j + ioff; if (tmp < TLEN) { aa[j] = sa[tmp]; } bb[j] = sb[threadIdx.y][j + ioffmv]; ww[j] = sw[threadIdx.y][j + ioffmv]; } // Step through offsets in A string for (j = 0; j < TLEN*NVEC/8; j++) { tmp = VECLEN + ioff + j / 4; if (tmp - ioffmv < TLEN - VECLEN * NVEC) { if (j % 4 == 0) { aa[VECLEN] = sa[tmp]; } c = 0; // Inner loop over the length of the vector in registers #pragma unroll for (k = 0; k < VECLEN; k++) { hammingcell(aa[k], aa[k+1], bb[k], ww[k], c, tmp, zero); } rotate1(aa[VECLEN]); // Need to sum over NVEC to get complete score for a string #pragma unroll for (k = 1; k < NVEC; k *= 2) { tmp = __shfl_down(c, k); c = c + tmp; } // Now compare with the accumulated min if (c < cmin) { cmin = c; imin = 4 * ioff + j; } } } // Compute the min across groups of NVEC threads in this warp for (k = NVEC; k < 32; k *= 2) { tmp = __shfl_down(cmin, k); tmp1 = 
__shfl_down(imin, k); if (tmp < cmin) { cmin = tmp; imin = tmp1; } } // Save to shared memory in prep for saving to main memory if (threadIdx.x == 0) { sop[threadIdx.y] = imin; sow[threadIdx.y] = cmin; } __syncthreads(); // Save to main memory if (threadIdx.y == 0) { op[threadIdx.x + 32*blockIdx.x] = sop[threadIdx.x]; ow[threadIdx.x + 32*blockIdx.x] = sow[threadIdx.x]; } } } __global__ void __veccmp(int *a, int *b, int *d) { int xa = *a; int xb = *b; int xc = 0; int xd = 0; asm("vset4.s32.s32.ne" "%0, %1.b0000, %2, %3;": "=r" (xd) : "r" (xa), "r" (xb), "r" (xc)); *d++ = xd; asm("vset4.s32.s32.ne" "%0, %1.b1111, %2, %3;": "=r" (xd) : "r" (xa), "r" (xb), "r" (xc)); *d++ = xd; asm("vset4.s32.s32.ne" "%0, %1.b2222, %2, %3;": "=r" (xd) : "r" (xa), "r" (xb), "r" (xc)); *d++ = xd; asm("vset4.s32.s32.ne" "%0, %1.b3333, %2, %3;": "=r" (xd) : "r" (xa), "r" (xb), "r" (xc)); *d = xd; } #else __global__ void __veccmp(int *a, int *b, int *d) { printf("__veccmp() not defined for CUDA Arch < 300\n"); } template<int VECLEN, int NVEC, int TLEN> __global__ void __hammingdists(int *a, int *b, int *w, int *op, int *ow, int n) { printf("__hammingdists() not defined for CUDA Arch < 300\n"); } #endif int veccmp(int *a, int *b, int *d) { __veccmp<<<1,1>>>(a, b, d); return 0; } int hammingdists(int *a, int *b, int *w, int *op, int *ow, int n) { int nb = 1+((n-1)/32); dim3 blockdims(32,32,1); __hammingdists<16,2,1024><<<nb,blockdims>>>(a, b, w, op, ow, n); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } #define DBSIZE (5*1024) __global__ void __multinomial2(int nrows, int ncols, float *A, int *B, curandState *rstates, int nvals) { __shared__ float vec[DBSIZE]; __shared__ float tvec[DBSIZE]; int i, j, jcol, jcolnr, jbase, shift, *ivec; int imin, imax, imid; int fitcols = DBSIZE/nrows; float vv, rval, *avec, *bvec, *cvec; float nrowsinv = 1.0f / nrows; float nvalsinv = 1.0f / nvals; curandState *prstate = &rstates[threadIdx.x]; __syncthreads(); for (i = fitcols * blockIdx.x; i < ncols; i += fitcols * gridDim.x) { avec = vec; bvec = tvec; __syncthreads(); for (j = threadIdx.x; j < nrows * fitcols; j += blockDim.x) { vec[j] = A[j + i * nrows]; } __syncthreads(); for (shift = 1; shift < nrows; shift *= 2) { for (j = threadIdx.x; j < nrows * fitcols; j += blockDim.x) { vv = avec[j]; jbase = ((int)floor((j + 0.5f)*nrowsinv))*nrows; // jbase = (j / nrows) * nrows; if (j - shift >= jbase) { vv += avec[j-shift]; } bvec[j] = vv; } __syncthreads(); cvec = avec; avec = bvec; bvec = cvec; } ivec = (int *)bvec; for (j = threadIdx.x; j < nrows*fitcols; j += blockDim.x) { ivec[j] = 0; } __syncthreads(); for (j = threadIdx.x; j < nvals*fitcols; j += blockDim.x) { jcol = (int)floor((j + 0.5f)*nvalsinv); jcolnr = jcol * nrows; rval = avec[jcolnr+nrows-1]*curand_uniform(prstate); imin = 0; imax = nrows; while (imax - imin > 1) { imid = (imin + imax) >> 1; if (rval >= avec[imid + jcolnr]) { imin = imid; } else { imax = imid; } } atomicAdd(&ivec[imin + jcolnr], 1); } __syncthreads(); for (j = threadIdx.x; j < nrows*fitcols; j += blockDim.x) { B[j + i * nrows] = ivec[j]; } __syncthreads(); } } // // __forceinline__ __device__ int __waittimeval(curandState *prstate, float p, int n) { float q = - log(1-p); float X = 0; float sum = 0; int i = 0; while (i < 100 && sum <= q) { float E = - log(curand_uniform(prstate)); // safe since curand_uniform wont return 0 sum += E / (n - X); X += 1; i += 1; } return X - 1; } __forceinline__ __device__ int binorndval(float p, int n, curandState *prstate) { bool 
pflipped; float X, Y, V; const float pi = 3.1415926f; if (p > 0.5f) { // flip p so that its less than 1/2. pflipped = true; p = 1.0f - p; } else { pflipped = false; } float np = n * p; if (np < 21) { X = __waittimeval(prstate, p, n); // Use a wait time method if small expected output } else { float oldp = p; p = floor(np) / n; // round np to an integral value for the rejection stage p = max(1e-7f, min(1 - 1e-7f, p)); // prevent divide-by-zeros np = n * p; float n1mp = n * (1-p); float pvar = np * (1-p); float delta1 = max(1.0f, floor(sqrt(pvar * log(128 * np / (81 * pi * (1-p)))))); float delta2 = max(1.0f, floor(sqrt(pvar * log(128 * n1mp / (pi * p))))); float sigma1 = sqrt(pvar)*(1+delta1/(4*np)); float sigma2 = sqrt(pvar)*(1+delta2/(4*n1mp)); float sigma1sq = sigma1 * sigma1; float sigma2sq = sigma2 * sigma2; float c = 2 * delta1 / np; float a1 = 0.5f * exp(c) * sigma1 * sqrt(2*pi); float a2 = 0.5f * sigma2 * sqrt(2*pi); float a3 = exp(delta1/n1mp - delta1*delta1/(2*sigma1sq))*2*sigma1sq/delta1; float a4 = exp(-delta2*delta2/(2*sigma2sq))*2*sigma2sq/delta2; float s = a1 + a2 + a3 + a4; int i = 0; while (i < 100) { // Give up eventually i += 1; float U = s * curand_uniform(prstate); float E1 = - log(curand_uniform(prstate)); // safe since curand_uniform wont return 0 if (U <= a1 + a2) { float N = curand_normal(prstate); if (U <= a1) { Y = sigma1 * abs(N); if (Y >= delta1) continue; X = floor(Y); V = - E1 - N * N/2 + c; } else { Y = sigma2 * abs(N); if (Y >= delta2) continue; X = floor(-Y); V = - E1 - N * N/2; } } else { float E2 = - log(curand_uniform(prstate)); if (U <= a1 + a2 + a3) { Y = delta1 + 2*sigma1sq*E1/delta1; X = floor(Y); V = - E2 - delta1*Y/(2*sigma1sq) + delta1/n1mp; } else { Y = delta2 + 2*sigma2sq*E1/delta2; X = floor(-Y); V = - E2 - delta2*Y/(2*sigma2sq); } } if (X < - np || X > n1mp) continue; if (V > lgamma(np+1) + lgamma(n1mp+1) - lgamma(np+X+1) - lgamma(n1mp-X+1) + X*log(p/(1-p))) continue; break; } X += np; X += __waittimeval(prstate, (oldp-p)/(1-p), n-X); // Now correct for rounding np to integer } if (pflipped) { // correct for flipped p. X = n - X; } return (int)X; } // // i steps over blocks of 256 columns // j steps down blocks of 32 rows // Load a block of 32 x 256 words // k steps down the rows of this block // compute local p get bino sample "count" from remaining n // decrement remaining n. 
// __global__ void __multinomial(int nrows, int ncols, float *A, int *B, float *Norm, curandState *rstates, int nvals) { __shared__ float mat[256][33]; int (*imat)[33] = (int (*)[33])mat; int i, j, k, valsleft, count, iv; float vnorm, vv; int tid = threadIdx.x + blockDim.x * threadIdx.y; curandState *prstate = &rstates[tid + blockIdx.x*blockDim.x*blockDim.y]; __syncthreads(); for (i = 256*blockIdx.x; i < ncols; i += 256*gridDim.x) { // Loop across blocks of 256 columns vnorm = 1.0f; if (tid + i < ncols) { // Load the norms for these 256 cols vnorm = Norm[tid+i]; } valsleft = nvals; // Initialize the count of samples for this column for (j = 0; j < nrows; j += blockDim.x) { // Loop over blocks of 32 rows __syncthreads(); for (k = 0; k < min(256, ncols-i); k += 8) { // Copy this 32x256 word block into SHMEM vv = 0; if (j+threadIdx.x < nrows) { vv = A[j+threadIdx.x + (i+k+threadIdx.y)*nrows]; } mat[k+threadIdx.y][threadIdx.x] = vv; } __syncthreads(); for (k = 0; k < min(32, nrows-j); k += 1) { // Now walk down the columns with 256 threads vv = min(mat[tid][k], vnorm); count = binorndval(vv/vnorm, valsleft, prstate); // get a binomial random count count = min(count, valsleft); valsleft -= count; // subtract it from remaining count vnorm -= vv; // adjust remaining probability imat[tid][k] = count; // store count in the aliased SHMEM matrix } __syncthreads(); for (k = 0; k < min(256, ncols-i); k += 8) { // Save this 32x256 block back into main memory iv = imat[k+threadIdx.y][threadIdx.x]; if (j+threadIdx.x < nrows) { B[j+threadIdx.x + (i+k+threadIdx.y)*nrows] = iv; } } __syncthreads(); } } } __global__ void __prandinit(curandState *rstates) { int id = threadIdx.x + blockDim.x * blockIdx.x; curand_init(1234, id, 0, &rstates[id]); } int multinomial2(int nrows, int ncols, float *A, int *B, int nvals) { int fitcols = DBSIZE/nrows; int nthreads = 1024; int nb = 1 + (ncols-1)/fitcols; int nblocks = min(128, nb); curandState *rstates; cudaError_t err = cudaMalloc(( void **)& rstates , nblocks * nthreads * sizeof(curandState)); if (err > 0) { fprintf(stderr, "Error in cudaMalloc %d", err); return err; } cudaStreamSynchronize(SYNC_STREAM); __prandinit<<<nblocks,nthreads>>>(rstates); cudaStreamSynchronize(SYNC_STREAM); __multinomial2<<<nblocks,nthreads>>>(nrows, ncols, A, B, rstates, nvals); cudaStreamSynchronize(SYNC_STREAM); cudaFree(rstates); err = cudaGetLastError(); return err; } int multinomial(int nrows, int ncols, float *A, int *B, float *Norm, int nvals) { dim3 threads(32, 8, 1); int nthreads = 256; int nblocks = min(128, 1+ (ncols-1)/256); curandState *rstates; cudaError_t err = cudaMalloc(( void **)& rstates , nblocks * nthreads * sizeof(curandState)); if (err > 0) { fprintf(stderr, "Error in cudaMalloc %d", err); return err; } cudaStreamSynchronize(SYNC_STREAM); __prandinit<<<nblocks,nthreads>>>(rstates); cudaStreamSynchronize(SYNC_STREAM); __multinomial<<<nblocks,threads>>>(nrows, ncols, A, B, Norm, rstates, nvals); cudaStreamSynchronize(SYNC_STREAM); cudaFree(rstates); err = cudaGetLastError(); return err; } template<typename KEY, typename V1, typename V2, typename RET, class C> __global__ void prodSelect(int n, int *groups, KEY *keys1, KEY *keys2, V1 *vals1, V2 *vals2, KEY *kout, RET *ret) { }
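/*
 * Illustrative addition (not part of the original file): a host-side sketch of
 * the conditional-binomial scheme that __multinomial above applies per column.
 * Walking down the rows, each entry draws count_i ~ Binomial(valsleft, p_i / vnorm),
 * then the remaining sample count and remaining probability mass are
 * decremented. std::binomial_distribution stands in for the GPU-side
 * binorndval(); all names here are assumed for illustration only.
 */
#include <algorithm>
#include <random>
#include <vector>

static std::vector<int> multinomial_ref(const std::vector<float>& p, float norm,
                                        int nvals, std::mt19937& gen) {
  std::vector<int> counts(p.size(), 0);
  int valsleft = nvals;           // samples still to place in this column
  float vnorm = norm;             // probability mass still to consume
  for (size_t i = 0; i < p.size() && valsleft > 0 && vnorm > 0.0f; ++i) {
    const float pi = std::min(p[i], vnorm);
    std::binomial_distribution<int> bin(valsleft, pi / vnorm);
    const int c = std::min(bin(gen), valsleft);
    counts[i] = c;
    valsleft -= c;                // subtract from the remaining count
    vnorm -= pi;                  // and from the remaining mass
  }
  return counts;
}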
#include "my_lib_kernel.h" #define min(a,b) ((a<b)?(a):(b)) #define max(a,b) ((a>b)?(a):(b)) #define DEBUG (0) #ifndef BLOCKDIMX #define BLOCKDIMX (32) #endif #ifndef BLOCKDIMY #define BLOCKDIMY (16) #endif //forward path of our layer __global__ void SeparableConvFlowLayer_gpu_forward_kernelfunc( const int nElement, const int w, const int h, const int channel, const int filter_size, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, //const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const int flow_output_b_stride, const int flow_output_c_stride, const int flow_output_h_stride, const int flow_output_w_stride, const float* input1, const float* input2, const float* input3, float* flow_output ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used //only use one dimensioon of the grid and block const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w - filter_size + 1; const bool withinYbounds = h_i < h - filter_size + 1; const int batch_i = blockIdx.z; // __syncthreads(); // const float fillvalue =0.0f; if( withinXbounds && withinYbounds) { float flow_y = 0.0f; float sum_weights = 0.0f; for ( int intFilterY = 0; intFilterY < filter_size; intFilterY += 1) { float temp2 = input2[batch_i * input2_b_stride + intFilterY * input2_c_stride + h_i * input2_h_stride + w_i ]; flow_y += (float)(intFilterY) * temp2 ; sum_weights += temp2; } //sum_weights = fabs(sum_weights); flow_y = flow_y / sum_weights - ((float)(filter_size)-1.0)/2.0; flow_output[batch_i * flow_output_b_stride + 1 * flow_output_c_stride+ h_i* flow_output_h_stride + w_i] = fabs(sum_weights) > 0.0f ? flow_y : -2000; float flow_x = 0.0f; float sum_weights_x = 0.0f; for ( int intFilterX = 0; intFilterX < filter_size; intFilterX += 1) { float temp3 = input3[batch_i * input3_b_stride + intFilterX * input3_c_stride + h_i * input3_h_stride + w_i ]; flow_x += (float)(intFilterX) * temp3; sum_weights_x += temp3; } //sum_weights_x = fabs(sum_weights_x); flow_x = flow_x / sum_weights_x - ((float)(filter_size)-1.0)/2.0; // what if the sum_weight is less than zeros. flow_output[batch_i * flow_output_b_stride + 0 * flow_output_c_stride + h_i* flow_output_h_stride + w_i] = fabs(sum_weights_x) >0.0f ? 
flow_x : -2000; } return ; } __global__ void SeparableConvFlowLayer_gpu_backward_kernelfunc( const int nElement, const int w, const int h, const int channel, const int filter_size, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, //const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const int flow_output_b_stride, const int flow_output_c_stride, const int flow_output_h_stride, const int flow_output_w_stride, const float* input1, const float* input2, const float* input3, const float* gradflow_output, float* gradinput1, float* gradinput2, float* gradinput3 ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w - filter_size + 1; const bool withinYbounds = h_i < h - filter_size + 1; const int batch_i = blockIdx.z; if(withinXbounds && withinYbounds){ float flow_y = 0.0f; float sum_weights = 0.0f; for ( int intFilterY = 0; intFilterY < filter_size; intFilterY += 1) { float temp2 = input2[batch_i * input2_b_stride + intFilterY * input2_c_stride + h_i * input2_h_stride + w_i ]; flow_y += (float)(intFilterY) * temp2 ; sum_weights += temp2; } //flow_y = flow_y / sum_weights - ((float)(filter_size)-1.0)/2.0; //flow_output_data[batch_i * flow_output_b_stride + 1 * flow_output_c_stride+ h_i* flow_output_h_stride + w_i] = // sum_weights >0.0f ? flow_y : -2000; //float sign = sum_weights >0.0f ? 1.0f : -1.0f; //sum_weights = fabs(sum_weights); if(fabs(sum_weights) >0.0f ){ float gradflow_y = gradflow_output[batch_i * flow_output_b_stride + 1* flow_output_c_stride + h_i * flow_output_h_stride + w_i ] ; float offset = flow_y / ( sum_weights * sum_weights); for (int intFilterY = 0; intFilterY < filter_size; intFilterY += 1) { gradinput2[batch_i * input2_b_stride + intFilterY * input2_c_stride + h_i * input2_h_stride + w_i ] = gradflow_y * ((float)(intFilterY) / sum_weights - offset); } } float flow_x = 0.0f; float sum_weights_x = 0.0f; for ( int intFilterX = 0; intFilterX < filter_size; intFilterX += 1) { float temp3 = input3[batch_i * input3_b_stride + intFilterX * input3_c_stride + h_i * input3_h_stride + w_i ]; flow_x += (float)(intFilterX) * temp3; sum_weights_x += temp3; } //flow_x = flow_x / sum_weights_x - ((float)(filter_size)-1.0)/2.0; //flow_output_data[batch_i * flow_output_b_stride + 0 * flow_output_c_stride + h_i* flow_output_h_stride + w_i] = // sum_weights_x >0 ? flow_x : -2000; //float sign_x = sum_weights_x >0.0f ? 
1.0f : -1.0f; //sum_weights_x = fabs(sum_weights_x); if(fabs(sum_weights_x) > 0.0f ){ float gradflow_x = gradflow_output[batch_i * flow_output_b_stride + 0 * flow_output_c_stride + h_i * flow_output_h_stride + w_i]; float offset = flow_x / (sum_weights_x * sum_weights_x); for ( int intFilterX = 0; intFilterX < filter_size; intFilterX += 1) { gradinput3[batch_i * input3_b_stride + intFilterX * input3_c_stride + h_i * input3_h_stride + w_i ] += gradflow_x * ((float)(intFilterX) /sum_weights_x - offset); } } } return ; } int SeparableConvFlowLayer_gpu_forward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int channel, const int batch,const int filter_size, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, //const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const int flow_output_b_stride, const int flow_output_c_stride, const int flow_output_h_stride, const int flow_output_w_stride, const float* input1, const float* input2, const float* input3, float* flow_output ) { int error = -1; dim3 grid; dim3 block; // blockthread = 128; //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z //the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w - filter_size + 1 + BLOCKDIMX - 1)/ BLOCKDIMX, (h - filter_size + 1 + BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); //extract the data of CudaTensor and use kernel to calculate. 
SeparableConvFlowLayer_gpu_forward_kernelfunc<<<grid,block,0, stream >>>( nElement, //to let the nummous w,h,channel, filter_size, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride, //output_b_stride,output_c_stride,output_h_stride,output_w_stride, flow_output_b_stride,flow_output_c_stride,flow_output_h_stride,flow_output_w_stride, input1,input2,input3, flow_output ); // THCudaCheck(cudaGetLastError()); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in SeparableConvFlowLayer_gpu_forward_kernel: %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } error = 0; return error; } int SeparableConvFlowLayer_gpu_backward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int channel, const int batch, const int filter_size, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, //const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const int flow_output_b_stride, const int flow_output_c_stride, const int flow_output_h_stride, const int flow_output_w_stride, const float* input1, const float* input2, const float* input3, const float* gradflow_output, float* gradinput1, float* gradinput2, float* gradinput3 ) { int error = -1; dim3 grid; dim3 block; //blockthread = 128; //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z //the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w - filter_size + 1 + BLOCKDIMX - 1)/ BLOCKDIMX, (h - filter_size + 1+ BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); // cudaMemset((void*)gradinput1, 0, input1_b_stride * batch * sizeof(float)); // cudaMemset((void*)gradinput2, 0, input2_b_stride * batch * sizeof(float)); // cudaMemset((void*)gradinput3, 0, input3_b_stride * batch * sizeof(float)); SeparableConvFlowLayer_gpu_backward_kernelfunc <<<grid,block,0, stream>>>( nElement, //to let the nummous w,h,channel, filter_size, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride, //output_b_stride,output_c_stride,output_h_stride,output_w_stride, flow_output_b_stride,flow_output_c_stride,flow_output_h_stride,flow_output_w_stride, input1, input2, input3, gradflow_output, gradinput1, gradinput2, gradinput3 ); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in BilinearSampler.updateGradInput %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } error = 0; return error; } //forward path of our layer __global__ void SeparableConvLayer_gpu_forward_kernelfunc( const int nElement, const int w, const int h, const int channel, const int filter_size, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int 
input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const float* input1, const float* input2, const float* input3, float* output ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used //only use one dimensioon of the grid and block const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w - filter_size + 1; const bool withinYbounds = h_i < h - filter_size + 1; const int batch_i = blockIdx.z; // __syncthreads(); // const float fillvalue =0.0f; if( withinXbounds && withinYbounds) { for ( int c_i = 0 ; c_i < channel ; c_i ++){ float out = 0.0f; for (int intFilterY = 0; intFilterY < filter_size; intFilterY += 1) { for (int intFilterX = 0; intFilterX < filter_size; intFilterX += 1) { float temp1 = input1[batch_i * input1_b_stride + c_i * input1_c_stride + (h_i + intFilterY )* input1_h_stride + (w_i + intFilterX)]; float temp2 = input2[batch_i * input2_b_stride + intFilterY * input2_c_stride + h_i * input2_h_stride + w_i ]; float temp3 = input3[batch_i * input3_b_stride + intFilterX * input3_c_stride + h_i * input3_h_stride + w_i ]; out += temp1* temp2 * temp3; } } output[batch_i * output_b_stride + c_i* output_c_stride + h_i * output_h_stride + w_i ] = out; } } return ; } __global__ void SeparableConvLayer_gpu_backward_kernelfunc( const int nElement, const int w, const int h, const int channel, const int filter_size, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const float* input1, const float* input2, const float* input3, const float* gradoutput, float* gradinput1, float* gradinput2, float* gradinput3 ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w - filter_size + 1; const bool withinYbounds = h_i < h - filter_size + 1; const int batch_i = blockIdx.z; if(withinXbounds && withinYbounds){ for (int c_i = 0 ; c_i < channel ; c_i ++){ for (int intFilterY = 0; intFilterY < filter_size; intFilterY += 1) { for ( int intFilterX = 0; intFilterX < filter_size; intFilterX += 1) { float temp1 = input1[batch_i * input1_b_stride + c_i * input1_c_stride + (h_i + intFilterY )* input1_h_stride + (w_i + intFilterX)]; float temp2 = input2[batch_i * input2_b_stride + intFilterY * input2_c_stride + h_i * input2_h_stride + w_i ]; float temp3 = input3[batch_i * input3_b_stride + intFilterX * input3_c_stride + h_i * input3_h_stride + w_i ]; float gradout = gradoutput[batch_i * output_b_stride + c_i* output_c_stride + h_i * output_h_stride + w_i ]; atomicAdd(&gradinput1[batch_i * input1_b_stride + c_i * 
input1_c_stride + (h_i + intFilterY )* input1_h_stride + (w_i + intFilterX)], gradout * temp2 * temp3); atomicAdd(&gradinput2[batch_i * input2_b_stride + intFilterY * input2_c_stride + h_i * input2_h_stride + w_i ], gradout * temp1 * temp3); atomicAdd(&gradinput3 [batch_i * input3_b_stride + intFilterX * input3_c_stride + h_i * input3_h_stride + w_i ] , gradout * temp1 * temp2); } } } } return ; } int SeparableConvLayer_gpu_forward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int channel, const int batch,const int filter_size, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const float* input1, const float* input2, const float* input3, float* output ) { int error = -1; dim3 grid; dim3 block; // blockthread = 128; //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z //the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w - filter_size + 1 + BLOCKDIMX - 1)/ BLOCKDIMX, (h - filter_size + 1 + BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); //extract the data of CudaTensor and use kernel to calculate. SeparableConvLayer_gpu_forward_kernelfunc<<<grid,block,0, stream >>>( nElement, //to let the nummous w,h,channel, filter_size, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride, output_b_stride,output_c_stride,output_h_stride,output_w_stride, input1,input2,input3, output ); // THCudaCheck(cudaGetLastError()); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } error = 0; return error; } int SeparableConvLayer_gpu_backward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int channel, const int batch, const int filter_size, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const float* input1, const float* input2, const float* input3, const float* gradoutput, float* gradinput1, float* gradinput2, float* gradinput3 ) { int error = -1; dim3 grid; dim3 block; //blockthread = 128; //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z //the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w - filter_size + 1 + BLOCKDIMX - 1)/ BLOCKDIMX, (h - filter_size + 1+ BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); // cudaMemset((void*)gradinput1, 0, input1_b_stride * batch 
* sizeof(float)); // cudaMemset((void*)gradinput2, 0, input2_b_stride * batch * sizeof(float)); // cudaMemset((void*)gradinput3, 0, input3_b_stride * batch * sizeof(float)); SeparableConvLayer_gpu_backward_kernelfunc <<<grid,block,0, stream>>>( nElement, //to let the nummous w,h,channel, filter_size, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride, output_b_stride,output_c_stride,output_h_stride,output_w_stride, input1, input2, input3, gradoutput, gradinput1, gradinput2, gradinput3 ); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in BilinearSampler.updateGradInput %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } error = 0; return error; } //forward path of our layer __global__ void InterpolationLayer_gpu_forward_kernelfunc( const int nElement, const int w, const int h, const int channel, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const float* input1, const float* input2, float* output ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used //only use one dimensioon of the grid and block const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w; const bool withinYbounds = h_i < h; const int batch_i = blockIdx.z; const int off = batch_i * input1_b_stride; // __syncthreads(); const float fillvalue =0.0f; if( withinXbounds && withinYbounds) { float fx = input2[batch_i * input2_b_stride + 0 * input2_c_stride + h_i * input2_h_stride + w_i ]; float fy = input2[batch_i * input2_b_stride + 1 * input2_c_stride + h_i * input2_h_stride + w_i ]; float x2 = (float)(w_i) + fx; float y2 = (float)(h_i) + fy; if(x2 >= 0.0f && y2 >=0.0f && x2 < (float)w && y2 < (float)h){ int ix2_L = int(x2); int iy2_T = int(y2); int ix2_R = min(ix2_L + 1, w - 1); int iy2_B = min(iy2_T + 1, h - 1); float alpha = x2 - ix2_L; float beta = y2 - iy2_T; for(int c_i = 0 ; c_i < channel ; c_i ++){ float TL = input1[off + c_i * input1_c_stride + iy2_T * input1_h_stride + ix2_L]; float TR = input1[off + c_i * input1_c_stride + iy2_T * input1_h_stride + ix2_R]; float BL = input1[off + c_i * input1_c_stride + iy2_B * input1_h_stride + ix2_L]; float BR = input1[off + c_i * input1_c_stride + iy2_B * input1_h_stride + ix2_R]; output[off + c_i * input1_c_stride + h_i * input1_h_stride + w_i] = (1- alpha ) *(1-beta) *TL + alpha *(1- beta) * TR + (1-alpha) *beta *BL + alpha *beta * BR; } } else{ //the warping data is out of range, we fill it with zeros for(int c_i = 0 ; c_i < channel; c_i ++){ output[off + c_i * input1_c_stride + h_i * input1_h_stride + w_i] = fillvalue; } } } return ; } __global__ void InterpolationLayer_gpu_backward_kernelfunc( const int nElement, const int w, const int h, const int channel, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const float* input1, const float* input2, const float* gradoutput, float* gradinput1, float* 
gradinput2 ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w; const bool withinYbounds = h_i < h; const int batch_i = blockIdx.z; const int off = batch_i * input1_b_stride; // __syncthreads(); if(withinXbounds && withinYbounds){ float fx= input2[batch_i * input2_b_stride + 0 * input2_c_stride + h_i * input2_h_stride + w_i ]; float fy = input2[batch_i * input2_b_stride + 1* input2_c_stride + h_i * input2_h_stride + w_i]; float x2 = float(w_i) + fx; float y2 = float(h_i) + fy; if(x2 >= 0.0f && y2 >= 0.0f && x2 < (float)w && y2 < (float)h){ int ix2_L = int(x2); int iy2_T = int(y2); int ix2_R = min(ix2_L+ 1, w - 1); int iy2_B = min(iy2_T + 1, h - 1); float alpha = x2 - ix2_L; float beta = y2 - iy2_T; for (int c_i = 0 ; c_i < channel; c_i++){ float gradoutput_value = gradoutput[off + c_i * input1_c_stride + h_i * input1_h_stride + w_i]; atomicAdd( & gradinput1[off + c_i * input1_c_stride + iy2_T * input1_h_stride + ix2_L], gradoutput_value * ( 1- alpha) * (1- beta)); atomicAdd( & gradinput1[off + c_i * input1_c_stride + iy2_T * input1_h_stride + ix2_R], gradoutput_value * alpha * (1-beta)); atomicAdd( & gradinput1[off + c_i * input1_c_stride + iy2_B * input1_h_stride + ix2_L], gradoutput_value * (1-alpha ) * beta); atomicAdd( & gradinput1[off + c_i * input1_c_stride + iy2_B * input1_h_stride + ix2_R], gradoutput_value * alpha * beta); } float gamma = iy2_B - y2; float bot_diff = 0.0f; for(int c_i =0 ; c_i< channel; c_i ++ ){ float temp = 0; temp += gamma * (input1[off + c_i * input1_c_stride + iy2_T * input1_h_stride +ix2_R] - input1[off + c_i* input1_c_stride+ iy2_T * input1_h_stride + ix2_L]); temp += (1 - gamma) *( input1[off + c_i * input1_c_stride + iy2_B * input1_h_stride + ix2_R] - input1[off + c_i * input1_c_stride + iy2_B * input1_h_stride + ix2_L]); float warped_diff_value = gradoutput[off+ c_i * input1_c_stride+ h_i* input1_h_stride + w_i]; bot_diff += warped_diff_value * temp ; } //the gradients of the x direction/ horizontal direction gradinput2[batch_i * input2_b_stride + 0 * input2_c_stride + h_i * input2_h_stride + w_i] = bot_diff; gamma = ix2_R- x2; bot_diff = 0.0f; for(int c_i = 0 ; c_i < channel;c_i ++ ){ float temp = 0.0f; temp += gamma * (input1[off + c_i * input1_c_stride + iy2_B * input1_h_stride + ix2_L] - input1[off + c_i * input1_c_stride + iy2_T * input1_h_stride + ix2_L]); temp += (1-gamma) *( input1[off + c_i * input1_c_stride+ iy2_B* input1_h_stride+ix2_R] - input1[off+ c_i* input1_c_stride+ iy2_T * input1_h_stride +ix2_R]); float warped_diff_value = gradoutput[off + c_i * input1_c_stride + h_i * input1_h_stride + w_i]; bot_diff += warped_diff_value * temp; } gradinput2[batch_i * input2_b_stride + 1 * input2_c_stride + h_i * input2_h_stride + w_i]= bot_diff; } } return ; } //#define __cplusplus //#ifdef __cplusplus // extern "C" { //#endif int InterpolationLayer_gpu_forward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int channel, const int batch, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const float* input1, const float* input2, float* 
output ) { int error = -1; dim3 grid; dim3 block; // blockthread = 128; //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z //the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); //extract the data of CudaTensor and use kernel to calculate. InterpolationLayer_gpu_forward_kernelfunc<<<grid,block,0, stream >>>( nElement, //to let the nummous w,h,channel, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, input1,input2,output ); // THCudaCheck(cudaGetLastError()); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } error = 0; return error; } int InterpolationLayer_gpu_backward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int channel, const int batch, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const float* input1, const float* input2, const float* gradoutput, float* gradinput1, float* gradinput2 ) { int error = -1; dim3 grid; dim3 block; //blockthread = 128; //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z //the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); InterpolationLayer_gpu_backward_kernelfunc <<<grid,block,0, stream>>>( nElement, //to let the nummous w,h,channel, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, input1, input2, gradoutput, gradinput1, gradinput2 ); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpu error in BilinearSampler.updateGradInput %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } error = 0; return error; } //forward path of our layer __global__ void InterpolationChLayer_gpu_forward_kernelfunc( const int nElement, const int w, const int h, const int channel, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const float* input1, const float* input2, float* output ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used //only use one dimensioon of the grid and block const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w; const bool withinYbounds = h_i < h; const int batch_i = blockIdx.z; const int off = batch_i * input1_b_stride; // __syncthreads(); const float fillvalue =0.0f; if( withinXbounds && withinYbounds) { float fx = input2[batch_i * input2_b_stride + 0 * input2_c_stride + 
h_i * input2_h_stride + w_i ]; float fy = input2[batch_i * input2_b_stride + 1 * input2_c_stride + h_i * input2_h_stride + w_i ]; float x2 = (float)(w_i) + fx; float y2 = (float)(h_i) + fy; if(x2 >= 0.0f && y2 >=0.0f && x2 < (float)w && y2 < (float)h){ int ix2_L = int(x2); int iy2_T = int(y2); int ix2_R = min(ix2_L + 1, w - 1); int iy2_B = min(iy2_T + 1, h - 1); float alpha = x2 - ix2_L; float beta = y2 - iy2_T; for(int c_i = 0 ; c_i < channel ; c_i ++){ float TL = input1[off + c_i * input1_c_stride + iy2_T * input1_h_stride + ix2_L]; float TR = input1[off + c_i * input1_c_stride + iy2_T * input1_h_stride + ix2_R]; float BL = input1[off + c_i * input1_c_stride + iy2_B * input1_h_stride + ix2_L]; float BR = input1[off + c_i * input1_c_stride + iy2_B * input1_h_stride + ix2_R]; output[off + c_i * input1_c_stride + h_i * input1_h_stride + w_i] = (1- alpha ) *(1-beta) *TL + alpha *(1- beta) * TR + (1-alpha) *beta *BL + alpha *beta * BR; } } else{ //the warping data is out of range, we fill it with zeros for(int c_i = 0 ; c_i < channel; c_i ++){ output[off + c_i * input1_c_stride + h_i * input1_h_stride + w_i] = fillvalue; } } } return ; } __global__ void InterpolationChLayer_gpu_backward_kernelfunc( const int nElement, const int w, const int h, const int channel, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const float* input1, const float* input2, const float* gradoutput, float* gradinput1, float* gradinput2 ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w; const bool withinYbounds = h_i < h; const int batch_i = blockIdx.z; const int off = batch_i * input1_b_stride; // __syncthreads(); if(withinXbounds && withinYbounds){ float fx= input2[batch_i * input2_b_stride + 0 * input2_c_stride + h_i * input2_h_stride + w_i ]; float fy = input2[batch_i * input2_b_stride + 1* input2_c_stride + h_i * input2_h_stride + w_i]; float x2 = float(w_i) + fx; float y2 = float(h_i) + fy; if(x2 >= 0.0f && y2 >= 0.0f && x2 < (float)w && y2 < (float)h){ int ix2_L = int(x2); int iy2_T = int(y2); int ix2_R = min(ix2_L+ 1, w - 1); int iy2_B = min(iy2_T + 1, h - 1); float alpha = x2 - ix2_L; float beta = y2 - iy2_T; for (int c_i = 0 ; c_i < channel; c_i++){ float gradoutput_value = gradoutput[off + c_i * input1_c_stride + h_i * input1_h_stride + w_i]; atomicAdd( & gradinput1[off + c_i * input1_c_stride + iy2_T * input1_h_stride + ix2_L], gradoutput_value * ( 1- alpha) * (1- beta)); atomicAdd( & gradinput1[off + c_i * input1_c_stride + iy2_T * input1_h_stride + ix2_R], gradoutput_value * alpha * (1-beta)); atomicAdd( & gradinput1[off + c_i * input1_c_stride + iy2_B * input1_h_stride + ix2_L], gradoutput_value * (1-alpha ) * beta); atomicAdd( & gradinput1[off + c_i * input1_c_stride + iy2_B * input1_h_stride + ix2_R], gradoutput_value * alpha * beta); } float gamma = iy2_B - y2; float bot_diff = 0.0f; for(int c_i =0 ; c_i< channel; c_i ++ ){ float temp = 0; temp += gamma * (input1[off + c_i * input1_c_stride + iy2_T * input1_h_stride +ix2_R] - input1[off + c_i* input1_c_stride+ iy2_T * input1_h_stride + ix2_L]); temp += (1 - gamma) *( 
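            // descriptive note: away from the image border iy2_B == iy2_T + 1, so gamma == 1 - beta and
            // this loop accumulates gradoutput * d(output)/d(fx) with
            // d(output)/d(fx) = (1 - beta) * (TR - TL) + beta * (BR - BL),
            // i.e. the analytic derivative of the bilinear blend w.r.t. the horizontal flow component.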
input1[off + c_i * input1_c_stride + iy2_B * input1_h_stride + ix2_R] - input1[off + c_i * input1_c_stride + iy2_B * input1_h_stride + ix2_L]); float warped_diff_value = gradoutput[off+ c_i * input1_c_stride+ h_i* input1_h_stride + w_i]; bot_diff += warped_diff_value * temp ; } //the gradients of the x direction/ horizontal direction gradinput2[batch_i * input2_b_stride + 0 * input2_c_stride + h_i * input2_h_stride + w_i] = bot_diff; gamma = ix2_R- x2; bot_diff = 0.0f; for(int c_i = 0 ; c_i < channel;c_i ++ ){ float temp = 0.0f; temp += gamma * (input1[off + c_i * input1_c_stride + iy2_B * input1_h_stride + ix2_L] - input1[off + c_i * input1_c_stride + iy2_T * input1_h_stride + ix2_L]); temp += (1-gamma) *( input1[off + c_i * input1_c_stride+ iy2_B* input1_h_stride+ix2_R] - input1[off+ c_i* input1_c_stride+ iy2_T * input1_h_stride +ix2_R]); float warped_diff_value = gradoutput[off + c_i * input1_c_stride + h_i * input1_h_stride + w_i]; bot_diff += warped_diff_value * temp; } gradinput2[batch_i * input2_b_stride + 1 * input2_c_stride + h_i * input2_h_stride + w_i]= bot_diff; } } return ; } //#define __cplusplus //#ifdef __cplusplus // extern "C" { //#endif int InterpolationChLayer_gpu_forward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int channel, const int batch, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const float* input1, const float* input2, float* output ) { int error = -1; dim3 grid; dim3 block; // blockthread = 128; //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z //the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); //extract the data of CudaTensor and use kernel to calculate. 
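    // Illustrative sanity check (a sketch added for clarity): the kernel indexes pixels as
    // off + c*input1_c_stride + y*input1_h_stride + x, i.e. it implicitly assumes a unit w-stride;
    // the remaining comparisons cover the common contiguous NCHW layout
    // (b-stride = channel*h*w, c-stride = h*w, h-stride = w). DEBUG-only warning, no behavioural change.
    if (DEBUG && (input1_w_stride != 1 || input1_h_stride != w ||
                  input1_c_stride != h * w || input1_b_stride != channel * h * w)) {
        printf("InterpolationChLayer_gpu_forward_kernel: input1 strides are not contiguous NCHW\n");
    }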
InterpolationChLayer_gpu_forward_kernelfunc<<<grid,block,0, stream >>>( nElement, //to let the nummous w,h,channel, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, input1,input2,output ); // THCudaCheck(cudaGetLastError()); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } error = 0; return error; } int InterpolationChLayer_gpu_backward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int channel, const int batch, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const float* input1, const float* input2, const float* gradoutput, float* gradinput1, float* gradinput2 ) { int error = -1; dim3 grid; dim3 block; //blockthread = 128; //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z //the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); InterpolationChLayer_gpu_backward_kernelfunc <<<grid,block,0, stream>>>( nElement, //to let the nummous w,h,channel, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, input1, input2, gradoutput, gradinput1, gradinput2 ); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpu error in BilinearSampler.updateGradInput %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } error = 0; return error; } //forward path of our layer __global__ void FilterInterpolationLayer_gpu_forward_kernelfunc( const int nElement, const int w, const int h, const int channel, const int filter_size, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, const float* input1, const float* input2, const float* input3, float* output ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used //only use one dimensioon of the grid and block const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w; const bool withinYbounds = h_i < h; const int batch_i = blockIdx.z; const int off = batch_i * input1_b_stride; // __syncthreads(); // const float fillvalue =0.0f; if( withinXbounds && withinYbounds) { float fx = input2[batch_i * input2_b_stride + 0 * input2_c_stride + h_i * input2_h_stride + w_i ]; float fy = input2[batch_i * input2_b_stride + 1 * input2_c_stride + h_i * input2_h_stride + w_i ]; float x2 = (float)(w_i) + fx; float y2 = (float)(h_i) + fy; if(x2 >= 0.0f && y2 >=0.0f && x2 <= (float)(w -1) && y2 <= (float)(h-1) && fabs(fx) < (float)(w)/2.0f && fabs(fy) < (float)(h)/2.0f){ int ix2_L = 
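            // descriptive note: ix2_L/iy2_T computed here are the top-left corner of the
            // filter_size x filter_size window of taps around the sub-pixel source (x2, y2);
            // e.g. with filter_size = 4 and x2 = 10.3, ix2_L = 9 and the window spans columns 9..12,
            // columns {9,10} feeding the "left" partial sums (TL/BL) and {11,12} the "right" ones (TR/BR)
            // before the final bilinear blend with alpha and beta.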
int(x2) + 1 - (int)(filter_size / 2); int iy2_T = int(y2) + 1 - (int)(filter_size / 2); int ix2_R = ix2_L + filter_size; int iy2_B = iy2_T + filter_size; float alpha = x2 - (int)(x2); float beta = y2 - (int)(y2); //TODO: here is a bug that if the iy2_B or ix2_R gets out of the border, than there is no enough pixels to warp the target one. for (int c_i = 0 ; c_i < channel ; c_i++){ float TL = 0.0f; for(int filter_j = iy2_T; filter_j <= (int)(y2); filter_j ++){ int _filter_j = min(max(0, filter_j), h - 1); for( int filter_i = ix2_L; filter_i <= (int) ( x2) ; filter_i ++ ){ int _filter_i = min(max(0, filter_i ), w - 1); TL += input1[off + c_i * input1_c_stride + _filter_j * input1_h_stride + _filter_i ] * input3 [batch_i * input3_b_stride + ((filter_j - iy2_T) * filter_size + (filter_i - ix2_L)) * input3_c_stride + h_i * input3_h_stride + w_i] ; } } float TR = 0.0f; for (int filter_j = iy2_T; filter_j <= (int) (y2); filter_j ++ ){ int _filter_j = min(max(0, filter_j),h - 1); // only used for input1 for (int filter_i = (int) (x2) + 1 ; filter_i < ix2_R; filter_i ++ ){ int _filter_i = min(max(0, filter_i),w - 1);// only used for input1 TR += input1 [off + c_i * input1_c_stride + _filter_j * input1_h_stride + _filter_i] * input3 [batch_i * input3_b_stride + ((filter_j - iy2_T) * filter_size + (filter_i - ix2_L)) * input3_c_stride + h_i * input3_h_stride + w_i]; } } float BL = 0.0f; for (int filter_j = (int) (y2) + 1; filter_j < iy2_B; filter_j ++ ){ int _filter_j = min(max(0, filter_j),h - 1); // only used for input1 for (int filter_i = ix2_L; filter_i <= (int) (x2); filter_i ++ ){ int _filter_i = min(max(0, filter_i),w - 1);// only used for input1 BL += input1 [off + c_i * input1_c_stride + _filter_j * input1_h_stride + _filter_i] * input3 [batch_i * input3_b_stride + ((filter_j - iy2_T) * filter_size + (filter_i - ix2_L)) * input3_c_stride + h_i * input3_h_stride + w_i]; } } float BR = 0.0f; for (int filter_j = (int) (y2) + 1; filter_j < iy2_B; filter_j ++ ){ int _filter_j = min(max(0, filter_j),h - 1); // only used for input1 for (int filter_i = (int) (x2) + 1; filter_i < ix2_R; filter_i ++ ){ int _filter_i = min(max(0, filter_i),w - 1);// only used for input1 BR += input1 [off + c_i * input1_c_stride + _filter_j * input1_h_stride + _filter_i] * input3 [batch_i * input3_b_stride + ((filter_j - iy2_T) * filter_size + (filter_i - ix2_L)) * input3_c_stride + h_i * input3_h_stride + w_i]; } } output[off + c_i * input1_c_stride + h_i * input1_h_stride + w_i ] = (1-alpha)*(1-beta)*TL + alpha*(1-beta)*TR + (1-alpha)*beta*BL + alpha*beta*BR; // for( int filter_i = ix2_L; filter_i < ix2_R ; filter_i ++ ){ // int _filter_i = min(max(0, filter_i),w - 1); // output[off + c_i * input1_c_stride + h_i * input1_h_stride + w_i ] += // input1[off + c_i * input1_c_stride + _filter_j * input1_h_stride + _filter_i ] * // input3 [batch_i * input3_b_stride + ((filter_j - iy2_T) * filter_size + (filter_i - ix2_L)) * input3_c_stride + h_i * input3_h_stride + w_i] * //// exp( -(fabs((float) filter_j - y2) + fabs((float) filter_i - x2)) / (float)(filter_size)); // the distance weight // exp( -(fabs((float) filter_j - y2) + fabs((float) filter_i - x2)) ); // the distance weight // //// if(w_i == 141 && h_i == 316 && c_i == 0 ){ ////printf("gpu: %f, %f,%f,%f\n",input1[off + c_i * input1_c_stride + _filter_j * input1_h_stride + _filter_i ] , ////input3 [batch_i * input3_b_stride + ((filter_j - iy2_T) * filter_size + (filter_i - ix2_L)) * input3_c_stride + h_i * input3_h_stride + w_i], ////exp( -(fabs((float) filter_j - 
y2) + fabs((float) filter_i - x2)) / (float)(filter_size)), ////output[off + c_i * input1_c_stride + h_i * input1_h_stride + w_i ] //// ); ////} // // } // } } } else{ //the warping data is out of range, we fill it with zeros for(int c_i = 0 ; c_i < channel; c_i ++){ output[off + c_i * input1_c_stride + h_i * input1_h_stride + w_i] = input1[off + c_i* input1_c_stride+ h_i * input1_h_stride + w_i]; } } } return ; } __global__ void FilterInterpolationLayer_gpu_backward_kernelfunc( const int nElement, const int w, const int h, const int channel, const int filter_size, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, const float* input1, const float* input2, const float* input3, const float* gradoutput, float* gradinput1, float* gradinput2, float* gradinput3 ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w; const bool withinYbounds = h_i < h; const int batch_i = blockIdx.z; const int off = batch_i * input1_b_stride; // __syncthreads(); if(withinXbounds && withinYbounds){ float fx = input2[batch_i * input2_b_stride + 0 * input2_c_stride + h_i * input2_h_stride + w_i]; float fy = input2[batch_i * input2_b_stride + 1 * input2_c_stride + h_i * input2_h_stride + w_i]; float x2 = float(w_i) + fx; float y2 = float(h_i) + fy; if(x2 >= 0.0f && y2 >= 0.0f && x2 <= (float)(w - 1) && y2 <= (float)(h -1) && fabs(fx) < (float)(w)/2.0f && fabs(fy) < (float)(h)/2.0f){ int ix2_L = int(x2) + 1 - (int) (filter_size/2); int iy2_T = int(y2) + 1 - (int) (filter_size/2); int ix2_R = ix2_L + filter_size; int iy2_B = iy2_T + filter_size; float alpha = x2 - (int)(x2); float beta = y2 - (int)(y2); /*** Step 1: calculate the gradients for input1, i.e. the input image; ***/ /*** STEP 3: calculate the gradients for input3, i.e. 
the filter ***/ /*** Step 1 and Step 3 are simultaneously computed ***/ for (int c_i = 0 ; c_i < channel; c_i++){ float gradoutput_value = gradoutput[off + c_i * input1_c_stride + h_i * input1_h_stride + w_i]; float TL_grad = gradoutput_value * (1-alpha ) * (1-beta); for(int filter_j = iy2_T; filter_j <= (int) (y2) ; filter_j ++ ){ int _filter_j = min(max(0, filter_j), h - 1); for (int filter_i = ix2_L ; filter_i <= (int)(x2) ; filter_i ++){ int _filter_i = min(max(0, filter_i), w - 1); atomicAdd( &gradinput1[off +c_i * input1_c_stride + _filter_j * input1_h_stride + _filter_i ], TL_grad * input3[batch_i * input3_b_stride + ((filter_j - iy2_T) * filter_size + (filter_i - ix2_L)) * input3_c_stride + h_i * input3_h_stride + w_i]); atomicAdd( & gradinput3[batch_i * input3_b_stride + ((filter_j - iy2_T ) * filter_size + (filter_i - ix2_L)) * input3_c_stride + h_i * input3_h_stride + w_i], TL_grad * input1[off + c_i * input1_c_stride + _filter_j * input1_h_stride + _filter_i]); } } float TR_grad= gradoutput_value * alpha * ( 1- beta); for (int filter_j = iy2_T; filter_j <= (int) (y2); filter_j ++ ){ int _filter_j = min(max(0, filter_j),h - 1); // only used for input1 for (int filter_i = (int) (x2) + 1 ; filter_i < ix2_R; filter_i ++ ){ int _filter_i = min(max(0, filter_i),w - 1);// only used for input1 atomicAdd( &gradinput1[off + c_i * input1_c_stride + _filter_j * input1_h_stride + _filter_i ], TR_grad * input3[batch_i * input3_b_stride + ((filter_j - iy2_T) * filter_size + (filter_i - ix2_L)) * input3_c_stride + h_i * input3_h_stride + w_i]); atomicAdd( & gradinput3[batch_i * input3_b_stride + ((filter_j - iy2_T ) * filter_size + (filter_i - ix2_L)) * input3_c_stride + h_i * input3_h_stride + w_i], TR_grad * input1[off + c_i * input1_c_stride + _filter_j * input1_h_stride + _filter_i]); } } float BL_grad = gradoutput_value * ( 1 - alpha ) * beta; for (int filter_j = (int) (y2) + 1; filter_j < iy2_B; filter_j ++ ){ int _filter_j = min(max(0, filter_j),h - 1); // only used for input1 for (int filter_i = ix2_L; filter_i <= (int) (x2); filter_i ++ ){ int _filter_i = min(max(0, filter_i),w - 1);// only used for input1 atomicAdd( &gradinput1[off + c_i * input1_c_stride + _filter_j * input1_h_stride + _filter_i ], BL_grad * input3[batch_i * input3_b_stride + ((filter_j - iy2_T) * filter_size + (filter_i - ix2_L)) * input3_c_stride + h_i * input3_h_stride + w_i]); atomicAdd( & gradinput3[batch_i * input3_b_stride + ((filter_j - iy2_T ) * filter_size + (filter_i - ix2_L)) * input3_c_stride + h_i * input3_h_stride + w_i], BL_grad * input1[off + c_i * input1_c_stride + _filter_j * input1_h_stride + _filter_i]); } } float BR_grad = gradoutput_value * alpha * beta; for (int filter_j = (int) (y2) + 1; filter_j < iy2_B; filter_j ++ ){ int _filter_j = min(max(0, filter_j),h - 1); // only used for input1 for (int filter_i = (int) (x2) + 1; filter_i < ix2_R; filter_i ++ ){ int _filter_i = min(max(0, filter_i),w - 1);// only used for input1 atomicAdd( &gradinput1[off + c_i * input1_c_stride + _filter_j * input1_h_stride + _filter_i ], BR_grad * input3[batch_i * input3_b_stride + ((filter_j - iy2_T) * filter_size + (filter_i - ix2_L)) * input3_c_stride + h_i * input3_h_stride + w_i]); atomicAdd( & gradinput3[batch_i * input3_b_stride + ((filter_j - iy2_T ) * filter_size + (filter_i - ix2_L)) * input3_c_stride + h_i * input3_h_stride + w_i], BR_grad * input1[off + c_i * input1_c_stride + _filter_j * input1_h_stride + _filter_i]); } } // for ( int filter_j = iy2_T; filter_j < iy2_B ; filter_j ++ ){ // int 
_filter_j = min(max(0, filter_j), h - 1); // for( int filter_i = ix2_L; filter_i< ix2_R ; filter_i++){ // int _filter_i = min(max(0,filter_i), w - 1); // atomicAdd( & gradinput1[off + c_i * input1_c_stride + _filter_j * input1_h_stride + _filter_i], // gradoutput_value * // input3 [batch_i * input3_b_stride + ((filter_j - iy2_T) * filter_size + (filter_i - ix2_L))* input3_c_stride + h_i * input3_h_stride + w_i] * //// exp( -(fabs((float)filter_j - y2) + fabs((float)filter_i - x2))/(float)filter_size) // exp( -(fabs((float)filter_j - y2) + fabs((float)filter_i - x2))) // // ); // } // } } /*** Step 2: calculate the gradients for input2, i.e., the optical flow, STEP 2.1: for the x/horizonotal direction. ***/ float gamma = 1.0f - beta; //iy2_B - y2; float bot_diff = 0.0f; for(int c_i =0 ; c_i< channel; c_i ++ ){ float gradoutput_value = gradoutput[off + c_i * input1_c_stride + h_i * input1_h_stride + w_i]; float TL = 0.0f; for(int filter_j = iy2_T; filter_j <= (int)(y2); filter_j ++){ int _filter_j = min(max(0, filter_j), h - 1); for( int filter_i = ix2_L; filter_i <= (int) ( x2) ; filter_i ++ ){ int _filter_i = min(max(0, filter_i ), w - 1); TL += input1[off + c_i * input1_c_stride + _filter_j * input1_h_stride + _filter_i ] * input3 [batch_i * input3_b_stride + ((filter_j - iy2_T) * filter_size + (filter_i - ix2_L)) * input3_c_stride + h_i * input3_h_stride + w_i] ; } } float TR = 0.0f; for (int filter_j = iy2_T; filter_j <= (int) (y2); filter_j ++ ){ int _filter_j = min(max(0, filter_j),h - 1); // only used for input1 for (int filter_i = (int) (x2) + 1 ; filter_i < ix2_R; filter_i ++ ){ int _filter_i = min(max(0, filter_i),w - 1);// only used for input1 TR += input1 [off + c_i * input1_c_stride + _filter_j * input1_h_stride + _filter_i] * input3 [batch_i * input3_b_stride + ((filter_j - iy2_T) * filter_size + (filter_i - ix2_L)) * input3_c_stride + h_i * input3_h_stride + w_i]; } } float BL = 0.0f; for (int filter_j = (int) (y2) + 1; filter_j < iy2_B; filter_j ++ ){ int _filter_j = min(max(0, filter_j),h - 1); // only used for input1 for (int filter_i = ix2_L; filter_i <= (int) (x2); filter_i ++ ){ int _filter_i = min(max(0, filter_i),w - 1);// only used for input1 BL += input1 [off + c_i * input1_c_stride + _filter_j * input1_h_stride + _filter_i] * input3 [batch_i * input3_b_stride + ((filter_j - iy2_T) * filter_size + (filter_i - ix2_L)) * input3_c_stride + h_i * input3_h_stride + w_i]; } } float BR = 0.0f; for (int filter_j = (int) (y2) + 1; filter_j < iy2_B; filter_j ++ ){ int _filter_j = min(max(0, filter_j),h - 1); // only used for input1 for (int filter_i = (int) (x2) + 1; filter_i < ix2_R; filter_i ++ ){ int _filter_i = min(max(0, filter_i),w - 1);// only used for input1 BR += input1 [off + c_i * input1_c_stride + _filter_j * input1_h_stride + _filter_i] * input3 [batch_i * input3_b_stride + ((filter_j - iy2_T) * filter_size + (filter_i - ix2_L)) * input3_c_stride + h_i * input3_h_stride + w_i]; } } float temp = 0.0f; temp += gamma * (TR - TL); temp += (1-gamma) * (BR - BL); bot_diff += gradoutput_value * temp; // for( int filter_j = iy2_T; filter_j< iy2_B; filter_j++){ // int _filter_j = min(max(0, filter_j) , h - 1); // for( int filter_i = ix2_L; filter_i< ix2_R; filter_i ++){ // int _filter_i = min(max(0,filter_i), w-1); // // bot_diff += // gradoutput_value * // input1[off + c_i * input1_c_stride + _filter_j * input1_h_stride + _filter_i] * // input3 [batch_i * input3_b_stride + ((filter_j - iy2_T) * filter_size + (filter_i - ix2_L))* input3_c_stride + h_i * input3_h_stride + 
w_i ] * //// exp( - ( fabs((float) filter_j - y2 ) + fabs((float) filter_i - x2))/ (float)filter_size) * //// ((float) filter_i > x2 ? 1.0f : -1.0f) / (float)filter_size; // exp( - ( fabs((float) filter_j - y2 ) + fabs((float) filter_i - x2))) * // ((float) filter_i > x2 ? 1.0f : -1.0f); // } // } } //the gradients of the x direction/ horizontal direction gradinput2[batch_i * input2_b_stride + 0 * input2_c_stride + h_i * input2_h_stride + w_i] = bot_diff; /*** STEP 2.2: for the x/horizonotal direction. ***/ gamma = 1.0f - alpha; //ix2_R -x2; bot_diff = 0.0f; for(int c_i = 0 ; c_i < channel; c_i ++ ){ float gradoutput_value = gradoutput [ off + c_i * input1_c_stride + h_i * input1_h_stride +w_i]; float TL = 0.0f; for(int filter_j = iy2_T; filter_j <= (int)(y2); filter_j ++){ int _filter_j = min(max(0, filter_j), h - 1); for( int filter_i = ix2_L; filter_i <= (int) ( x2) ; filter_i ++ ){ int _filter_i = min(max(0, filter_i ), w - 1); TL += input1[off + c_i * input1_c_stride + _filter_j * input1_h_stride + _filter_i ] * input3 [batch_i * input3_b_stride + ((filter_j - iy2_T) * filter_size + (filter_i - ix2_L)) * input3_c_stride + h_i * input3_h_stride + w_i] ; } } float TR = 0.0f; for (int filter_j = iy2_T; filter_j <= (int) (y2); filter_j ++ ){ int _filter_j = min(max(0, filter_j),h - 1); // only used for input1 for (int filter_i = (int) (x2) + 1 ; filter_i < ix2_R; filter_i ++ ){ int _filter_i = min(max(0, filter_i),w - 1);// only used for input1 TR += input1 [off + c_i * input1_c_stride + _filter_j * input1_h_stride + _filter_i] * input3 [batch_i * input3_b_stride + ((filter_j - iy2_T) * filter_size + (filter_i - ix2_L)) * input3_c_stride + h_i * input3_h_stride + w_i]; } } float BL = 0.0f; for (int filter_j = (int) (y2) + 1; filter_j < iy2_B; filter_j ++ ){ int _filter_j = min(max(0, filter_j),h - 1); // only used for input1 for (int filter_i = ix2_L; filter_i <= (int) (x2); filter_i ++ ){ int _filter_i = min(max(0, filter_i),w - 1);// only used for input1 BL += input1 [off + c_i * input1_c_stride + _filter_j * input1_h_stride + _filter_i] * input3 [batch_i * input3_b_stride + ((filter_j - iy2_T) * filter_size + (filter_i - ix2_L)) * input3_c_stride + h_i * input3_h_stride + w_i]; } } float BR = 0.0f; for (int filter_j = (int) (y2) + 1; filter_j < iy2_B; filter_j ++ ){ int _filter_j = min(max(0, filter_j),h - 1); // only used for input1 for (int filter_i = (int) (x2) + 1; filter_i < ix2_R; filter_i ++ ){ int _filter_i = min(max(0, filter_i),w - 1);// only used for input1 BR += input1 [off + c_i * input1_c_stride + _filter_j * input1_h_stride + _filter_i] * input3 [batch_i * input3_b_stride + ((filter_j - iy2_T) * filter_size + (filter_i - ix2_L)) * input3_c_stride + h_i * input3_h_stride + w_i]; } } float temp = 0.0f; temp += gamma * (BL - TL); temp += (1.0f - gamma) * ( BR - TR); bot_diff += gradoutput_value * temp; // for( int filter_j = iy2_T; filter_j < iy2_B; filter_j ++ ){ // int _filter_j = min(max(0, filter_j), h - 1); // for( int filter_i = ix2_L; filter_i < ix2_R; filter_i ++){ // int _filter_i = min(max(0, filter_i), w - 1); // // bot_diff += // gradoutput_value * // input1[off + c_i * input1_c_stride + _filter_j * input1_h_stride + _filter_i] * // input3 [batch_i * input3_b_stride +((filter_j - iy2_T) * filter_size + ( filter_i - ix2_L)) * input3_c_stride + h_i * input3_h_stride + w_i ] * //// exp( - (fabs((float) filter_j - y2) + fabs((float) filter_i - x2))/ (float)filter_size ) * //// ((float) filter_j > y2 ? 
1.0f : - 1.0f ) / (float)filter_size; // exp( - (fabs((float) filter_j - y2) + fabs((float) filter_i - x2)) ) * // ((float) filter_j > y2 ? 1.0f : - 1.0f ); // } // } } gradinput2[batch_i * input2_b_stride + 1 * input2_c_stride + h_i * input2_h_stride + w_i]= bot_diff; /*** STEP 3: calculate the gradients for input3, i.e. the filter ***/ // for(int c_i = 0 ; c_i <channel ; c_i ++ ){ // float gradoutput_value = gradoutput[ off + c_i * input1_c_stride + h_i * input1_h_stride + w_i ]; // for( int filter_j= iy2_T ; filter_j < iy2_B; filter_j ++ ){ // int _filter_j = min(max(0, filter_j), h -1 ); // for ( int filter_i = ix2_L; filter_i < ix2_R; filter_i ++ ){ // int _filter_i = min(max(0, filter_i ), w - 1); // // gradinput3 [ batch_i * input3_b_stride + ((filter_j - iy2_T) * filter_size + (filter_i - ix2_L ) ) * input3_c_stride + h_i * input3_h_stride + w_i] += // gradoutput_value * // input1[off + c_i * input1_c_stride + _filter_j * input1_h_stride + _filter_i] * //// exp( -(fabs((float) filter_j - y2 ) + fabs((float) filter_i - x2))/ (float)filter_size); // exp( -(fabs((float) filter_j - y2 ) + fabs((float) filter_i - x2))); // } // } // } } } return ; } int FilterInterpolationLayer_gpu_forward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int channel, const int batch, const int filter_size, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, const float* input1, const float* input2, const float* input3, float* output ) { int error = -1; dim3 grid; dim3 block; // blockthread = 128; //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z //the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); //extract the data of CudaTensor and use kernel to calculate. 
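    // Illustrative sanity check (a sketch; assumes input3 is a contiguous per-pixel filter tensor
    // with filter_size*filter_size channels, which is how the kernel addresses it via
    // ((filter_j - iy2_T) * filter_size + (filter_i - ix2_L)) * input3_c_stride).
    // DEBUG-only warning, no behavioural change.
    if (DEBUG && input3_b_stride != filter_size * filter_size * input3_c_stride) {
        printf("FilterInterpolationLayer_gpu_forward_kernel: input3 strides do not match filter_size^2 channels\n");
    }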
FilterInterpolationLayer_gpu_forward_kernelfunc<<<grid,block,0, stream >>>( nElement, //to let the nummous w,h,channel,filter_size, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride, input1,input2,input3, output ); // THCudaCheck(cudaGetLastError()); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } error = 0; return error; } int FilterInterpolationLayer_gpu_backward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int channel, const int batch, const int filter_size, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, const float* input1, const float* input2, const float* input3, const float* gradoutput, float* gradinput1, float* gradinput2, float* gradinput3 ) { int error = -1; dim3 grid; dim3 block; //blockthread = 128; //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z //the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); // cudaMemset((void*)gradinput1, 0, input1_b_stride * batch * sizeof(float)); // cudaMemset((void*)gradinput2, 0, input2_b_stride * batch * sizeof(float)); // cudaMemset((void*)gradinput3, 0, input3_b_stride * batch * sizeof(float)); FilterInterpolationLayer_gpu_backward_kernelfunc <<<grid,block,0, stream>>>( nElement, //to let the nummous w,h,channel,filter_size, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride, input1, input2, input3, gradoutput, gradinput1, gradinput2, gradinput3 ); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in BilinearSampler.updateGradInput %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } error = 0; return error; } //forward path of our layer __global__ void FlowProjection_gpu_forward_kernelfunc( const int nElement, const int w, const int h, const int channel, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride, const float* input1, float* count, float* output ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used //only use one dimensioon of the grid and block const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w; const bool withinYbounds = h_i < h; const int batch_i = blockIdx.z; const int off = batch_i * input1_b_stride; // __syncthreads(); // const float fillvalue =0.0f; if( withinXbounds && 
withinYbounds) { float fx = input1[ off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ]; float fy = input1[ off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ]; float x2 = (float) (w_i) + fx; float y2 = (float) (h_i) + fy; if(x2>=0.0f && y2 >= 0.0f &&x2 <= (float) ( w-1) && y2 <= (float) (h -1 ) ){ int ix2_L = (int) (x2); int iy2_T = (int) (y2); int ix2_R = min(ix2_L + 1, w - 1); int iy2_B = min(iy2_T + 1, h - 1); atomicAdd(&output[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_L ] ,-fx); atomicAdd(&output[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_R ],-fx); atomicAdd(&output[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_L ] ,-fx); atomicAdd(&output[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_R ],-fx); atomicAdd(&output[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_L] , -fy); atomicAdd(&output[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_R] , -fy); atomicAdd(&output[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_L] , -fy); atomicAdd(&output[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_R] , -fy); atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_L], 1); atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_R] , 1); atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] , 1); atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_R] , 1); } } return ; } __global__ void FlowProjectionAveraging_kernelfunc( const int nElement, const int w, const int h, const int channel, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride, const float* input1, float* count, float* output ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used //only use one dimensioon of the grid and block const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w; const bool withinYbounds = h_i < h; const int batch_i = blockIdx.z; const int off = batch_i * input1_b_stride; // __syncthreads(); // const float fillvalue =0.0f; if( withinXbounds && withinYbounds) { float temp =count[batch_i * count_b_stride + 0 + h_i * count_h_stride + w_i] ; if(temp > 0.0f){ output[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ] /= temp; output[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ] /= temp; } } return ; } __global__ void FlowFillhole_kernelfunc( const int nElement, const int w, const int h, const int channel, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride, const float* input1, float* count, float* output ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used //only use one dimensioon of the grid and block const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w; const bool 
withinYbounds = h_i < h;

	const int batch_i = blockIdx.z;
	const int off = batch_i * input1_b_stride;
	// __syncthreads();
	// const float fillvalue = 0.0f;

	if( withinXbounds && withinYbounds) {
		float temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + w_i];
		if(temp <= 0.0f){
			//search along the four directions, 0/90/180/270, until at least one projected vector is found
			int left_offset = w_i;
			float left_temp = 0.0f;
			while(left_temp == 0.0f && left_offset - 1 >= 0){
				left_offset = left_offset - 1;
				left_temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + left_offset];
			}

			int right_offset = w_i;
			float right_temp = 0.0f;
			while(right_temp == 0.0f && right_offset + 1 <= w - 1){
				right_offset = right_offset + 1;
				right_temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + right_offset];
			}

			int up_offset = h_i;
			float up_temp = 0.0f;
			while(up_temp == 0.0f && up_offset - 1 >= 0){
				up_offset = up_offset - 1;
				up_temp = count[batch_i * count_b_stride + 0 + up_offset * count_h_stride + w_i];
			}

			int down_offset = h_i;
			float down_temp = 0.0f;
			while(down_temp == 0.0f && down_offset + 1 <= h - 1){
				down_offset = down_offset + 1;
				down_temp = count[batch_i * count_b_stride + 0 + down_offset * count_h_stride + w_i];
			}

			if(left_temp + right_temp + up_temp + down_temp <= 0.0f){
				//printf("Can't fill hole, found no neighbor vectors available\n");
				return;
			}

			left_temp  = (left_temp  > 0.0f) ? 1 : 0;
			right_temp = (right_temp > 0.0f) ? 1 : 0;
			up_temp    = (up_temp    > 0.0f) ? 1 : 0;
			down_temp  = (down_temp  > 0.0f) ? 1 : 0;

			output[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i] = (
				left_temp  * output[off + 0 * input1_c_stride + h_i * input1_h_stride + left_offset] +
				right_temp * output[off + 0 * input1_c_stride + h_i * input1_h_stride + right_offset] +
				up_temp    * output[off + 0 * input1_c_stride + up_offset * input1_h_stride + w_i] +
				down_temp  * output[off + 0 * input1_c_stride + down_offset * input1_h_stride + w_i]
				) / (left_temp + right_temp + up_temp + down_temp);

			output[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i] = (
				left_temp  * output[off + 1 * input1_c_stride + h_i * input1_h_stride + left_offset] +
				right_temp * output[off + 1 * input1_c_stride + h_i * input1_h_stride + right_offset] +
				up_temp    * output[off + 1 * input1_c_stride + up_offset * input1_h_stride + w_i] +
				down_temp  * output[off + 1 * input1_c_stride + down_offset * input1_h_stride + w_i]
				) / (left_temp + right_temp + up_temp + down_temp);
		}
	}
	return;
}

__global__ void FlowProjection_gpu_backward_kernelfunc(
		const int nElement, const int w, const int h, const int channel,
		const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
		const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
		const float* input1,
		const float* count,
		const float* gradoutput,
		float* gradinput1
		)
{
	//blockIdx.z : batch index from 0~B-1
	//blockIdx.y : height patch index from ceil(h/16)
	//blockIdx.x : width patch index from ceil(w/32)
	//threadIdx.x: width index 0~31
	//threadIdx.y: height index 0~15
	//threadIdx.z: Not used

	const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
	const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
	const bool withinXbounds = w_i < w;
	const bool withinYbounds = h_i < h;
	const int batch_i = blockIdx.z;
	const int off = batch_i * input1_b_stride;
	// __syncthreads();

	if(withinXbounds && withinYbounds){
		float fx = input1[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i];
		float fy = input1[off + 1 * input1_c_stride + h_i * input1_h_stride +
w_i] ; float x2 = (float) ( w_i ) + fx; float y2 = (float) ( h_i ) + fy; if( x2 >=0.0f && y2 >= 0.0f && x2 <= (float) (w -1) && y2 <= (float) (h-1)){ int ix2_L = (int)(x2); int iy2_T = (int)(y2); int ix2_R = min(ix2_L + 1, w-1); int iy2_B = min(iy2_T + 1, h-1); int iu_offset = off + 0 * input1_c_stride + h_i * input1_h_stride + w_i; gradinput1[iu_offset] += - gradoutput[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_L]/ count[batch_i * count_b_stride + 0+ iy2_T * count_h_stride + ix2_L] ; gradinput1[iu_offset] += - gradoutput[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_R ]/ count[batch_i * count_b_stride +0 + iy2_T * count_h_stride + ix2_R] ; gradinput1[iu_offset ] += - gradoutput[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_L]/ count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] ; gradinput1[iu_offset ] += - gradoutput[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_R]/ count[batch_i * count_b_stride + 0+ iy2_B * count_h_stride + ix2_R] ; int iv_offset = off + 1 * input1_c_stride + h_i * input1_h_stride + w_i; gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_L]/ count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_L] ; gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_R]/ count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_R] ; gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_L]/ count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] ; gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_R]/ count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_R] ; } } return ; } int FlowProjection_gpu_forward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int channel, const int batch, const int fillhole, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride, const float* input1, float* count, float* output ) { int error = -1; dim3 grid; dim3 block; // blockthread = 128; //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z //the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); // printf("I am here\n"); //extract the data of CudaTensor and use kernel to calculate. 
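    // Usage note (an assumption about the caller): the projection kernel below only accumulates
    // -fx/-fy into output and visit weights into count via atomicAdd, and the averaging kernel then
    // divides by count, so both buffers must be zero on entry. If the caller does not guarantee
    // that, clearing them on the same stream would look roughly like the (commented) sketch below:
    // cudaMemsetAsync((void*)output, 0, (size_t)batch * input1_b_stride * sizeof(float), stream);
    // cudaMemsetAsync((void*)count,  0, (size_t)batch * count_b_stride  * sizeof(float), stream);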
FlowProjection_gpu_forward_kernelfunc<<<grid,block,0, stream >>>( nElement, //to let the nummous w,h,channel, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, count_b_stride,count_c_stride,count_h_stride,count_w_stride, input1,count,output ); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } // printf("I am there\n"); FlowProjectionAveraging_kernelfunc<<<grid,block,0,stream>>>( nElement, //to let the nummous w,h,channel, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, count_b_stride,count_c_stride,count_h_stride,count_w_stride, input1,count,output ); // printf("I am kao\n"); // THCudaCheck(cudaGetLastError()); err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } // printf("I am dd\n"); if(fillhole){ // printf("use flow fill hole\n"); FlowFillhole_kernelfunc<<<grid,block,0,stream>>>( nElement, //to let the nummous w,h,channel, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, count_b_stride,count_c_stride,count_h_stride,count_w_stride, input1,count,output ); err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err)); return error; } } error = 0; return error; } int FlowProjection_gpu_backward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int channel, const int batch, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride, const float* input1, const float* count, const float* gradoutput, float * gradinput1 ) { int error = -1; dim3 grid; dim3 block; //blockthread = 128; //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z //the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); FlowProjection_gpu_backward_kernelfunc <<<grid,block,0, stream>>>( nElement, //to let the nummous w,h,channel, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, count_b_stride,count_c_stride,count_h_stride,count_w_stride, input1, count, gradoutput, gradinput1 ); // printf("gpu I am there\n"); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpu error in BilinearSampler.updateGradInput %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } // printf("gpu I am here\n"); error = 0; return error; } //forward path of our layer __global__ void DepthFlowProjection_gpu_forward_kernelfunc( const int nElement, const int w, const int h, const int channel, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride, const float* input1, const float* input2, float* count, float* output ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index 
from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used //only use one dimensioon of the grid and block const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w; const bool withinYbounds = h_i < h; const int batch_i = blockIdx.z; const int off = batch_i * input1_b_stride; // __syncthreads(); // const float fillvalue =0.0f; if( withinXbounds && withinYbounds) { float fx = input1[ off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ]; float fy = input1[ off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ]; float x2 = (float) (w_i) + fx; float y2 = (float) (h_i) + fy; if(x2>=0.0f && y2 >= 0.0f &&x2 <= (float) ( w-1) && y2 <= (float) (h -1 ) ){ int ix2_L = (int) (x2); int iy2_T = (int) (y2); int ix2_R = min(ix2_L + 1, w - 1); int iy2_B = min(iy2_T + 1, h - 1); float temp = input2[batch_i * input2_b_stride + 0 + h_i * input2_h_stride + w_i]; atomicAdd(&output[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_L ] ,- temp * fx); atomicAdd(&output[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_R ],-temp * fx); atomicAdd(&output[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_L ] ,-temp * fx); atomicAdd(&output[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_R ],-temp * fx); atomicAdd(&output[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_L] , -temp * fy); atomicAdd(&output[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_R] , -temp * fy); atomicAdd(&output[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_L] , -temp * fy); atomicAdd(&output[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_R] , -temp * fy); atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_L], temp * 1); atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_R] ,temp * 1); atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] , temp * 1); atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_R] ,temp * 1); } } return ; } __global__ void DepthFlowProjectionAveraging_kernelfunc( const int nElement, const int w, const int h, const int channel, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride, const float* input1, const float* input2, float* count, float* output ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used //only use one dimensioon of the grid and block const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w; const bool withinYbounds = h_i < h; const int batch_i = blockIdx.z; const int off = batch_i * input1_b_stride; // __syncthreads(); // const float fillvalue =0.0f; if( withinXbounds && withinYbounds) { float temp =count[batch_i * count_b_stride + 0 + h_i * count_h_stride + w_i] ; if(temp > 0.0f){ output[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ] /= temp; output[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ] /= temp; } } return ; } __global__ void 
DepthFlowFillhole_kernelfunc(
		const int nElement, const int w, const int h, const int channel,
		const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
		const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
		const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
		const float* input1,
		const float* input2,
		float* count,
		float* output
		)
{
	//blockIdx.z : batch index from 0~B-1
	//blockIdx.y : height patch index from ceil(h/16)
	//blockIdx.x : width patch index from ceil(w/32)
	//threadIdx.x: width index 0~31
	//threadIdx.y: height index 0~15
	//threadIdx.z: Not used
	//only use one dimension of the grid and block

	const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
	const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
	const bool withinXbounds = w_i < w;
	const bool withinYbounds = h_i < h;
	const int batch_i = blockIdx.z;
	const int off = batch_i * input1_b_stride;
	// __syncthreads();
	// const float fillvalue = 0.0f;

	if( withinXbounds && withinYbounds) {
		float temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + w_i];
		if(temp <= 0.0f){
			//search along the four directions, 0/90/180/270, until at least one projected vector is found
			int left_offset = w_i;
			float left_temp = 0.0f;
			while(left_temp == 0.0f && left_offset - 1 >= 0){
				left_offset = left_offset - 1;
				left_temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + left_offset];
			}

			int right_offset = w_i;
			float right_temp = 0.0f;
			while(right_temp == 0.0f && right_offset + 1 <= w - 1){
				right_offset = right_offset + 1;
				right_temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + right_offset];
			}

			int up_offset = h_i;
			float up_temp = 0.0f;
			while(up_temp == 0.0f && up_offset - 1 >= 0){
				up_offset = up_offset - 1;
				up_temp = count[batch_i * count_b_stride + 0 + up_offset * count_h_stride + w_i];
			}

			int down_offset = h_i;
			float down_temp = 0.0f;
			while(down_temp == 0.0f && down_offset + 1 <= h - 1){
				down_offset = down_offset + 1;
				down_temp = count[batch_i * count_b_stride + 0 + down_offset * count_h_stride + w_i];
			}

			if(left_temp + right_temp + up_temp + down_temp <= 0.0f){
				//printf("Can't fill hole, found no neighbor vectors available\n");
				return;
			}

			left_temp  = (left_temp  > 0.0f) ? 1 : 0;
			right_temp = (right_temp > 0.0f) ? 1 : 0;
			up_temp    = (up_temp    > 0.0f) ? 1 : 0;
			down_temp  = (down_temp  > 0.0f) ? 1 : 0;

			output[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i] = (
				left_temp  * output[off + 0 * input1_c_stride + h_i * input1_h_stride + left_offset] +
				right_temp * output[off + 0 * input1_c_stride + h_i * input1_h_stride + right_offset] +
				up_temp    * output[off + 0 * input1_c_stride + up_offset * input1_h_stride + w_i] +
				down_temp  * output[off + 0 * input1_c_stride + down_offset * input1_h_stride + w_i]
				) / (left_temp + right_temp + up_temp + down_temp);

			output[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i] = (
				left_temp  * output[off + 1 * input1_c_stride + h_i * input1_h_stride + left_offset] +
				right_temp * output[off + 1 * input1_c_stride + h_i * input1_h_stride + right_offset] +
				up_temp    * output[off + 1 * input1_c_stride + up_offset * input1_h_stride + w_i] +
				down_temp  * output[off + 1 * input1_c_stride + down_offset * input1_h_stride + w_i]
				) / (left_temp + right_temp + up_temp + down_temp);
		}
	}
	return;
}

__global__ void DepthFlowProjection_gpu_backward_kernelfunc(
		const int nElement, const int w, const int h, const int channel,
		const int input1_b_stride, const int input1_c_stride, const
int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride, const float* input1, const float* input2, const float* count, const float* output, const float* gradoutput, float* gradinput1, float* gradinput2 ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w; const bool withinYbounds = h_i < h; const int batch_i = blockIdx.z; const int off = batch_i * input1_b_stride; // __syncthreads(); if(withinXbounds && withinYbounds){ float fx = input1[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i] ; float fy = input1[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i] ; float x2 = (float) ( w_i ) + fx; float y2 = (float) ( h_i ) + fy; if( x2 >=0.0f && y2 >= 0.0f && x2 <= (float) (w -1) && y2 <= (float) (h-1)){ int ix2_L = (int)(x2); int iy2_T = (int)(y2); int ix2_R = min(ix2_L + 1, w-1); int iy2_B = min(iy2_T + 1, h-1); float temp = input2[batch_i * input2_b_stride + 0 + h_i * input2_h_stride + w_i]; int iu_offset = off + 0 * input1_c_stride + h_i * input1_h_stride + w_i; gradinput1[iu_offset] += - gradoutput[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_L] * temp / count[batch_i * count_b_stride + 0+ iy2_T * count_h_stride + ix2_L] ; gradinput1[iu_offset] += - gradoutput[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_R ] * temp / count[batch_i * count_b_stride +0 + iy2_T * count_h_stride + ix2_R] ; gradinput1[iu_offset ] += - gradoutput[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_L] * temp / count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] ; gradinput1[iu_offset ] += - gradoutput[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_R] * temp / count[batch_i * count_b_stride + 0+ iy2_B * count_h_stride + ix2_R] ; int iv_offset = off + 1 * input1_c_stride + h_i * input1_h_stride + w_i; gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_L] * temp / count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_L] ; gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_R] * temp / count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_R] ; gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_L] * temp / count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] ; gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_R] * temp / count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_R] ; int weight_offset = batch_i * input2_b_stride + 0 + h_i * input2_h_stride + w_i; gradinput2[weight_offset] += - gradoutput[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_L] / count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_L] * (fx - output[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_L ] ); gradinput2[weight_offset] += -gradoutput[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_R] / count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_R] * (fx - output[off + 0 * 
input1_c_stride + iy2_T * input1_h_stride + ix2_R ] ); gradinput2[weight_offset] += -gradoutput[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_L] / count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] * (fx - output[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_L ] ); gradinput2[weight_offset] += -gradoutput[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_R] / count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_R] * (fx - output[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_R ] ); gradinput2[weight_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_L] / count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_L] * (fy - output[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_L ] ); gradinput2[weight_offset] += -gradoutput[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_R] / count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_R] * (fy - output[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_R ] ); gradinput2[weight_offset] += -gradoutput[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_L] / count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] * (fy - output[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_L ] ); gradinput2[weight_offset] += -gradoutput[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_R] / count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_R] * (fy - output[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_R ] ); } } return ; } int DepthFlowProjection_gpu_forward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int channel, const int batch, const int fillhole, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride, const float* input1, const float* input2, float* count, float* output ) { int error = -1; dim3 grid; dim3 block; // blockthread = 128; //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z //the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); // printf("I am here\n"); //extract the data of CudaTensor and use kernel to calculate. 
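    // Illustrative sanity check (a sketch; assumes the depth map input2 is a contiguous
    // single-channel plane -- the kernels hard-code its channel offset to 0 and index the width
    // without using input2_w_stride). DEBUG-only warning, no behavioural change.
    if (DEBUG && (input2_w_stride != 1 || input2_h_stride != w)) {
        printf("DepthFlowProjection_gpu_forward_kernel: input2 does not look like a contiguous single-channel depth map\n");
    }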
DepthFlowProjection_gpu_forward_kernelfunc<<<grid,block,0, stream >>>( nElement, //to let the nummous w,h,channel, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, count_b_stride,count_c_stride,count_h_stride,count_w_stride, input1,input2,count,output ); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } // printf("I am there\n"); DepthFlowProjectionAveraging_kernelfunc<<<grid,block,0,stream>>>( nElement, //to let the nummous w,h,channel, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, count_b_stride,count_c_stride,count_h_stride,count_w_stride, input1,input2, count,output ); // printf("I am kao\n"); // THCudaCheck(cudaGetLastError()); err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } // printf("I am dd\n"); if(fillhole){ // printf("use flow fill hole\n"); DepthFlowFillhole_kernelfunc<<<grid,block,0,stream>>>( nElement, //to let the nummous w,h,channel, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, count_b_stride,count_c_stride,count_h_stride,count_w_stride, input1,input2, count,output ); err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err)); return error; } } error = 0; return error; } int DepthFlowProjection_gpu_backward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int channel, const int batch, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride, const float* input1, const float* input2, const float* count, const float* output, const float* gradoutput, float * gradinput1, float* gradinput2 ) { int error = -1; dim3 grid; dim3 block; //blockthread = 128; //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z //the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); DepthFlowProjection_gpu_backward_kernelfunc <<<grid,block,0, stream>>>( nElement, //to let the nummous w,h,channel, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, count_b_stride,count_c_stride,count_h_stride,count_w_stride, input1, input2, count, output, gradoutput, gradinput1, gradinput2 ); // printf("gpu I am there\n"); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpu error in BilinearSampler.updateGradInput %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } // printf("gpu I am here\n"); error = 0; return error; } //forward path of our layer __global__ void WeightedFlowProjection_gpu_forward_kernelfunc( const int nElement, const int w, const int h, const int channel, const 
float threshhold, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride, const int weight_b_stride,const int weight_c_stride, const int weight_h_stride, const int weight_w_stride, const float* input1,const float* input2, const float* input3, float* count, float *weight, float* output ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used //only use one dimensioon of the grid and block const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w; const bool withinYbounds = h_i < h; const int batch_i = blockIdx.z; const int off = batch_i * input1_b_stride; // __syncthreads(); // const float fillvalue =0.0f; // if (blockIdx.z == 1 && w_i == 32 && h_i == 32){ // printf("\nthere is a batch 1\n"); // } if( withinXbounds && withinYbounds) { // if (blockIdx.z == 1 && w_i == 32 && h_i == 32){ // printf("\nthere is a batch 1 A\n"); // } float fx = input1[ off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ]; float fy = input1[ off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ]; float x2 = (float) (w_i) + fx; float y2 = (float) (h_i) + fy; if(x2>=0.0f && y2 >= 0.0f &&x2 <= (float) ( w-1) && y2 <= (float) (h -1 ) ){ int x3 = (int)(max(min((float) (w_i ) + 2.0f * fx, (float) (w) - 1.0f), 0.0f));//for calculating the brightness constancy between input2 and input3 int y3 = (int)(max(min((float) (h_i ) + 2.0f * fy, (float) (h) - 1.0f), 0.0f)); float weight_i = 0.0f;//data1[3],data2[3]; int channel_i; for(channel_i = 0; channel_i < 3; channel_i ++){ float data1 = input2[batch_i * input2_b_stride + channel_i* input2_c_stride + h_i * input2_h_stride + w_i * input2_w_stride]; float data2 = input3[batch_i * input3_b_stride + channel_i *input3_c_stride + y3 * input3_h_stride + x3 * input3_w_stride]; weight_i += fabs(data1 - data2)/3.0f; // if (blockIdx.z == 1 && w_i == 32 && h_i == 32){ /// printf("\n%d,%d, %f,%f,%f\n" , x3,y3, data1,data2,weight_i); // } } weight_i += 1e-8f; //add a small constant for better verification //if (blockIdx.z == 1 && w_i == 32 && h_i == 32){ // printf("\nthere is a batch 1 B, weight i is %f, threshold is %f\n", weight_i, threshhold); // } if(weight_i <= threshhold){ // if (blockIdx.z == 1 && w_i == 32 && h_i == 32){ // printf("\nbatch 1 is processed\n"); // } int ix2_L = (int) (x2); int iy2_T = (int) (y2); int ix2_R = min(ix2_L + 1, w - 1); int iy2_B = min(iy2_T + 1, h - 1); atomicAdd(&output[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_L ],-fx); atomicAdd(&output[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_R ],-fx); atomicAdd(&output[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_L ],-fx); atomicAdd(&output[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_R ],-fx); atomicAdd(&output[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_L] , -fy); atomicAdd(&output[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_R] , -fy); atomicAdd(&output[off + 1 * input1_c_stride + iy2_B * 
input1_h_stride + ix2_L] , -fy); atomicAdd(&output[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_R] , -fy); atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_L], 1); atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_R] , 1); atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] , 1); atomicAdd(& count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_R] , 1); atomicAdd(& weight[batch_i * weight_b_stride + 0 + iy2_T * weight_h_stride + ix2_L] , weight_i); atomicAdd(& weight[batch_i * weight_b_stride + 0 + iy2_T * weight_h_stride + ix2_R] , weight_i); atomicAdd(& weight[batch_i * weight_b_stride + 0 + iy2_B * weight_h_stride + ix2_L] , weight_i); atomicAdd(& weight[batch_i * weight_b_stride + 0 + iy2_B * weight_h_stride + ix2_R] , weight_i); } } } return ; } __global__ void WeightedFlowProjectionAveraging_kernelfunc( const int nElement, const int w, const int h, const int channel, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride, const int weight_b_stride,const int weight_c_stride, const int weight_h_stride, const int weight_w_stride, const float* input1, float* count,float *weight, float* output ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used //only use one dimensioon of the grid and block const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w; const bool withinYbounds = h_i < h; const int batch_i = blockIdx.z; const int off = batch_i * input1_b_stride; // __syncthreads(); // const float fillvalue =0.0f; if( withinXbounds && withinYbounds) { float temp =count[batch_i * count_b_stride + 0 + h_i * count_h_stride + w_i] ; if(temp > 0.0f){ output[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ] /= temp; output[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ] /= temp; weight[batch_i * weight_b_stride + 0 + h_i * weight_h_stride + w_i ] /= temp; } } return ; } __global__ void WeightedFlowFillhole_kernelfunc( const int nElement, const int w, const int h, const int channel, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride, const int weight_b_stride,const int weight_c_stride, const int weight_h_stride, const int weight_w_stride, const float* input1, float* count,float *weight, float* output ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used //only use one dimensioon of the grid and block const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w; const bool withinYbounds = h_i < h; const int batch_i = blockIdx.z; const int off = batch_i * input1_b_stride; // __syncthreads(); // const float fillvalue =0.0f; if( withinXbounds && withinYbounds) { float temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + w_i] ; if(temp <= 
0.0f){
		//search along the four directions, 0/90/180/270, until finding at least one
		int left_offset = w_i;		float left_temp = 0.0f;
		while(left_temp == 0.0f && left_offset - 1 >= 0){
			left_offset = left_offset - 1;
			left_temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + left_offset] ;
		}
		int right_offset = w_i;		float right_temp = 0.0f;
		while(right_temp == 0.0f && right_offset + 1 <= w - 1 ){
			right_offset = right_offset + 1;
			right_temp = count[batch_i * count_b_stride + 0 + h_i * count_h_stride + right_offset] ;
		}
		int up_offset = h_i;		float up_temp = 0.0f;
		while(up_temp == 0.0f && up_offset - 1 >= 0){
			up_offset = up_offset - 1;
			up_temp = count[batch_i * count_b_stride + 0 + up_offset * count_h_stride + w_i ] ;
		}
		int down_offset = h_i;		float down_temp = 0.0f;
		while(down_temp == 0.0f && down_offset + 1 <= h - 1 ){
			down_offset = down_offset + 1;
			down_temp = count[batch_i * count_b_stride + 0 + down_offset * count_h_stride + w_i] ;
		}
		if(left_temp + right_temp + up_temp + down_temp <= 0.0f){
			//printf("Can't fill hole, find no neighbor vectors available\n");
			return;
		}
		left_temp = (left_temp > 0.0f)?1:0;
		right_temp = (right_temp > 0.0f)?1:0;
		up_temp = (up_temp > 0.0f)?1:0;
		down_temp = (down_temp > 0.0f)?1:0;

		output[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i ] = (
			left_temp * output[off + 0 * input1_c_stride + h_i * input1_h_stride + left_offset] +
			right_temp * output[off + 0 * input1_c_stride + h_i * input1_h_stride + right_offset] +
			up_temp * output[off + 0 * input1_c_stride + up_offset * input1_h_stride + w_i] +
			down_temp * output[off + 0 * input1_c_stride + down_offset * input1_h_stride + w_i]
			) / ( left_temp + right_temp + up_temp + down_temp ) ;

		output[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i ] = (
			left_temp * output[off + 1 * input1_c_stride + h_i * input1_h_stride + left_offset] +
			right_temp * output[off + 1 * input1_c_stride + h_i * input1_h_stride + right_offset] +
			up_temp * output[off + 1 * input1_c_stride + up_offset * input1_h_stride + w_i] +
			down_temp * output[off + 1 * input1_c_stride + down_offset * input1_h_stride + w_i]
			) / ( left_temp + right_temp + up_temp + down_temp ) ;
		}
	}
	return ;
}

__global__ void WeightedFlowProjection_gpu_backward_kernelfunc(
	const int nElement, const int w, const int h, const int channel, const float threshhold,
	const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
	const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
	const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride,
	const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride,
	const int weight_b_stride, const int weight_c_stride, const int weight_h_stride, const int weight_w_stride,
	const float* input1, const float* input2, const float* input3,
	const float* count, const float* weight,
	const float* gradoutput, float* gradinput1
	)
{
	//blockIdx.z : batch index from 0~B-1
	//blockIdx.y : height patch index from ceil(h/16)
	//blockIdx.x : width patch index from ceil(w/32)
	//threadidx.x: width index 0~31
	//threadIdx.y: height index 0~15
	//threadIdx.z: Not used

	const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
	const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
	const bool withinXbounds = w_i < w;
	const bool withinYbounds = h_i < h;

	const int batch_i = blockIdx.z;
	const int off = batch_i * input1_b_stride;

	//	__syncthreads();

	if(withinXbounds && withinYbounds){
		float fx =
input1[off + 0 * input1_c_stride + h_i * input1_h_stride + w_i] ; float fy = input1[off + 1 * input1_c_stride + h_i * input1_h_stride + w_i] ; float x2 = (float) ( w_i ) + fx; float y2 = (float) ( h_i ) + fy; if( x2 >=0.0f && y2 >= 0.0f && x2 <= (float) (w -1) && y2 <= (float) (h-1)){ int x3 = (int)(max(min((float) (w_i ) + 2.0f * fx, (float) (w) - 1.0f), 0.0f));//for calculating the brightness constancy between input2 and input3 int y3 = (int)(max(min((float) (h_i ) + 2.0f * fy, (float) (h) - 1.0f), 0.0f)); float weight_i = 0.0f;//data1[3],data2[3]; int channel_i; for(channel_i = 0; channel_i < 3; channel_i ++){ float data1 = input2[batch_i * input2_b_stride + channel_i* input2_c_stride + h_i * input2_h_stride + w_i * input2_w_stride]; float data2 = input3[batch_i * input3_b_stride + channel_i *input3_c_stride + y3 * input3_h_stride + x3 * input3_w_stride]; weight_i += fabs(data1 - data2)/3.0f; } weight_i += 1e-8f; //add a small constant for better verification if(weight_i <= threshhold){ int ix2_L = (int)(x2); int iy2_T = (int)(y2); int ix2_R = min(ix2_L + 1, w-1); int iy2_B = min(iy2_T + 1, h-1); int iu_offset = off + 0 * input1_c_stride + h_i * input1_h_stride + w_i; gradinput1[iu_offset] += - gradoutput[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_L]/ count[batch_i * count_b_stride + 0+ iy2_T * count_h_stride + ix2_L] ; gradinput1[iu_offset] += - gradoutput[off + 0 * input1_c_stride + iy2_T * input1_h_stride + ix2_R ]/ count[batch_i * count_b_stride +0 + iy2_T * count_h_stride + ix2_R] ; gradinput1[iu_offset ] += - gradoutput[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_L]/ count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] ; gradinput1[iu_offset ] += - gradoutput[off + 0 * input1_c_stride + iy2_B * input1_h_stride + ix2_R]/ count[batch_i * count_b_stride + 0+ iy2_B * count_h_stride + ix2_R] ; int iv_offset = off + 1 * input1_c_stride + h_i * input1_h_stride + w_i; gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_L]/ count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_L] ; gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_T * input1_h_stride + ix2_R]/ count[batch_i * count_b_stride + 0 + iy2_T * count_h_stride + ix2_R] ; gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_L]/ count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_L] ; gradinput1[iv_offset] += - gradoutput[off + 1 * input1_c_stride + iy2_B * input1_h_stride + ix2_R]/ count[batch_i * count_b_stride + 0 + iy2_B * count_h_stride + ix2_R] ; } } } return ; } int WeightedFlowProjection_gpu_forward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int channel, const int batch, const int fillhole, const float threshold, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride, const int weight_b_stride,const int weight_c_stride, const int weight_h_stride, const int weight_w_stride, const float* input1, const float* input2, const float* input3, float* count, float *weight, float* output ) { int error = -1; dim3 grid; dim3 block; // blockthread = 128; //the 
threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z //the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); // printf("I am here, grid size %d, %d, %d\n", grid.x, grid.y, grid.z); //printf("\ninput2 stride %d,%d,%d,%d", input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride); //printf("\ninput3 stride %d,%d,%d,%d", input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride); // printf("\ncount stride %d,%d,%d,%d", count_b_stride,count_c_stride,count_h_stride,count_w_stride); // printf("\nweight stride %d,%d,%d,%d", weight_b_stride,weight_c_stride,weight_h_stride,weight_w_stride); //extract the data of CudaTensor and use kernel to calculate. WeightedFlowProjection_gpu_forward_kernelfunc<<<grid,block,0, stream >>>( nElement, //to let the nummous w,h,channel, threshold, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride, count_b_stride,count_c_stride,count_h_stride,count_w_stride, weight_b_stride,weight_c_stride,weight_h_stride,weight_w_stride, input1, input2, input3, count, weight, output ); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } // printf("I am there\n"); WeightedFlowProjectionAveraging_kernelfunc<<<grid,block,0,stream>>>( nElement, //to let the nummous w,h,channel, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, count_b_stride,count_c_stride,count_h_stride,count_w_stride, weight_b_stride,weight_c_stride,weight_h_stride,weight_w_stride, input1,count, weight, output ); // printf("I am kao\n"); // THCudaCheck(cudaGetLastError()); err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } // printf("I am dd\n"); if(fillhole){ // printf("use flow fill hole\n"); WeightedFlowFillhole_kernelfunc<<<grid,block,0,stream>>>( nElement, //to let the nummous w,h,channel, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, count_b_stride,count_c_stride,count_h_stride,count_w_stride, weight_b_stride,weight_c_stride,weight_h_stride,weight_w_stride, input1,count,weight, output ); err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err)); return error; } } error = 0; return error; } int WeightedFlowProjection_gpu_backward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int channel, const int batch, const float threshhold, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, const int count_b_stride, const int count_c_stride, const int count_h_stride, const int count_w_stride, const int weight_b_stride,const int weight_c_stride, const int weight_h_stride, const int weight_w_stride, const float* input1,const float* input2, const float* input3, 
const float* count, const float * weight, const float* gradoutput, float * gradinput1 ) { int error = -1; dim3 grid; dim3 block; //blockthread = 128; //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z //the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); WeightedFlowProjection_gpu_backward_kernelfunc <<<grid,block,0, stream>>>( nElement, //to let the nummous w,h,channel, threshhold, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride, count_b_stride,count_c_stride,count_h_stride,count_w_stride, weight_b_stride,weight_c_stride,weight_h_stride,weight_w_stride, input1, input2, input3, count, weight, gradoutput, gradinput1 ); // printf("gpu I am there\n"); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpu error in BilinearSampler.updateGradInput %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } // printf("gpu I am here\n"); error = 0; return error; } //forward path of our layer __global__ void WeightLayer_gpu_forward_kernelfunc( const int nElement, const int w, const int h, const int channel, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, // const int flow1_grad_b_stride,const int flow1_grad_c_stride,const int flow1_grad_h_stride,const int flow1_grad_w_stride, const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const float* input1, const float* input2, const float* input3, //const float * flow1_grad, float* output, float lambda_e, float lambda_v, float Nw ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used //only use one dimensioon of the grid and block const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w; const bool withinYbounds = h_i < h; const int batch_i = blockIdx.z; const int offset = batch_i * input1_b_stride; // __syncthreads(); // const float fillvalue =0.0f; if( withinXbounds && withinYbounds) { //read the opticalflow float fx = input3[batch_i * input3_b_stride + 0 * input3_c_stride + h_i * input3_h_stride + w_i]; float fy = input3[batch_i * input3_b_stride + 1 * input3_c_stride + h_i * input3_h_stride + w_i]; //get the destination position float x2 = (float)(w_i) + fx; float y2 = (float)(h_i) + fy; //Guarrantee that the center position is in-border. 
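		//Bilinear set-up used below: (ix2_L, iy2_T) is the integer corner of the warped position,
		//and alpha = x2 - floor(x2), beta = y2 - floor(y2) are the fractional offsets, so the
		//warped value of input2 at (x2, y2) is
		//  (1-alpha)(1-beta)*I[T,L] + alpha(1-beta)*I[T,R] + (1-alpha)*beta*I[B,L] + alpha*beta*I[B,R].
		//Worked example (illustrative numbers): x2 = 10.3, y2 = 4.75 gives ix2_L = 10, iy2_T = 4,
		//alpha = 0.3, beta = 0.75, and the top-left tap weight is 0.7 * 0.25 = 0.175.
		//err_sum below is the mean absolute difference between the 3x3 patch of input1 around
		//(h_i, w_i) and the bilinearly warped 3x3 patch of input2 around (y2, x2), over all channels.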
if(x2 >= 0.0f && y2 >=0.0f && x2 <= (float) (w -1) && y2 <= (float) (h-1)){ int ix2_L = (int)(x2); int iy2_T = (int)(y2); int ix2_R = min(ix2_L+1, w - 1); int iy2_B = min(iy2_T+1, h - 1); float alpha = x2 - (int)(x2); float beta = y2 - (int)(y2); int m; int n; float err_sum = 0.0f; // float sv_sum = 0.0f ; // Nw must be 3, so that -1,0,1 is the range for(m = -1; m <= 1; m ++){ int patch1_m = min(max(0, m + h_i), h-1); for(n = -1; n <= 1; n ++){ int patch1_n = min(max(0, n + w_i), w-1); int patch2_mT = min(max(0, m + iy2_T), h-1); int patch2_nL = min(max(0, n + ix2_L), w-1); int patch2_mB = min(max(0, m + iy2_B), h-1); int patch2_nR = min(max(0, n + ix2_R), w-1); for ( int c_i = 0; c_i < channel; c_i ++){ float taget_data = (1-alpha)*(1-beta)*input2[offset + c_i * input2_c_stride + patch2_mT * input2_h_stride + patch2_nL] + alpha*(1-beta)*input2[offset + c_i * input2_c_stride + patch2_mT * input2_h_stride + patch2_nR] + (1-alpha)*beta*input2[offset + c_i * input2_c_stride + patch2_mB * input2_h_stride + patch2_nL] + alpha*beta*input2[offset + c_i * input2_c_stride + patch2_mB * input2_h_stride + patch2_nR]; err_sum += fabsf(input1[offset + c_i * input1_c_stride + patch1_m * input1_h_stride + patch1_n] - taget_data); } //sv_sum += flow1_grad[batch_i * flow1_grad_b_stride + 0 + patch1_m * flow1_grad_h_stride + patch1_n]; } } err_sum /= (channel * Nw * Nw); //sv_sum /= (Nw * Nw); output[batch_i * output_b_stride + 0 + h_i * output_h_stride + w_i * output_w_stride] = (1-err_sum/lambda_e)*(1-err_sum/lambda_e); //= expf( - err_sum/lambda_e - sv_sum/lambda_v); } else { output[batch_i * output_b_stride + 0 + h_i * output_h_stride + w_i * output_w_stride] = 1e-4f; //this dosen't mean that } } return ; } int WeightLayer_gpu_forward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int channel, const int batch, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, //const int flow1_grad_b_stride,const int flow1_grad_c_stride,const int flow1_grad_h_stride,const int flow1_grad_w_stride, const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const float* input1, const float* input2, const float* input3, //const float * flow1_grad, float* output, float lambda_e, float lambda_v, float Nw ) { int error = -1; dim3 grid; dim3 block; // blockthread = 128; //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z //the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); //extract the data of CudaTensor and use kernel to calculate. 
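	// The kernel launched below writes a single-channel confidence map
	//   output = (1 - err_sum / lambda_e)^2,
	// where err_sum is the mean absolute patch difference computed inside the kernel, and it
	// falls back to 1e-4 when the warped centre lands outside the image. Worked example
	// (illustrative numbers): err_sum = 0.1 with lambda_e = 0.5 gives (1 - 0.2)^2 = 0.64.
	// The exp(-err_sum/lambda_e - sv_sum/lambda_v) form and the flow1_grad arguments are an
	// alternative weighting that is kept commented out in this file.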
WeightLayer_gpu_forward_kernelfunc<<<grid,block,0, stream >>>( nElement, //to let the nummous w,h,channel, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride, //flow1_grad_b_stride,flow1_grad_c_stride,flow1_grad_h_stride,flow1_grad_w_stride, output_b_stride, output_c_stride, output_h_stride, output_w_stride, input1,input2,input3, //flow1_grad, output, lambda_e, lambda_v, Nw ); // THCudaCheck(cudaGetLastError()); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } error = 0; return error; } __global__ void WeightLayer_gpu_backward_kernelfunc( const int nElement, const int w, const int h, const int channel, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, //const int flow1_grad_b_stride,const int flow1_grad_c_stride,const int flow1_grad_h_stride,const int flow1_grad_w_stride, const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const float* input1, const float* input2, const float* input3, //const float * flow1_grad, const float* output, const float* gradoutput, float* gradinput1, float* gradinput2, float* gradinput3, //float* gradflow1_grad, float lambda_e, float lambda_v, float Nw ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w; const bool withinYbounds = h_i < h; const int batch_i = blockIdx.z; const int offset = batch_i * input1_b_stride; // __syncthreads(); if(withinXbounds && withinYbounds) { //read the opticalflow float fx = input3[batch_i * input3_b_stride + 0 * input3_c_stride + h_i * input3_h_stride + w_i]; float fy = input3[batch_i * input3_b_stride + 1 * input3_c_stride + h_i * input3_h_stride + w_i]; //get the destination position float x2 = (float)(w_i) + fx; float y2 = (float)(h_i) + fy; //Guarrantee that the center position is in-border. 
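		//Gradient set-up, spelled out: the forward kernel stored output = (1 - err_sum/lambda_e)^2,
		//so d(output)/d(err_sum) = -2*(1 - err_sum/lambda_e)/lambda_e = -2*sqrtf(output)/lambda_e,
		//assuming 1 - err_sum/lambda_e >= 0, which is what the sqrtf(output) below relies on.
		//Each |I1 - warp(I2)| term then contributes +/- grad_err_sum depending on the sign of the
		//difference, which is why the atomicAdds below branch on (i_data > taget_data).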
if(x2 >= 0.0f && y2 >=0.0f && x2 <= (float) (w -1) && y2 <= (float) (h-1)){ int ix2_L = (int)(x2); int iy2_T = (int)(y2); int ix2_R = min(ix2_L+1, w - 1); int iy2_B = min(iy2_T+1, h - 1); float alpha = x2 - (int)(x2); float beta = y2 - (int)(y2); float gradoutput_data_value = gradoutput[batch_i * output_b_stride + 0 + h_i * output_h_stride + w_i * output_w_stride]; float grad_err_sum = - gradoutput_data_value / (lambda_e * channel * Nw *Nw) * 2 *sqrtf(output [batch_i * output_b_stride + 0 + h_i * output_h_stride + w_i * output_w_stride]) ; // //float grad_err_sum = - gradoutput_data_value * // output [batch_i * output_b_stride + 0 + h_i * output_h_stride + w_i * output_w_stride] / // (lambda_e * channel * Nw *Nw) ; //float grad_sv_sum = - gradoutput_data_value * // output[batch_i * output_b_stride + 0 + h_i * output_h_stride + w_i * output_w_stride] / // (lambda_v * Nw * Nw) ; int m; int n; // Nw must be 3, so that -1,0,1 is the range for(m = -1; m <= 1; m ++){ int patch1_m = min(max(0, m + h_i), h-1); for(n = -1; n <= 1; n ++){ int patch1_n = min(max(0, n + w_i), w-1); int patch2_mT = min(max(0, m + iy2_T), h-1); int patch2_nL = min(max(0, n + ix2_L), w-1); int patch2_mB = min(max(0, m + iy2_B), h-1); int patch2_nR = min(max(0, n + ix2_R), w-1); for (int c_i = 0; c_i < channel; c_i ++){ float taget_data = (1-alpha)*(1-beta)*input2[offset + c_i * input2_c_stride + patch2_mT * input2_h_stride + patch2_nL] + alpha*(1-beta)*input2[offset + c_i * input2_c_stride + patch2_mT * input2_h_stride + patch2_nR] + (1-alpha)*beta*input2[offset + c_i * input2_c_stride + patch2_mB * input2_h_stride + patch2_nL] + alpha*beta*input2[offset + c_i * input2_c_stride + patch2_mB * input2_h_stride + patch2_nR] ; float i_data = input1[offset + c_i * input1_c_stride + patch1_m * input1_h_stride + patch1_n] ; //input1 gradients atomicAdd(& gradinput1[offset + c_i * input1_c_stride + patch1_m * input1_h_stride + patch1_n] , ( i_data > taget_data) ? grad_err_sum : - grad_err_sum); //input2 gradients atomicAdd(& gradinput2[offset + c_i * input2_c_stride + patch2_mT * input2_h_stride + patch2_nL] , (1-alpha)*(1-beta)*(( i_data> taget_data) ? - grad_err_sum : grad_err_sum) ); atomicAdd(& gradinput2[offset + c_i * input2_c_stride + patch2_mT * input2_h_stride + patch2_nR] , alpha*(1-beta)*( ( i_data> taget_data) ? - grad_err_sum : grad_err_sum)); atomicAdd(& gradinput2[offset + c_i * input2_c_stride + patch2_mB * input2_h_stride + patch2_nL] , (1-alpha)*beta*( ( i_data > taget_data) ? - grad_err_sum : grad_err_sum)); atomicAdd(& gradinput2[offset + c_i * input2_c_stride + patch2_mB * input2_h_stride + patch2_nR] , alpha*beta*( ( i_data > taget_data) ? - grad_err_sum : grad_err_sum)); //input3 gradients float gamma = 1.0f - beta; //iy2_B - y2; float temp = 0.0f; temp += gamma * (input2[offset + c_i * input2_c_stride + patch2_mT * input2_h_stride + patch2_nR]- input2[offset + c_i * input2_c_stride + patch2_mT * input2_h_stride + patch2_nL]); temp += (1-gamma) *( input2[offset + c_i * input2_c_stride + patch2_mB * input2_h_stride + patch2_nR] - input2[offset + c_i * input2_c_stride + patch2_mB * input2_h_stride + patch2_nL]); temp = temp * ( ( i_data > taget_data) ? 
- grad_err_sum : grad_err_sum); atomicAdd(& gradinput3[batch_i * input3_b_stride + 0 * input3_c_stride + h_i * input3_h_stride + w_i] , temp); gamma = 1.0f - alpha; //ix2_R -x2; temp = 0.0f; temp += gamma * ( input2[offset + c_i * input2_c_stride + patch2_mB * input2_h_stride + patch2_nL] - input2[offset + c_i * input2_c_stride + patch2_mT * input2_h_stride + patch2_nL]); temp += gamma *(input2[offset + c_i * input2_c_stride + patch2_mB * input2_h_stride + patch2_nR] - input2[offset + c_i * input2_c_stride + patch2_mT * input2_h_stride + patch2_nR] ); temp = temp * ( ( i_data > taget_data) ? - grad_err_sum : grad_err_sum); atomicAdd(& gradinput3[batch_i * input3_b_stride + 1 * input3_c_stride + h_i * input3_h_stride + w_i] , temp); } //flow1_grad's gradients //sv_sum += flow1_grad[batch_i * flow1_grad_b_stride + 0 + patch1_m * flow1_grad_h_stride + patch1_n]; //atomicAdd(& gradflow1_grad[ batch_i * flow1_grad_b_stride + 0 + patch1_m * flow1_grad_h_stride + patch1_n] , // grad_sv_sum); } } } } return ; } int WeightLayer_gpu_backward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int channel, const int batch, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, // const int flow1_grad_b_stride,const int flow1_grad_c_stride,const int flow1_grad_h_stride,const int flow1_grad_w_stride, const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const float* input1, const float* input2, const float* input3, //const float * flow1_grad, const float* output, const float* gradoutput, float* gradinput1, float* gradinput2, float* gradinput3, //float* gradflow1_grad, float lambda_e, float lambda_v, float Nw ) { int error = -1; dim3 grid; dim3 block; //blockthread = 128; //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z //the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); // cudaMemset((void*)gradinput1, 0, input1_b_stride * batch * sizeof(float)); // cudaMemset((void*)gradinput2, 0, input2_b_stride * batch * sizeof(float)); // cudaMemset((void*)gradinput3, 0, input3_b_stride * batch * sizeof(float)); WeightLayer_gpu_backward_kernelfunc <<<grid,block,0, stream>>>( nElement, //to let the nummous w,h,channel, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride, // flow1_grad_b_stride,flow1_grad_c_stride,flow1_grad_h_stride,flow1_grad_w_stride, output_b_stride, output_c_stride, output_h_stride, output_w_stride, input1, input2, input3, //flow1_grad, output, gradoutput, gradinput1, gradinput2, gradinput3, //gradflow1_grad, lambda_e, lambda_v, Nw ); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in BilinearSampler.updateGradInput %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } error = 0; return error; } //forward path of our layer __global__ void PixelValueLayer_gpu_forward_kernelfunc( const int nElement, 
const int w, const int h, const int channel, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, //const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, const int flow_weights_b_stride,const int flow_weights_c_stride,const int flow_weights_h_stride,const int flow_weights_w_stride, const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const float* input1, const float* input3, const float * flow_weights, float* output, float sigma_d, float tao_r , float Prowindow ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used //only use one dimensioon of the grid and block const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w; const bool withinYbounds = h_i < h; const int batch_i = blockIdx.z; const int offset = batch_i * input1_b_stride; if( withinXbounds && withinYbounds) { //read the opticalflow float fx = input3[batch_i * input3_b_stride + 0 * input3_c_stride + h_i * input3_h_stride + w_i]; float fy = input3[batch_i * input3_b_stride + 1 * input3_c_stride + h_i * input3_h_stride + w_i]; //get the destination position float x2 = (float)(w_i) + fx/2.0f; //the intermediate position float y2 = (float)(h_i) + fy/2.0f; //Guarrantee that the center position is in-border. if(x2 >= 0.0f && y2 >=0.0f && x2 <= (float) (w -1) && y2 <= (float) (h-1)){ int ix2_L = (int)(x2); int iy2_T = (int)(y2); float alpha = x2 - (int)(x2); float beta = y2 - (int)(y2); int m; int n; // we interpolate 4 pixels, should we change the sigma_d ? 
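		//Splatting weight used in the loop below: for each tap (m, n) in the 4x4 neighbourhood of
		//the half-flow target, with alpha/beta the fractional offsets, the effective weight is
		//g_d^2 with g_d = 1 - ((beta - m)^2 + (alpha - n)^2) / (2 * sigma_d^2), a truncated
		//quadratic fall-off in place of the Gaussian kept in the comment below.
		//Worked example (illustrative): alpha = beta = 0 and sigma_d = 1 give g_d^2 = 1 at (0,0)
		//and g_d^2 = (1 - 0.5)^2 = 0.25 at (0,1). Each tap accumulates f_w * g_d^2 * input1 via
		//atomicAdd, so `output` is expected to arrive zero-initialised.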
for( m = -1; m <= 2; m ++){ for(n = -1; n <= 2; n ++){ int patch2_m = min(max(0, m + iy2_T), h-1); int patch2_n = min(max(0, n + ix2_L), w-1); // float g_d = expf( - ((beta - m) * (beta - m) + (alpha - n) *( alpha - n ))/(2 * sigma_d * sigma_d)); float g_d = (1.0f - ((beta - m) * (beta - m) + (alpha - n) *( alpha - n ))/(2.0f * sigma_d * sigma_d)); g_d = g_d * g_d; float f_w = flow_weights[batch_i * flow_weights_b_stride + 0 + h_i * flow_weights_h_stride + w_i] ; for( int c_i = 0 ; c_i < channel; c_i ++){ atomicAdd(& output[offset + c_i * output_c_stride + patch2_m * output_h_stride + patch2_n] , f_w * g_d * input1[offset + c_i * input1_c_stride + h_i * input1_h_stride + w_i]); } } } } } return ; } int PixelValueLayer_gpu_forward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int channel, const int batch, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, //const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, const int flow_weights_b_stride,const int flow_weights_c_stride,const int flow_weights_h_stride,const int flow_weights_w_stride, const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const float* input1, const float* input3, const float * flow_weights, float* output, float sigma_d, float tao_r , float Prowindow ) { int error = -1; dim3 grid; dim3 block; // blockthread = 128; // the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z // the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); //extract the data of CudaTensor and use kernel to calculate. 
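	// A minimal launch sketch (assumptions: contiguous NCHW float tensors, a 2-channel flow in
	// input3, a 1-channel flow_weights map, and an output that is zeroed first because the
	// kernel only accumulates; every name below is illustrative, not part of this file):
	//   cudaMemsetAsync(d_output, 0, batch * channel * h * w * sizeof(float), stream);
	//   PixelValueLayer_gpu_forward_kernel(stream, batch * channel * h * w, w, h, channel, batch,
	//       channel * h * w, h * w, w, 1,      // input1 strides
	//       2 * h * w, h * w, w, 1,            // input3 (flow) strides
	//       h * w, h * w, w, 1,                // flow_weights strides
	//       channel * h * w, h * w, w, 1,      // output strides
	//       d_input1, d_flow, d_flow_weights, d_output, sigma_d, tao_r, Prowindow);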
PixelValueLayer_gpu_forward_kernelfunc<<<grid,block,0, stream >>>( nElement, //to let the nummous w,h,channel, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, //input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride, flow_weights_b_stride,flow_weights_c_stride,flow_weights_h_stride,flow_weights_w_stride, output_b_stride,output_c_stride,output_h_stride,output_w_stride, input1, input3, flow_weights, output, sigma_d, tao_r , Prowindow ); // THCudaCheck(cudaGetLastError()); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } error = 0; return error; } __global__ void PixelValueLayer_gpu_backward_kernelfunc( const int nElement, const int w, const int h, const int channel, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, //const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, const int flow_weights_b_stride,const int flow_weights_c_stride,const int flow_weights_h_stride,const int flow_weights_w_stride, const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const float* input1, const float* input3, const float * flow_weights, const float* gradoutput, float * gradinput1, float * gradinput3, float* gradflow_weights, float sigma_d, float tao_r , float Prowindow ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w; const bool withinYbounds = h_i < h; const int batch_i = blockIdx.z; const int offset = batch_i * input1_b_stride; // __syncthreads(); if(withinXbounds && withinYbounds) { //read the opticalflow float fx = input3[batch_i * input3_b_stride + 0 * input3_c_stride + h_i * input3_h_stride + w_i]; float fy = input3[batch_i * input3_b_stride + 1 * input3_c_stride + h_i * input3_h_stride + w_i]; //get the destination position float x2 = (float)(w_i) + fx/2.0f; //the intermediate position float y2 = (float)(h_i) + fy/2.0f; //Guarrantee that the center position is in-border. if(x2 >= 0.0f && y2 >=0.0f && x2 <= (float) (w -1) && y2 <= (float) (h-1)){ int ix2_L = (int)(x2); int iy2_T = (int)(y2); float alpha = x2 - (int)(x2); float beta = y2 - (int)(y2); int m; int n; // we interpolate 4 pixels, should we change the sigma_d ? 
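		//Backward of the splat above. With S = sum over taps of f_w * g_d^2 * I1, the loop below
		//accumulates, per tap and channel,
		//  dS/dI1  = f_w * g_d^2        -> gradinput1
		//  dS/df_w = g_d^2 * I1         -> gradflow_weights
		//  dS/d(fx, fy) via dg_d/dalpha = (n - alpha)/sigma_d^2, dg_d/dbeta = (m - beta)/sigma_d^2
		//                               -> gradinput3,
		//using the sign and 2.0f scaling conventions of this implementation. All 16 taps add into
		//the same per-pixel gradient entries, hence the accumulation via atomicAdd.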
for( m = -1; m <= 2; m ++){ for(n = -1; n <= 2; n ++){ int patch2_m = min(max(0, m + iy2_T), h-1); int patch2_n = min(max(0, n + ix2_L), w-1); // float g_d = expf( - ((beta - m) * (beta - m) + (alpha - n) *( alpha - n ))/(2.0f * sigma_d * sigma_d)); float g_d = (1.0f - ((beta - m) * (beta - m) + (alpha - n) *( alpha - n ))/(2.0f * sigma_d * sigma_d)); // g_d = g_d * g_d; float f_w = flow_weights[batch_i * flow_weights_b_stride + 0 + h_i * flow_weights_h_stride + w_i] ; for(int c_i = 0 ; c_i < channel; c_i ++){ float gradoutput_data_value = gradoutput[offset + c_i * input1_c_stride + patch2_m * input1_h_stride + patch2_n]; //input1 gradients atomicAdd(& gradinput1[offset + c_i * input1_c_stride + h_i * input1_h_stride + w_i], gradoutput_data_value * f_w * g_d*g_d); // flow_weights_data gradients atomicAdd(& gradflow_weights[batch_i * flow_weights_b_stride + 0+ h_i * flow_weights_h_stride + w_i], gradoutput_data_value * g_d * g_d * input1[offset + c_i * input1_c_stride + h_i * input1_h_stride + w_i]); //flow gradients atomicAdd(& gradinput3[batch_i * input3_b_stride + 0 * input3_c_stride + h_i * input3_h_stride + w_i] , - gradoutput_data_value * f_w * input1[offset + c_i * input1_c_stride + h_i * input1_h_stride + w_i] * g_d * (n - alpha) / ( sigma_d * sigma_d) * 2.0f); atomicAdd(& gradinput3[batch_i * input3_b_stride + 1 * input3_c_stride + h_i * input3_h_stride + w_i] , - gradoutput_data_value * f_w * input1[offset + c_i * input1_c_stride + h_i * input1_h_stride + w_i] * g_d * (m - beta) / ( sigma_d * sigma_d) * 2.0f); } } } } } return ; } int PixelValueLayer_gpu_backward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int channel, const int batch, const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, //const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, const int flow_weights_b_stride,const int flow_weights_c_stride,const int flow_weights_h_stride,const int flow_weights_w_stride, const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const float* input1, const float* input3, const float * flow_weights, const float* gradoutput, float * gradinput1, float * gradinput3, float* gradflow_weights, float sigma_d, float tao_r , float Prowindow ) { int error = -1; dim3 grid; dim3 block; //blockthread = 128; //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z //the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); // cudaMemset((void*)gradinput1, 0, input1_b_stride * batch * sizeof(float)); // cudaMemset((void*)gradinput2, 0, input2_b_stride * batch * sizeof(float)); // cudaMemset((void*)gradinput3, 0, input3_b_stride * batch * sizeof(float)); PixelValueLayer_gpu_backward_kernelfunc <<<grid,block,0, stream>>>( nElement, //to let the nummous w,h,channel, input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, //input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride, flow_weights_b_stride,flow_weights_c_stride,flow_weights_h_stride,flow_weights_w_stride, 
output_b_stride,output_c_stride,output_h_stride,output_w_stride, input1, input3, flow_weights, gradoutput, gradinput1, gradinput3, gradflow_weights, sigma_d, tao_r , Prowindow ); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in BilinearSampler.updateGradInput %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } error = 0; return error; } //forward path of our layer __global__ void PixelWeightLayer_gpu_forward_kernelfunc( const int nElement, const int w, const int h, //const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, //const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, const int flow_weights_b_stride,const int flow_weights_c_stride,const int flow_weights_h_stride,const int flow_weights_w_stride, const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const float* input3, const float * flow_weights, float * output, float sigma_d, float tao_r , float Prowindow ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used //only use one dimensioon of the grid and block const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w; const bool withinYbounds = h_i < h; const int batch_i = blockIdx.z; if( withinXbounds && withinYbounds) { //read the opticalflow float fx = input3[batch_i * input3_b_stride + 0 * input3_c_stride + h_i * input3_h_stride + w_i]; float fy = input3[batch_i * input3_b_stride + 1 * input3_c_stride + h_i * input3_h_stride + w_i]; //get the destination position float x2 = (float)(w_i) + fx/2.0f; //the intermediate position float y2 = (float)(h_i) + fy/2.0f; //Guarrantee that the center position is in-border. if(x2 >= 0.0f && y2 >=0.0f && x2 <= (float) (w -1) && y2 <= (float) (h-1)){ int ix2_L = (int)(x2); int iy2_T = (int)(y2); float alpha = x2 - (int)(x2); float beta = y2 - (int)(y2); int m; int n; // we interpolate 4 pixels, should we change the sigma_d ? 
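		//This kernel accumulates only the normalisation term of the splat: each tap receives
		//f_w * g_d^2, summed into a single-channel map. Together with PixelValueLayer, which
		//accumulates f_w * g_d^2 * I1 per colour channel, the warped frame is presumably
		//recovered later as value / weight wherever the weight is large enough; this kernel
		//only produces that denominator.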
for( m = -1; m <= 2; m ++){ for(n = -1; n <= 2; n ++){ int patch2_m = min(max(0, m + iy2_T), h-1); int patch2_n = min(max(0, n + ix2_L), w-1); // float g_d = expf( - ((beta - m) * (beta - m) + (alpha - n) *( alpha - n ))/(2.0f * sigma_d * sigma_d)); float g_d = (1.0f - ((beta - m) * (beta - m) + (alpha - n) *( alpha - n ))/(2.0f * sigma_d * sigma_d)); g_d = g_d * g_d; float f_w = flow_weights[batch_i * flow_weights_b_stride + 0 + h_i * flow_weights_h_stride + w_i] ; atomicAdd(& output[batch_i * output_b_stride + 0 + patch2_m * output_h_stride + patch2_n] , f_w * g_d); } } } } return ; } int PixelWeightLayer_gpu_forward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int batch, //const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, //const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, const int flow_weights_b_stride,const int flow_weights_c_stride,const int flow_weights_h_stride,const int flow_weights_w_stride, const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const float* input3, const float * flow_weights, float* output, float sigma_d, float tao_r , float Prowindow ) { int error = -1; dim3 grid; dim3 block; // blockthread = 128; // the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z // the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); //extract the data of CudaTensor and use kernel to calculate. 
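	// Launch pattern is the same as for PixelValueLayer_gpu_forward_kernel above, minus the
	// channel dimension and the image input: the single-channel output is again an accumulator,
	// so it is expected to be zero-initialised (e.g. with cudaMemsetAsync) before this call.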
PixelWeightLayer_gpu_forward_kernelfunc<<<grid,block,0, stream >>>( nElement, //to let the nummous w,h, //input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, //input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride, flow_weights_b_stride,flow_weights_c_stride,flow_weights_h_stride,flow_weights_w_stride, output_b_stride,output_c_stride,output_h_stride,output_w_stride, input3, flow_weights, output, sigma_d, tao_r, Prowindow ); // THCudaCheck(cudaGetLastError()); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } error = 0; return error; } __global__ void PixelWeightLayer_gpu_backward_kernelfunc( const int nElement, const int w, const int h, //const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, //const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, const int flow_weights_b_stride,const int flow_weights_c_stride,const int flow_weights_h_stride,const int flow_weights_w_stride, const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const float* input3, const float * flow_weights, const float* output, const float* gradoutput, float * gradinput3, float* gradflow_weights, float threshhold, float sigma_d, float tao_r , float Prowindow ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w; const bool withinYbounds = h_i < h; const int batch_i = blockIdx.z; if(withinXbounds && withinYbounds) { //read the opticalflow float fx = input3[batch_i * input3_b_stride + 0 * input3_c_stride + h_i * input3_h_stride + w_i]; float fy = input3[batch_i * input3_b_stride + 1 * input3_c_stride + h_i * input3_h_stride + w_i]; //get the destination position float x2 = (float)(w_i) + fx/2.0f; //the intermediate position float y2 = (float)(h_i) + fy/2.0f; //Guarrantee that the center position is in-border. if(x2 >= 0.0f && y2 >=0.0f && x2 <= (float) (w -1) && y2 <= (float) (h-1)){ int ix2_L = (int)(x2); int iy2_T = (int)(y2); float alpha = x2 - (int)(x2); float beta = y2 - (int)(y2); int m; int n; // we interpolate 4 pixels, should we change the sigma_d ? 
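		//Backward of the weight accumulation. Taps whose forward-accumulated weight fell below
		//`threshhold` are skipped via the `continue` below: dividing by a near-zero normaliser
		//downstream would blow up, so those positions simply propagate no gradient. For the
		//remaining taps the code adds d/df_w = g_d^2 to gradflow_weights and flow gradients
		//through dg_d/dalpha = (n - alpha)/sigma_d^2 and dg_d/dbeta = (m - beta)/sigma_d^2, with
		//the same sign and 2.0f scaling conventions as the PixelValueLayer backward above.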
for( m = -1; m <= 2; m ++){ for(n = -1; n <= 2; n ++){ int patch2_m = min(max(0, m + iy2_T), h-1); int patch2_n = min(max(0, n + ix2_L), w-1); // float g_d = expf( - ((beta - m) * (beta - m) + (alpha - n) *( alpha - n ))/(2.0f * sigma_d * sigma_d)); float g_d = (1.0f - ((beta - m) * (beta - m) + (alpha - n) *( alpha - n ))/(2.0f * sigma_d * sigma_d)); // g_d = g_d * g_d; float f_w = flow_weights[batch_i * flow_weights_b_stride + 0+ h_i * flow_weights_h_stride + w_i] ; float gradoutput_data_value = gradoutput[batch_i * output_b_stride + 0 + patch2_m * output_h_stride + patch2_n]; // if(output[batch_i * output_b_stride + 0 + patch2_m * output_h_stride + patch2_n] < 0) // printf("Error g_d ==> %f \n",output[batch_i * output_b_stride + 0 + patch2_m * output_h_stride + patch2_n] ); if(output[batch_i * output_b_stride + 0 + patch2_m * output_h_stride + patch2_n] < threshhold) { //printf("pixelweigths gpu backward, under threshhold ==> %f\n", // output[batch_i * output_b_stride + 0 + patch2_m * output_h_stride + patch2_n]); continue;//to skip its gradients } //flow1_weights gradients atomicAdd(&gradflow_weights[batch_i * flow_weights_b_stride + 0+ h_i * flow_weights_h_stride + w_i], gradoutput_data_value * g_d * g_d); //flow gradients atomicAdd(& gradinput3[batch_i * input3_b_stride + 0 * input3_c_stride + h_i * input3_h_stride + w_i], - gradoutput_data_value * f_w * g_d * (n - alpha) / ( sigma_d * sigma_d) * 2.0f); atomicAdd(& gradinput3[batch_i * input3_b_stride + 1 * input3_c_stride + h_i * input3_h_stride + w_i] , - gradoutput_data_value * f_w * g_d *(m - beta) / ( sigma_d * sigma_d) * 2.0f); } } } } return ; } int PixelWeightLayer_gpu_backward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int batch, //const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, //const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, const int flow_weights_b_stride,const int flow_weights_c_stride,const int flow_weights_h_stride,const int flow_weights_w_stride, const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const float* input3, const float * flow_weights, const float* output, const float* gradoutput, float* gradinput3, float* gradflow_weights, float threshhold, float sigma_d, float tao_r , float Prowindow ) { int error = -1; dim3 grid; dim3 block; //blockthread = 128; //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z //the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); // cudaMemset((void*)gradinput1, 0, input1_b_stride * batch * sizeof(float)); // cudaMemset((void*)gradinput2, 0, input2_b_stride * batch * sizeof(float)); // cudaMemset((void*)gradinput3, 0, input3_b_stride * batch * sizeof(float)); PixelWeightLayer_gpu_backward_kernelfunc <<<grid,block,0, stream>>>( nElement, //to let the nummous w,h, //input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, //input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride, 
flow_weights_b_stride,flow_weights_c_stride,flow_weights_h_stride,flow_weights_w_stride, output_b_stride,output_c_stride,output_h_stride,output_w_stride, input3, flow_weights, output, gradoutput, gradinput3, gradflow_weights, threshhold, sigma_d, tao_r , Prowindow ); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in BilinearSampler.updateGradInput %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } error = 0; return error; } //forward path of our layer __global__ void ReliableWeightLayer_gpu_forward_kernelfunc( const int nElement, const int w, const int h, //const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, //const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, //const int flow_weights_b_stride,const int flow_weights_c_stride,const int flow_weights_h_stride,const int flow_weights_w_stride, const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const float* input3, float * output, float sigma_d, float tao_r , float Prowindow ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 //threadIdx.z: Not used //only use one dimensioon of the grid and block const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w; const bool withinYbounds = h_i < h; const int batch_i = blockIdx.z; //const int off = batch_i * input1_b_stride; // __syncthreads(); // const float fillvalue =0.0f; if( withinXbounds && withinYbounds) { //read the opticalflow float fx = input3[batch_i * input3_b_stride + 0 * input3_c_stride + h_i * input3_h_stride + w_i]; float fy = input3[batch_i * input3_b_stride + 1 * input3_c_stride + h_i * input3_h_stride + w_i]; //get the destination position float x2 = (float)(w_i) + fx/2.0f; //the intermediate position float y2 = (float)(h_i) + fy/2.0f; //G uarrantee that the center position is in-border. if(x2 >= 0.0f && y2 >=0.0f && x2 <= (float) (w -1) && y2 <= (float) (h-1)){ int ix2_L = (int)(x2); int iy2_T = (int)(y2); float alpha = x2 - (int)(x2); float beta = y2 - (int)(y2); int m; int n; // we interpolate 4 pixels, should we change the sigma_d ? 
for( m = -1; m <= 2; m ++){ for(n = -1; n <= 2; n ++){ int patch2_m = min(max(0, m + iy2_T), h-1); int patch2_n = min(max(0, n + ix2_L), w-1); // float g_d = expf( - ((beta - m) * (beta - m) + (alpha - n) *( alpha - n ))/(2.0f * sigma_d * sigma_d)); float g_d = (1.0f - ((beta - m) * (beta - m) + (alpha - n) *( alpha - n ))/(2.0f * sigma_d * sigma_d)); g_d = g_d * g_d; atomicAdd(&output[batch_i * output_b_stride + 0 + patch2_m * output_h_stride + patch2_n], g_d); } } }else{ ; } } return ; } int ReliableWeightLayer_gpu_forward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int batch, //const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, //const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, //const int flow_weights_b_stride,const int flow_weights_c_stride,const int flow_weights_h_stride,const int flow_weights_w_stride, const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const float* input3, float* output, float sigma_d, float tao_r , float Prowindow ) { int error = -1; dim3 grid; dim3 block; // blockthread = 128; // the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z // the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); //extract the data of CudaTensor and use kernel to calculate. ReliableWeightLayer_gpu_forward_kernelfunc<<<grid,block,0, stream >>>( nElement, //to let the nummous w,h, //input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, //input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride, //flow_weights_b_stride,flow_weights_c_stride,flow_weights_h_stride,flow_weights_w_stride, output_b_stride,output_c_stride,output_h_stride,output_w_stride, input3, output, sigma_d, tao_r, Prowindow ); // THCudaCheck(cudaGetLastError()); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } error = 0; return error; } __global__ void ReliableWeightLayer_gpu_backward_kernelfunc( const int nElement, const int w, const int h, //const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, //const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, //const int flow_weights_b_stride,const int flow_weights_c_stride,const int flow_weights_h_stride,const int flow_weights_w_stride, const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const float* input3,const float* output, const float* gradoutput, float * gradinput3, float threshhold, float sigma_d, float tao_r , float Prowindow ) { //blockIdx.z : batch index from 0~B-1 //blockIdx.y : height patch index from ceil(h/16) //blockIdx.x : width patch index from ceil(w/32) //threadidx.x: width index 0~31 //threadIdx.y: height index 0~15 
//threadIdx.z: Not used const int w_i = blockIdx.x * blockDim.x + threadIdx.x; const int h_i = blockIdx.y * blockDim.y + threadIdx.y; const bool withinXbounds = w_i < w; const bool withinYbounds = h_i < h; const int batch_i = blockIdx.z; //const int off = batch_i * input1_b_stride; // __syncthreads(); if(withinXbounds && withinYbounds) { //read the opticalflow float fx = input3[batch_i * input3_b_stride + 0 * input3_c_stride + h_i * input3_h_stride + w_i]; float fy = input3[batch_i * input3_b_stride + 1 * input3_c_stride + h_i * input3_h_stride + w_i]; //get the destination position float x2 = (float)(w_i) + fx/2.0f; //the intermediate position float y2 = (float)(h_i) + fy/2.0f; //Guarrantee that the center position is in-border. if(x2 >= 0.0f && y2 >=0.0f && x2 <= (float) (w -1) && y2 <= (float) (h-1)){ int ix2_L = (int)(x2); int iy2_T = (int)(y2); float alpha = x2 - (int)(x2); float beta = y2 - (int)(y2); int m; int n; // we interpolate 4 pixels, should we change the sigma_d ? for( m = -1; m <= 2; m ++){ for(n = -1; n <= 2; n ++){ int patch2_m = min(max(0, m + iy2_T), h-1); int patch2_n = min(max(0, n + ix2_L), w-1); // float g_d = expf( - ((beta - m) * (beta - m) + (alpha - n) *( alpha - n ))/(2.0f * sigma_d * sigma_d)); float g_d = (1.0f - ((beta - m) * (beta - m) + (alpha - n) *( alpha - n ))/(2.0f * sigma_d * sigma_d)); // g_d = g_d * g_d; float gradoutput_data_value = gradoutput[batch_i * output_b_stride + 0 + patch2_m * output_h_stride + patch2_n]; // if(output[batch_i * output_b_stride + 0 + patch2_m * output_h_stride + patch2_n] < 0) // printf("Error g_d ==> %f \n",output[batch_i * output_b_stride + 0 + patch2_m * output_h_stride + patch2_n] ); if(output[batch_i * output_b_stride + 0 + patch2_m * output_h_stride + patch2_n] < threshhold) { //printf("Reliable gpu backward, under threshhold ==> %f\n", // output[batch_i * output_b_stride + 0 + patch2_m * output_h_stride + patch2_n]); continue;//to skip its gradients } //flow gradients atomicAdd( & gradinput3[batch_i * input3_b_stride + 0 * input3_c_stride + h_i * input3_h_stride + w_i], - gradoutput_data_value * g_d * (n - alpha) / ( sigma_d * sigma_d) * 2.0f); atomicAdd( & gradinput3[batch_i * input3_b_stride + 1 * input3_c_stride + h_i * input3_h_stride + w_i], - gradoutput_data_value * g_d *(m - beta) / ( sigma_d * sigma_d) * 2.0f); } } } } return ; } int ReliableWeightLayer_gpu_backward_kernel( cudaStream_t stream, const int nElement, const int w, const int h, const int batch, //const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride, //const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride, const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride, //const int flow_weights_b_stride,const int flow_weights_c_stride,const int flow_weights_h_stride,const int flow_weights_w_stride, const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride, const float* input3, const float* output, const float* gradoutput, float* gradinput3, float threshhold, float sigma_d, float tao_r , float Prowindow ) { int error = -1; dim3 grid; dim3 block; //blockthread = 128; //the threadIdx.x is sheduled first, then threadIdx.y, threadIdx.z //the three channels are processsed in one kernel block = dim3(BLOCKDIMX,BLOCKDIMY,1); grid = dim3( (w + BLOCKDIMX - 1)/ BLOCKDIMX, (h + BLOCKDIMY - 1) / BLOCKDIMY, batch); if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG) 
printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY); // cudaMemset((void*)gradinput1, 0, input1_b_stride * batch * sizeof(float)); // cudaMemset((void*)gradinput2, 0, input2_b_stride * batch * sizeof(float)); // cudaMemset((void*)gradinput3, 0, input3_b_stride * batch * sizeof(float)); ReliableWeightLayer_gpu_backward_kernelfunc <<<grid,block,0, stream>>>( nElement, //to let the nummous w,h, //input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride, //input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride, input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride, //flow_weights_b_stride,flow_weights_c_stride,flow_weights_h_stride,flow_weights_w_stride, output_b_stride,output_c_stride,output_h_stride,output_w_stride, input3, output, gradoutput, gradinput3, threshhold, sigma_d, tao_r , Prowindow ); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpuerror in BilinearSampler.updateGradInput %s\n", cudaGetErrorString(err)); //THError("aborting"); return error; } error = 0; return error; } //#ifdef __cplusplus // } //#endif
#ifndef __DEVICE_EMULATION__ #define _DEVICE_CODE_ #endif #include "octgravdefs.h" #include "dev_octgrav_tex.cuh" #ifndef LMEM_STACK_SIZE #define LMEM_STACK_SIZE 256 #endif #define LEAF_BIT (1 << (24)) __device__ bool open_node(float4 cell_com, float4 cell_pos, float4 node_pos, float4 node_com) { float3 dr = {fabs(node_com.x - cell_pos.x) - cell_pos.w, fabs(node_com.y - cell_pos.y) - cell_pos.w, fabs(node_com.z - cell_pos.z) - cell_pos.w}; dr.x += fabs(dr.x); dr.x *= 0.5f; dr.y += fabs(dr.y); dr.y *= 0.5f; dr.z += fabs(dr.z); dr.z *= 0.5f; float ds = sqrtf(dr.x*dr.x + dr.y*dr.y + dr.z*dr.z); return ( 2.0f*node_pos.w*inv_opening_angle > ds - cell_com.w); } /********************************************** * compute length of the interaction list * **********************************************/ template<int octant> __device__ int4 interact_len(int node, int node_old, float4 cell_com, float4 cell_pos, int *ids_stack, int4 stack) { /* if empty, exit */ if (node == 0) return stack; /* check if the leaf or node has to be opened */ float4 node_pos = tex1Dfetch(node_pos_tex, (node_old << (2)) + octant); float4 node_com = tex1Dfetch(node_com_tex, (node_old << (2)) + octant); stack.w += 8; if (open_node(cell_com, cell_pos, node_pos, node_com)) { if ((node & LEAF_BIT) == 0) { /* if node, */ ids_stack[stack.x] = node; /* store it in stack */ stack.x++; stack.w += 1; } else { stack.z++; /* otherwise account for this leaf */ } } else { stack.y++; /* account for the node */ } return stack; } __device__ int3 compute_interaction_list_len(float4 cell_com, float4 cell_pos) { int ids_stack[LMEM_STACK_SIZE]; int node = 0; int4 stack = {0,0,0,0}; ids_stack[stack.x] = node; stack.w += 1; stack.x++; while(stack.x > 0) { /* read node id & pos */ stack.x--; node = ids_stack[stack.x]; stack.w += 1; /* 1 for id & 4 for pos */ int4 up = tex1Dfetch(children_tex, node + 0); int4 dn = tex1Dfetch(children_tex, node + 1); stack.w += 8; #define INTERACT_LEN(oct, child) \ {stack = interact_len<oct>(child, \ node, \ cell_com, \ cell_pos, \ ids_stack, \ stack);} INTERACT_LEN(0, up.x); INTERACT_LEN(1, up.y); INTERACT_LEN(2, up.z); INTERACT_LEN(3, up.w); INTERACT_LEN(4, dn.x); INTERACT_LEN(5, dn.y); INTERACT_LEN(6, dn.z); INTERACT_LEN(7, dn.w); } /* * number of nodes, * number of leaves, * number of reads from + writes to memory. 
*/ return make_int3(stack.y, stack.z, stack.w); } __global__ void dev_compute_interaction_list_len(int3 *interaction_list_len) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n_cells) index = threadIdx.x; float4 cell_com = tex1Dfetch(cell_com_tex, index); float4 cell_pos = tex1Dfetch(cell_pos_tex, index); cell_com.w = sqrtf((cell_com.x - cell_pos.x)*(cell_com.x - cell_pos.x)+ (cell_com.y - cell_pos.y)*(cell_com.y - cell_pos.y)+ (cell_com.z - cell_pos.z)*(cell_com.z - cell_pos.z)); interaction_list_len[index] = compute_interaction_list_len(cell_com, cell_pos); } /**************************** * build interaction list * ****************************/ template<int octant> __device__ int3 interact_bld(int node, int node_old, float4 cell_com, float4 cell_pos, int *ids_stack, int *interaction_node_list, int *interaction_leaf_list, int3 stack) { if (node == 0) return stack; float4 node_pos = tex1Dfetch(node_pos_tex, (node_old << (2)) + octant); float4 node_com = tex1Dfetch(node_com_tex, (node_old << (2)) + octant); if (open_node(cell_com, cell_pos, node_pos, node_com)) { if ((node & LEAF_BIT) == 0) { /* if node, */ ids_stack[stack.x] = node; /* store it in stack */ stack.x++; } else { interaction_leaf_list[stack.z++] = (node_old << (2)) + octant; } } else { interaction_node_list[stack.y++] = (node_old << (2)) + octant; } return stack; } __device__ void build_interaction_list(float4 cell_com, float4 cell_pos, int *interaction_node_list, int *interaction_leaf_list) { int ids_stack[LMEM_STACK_SIZE]; int node = 0; int3 stack = {0, 0, 0}; ids_stack[stack.x] = node; stack.x++; while(stack.x > 0) { /* read node id */ stack.x--; node = ids_stack[stack.x]; int4 up = tex1Dfetch(children_tex, node + 0); int4 dn = tex1Dfetch(children_tex, node + 1); #define INTERACT_BUILD(oct, child) \ {stack = interact_bld<oct>(child, \ node, \ cell_com, \ cell_pos, \ ids_stack, \ interaction_node_list, \ interaction_leaf_list, \ stack);} INTERACT_BUILD(0, up.x); INTERACT_BUILD(1, up.y); INTERACT_BUILD(2, up.z); INTERACT_BUILD(3, up.w); INTERACT_BUILD(4, dn.x); INTERACT_BUILD(5, dn.y); INTERACT_BUILD(6, dn.z); INTERACT_BUILD(7, dn.w); } } __global__ void dev_build_interaction_list(int cell_offset, int *interaction_node_list, int2 *interaction_node_offset, int *interaction_leaf_list, int2 *interaction_leaf_offset) { int index = cell_offset + (blockIdx.x * blockDim.x + threadIdx.x); if (index < n_cells) { float4 cell_com = tex1Dfetch(cell_com_tex, index); float4 cell_pos = tex1Dfetch(cell_pos_tex, index); cell_com.w = sqrtf((cell_com.x - cell_pos.x)*(cell_com.x - cell_pos.x)+ (cell_com.y - cell_pos.y)*(cell_com.y - cell_pos.y)+ (cell_com.z - cell_pos.z)*(cell_com.z - cell_pos.z)); build_interaction_list(cell_com, cell_pos, &interaction_node_list[interaction_node_offset[index].x], &interaction_leaf_list[interaction_leaf_offset[index].x]); } } /************************************************** *************************************************** *** *** ** evaluate gravity via the interaction list *** *** *** *************************************************** ***************************************************/ /***************************/ /* body-body interaction */ /***************************/ __device__ float4 body_body_interaction(float4 grav, float4 body_i, float4 body_j) { float3 dr; dr.x = body_i.x - body_j.x; dr.y = body_i.y - body_j.y; dr.z = body_i.z - body_j.z; float ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z; float inv_ds = rsqrtf(ds2 + softening_squared) * (ds2 != 0.0f); float inv_s3 = body_j.w * 
inv_ds*inv_ds*inv_ds; grav.x -= inv_s3 * dr.x; grav.y -= inv_s3 * dr.y; grav.z -= inv_s3 * dr.z; grav.w -= body_j.w * inv_ds; return grav; } /***************************/ /* body-node Octupole interaction */ /***************************/ __device__ float4 body_node_Octupole(float4 grav, float4 body_i, float4 com, float4 Oct1, float4 Oct2, float2 Oct3) { float3 dr; dr.x = body_i.x - com.x; // 1 FLOP dr.y = body_i.y - com.y; // 1 FLOP dr.z = body_i.z - com.z; // 1 FLOP float ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z; // 5 FLOP float inv_ds = rsqrt(ds2 + softening_squared) * (ds2 != 0.0f); // 3 FLOP float inv_ds2 = inv_ds*inv_ds; // 1 FLOP float inv_ds3 = inv_ds *inv_ds2; // 1 FLOP float inv_ds5 = inv_ds3*inv_ds2; // 1 FLOP float inv_ds7 = 0.5f*inv_ds5*inv_ds2; // 2 FLOP float SijRj1 = Oct1.x*dr.x + Oct1.y*dr.y + Oct1.z*dr.z; // 5 FLOP float SijRj2 = Oct2.x*dr.x + Oct2.y*dr.y + Oct2.z*dr.z; // 5 FLOP float SijRj3 = Oct1.w*dr.x + Oct2.w*dr.y + Oct3.x*dr.z; // 5 FLOP float SijRjRi_sq = SijRj1 * dr.x*dr.x + SijRj2 * dr.y*dr.y + SijRj3 * dr.z*dr.z; // 8 FLOP /******************/ /*** POTENTIAL ***/ /******************/ float pot = inv_ds7 * (SijRjRi_sq + Oct3.y*dr.x*dr.y*dr.z); // 5 FLOP grav.w -= pot; // 1 FLOP /*********************/ /*** ACCELERATION ***/ /*********************/ /*** part 1 ***/ float3 grav0 = {0.0f,0.0f,0.0f}; grav0.x -= 7.0f*inv_ds2 * dr.x * pot; // 4 FLOP grav0.y -= 7.0f*inv_ds2 * dr.y * pot; // 4 FLOP grav0.z -= 7.0f*inv_ds2 * dr.z * pot; // 4 FLOP /*** part 2 ***/ /* S11*dx^2 + S21*dy^2 + S31*dz^2 */ /* S12*dx^2 + S22*dy^2 + S32*dz^2 */ /* S13*dx^2 + S23*dy^2 + S33*dz^2 */ grav0.x += inv_ds7 * (2.0f*SijRj1*dr.x + dr.x*dr.x*Oct1.x + dr.y*dr.y*Oct2.x + dr.z*dr.z*Oct1.w); // 13 FLOP grav0.y += inv_ds7 * (2.0f*SijRj2*dr.y + dr.x*dr.x*Oct1.y + dr.y*dr.y*Oct2.y + dr.z*dr.z*Oct2.w); // 13 FLOP grav0.y += inv_ds7 * (2.0f*SijRj3*dr.z + dr.x*dr.x*Oct1.z + dr.y*dr.y*Oct2.z + dr.z*dr.z*Oct3.x); // 13 FLOP /*** part 2 ***/ grav0.x += inv_ds7*Oct3.y * dr.y*dr.z; // 4 FLOP grav0.y += inv_ds7*Oct3.y * dr.z*dr.x; // 4 FLOP grav0.z += inv_ds7*Oct3.y * dr.x*dr.y; // 4 FLOP grav.x += grav0.x; grav.y += grav0.y; grav.z += grav0.z; // TOTAL 108 FLOP return grav; } __device__ float4 evaluate_body_node_Octupole(float4 acc, float4 body_pos, int &n_inter, int2 list_len) { extern __shared__ float4 shared_com[]; float4 *shared_Oct1 = &shared_com[blockDim.x]; float4 *shared_Oct2 = &shared_Oct1[blockDim.x]; float2 *shared_Oct3 = (float2*)&shared_Oct2[blockDim.x]; n_inter = 0; for (int i = list_len.x; i < list_len.x + list_len.y; i += blockDim.x) { int node = tex1Dfetch(interaction_node_tex, i + threadIdx.x); if ( (node < 0) || (node >= n_nodes) ) node = 0; shared_com[threadIdx.x] = tex1Dfetch(node_com_tex, node); shared_Oct1[threadIdx.x] = tex1Dfetch(Oct1_tex, node); shared_Oct2[threadIdx.x] = tex1Dfetch(Oct2_tex, node); shared_Oct3[threadIdx.x] = tex1Dfetch(Oct3_tex, node); if (i + threadIdx.x >= list_len.x + list_len.y) { float4 null4 = {0,0,0,0}; float2 null2 = {0,0}; shared_Oct1[threadIdx.x] = null4; shared_Oct2[threadIdx.x] = null4; shared_Oct3[threadIdx.x] = null2; } __syncthreads(); /* check for body-node interaction */ for (int j = 0; j < blockDim.x; j++) { n_inter++; acc = body_node_Octupole(acc, body_pos, shared_com[j], shared_Oct1[j], shared_Oct2[j], shared_Oct3[j]); } __syncthreads(); } return acc; } /***************************/ /* body-node interaction */ /***************************/ __device__ float4 body_node_interaction(float4 grav, float4 body_i, float4 com, float4 Qu, float4 Qd) { 
float3 dr; dr.x = body_i.x - com.x; // 1 FLOP dr.y = body_i.y - com.y; // 1 FLOP dr.z = body_i.z - com.z; // 1 FLOP float ds2 = (((dr.x*dr.x) + dr.y*dr.y) + dr.z*dr.z); // 5 FLOP float inv_ds = rsqrt(ds2 + softening_squared) * (ds2 != 0.0f) ; // 3 FLOP float inv_ds2 = inv_ds*inv_ds; // 1 FLOP float inv_ds3 = inv_ds *inv_ds2; // 1 FLOP float inv_ds5 = inv_ds3*inv_ds2; // 1 FLOP /************/ /* potential */ /************/ grav.w -= com.w * inv_ds; // 2 FLOP float Qy0 = inv_ds5 * (Qd.x*dr.x + Qu.x*dr.y + Qu.y*dr.z); // 6 FLOP float Qy1 = inv_ds5 * (Qu.x*dr.x + Qd.y*dr.y + Qu.z*dr.z); // 6 FLOP float Qy2 = inv_ds5 * (Qu.y*dr.x + Qu.z*dr.y + Qd.z*dr.z); // 6 FLOP float yQy = Qy0 * dr.x + Qy1 * dr.y + Qy2 * dr.z; // 5 FLOP grav.w -= 0.5f * yQy; // 2 FLOP /* acceleartion */ yQy = com.w * inv_ds3 + inv_ds2*2.5f * yQy; // 4 FLOP grav.x += Qy0 - yQy * dr.x; // 3 FLOPS grav.y += Qy1 - yQy * dr.y; // 3 FLOPS grav.z += Qy2 - yQy * dr.z; // 3 FLOPS // TOTAL 54 FLOP return grav; } __device__ float4 evaluate_body_node(float4 acc, float4 body_pos, int &n_inter, int2 list_len, int n_in_cell) { extern __shared__ float4 shared_com[]; float4 *shared_Qu = &shared_com[blockDim.x]; float4 *shared_Qd = &shared_Qu [blockDim.x]; n_inter = 0; int i_thread = threadIdx.x/n_in_cell; int n_threads = blockDim.x/n_in_cell; int n_per_thread = blockDim.x/n_threads; int j0 = i_thread * n_per_thread; int j1 = (i_thread+1) * n_per_thread; if (i_thread + 1 == n_threads) j1 = blockDim.x; for (int i = list_len.x; i < list_len.x + list_len.y; i += blockDim.x) { int node = tex1Dfetch(interaction_node_tex, i + threadIdx.x); if ( (node < 0) || (node >= n_nodes) ) node = 0; shared_com[threadIdx.x] = tex1Dfetch(node_com_tex, node); shared_Qu[threadIdx.x] = tex1Dfetch(node_Qu_tex, node); shared_Qd[threadIdx.x] = tex1Dfetch(node_Qd_tex, node); if (i + threadIdx.x >= list_len.x + list_len.y) { float4 null4 = {0.0f,0.0f,0.0f,0.0f}; shared_com[threadIdx.x] = null4; shared_Qu [threadIdx.x] = null4; shared_Qd [threadIdx.x] = null4; } __syncthreads(); /* check for body-node interaction */ for (int j = j0; j < j1; j++) { n_inter++; acc = body_node_interaction(acc, body_pos, shared_com[j], shared_Qu[j], shared_Qd[j]); } __syncthreads(); } /*** now combine accelarations ****/ int *n_inter_sh = (int*)&shared_com[blockDim.x + 1]; shared_com[threadIdx.x] = acc; n_inter_sh[threadIdx.x] = n_inter; __syncthreads(); if (threadIdx.x < n_in_cell) { for (int i = n_in_cell + threadIdx.x; i < n_in_cell*n_threads; i += n_in_cell) { float4 acc1 = shared_com[i]; acc.x += acc1.x; acc.y += acc1.y; acc.z += acc1.z; acc.w += acc1.w; } for (int i = n_in_cell + threadIdx.x; i < blockDim.x; i += n_in_cell) { n_inter += n_inter_sh[i]; } } __syncthreads(); return acc; } __device__ float4 evaluate_body_leaf(float4 acc, float4 body_pos, int &n_inter, int2 list_len) { extern __shared__ int shared_offset[]; int *shared_len = (int*)&shared_offset[blockDim.x]; float4 *shared_pos = (float4*)&shared_len[blockDim.x]; n_inter = 0; int tile = 0; for (int i = list_len.x; i < list_len.x + list_len.y; i += blockDim.x, tile++) { int node_id = tex1Dfetch(interaction_leaf_tex, i + threadIdx.x); shared_len [threadIdx.x] = tex1Dfetch(n_in_node_tex, node_id); shared_offset[threadIdx.x] = tex1Dfetch(node_bodies_offset_tex, node_id); __syncthreads(); int j = min(blockDim.x, list_len.y - tile*blockDim.x); while (j-- > 0) { int len = shared_len[j]; __syncthreads(); shared_pos[threadIdx.x] = tex1Dfetch(bodies_pos_tex, shared_offset[j] + threadIdx.x); __syncthreads(); while(len-- > 0) { 
n_inter++; acc = body_body_interaction(acc, body_pos, shared_pos[len]); } __syncthreads(); } __syncthreads(); } return acc; } __global__ void dev_evaluate_gravity_node(int cell_offset, float4 *grav_acc, int *n_interactions, int2 *interaction_node_len) { int cellId = cell_offset + blockIdx.x; bool write_flag = true; if (cellId >= n_cells) { cellId = blockIdx.x; write_flag = false; } int index = tex1Dfetch(cell_bodies_offset_tex, cellId); int n_in_cell = tex1Dfetch(n_in_cell_tex, cellId); float4 body_pos = tex1Dfetch(bodies_pos_tex, index + threadIdx.x%n_in_cell); float4 acc = {0,0,0,0}; int n_inter; #ifdef OCTUPOLE acc = evaluate_body_node_Octupole(acc, body_pos, n_inter, interaction_node_len[cellId]); #endif #ifdef QUADRUPOLE acc = evaluate_body_node(acc, body_pos, n_inter, interaction_node_len[cellId], n_in_cell); #endif if (threadIdx.x < n_in_cell) { if (write_flag) { grav_acc[index + threadIdx.x] = acc; // fprintf(stderr, "cellId= %d index= %d n_in_cell= %d\n", // cellId, index + threadIdx.x, n_in_cell); // fprintf(stderr, " acc= [%f %f %f %f]\n", acc.x, acc.y, acc.z, acc.w); } n_interactions[index + threadIdx.x] = n_inter; } } __global__ void dev_evaluate_gravity_leaf(int cell_offset, float4 *grav_acc, int *n_interactions, int2 *interaction_leaf_len) { int cellId = cell_offset + blockIdx.x; bool write_flag = true; if (cellId >= n_cells) { cellId = blockIdx.x; write_flag = false; } int index = tex1Dfetch(cell_bodies_offset_tex, cellId); int n_in_cell = tex1Dfetch(n_in_cell_tex, cellId); float4 body_pos = tex1Dfetch(bodies_pos_tex, index + threadIdx.x%n_in_cell); float4 acc = grav_acc[index + threadIdx.x%n_in_cell]; int n_inter; acc = evaluate_body_leaf(acc, body_pos, n_inter, interaction_leaf_len[cellId]); if (threadIdx.x < n_in_cell) { if (write_flag) grav_acc[index + threadIdx.x] = acc; n_interactions[index + threadIdx.x] = n_inter; } } #endif
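/***************************************************************************
 * Hedged host-side reference (not in the original source): a CPU mirror of
 * body_body_interaction() above, useful for spot-checking the GPU result on
 * a handful of particle pairs.  On the device softening_squared is a
 * constant; here it is passed explicitly, which is an assumption made so
 * that the example stays self-contained.
 ***************************************************************************/
#include <cmath>
#include <vector_types.h>   /* float4 */

static float4 body_body_interaction_host(float4 grav,
                                         const float4 body_i,
                                         const float4 body_j,
                                         const float  softening_sq)
{
  const float dx = body_i.x - body_j.x;
  const float dy = body_i.y - body_j.y;
  const float dz = body_i.z - body_j.z;

  const float ds2    = dx*dx + dy*dy + dz*dz;
  /* Same convention as the kernel: a coincident pair contributes nothing. */
  const float inv_ds = (ds2 != 0.0f) ? 1.0f / std::sqrt(ds2 + softening_sq) : 0.0f;
  const float inv_s3 = body_j.w * inv_ds * inv_ds * inv_ds;

  grav.x -= inv_s3 * dx;        /* acceleration */
  grav.y -= inv_s3 * dy;
  grav.z -= inv_s3 * dz;
  grav.w -= body_j.w * inv_ds;  /* potential    */

  return grav;
}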
#include <cuda_runtime.h> #include <algorithm> #include <cstdio> #include <numeric> #include "turbo_transformers/layers/kernels/gpu_transpose_kernel.h" namespace turbo_transformers { namespace layers { namespace kernels { /* input : (batch_size, seq_len, weight_num, head_num, size_per_head) -> output : (weight_num, batch_size, head_num, seq_len, size_per_head) bias (weight_num, head_num, size_per_head) */ static __global__ void split_add_bias_transpose_for_score( const float* input_data, const float* bias_data, const int batch_size, const int seq_len, const int head_num, const int weight_num, const int size_per_head, float* output_data) { int tid = threadIdx.x; int bid = blockIdx.x; int idx = tid; int batch_id = bid / (seq_len * weight_num * head_num); int seq_id = bid % (seq_len * weight_num * head_num) / (weight_num * head_num); int weight_id = bid % (weight_num * head_num) / head_num; int head_id = bid % head_num; int head_num_size_per_head = head_num * size_per_head; int weight_id_head_num_size_per_head = weight_id * head_num_size_per_head; int head_id_size_per_head = head_id * size_per_head; while (idx < size_per_head) { float bias_val = bias_data[weight_id_head_num_size_per_head + head_id_size_per_head + idx]; output_data[weight_id * batch_size * seq_len * head_num_size_per_head + batch_id * seq_len * head_num_size_per_head + head_id * seq_len * size_per_head + seq_id * size_per_head + idx] = input_data[batch_id * seq_len * weight_num * head_num_size_per_head + seq_id * weight_num * head_num_size_per_head + weight_id_head_num_size_per_head + head_id_size_per_head + idx] + bias_val; idx += blockDim.x; } } template <> void GPUSplitAddBiasTransposeForScore( const float* input_data, const float* bias_data, float* out_data, int64_t batch_size, int64_t seq_len, int64_t weight_num, int64_t num_attention_heads, int64_t size_per_head, cudaStream_t stream) { const int n = size_per_head; const int m = batch_size * seq_len * num_attention_heads * weight_num; dim3 grid(m); dim3 block(min(n, 1024)); split_add_bias_transpose_for_score<<<grid, block, 0, stream>>>( input_data, bias_data, batch_size, seq_len, num_attention_heads, weight_num, size_per_head, out_data); } /* Output transpose results into three tensors */ static __global__ void split_add_bias_transpose_for_score_3output( const float* input_data, const float* bias_data, const int batch_size, const int seq_len, const int head_num, const int weight_num, const int size_per_head, float* q_output_data, float* k_output_data, float* v_output_data) { int tid = threadIdx.x; int bid = blockIdx.x; int idx = tid; int batch_id = bid / (seq_len * weight_num * head_num); int seq_id = bid % (seq_len * weight_num * head_num) / (weight_num * head_num); int weight_id = bid % (weight_num * head_num) / head_num; int head_id = bid % head_num; int head_num_size_per_head = head_num * size_per_head; int weight_id_head_num_size_per_head = weight_id * head_num_size_per_head; int head_id_size_per_head = head_id * size_per_head; float* output_data = nullptr; if (weight_id == 0) { output_data = q_output_data; } else if (weight_id == 1) { output_data = k_output_data; } else if (weight_id == 2) { output_data = v_output_data; } while (idx < size_per_head) { float bias_val = bias_data[weight_id_head_num_size_per_head + head_id_size_per_head + idx]; output_data[batch_id * seq_len * head_num_size_per_head + head_id * seq_len * size_per_head + seq_id * size_per_head + idx] = input_data[batch_id * seq_len * weight_num * head_num_size_per_head + seq_id * weight_num * 
head_num_size_per_head + weight_id_head_num_size_per_head + head_id_size_per_head + idx] + bias_val; idx += blockDim.x; } } /* foward transpose (1, sum_seq_len, 3, head, hidden) -> 3 X (batch, head, max_seq_len, hidden) Output transpose results into three tensors with pad block dim size_per_head grid dim product of the rest dim */ static __global__ void split_add_bias_transpose_for_score_3output_pad( const float* input_data, const float* bias_data, const int batch_size, const int max_seq_len, const int64_t* seq_len_list, const int head_num, const int weight_num, const int size_per_head, float* q_output_data, float* k_output_data, float* v_output_data) { int tid = threadIdx.x; int bid = blockIdx.x; int idx = tid; int batch_id = bid / (max_seq_len * weight_num * head_num); int seq_id = bid % (max_seq_len * weight_num * head_num) / (weight_num * head_num); int weight_id = bid % (weight_num * head_num) / head_num; int head_id = bid % head_num; // if (seq_id >= seq_len_list[batch_id]) { // return; // } int head_num_size_per_head = head_num * size_per_head; int weight_id_head_num_size_per_head = weight_id * head_num_size_per_head; int head_id_size_per_head = head_id * size_per_head; float* output_data = nullptr; if (weight_id == 0) { output_data = q_output_data; } else if (weight_id == 1) { output_data = k_output_data; } else if (weight_id == 2) { output_data = v_output_data; } int acc_seq_len = 0; // std::accumulate(seq_len_list.begin(), // seq_len_list.begin() + batch_id, 0); for (size_t i = 0; i < batch_id; ++i) { acc_seq_len += seq_len_list[i]; } while (idx < size_per_head) { if (seq_id >= seq_len_list[batch_id]) { output_data[batch_id * max_seq_len * head_num_size_per_head + head_id * max_seq_len * size_per_head + seq_id * size_per_head + idx] = 0.f; } else { float bias_val = bias_data[weight_id_head_num_size_per_head + head_id_size_per_head + idx]; output_data[batch_id * max_seq_len * head_num_size_per_head + head_id * max_seq_len * size_per_head + seq_id * size_per_head + idx] = input_data[(acc_seq_len + seq_id) * weight_num * head_num_size_per_head + weight_id_head_num_size_per_head + head_id_size_per_head + idx] + bias_val; } idx += blockDim.x; } } template <> void GPUSplitAddBiasTransposeForScoreThreeOutput( const float* input_data, const float* bias_data, int64_t batch_size, int64_t seq_len, int64_t weight_num, int64_t num_attention_heads, int64_t size_per_head, cudaStream_t stream, float* q_out_data, float* k_out_data, float* v_out_data) { const int n = size_per_head; const int m = batch_size * seq_len * num_attention_heads * weight_num; dim3 grid(m); dim3 block(min(n, 1024)); split_add_bias_transpose_for_score_3output<<<grid, block, 0, stream>>>( input_data, bias_data, batch_size, seq_len, num_attention_heads, weight_num, size_per_head, q_out_data, k_out_data, v_out_data); } template <> void GPUSplitAddBiasTransposeForScoreThreeOutputPad( const float* input_data, const float* bias_data, const std::vector<int64_t>& seq_len_list, int64_t weight_num, int64_t num_attention_heads, int64_t size_per_head, cudaStream_t stream, float* q_out_data, float* k_out_data, float* v_out_data) { const int n = size_per_head; int64_t batch_size = seq_len_list.size(); int64_t max_seq_length = *std::max_element(seq_len_list.begin(), seq_len_list.end()); const int m = batch_size * max_seq_length * num_attention_heads * weight_num; dim3 grid(m); dim3 block(min(n, 1024)); int64_t* d_seq_len_list; cudaMalloc((void**)&(d_seq_len_list), batch_size * sizeof(int64_t)); cudaMemcpy(d_seq_len_list, 
seq_len_list.data(), batch_size * sizeof(int64_t), cudaMemcpyHostToDevice); split_add_bias_transpose_for_score_3output_pad<<<grid, block, 0, stream>>>( input_data, bias_data, batch_size, max_seq_length, d_seq_len_list, // device vector num_attention_heads, weight_num, size_per_head, q_out_data, k_out_data, v_out_data); cudaFree(d_seq_len_list); } namespace { // backward // batch, head, seq, size_per_head -> batch seq head size_per_head template <bool AddBias> __global__ void transpose(const float* src, const float* bias, const int batch_size, const int seq_len, const int head_num, const int size_per_head, float* dst) { int tid = threadIdx.x; int idx = tid; if (AddBias) { int batch_id = blockIdx.x / (seq_len * head_num); int seq_id = blockIdx.x / head_num % seq_len; int head_id = blockIdx.x % head_num; while (idx < size_per_head) { dst[batch_id * (head_num * seq_len * size_per_head) + head_id * seq_len * size_per_head + seq_id * size_per_head + idx] = src[blockIdx.x * size_per_head + idx] + bias[head_id * size_per_head + idx]; idx += blockDim.x; } } else { //(batch, head, seq_len, size_per_head) -> (batch, seq_len, head, // size_per_head) int batch_id = blockIdx.x / (head_num * seq_len); int head_id = (blockIdx.x % (head_num * seq_len)) / seq_len; int seq_id = blockIdx.x % seq_len; while (idx < size_per_head) { dst[batch_id * (head_num * seq_len * size_per_head) + seq_id * head_num * size_per_head + head_id * size_per_head + idx] = src[blockIdx.x * size_per_head + idx]; idx += blockDim.x; } } } // batch, head, max_seq, size_per_head -> 1, sum_seq, head, size_per_head __global__ void transpose_back_pad(const float* src, const int batch_size, const int max_seq_len, const int64_t* seq_len_list, const int head_num, const int size_per_head, float* dst) { int tid = threadIdx.x; int idx = tid; //(batch, head, max_seq_len, size_per_head) -> (batch, seq_len, head, // size_per_head) int batch_id = blockIdx.x / (head_num * max_seq_len); int head_id = (blockIdx.x % (head_num * max_seq_len)) / max_seq_len; int seq_id = blockIdx.x % max_seq_len; if (seq_id >= seq_len_list[batch_id]) { return; } // int64_t acc_seq_len = std::accumulate(seq_len_list.begin(), // seq_len_list.begin() + batch_idx, 0); int64_t acc_seq_len = 0; for (size_t i = 0; i < batch_id; ++i) { acc_seq_len += seq_len_list[i]; } while (idx < size_per_head) { // set the invalid elements to 0. dst[(acc_seq_len + seq_id) * (head_num * size_per_head) + head_id * size_per_head + idx] = src[blockIdx.x * size_per_head + idx]; idx += blockDim.x; } } // 1, sum_seq, head size_per_head -> batch, head, max_seq, size_per_head __global__ void add_bias_transpose_forward_pad( const float* src, const float* bias, const int batch_size, const int max_seq_len, const int64_t* seq_len_list, const int head_num, const int size_per_head, float* dst) { int tid = threadIdx.x; int idx = tid; int batch_id = blockIdx.x / (head_num * max_seq_len); int head_id = (blockIdx.x % (head_num * max_seq_len)) / max_seq_len; int seq_id = blockIdx.x % max_seq_len; int64_t acc_seq_len = 0; for (size_t i = 0; i < batch_id; ++i) { acc_seq_len += seq_len_list[i]; } while (idx < size_per_head) { // set the invalid elements to 0. 
if (seq_id >= seq_len_list[batch_id]) { dst[blockIdx.x * size_per_head + idx] = 0.f; } else { dst[blockIdx.x * size_per_head + idx] = src[(acc_seq_len + seq_id) * (head_num * size_per_head) + head_id * size_per_head + idx] + bias[head_id * size_per_head + idx]; } idx += blockDim.x; } } } // namespace /* (batch_size, seq_len, num_attention_heads, size_per_head) -> (batch_size, head_num, seq_len, size_per_head) */ template <typename T, bool AddBias> void GPUTransposeForScore(const T* input_data, const T* bias, int64_t batch_size, int64_t seq_len, int64_t num_attention_heads, int64_t size_per_head, cudaStream_t stream, T* output_data) { dim3 grid, block; grid.x = batch_size * num_attention_heads * seq_len; block.x = min(1024, int(size_per_head)); transpose<AddBias><<<grid, block, 0, stream>>>(input_data, bias, batch_size, seq_len, num_attention_heads, size_per_head, output_data); } template void GPUTransposeForScore<float, true>( const float* input_data, const float* bias, int64_t batch_size, int64_t seq_len, int64_t num_attention_heads, int64_t size_per_head, cudaStream_t stream, float* output_data); template void GPUTransposeForScore<float, false>( const float* input_data, const float* bias, int64_t batch_size, int64_t seq_len, int64_t num_attention_heads, int64_t size_per_head, cudaStream_t stream, float* output_data); /* (1, sum_seq_len, num_attention_heads, size_per_head) -> (batch_size, head_num, max_seq_len, size_per_head) */ template <> void GPUTransposeForScorePad(const float* input_data, int64_t batch_size, const std::vector<int64_t>& seq_len_list, int64_t num_attention_heads, int64_t size_per_head, cudaStream_t stream, float* output_data) { dim3 grid, block; int64_t max_seq_length = *std::max_element(seq_len_list.begin(), seq_len_list.end()); grid.x = batch_size * num_attention_heads * max_seq_length; block.x = min(1024, int(size_per_head)); int64_t* d_seq_len_list; cudaMalloc((void**)&(d_seq_len_list), batch_size * sizeof(int64_t)); cudaMemcpy(d_seq_len_list, seq_len_list.data(), batch_size * sizeof(int64_t), cudaMemcpyHostToDevice); transpose_back_pad<<<grid, block, 0, stream>>>( input_data, batch_size, max_seq_length, d_seq_len_list, num_attention_heads, size_per_head, output_data); cudaFree(d_seq_len_list); } // (1, sum_seq_len, head, hidden_size) -> (batch, head, max_seq_len, // hidden_size) template <> void GPUAddBiasTransposeForScorePad(const float* input_data, const float* bias_data, const std::vector<int64_t>& seq_len_list, int64_t num_attention_heads, int64_t size_per_head, cudaStream_t stream, float* output_data) { dim3 grid, block; int64_t batch_size = seq_len_list.size(); int64_t max_seq_length = *std::max_element(seq_len_list.begin(), seq_len_list.end()); grid.x = batch_size * num_attention_heads * max_seq_length; block.x = min(1024, int(size_per_head)); int64_t* d_seq_len_list; cudaMalloc((void**)&(d_seq_len_list), batch_size * sizeof(int64_t)); cudaMemcpy(d_seq_len_list, seq_len_list.data(), batch_size * sizeof(int64_t), cudaMemcpyHostToDevice); add_bias_transpose_forward_pad<<<grid, block, 0, stream>>>( input_data, bias_data, batch_size, max_seq_length, d_seq_len_list, num_attention_heads, size_per_head, output_data); cudaFree(d_seq_len_list); } } // namespace kernels } // namespace layers } // namespace turbo_transformers
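// ----------------------------------------------------------------------------
// Hedged CPU reference (not part of the original file).  It reproduces the
// layout transform performed by split_add_bias_transpose_for_score above,
//   input  : (batch, seq_len, weight_num, head_num, size_per_head)
//   bias   : (weight_num, head_num, size_per_head)
//   output : (weight_num, batch, head_num, seq_len, size_per_head)
// so the GPU kernel can be checked element by element on small shapes.  The
// function name is invented for this sketch.
// ----------------------------------------------------------------------------
#include <cstdint>

static void split_add_bias_transpose_cpu(const float* input, const float* bias,
                                         int64_t batch, int64_t seq_len,
                                         int64_t weight_num, int64_t head_num,
                                         int64_t size_per_head, float* output) {
  for (int64_t b = 0; b < batch; ++b)
    for (int64_t s = 0; s < seq_len; ++s)
      for (int64_t w = 0; w < weight_num; ++w)
        for (int64_t h = 0; h < head_num; ++h)
          for (int64_t i = 0; i < size_per_head; ++i) {
            const int64_t src =
                (((b * seq_len + s) * weight_num + w) * head_num + h) * size_per_head + i;
            const int64_t dst =
                (((w * batch + b) * head_num + h) * seq_len + s) * size_per_head + i;
            output[dst] = input[src] + bias[(w * head_num + h) * size_per_head + i];
          }
}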
#include "ImgConvert.h" #include <iostream> using namespace std; // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块尺寸。 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // 宏:DEF_BLOCK_1D // 定义一维线程块尺寸。 #define DEF_BLOCK_1D 512 // Kernel 函数:_cstConvertImgKer(实现将坐标集转化为图像算法) // 将坐标集内的坐标映射到输入图像中并将目标点置为 highpixel 从而实现 // 坐标集与图像的转化。 static __global__ void // Kernel 函数无返回值。 _cstConvertImgKer( CoordiSet incst, // 输入坐标集 ImageCuda inimg, // 输入图像 unsigned char highpixel // 高像素 ); // Kernel 函数:_markImageFlagKer(将图像转化为标志数组) // 将图像转化为标志数组,其中若像素需要记录到坐标集时,对应的标志位为 1,否则为 // 0。 static __global__ void // Kernel 函数无返回值。 _markImageFlagKer( ImageCuda inimg, // 输入图像 int imgflag[], // 输出的图像标志位数组。 ImgConvert imgcvt // 转换算法 CLASS,主要使用其中的转换标志位。 ); // Kernel 函数:_arrangeCstKer(重组坐标点集) // 在计算处图像标志位数组和对应的累加数组之后,将图像的信息转换为坐标点集的信息 // 写入输出坐标点集中。 static __global__ void // Kernel 函数无返回值。 _arrangeCstKer( ImageCuda inimg, // 输入图像 int imgflag[], // 图像标志位数组 int imgacc[], // 图像标志位数组的累加数组 int ptscnt, // 有效坐标点的数量 CoordiSetCuda outcst // 输出坐标集 ); // Kernel 函数:_curConvertImgKer(实现将曲线转化为图像算法) // 将曲线内的坐标映射到输入图像中并将目标点置为 highpixel 从而实现 // 坐标集与图像的转化。 static __global__ void // Kernel 函数无返回值。 _curConvertImgKer( Curve incur, // 输入曲线 ImageCuda inimg, // 输入图像 unsigned char highpixel // 高像素 ); // Kernel 函数:_cstConvertImgKer(实现将坐标集转化为图像算法) static __global__ void _cstConvertImgKer(CoordiSet incst, ImageCuda inimg, unsigned char highpixel) { // index 表示线程处理的像素点的坐标。 int index = blockIdx.x * blockDim.x + threadIdx.x; // 检查坐标点是否越界,如果越界,则不进行处理,一方面节省计算 // 资源,另一方面防止由于段错误导致程序崩溃。 if (index >= incst.count) return; // 获得目标点在图像中的对应位置。 int curpos = incst.tplData[2 * index + 1] * inimg.pitchBytes + incst.tplData[2 * index]; // 将坐标集中坐标在图像中对应的像素点的像素值置为 higpixel。 inimg.imgMeta.imgData[curpos] = highpixel; } // 成员方法:cstConvertToImg(坐标集转化为图像算法) __host__ int ImgConvert::cstConvertToImg(CoordiSet *incst, Image *outimg) { // 局部变量,错误码。 int errcode; // 检查输入坐标集,输出图像是否为空。 if (incst == NULL || outimg == NULL) return NULL_POINTER; // 将输出图像拷贝到 device 端。 errcode = ImageBasicOp::copyToCurrentDevice(outimg); if (errcode != NO_ERROR) return errcode; // 提取输出图像。 ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) { return errcode; } // 将输入坐标集拷贝到 device 端。 errcode = CoordiSetBasicOp::copyToCurrentDevice(incst); if (errcode != NO_ERROR) { return errcode; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 gridsize, blocksize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) / (blocksize.y * 4); // 初始化输入图像内所有的像素点的的像素值为 lowpixel,为转化做准备。 cudaError_t cuerrcode; cuerrcode = cudaMemset(outsubimgCud.imgMeta.imgData, this->lowPixel, sizeof(unsigned char) * outsubimgCud.imgMeta.width * outsubimgCud.imgMeta.height); if (cuerrcode != cudaSuccess) return CUDA_ERROR; // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。调用一维核函数, // 在这里设置线程块内的线程数为 256,用 DEF_BLOCK_1D 表示。 size_t blocksize1, gridsize1; blocksize1 = DEF_BLOCK_1D; gridsize1 = (incst->count + blocksize1 - 1) / blocksize1; // 将输入坐标集转化为输入图像图像,即将坐标集内点映射在图像上点的 // 像素值置为 highpixel。 _cstConvertImgKer<<<gridsize1, blocksize1>>>(*incst, outsubimgCud, this->highPixel); if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; // 处理完毕,退出。 return NO_ERROR; } // Kernel 函数:_markImageFlagKer(将图像转化为标志数组) static __global__ void _markImageFlagKer(ImageCuda inimg, int imgflag[], ImgConvert imgcvt) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并行度 // 
缩减的策略,令一个线程处理 4 个输出像素,这四个像素位于统一列的相邻 4 行 // 上,因此,对于 r 需要进行乘 4 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; // 计算第一个输入坐标点对应的图像数据数组下标。 int inidx = r * inimg.pitchBytes + c; // 计算第一个输出坐标点对应的图像数据数组下标。 int outidx = r * inimg.imgMeta.width + c; // 根据输入像素下标读取输入像素值。 unsigned char curpixel = inimg.imgMeta.imgData[inidx]; // 如果当前像素点为有效像素点,则图像标志位置位,否则置零。 if (imgcvt.getConvertFlag(curpixel)) { imgflag[outidx] = 1; } else { imgflag[outidx] = 0; } // 处理余下的三个点。 for (int i = 1; i < 4; i++) { // 这三个像素点,每个像素点都在前一个的下一行,而 x 分量保持不变。因 // 此,需要检查这个像素点是否越界。检查只针对 y 分量即可,x 分量在各 // 点之间没有变化,故不用检查。 if (++r >= inimg.imgMeta.height) return; // 根据上一个像素点,计算当前像素点的对应的输出图像的下标。由于只有 y // 分量增加 1,所以下标只需要加上一个 pitch 即可,不需要在进行乘法计 // 算。 inidx += inimg.pitchBytes; outidx += inimg.imgMeta.width; curpixel = inimg.imgMeta.imgData[inidx]; // 如果当前像素点为有效像素点,则图像标志位置位,否则置零。 if (imgcvt.getConvertFlag(curpixel)) { imgflag[outidx] = 1; } else { imgflag[outidx] = 0; } } } // Kernel 函数:_arrangeCstKer(重组坐标点集) static __global__ void _arrangeCstKer( ImageCuda inimg, int imgflag[], int imgacc[], int ptscnt, CoordiSetCuda outcst) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; // 计算当前 Thread 所对应的原图像数据下标和图像标志位数组的下标。 int inidx = r * inimg.pitchBytes + c; int flagidx = r * inimg.imgMeta.width + c; // 如果当前计算的像素点是一个无效像素点,则直接退出。 if (imgflag[flagidx] == 0) return; // 获取当前像素点所对应的坐标集中的下标。 int outidx = imgacc[flagidx]; // 计算坐标点集中的附属数据,这里直接使用其像素值所对应的浮点亮度值来作为附 // 属数据。 float curval = inimg.imgMeta.imgData[inidx] / 255.0f; // 完成输出操作。这里使用 while 的原因是,如果坐标点集可容纳的坐标点数量超 // 过了实际的坐标点数量,这样需要重复的防止现有的坐标点(这样做的考虑时为了 // 防止防止了不是有效像素点的坐标点,导致凸壳等算法出错。 while (outidx < outcst.tplMeta.count) { // 将有效像素点存放到坐标点集中,顺带输出了附属数据。 outcst.tplMeta.tplData[2 * outidx] = c; outcst.tplMeta.tplData[2 * outidx + 1] = r; outcst.attachedData[outidx] = curval; // 更新输出下标,如果输出坐标点集容量过大,则循环输出有效坐标点。 outidx += ptscnt; } } // 宏:FAIL_IMGCONVERTTOCST_FREE // 当下面函数运行出错时,使用该宏清除内存,防止内存泄漏。 #define FAIL_IMGCONVERTTOCST_FREE do { \ if (tmpdata != NULL) \ cudaFree(tmpdata); \ } while (0) // 成员方法:imgConvertToCst(图像转化成坐标集算法) __host__ int ImgConvert::imgConvertToCst(Image *inimg, CoordiSet *outcst) { // 检查输入图像,输出坐标集是否为空。 if (inimg == NULL || outcst == NULL) return NULL_POINTER; // 局部变量,错误码。 int errcode; cudaError_t cuerrcode; // 定义加法运算类型 add_class<int> add; // 将输入图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // 计算数据尺寸。 int flagsize = inimg->width * inimg->height; int datasize = (2 * flagsize + 1) * sizeof (int); // 申请存放图像标志位数组何其对应的累加数组的内存空间。 int *tmpdata = NULL; cuerrcode = cudaMalloc((void **)&tmpdata, datasize); if (cuerrcode != cudaSuccess) { FAIL_IMGCONVERTTOCST_FREE; return CUDA_ERROR; } // 将申请的临时内存空间分配给各个指针。 int *imgflagDev = tmpdata; int *imgaccDev = imgflagDev + flagsize; // 提取输入图像对应的 ROI 子图像。 ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) return errcode; // 计算调用第一个 Kernel 所需要的线程块尺寸。 dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (insubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = 
(insubimgCud.imgMeta.height + blocksize.y * 4 - 1) / (blocksize.y * 4); // 调用第一个 Kernel 生成图像标志位数组。 _markImageFlagKer<<<gridsize, blocksize>>>(insubimgCud, imgflagDev, *this); if (cudaGetLastError() != cudaSuccess) { FAIL_IMGCONVERTTOCST_FREE; return CUDA_ERROR; } // 通过扫描算法计算图像标志位数组对应的累加数组。 errcode = this->aryScan.scanArrayExclusive( imgflagDev, imgaccDev, flagsize, add, false, false, false); if (errcode != NO_ERROR) { FAIL_IMGCONVERTTOCST_FREE; return errcode; } // 将累加数组中最后一个元素拷贝到 Host,该数据表示整个图像中有效像素点的数 // 量。 int ptscnt; cuerrcode = cudaMemcpy(&ptscnt, &imgaccDev[flagsize], sizeof (int), cudaMemcpyDeviceToHost); if (cudaGetLastError() != cudaSuccess) { FAIL_IMGCONVERTTOCST_FREE; return CUDA_ERROR; } // 将输出坐标点集拷贝入 Device 内存。 errcode = CoordiSetBasicOp::copyToCurrentDevice(outcst); if (errcode != NO_ERROR) { // 如果输出坐标点击无数据(故上面的拷贝函数会失败),则会创建一个和有效 // 像素点等量的坐标点集。 errcode = CoordiSetBasicOp::makeAtCurrentDevice(outcst, ptscnt); // 如果创建坐标点集也操作失败,则说明操作彻底失败,报错退出。 if (errcode != NO_ERROR) { FAIL_IMGCONVERTTOCST_FREE; return errcode; } } // 获取输出坐标点集对应的 CUDA 型数据。 CoordiSetCuda *outcstCud = COORDISET_CUDA(outcst); // 计算调用第二个 Kernel 所需要的线程块尺寸。 blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (insubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (insubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y; // 调用第二个 Kernel 得到输出坐标点集。 _arrangeCstKer<<<gridsize, blocksize>>>( insubimgCud, imgflagDev, imgaccDev, ptscnt, *outcstCud); if (cudaGetLastError() != cudaSuccess) { FAIL_IMGCONVERTTOCST_FREE; return CUDA_ERROR; } // 释放临时内存空间。 cudaFree(tmpdata); // 处理完毕退出。 return NO_ERROR; } // Kernel 函数:_curConvertImgKer(实现将曲线转化为图像算法) static __global__ void _curConvertImgKer(Curve incur, ImageCuda inimg, unsigned char highpixel) { // index 表示线程处理的像素点的坐标。 int index = blockIdx.x * blockDim.x + threadIdx.x; // 检查坐标点是否越界,如果越界,则不进行处理,一方面节省计算 // 资源,另一方面防止由于段错误导致程序崩溃。 if (index >= incur.curveLength) return; // 获得目标点在图像中的对应位置。 int curpos = incur.crvData[2 * index + 1] * inimg.pitchBytes + incur.crvData[2 * index]; // 将曲线中坐标在图像中对应的像素点的像素值置为 higpixel。 inimg.imgMeta.imgData[curpos] = highpixel; } // Host 成员方法:curConvertToImg(曲线转化为图像算法) __host__ int ImgConvert::curConvertToImg(Curve *incur, Image *outimg) { // 局部变量,错误码。 int errcode; // 检查输入曲线,输出图像是否为空。 if (incur == NULL || outimg == NULL) return NULL_POINTER; // 将输出图像拷贝到 device 端。 errcode = ImageBasicOp::copyToCurrentDevice(outimg); if (errcode != NO_ERROR) return errcode; // 提取输出图像。 ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) { return errcode; } // 将输入曲线拷贝到 device 端。 errcode = CurveBasicOp::copyToCurrentDevice(incur); if (errcode != NO_ERROR) { return errcode; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 gridsize, blocksize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) / (blocksize.y * 4); // 初始化输入图像内所有的像素点的的像素值为 lowpixel,为转化做准备。 cudaError_t cuerrcode; cuerrcode = cudaMemset(outsubimgCud.imgMeta.imgData, this->lowPixel, sizeof(unsigned char) * outsubimgCud.imgMeta.width * outsubimgCud.imgMeta.height); if (cuerrcode != cudaSuccess) return CUDA_ERROR; // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。调用一维核函数, // 在这里设置线程块内的线程数为 256,用 DEF_BLOCK_1D 表示。 size_t blocksize1, gridsize1; blocksize1 = DEF_BLOCK_1D; gridsize1 = (incur->curveLength + blocksize1 - 1) / blocksize1; // 将输入曲线转化为输入图像图像,即将曲线内点映射在图像上点的 // 像素值置为 highpixel。 _curConvertImgKer<<<gridsize1, 
blocksize1>>>(*incur, outsubimgCud, this->highPixel); if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; // 处理完毕,退出。 return NO_ERROR; }
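// ----------------------------------------------------------------------------
// Hedged sketch (not from the original source).  The image -> coordinate-set
// path above is a flag / exclusive-scan / scatter compaction; the same result
// can be reproduced on a plain contiguous device buffer with Thrust, which is
// handy as an independent check of _markImageFlagKer, scanArrayExclusive and
// _arrangeCstKer.  The predicate (pixel > 0) stands in for getConvertFlag(),
// and ignoring the image pitch is an assumption of this sketch.
// ----------------------------------------------------------------------------
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>

struct is_foreground {
  const unsigned char* img;
  __host__ __device__ bool operator()(int idx) const { return img[idx] > 0; }
};

// Returns the linear indices (y * width + x) of all foreground pixels.
static thrust::device_vector<int> foreground_indices(
    const thrust::device_vector<unsigned char>& d_img, int width, int height) {
  thrust::device_vector<int> d_idx(width * height);
  const is_foreground pred{thrust::raw_pointer_cast(d_img.data())};
  auto end = thrust::copy_if(thrust::counting_iterator<int>(0),
                             thrust::counting_iterator<int>(width * height),
                             d_idx.begin(), pred);
  d_idx.resize(end - d_idx.begin());
  return d_idx;
}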
#include <cuml/decomposition/pca.hpp> #include <cuml/decomposition/pca_mg.hpp> #include <cuml/decomposition/sign_flip_mg.hpp> #include <opg/linalg/qr_based_svd.hpp> #include <opg/matrix/matrix_utils.hpp> #include <opg/stats/cov.hpp> #include <opg/stats/mean.hpp> #include <opg/stats/mean_center.hpp> #include <raft/cudart_utils.h> #include <raft/linalg/transpose.h> #include <raft/comms/comms.hpp> #include <raft/cuda_utils.cuh> #include <raft/matrix/math.hpp> #include <raft/stats/mean_center.hpp> #include <cstddef> using namespace MLCommon; namespace ML { namespace PCA { namespace opg { template <typename T> void fit_impl(raft::handle_t& handle, std::vector<Matrix::Data<T>*>& input_data, Matrix::PartDescriptor& input_desc, T* components, T* explained_var, T* explained_var_ratio, T* singular_vals, T* mu, T* noise_vars, paramsPCAMG prms, cudaStream_t* streams, std::uint32_t n_streams, bool verbose) { const auto& comm = handle.get_comms(); Matrix::Data<T> mu_data{mu, prms.n_cols}; Stats::opg::mean(handle, mu_data, input_data, input_desc, streams, n_streams); rmm::device_uvector<T> cov_data(prms.n_cols * prms.n_cols, streams[0]); auto cov_data_size = cov_data.size(); Matrix::Data<T> cov{cov_data.data(), cov_data_size}; Stats::opg::cov(handle, cov, input_data, input_desc, mu_data, true, streams, n_streams); ML::truncCompExpVars<T, mg_solver>( handle, cov.ptr, components, explained_var, explained_var_ratio, prms, streams[0]); T scalar = (prms.n_rows - 1); raft::matrix::seqRoot(explained_var, singular_vals, scalar, prms.n_components, streams[0], true); Stats::opg::mean_add(input_data, input_desc, mu_data, comm, streams, n_streams); } /** * @brief performs MNMG fit operation for the pca * @input param handle: the internal cuml handle object * @input param rank_sizes: includes all the partition size information for the rank * @input param input: input data * @input param components: principal components of the input data * @output param explained_var: explained var * @output param explained_var_ratio: the explained var ratio * @output param singular_vals: singular values of the data * @output param mu: mean of every column in input * @output param noise_vars: variance of the noise * @input param prms: data structure that includes all the parameters from input size to algorithm * @input param verbose */ template <typename T> void fit_impl(raft::handle_t& handle, std::vector<Matrix::Data<T>*>& input_data, Matrix::PartDescriptor& input_desc, T* components, T* explained_var, T* explained_var_ratio, T* singular_vals, T* mu, T* noise_vars, paramsPCAMG prms, bool verbose) { int rank = handle.get_comms().get_rank(); // TODO: These streams should come from raft::handle_t // Reference issue https://github.com/rapidsai/cuml/issues/2470 auto n_streams = input_desc.blocksOwnedBy(rank).size(); cudaStream_t streams[n_streams]; for (std::uint32_t i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamCreate(&streams[i])); } if (prms.algorithm == mg_solver::COV_EIG_JACOBI || prms.algorithm == mg_solver::COV_EIG_DQ) { fit_impl(handle, input_data, input_desc, components, explained_var, explained_var_ratio, singular_vals, mu, noise_vars, prms, streams, n_streams, verbose); for (std::uint32_t i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamSynchronize(streams[i])); } } else if (prms.algorithm == mg_solver::QR) { const raft::handle_t& h = handle; cudaStream_t stream = h.get_stream(); const auto& comm = h.get_comms(); // Center the data Matrix::Data<T> mu_data{mu, prms.n_cols}; Stats::opg::mean(handle, mu_data, input_data, 
input_desc, streams, n_streams); Stats::opg::mean_center(input_data, input_desc, mu_data, comm, streams, n_streams); for (std::uint32_t i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamSynchronize(streams[i])); } // Allocate Q, S and V and call QR std::vector<Matrix::Data<T>*> uMatrixParts; Matrix::opg::allocate(h, uMatrixParts, input_desc, rank, stream); rmm::device_uvector<T> sVector(prms.n_cols, stream); rmm::device_uvector<T> vMatrix(prms.n_cols * prms.n_cols, stream); CUDA_CHECK(cudaMemset(vMatrix.data(), 0, prms.n_cols * prms.n_cols * sizeof(T))); LinAlg::opg::svdQR(h, sVector.data(), uMatrixParts, vMatrix.data(), true, true, prms.tol, prms.n_iterations, input_data, input_desc, rank); // sign flip sign_flip(handle, uMatrixParts, input_desc, vMatrix.data(), prms.n_cols, streams, n_streams); // Calculate instance variables rmm::device_uvector<T> explained_var_all(prms.n_cols, stream); rmm::device_uvector<T> explained_var_ratio_all(prms.n_cols, stream); T scalar = 1.0 / (prms.n_rows - 1); raft::matrix::power(sVector.data(), explained_var_all.data(), scalar, prms.n_cols, stream); raft::matrix::ratio( handle, explained_var_all.data(), explained_var_ratio_all.data(), prms.n_cols, stream); raft::matrix::truncZeroOrigin( sVector.data(), prms.n_cols, singular_vals, prms.n_components, std::size_t(1), stream); raft::matrix::truncZeroOrigin(explained_var_all.data(), prms.n_cols, explained_var, prms.n_components, std::size_t(1), stream); raft::matrix::truncZeroOrigin(explained_var_ratio_all.data(), prms.n_cols, explained_var_ratio, prms.n_components, std::size_t(1), stream); raft::linalg::transpose(vMatrix.data(), prms.n_cols, stream); raft::matrix::truncZeroOrigin( vMatrix.data(), prms.n_cols, components, prms.n_components, prms.n_cols, stream); Matrix::opg::deallocate(h, uMatrixParts, input_desc, rank, stream); // Re-add mean to centered data Stats::opg::mean_add(input_data, input_desc, mu_data, comm, streams, n_streams); } for (std::uint32_t i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamSynchronize(streams[i])); } for (std::uint32_t i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamDestroy(streams[i])); } } template <typename T> void transform_impl(raft::handle_t& handle, std::vector<Matrix::Data<T>*>& input, const Matrix::PartDescriptor input_desc, T* components, std::vector<Matrix::Data<T>*>& trans_input, T* singular_vals, T* mu, const paramsPCAMG prms, cudaStream_t* streams, std::uint32_t n_streams, bool verbose) { std::vector<Matrix::RankSizePair*> local_blocks = input_desc.partsToRanks; if (prms.whiten) { T scalar = T(sqrt(prms.n_rows - 1)); raft::linalg::scalarMultiply( components, components, scalar, prms.n_cols * prms.n_components, streams[0]); raft::matrix::matrixVectorBinaryDivSkipZero( components, singular_vals, prms.n_cols, prms.n_components, true, true, streams[0]); } for (std::size_t i = 0; i < input.size(); i++) { auto si = i % n_streams; raft::stats::meanCenter(input[i]->ptr, input[i]->ptr, mu, prms.n_cols, local_blocks[i]->size, false, true, streams[si]); T alpha = T(1); T beta = T(0); raft::linalg::gemm(handle, input[i]->ptr, local_blocks[i]->size, prms.n_cols, components, trans_input[i]->ptr, local_blocks[i]->size, prms.n_components, CUBLAS_OP_N, CUBLAS_OP_T, alpha, beta, streams[si]); raft::stats::meanAdd(input[i]->ptr, input[i]->ptr, mu, prms.n_cols, local_blocks[i]->size, false, true, streams[si]); } if (prms.whiten) { raft::matrix::matrixVectorBinaryMultSkipZero( components, singular_vals, prms.n_cols, prms.n_components, true, true, streams[0]); T scalar = T(1 / 
sqrt(prms.n_rows - 1)); raft::linalg::scalarMultiply( components, components, scalar, prms.n_cols * prms.n_components, streams[0]); } for (std::uint32_t i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamSynchronize(streams[i])); } } /** * @brief performs MNMG transform operation for the pca. * @input param handle: the internal cuml handle object * @input param rank_sizes: includes all the partition size information for the rank * @input param n_parts: number of partitions * @input param input: input data * @input param components: principal components of the input data * @output param trans_input: transformed input data * @input param singular_vals: singular values of the data * @input param mu: mean of every column in input * @input param prms: data structure that includes all the parameters from input size to algorithm * @input param verbose */ template <typename T> void transform_impl(raft::handle_t& handle, Matrix::RankSizePair** rank_sizes, std::uint32_t n_parts, Matrix::Data<T>** input, T* components, Matrix::Data<T>** trans_input, T* singular_vals, T* mu, paramsPCAMG prms, bool verbose) { // We want to update the API of this function, and other functions with // regards to https://github.com/rapidsai/cuml/issues/2471 int rank = handle.get_comms().get_rank(); std::vector<Matrix::RankSizePair*> ranksAndSizes(rank_sizes, rank_sizes + n_parts); std::vector<Matrix::Data<T>*> input_data(input, input + n_parts); Matrix::PartDescriptor input_desc(prms.n_rows, prms.n_cols, ranksAndSizes, rank); std::vector<Matrix::Data<T>*> trans_data(trans_input, trans_input + n_parts); // TODO: These streams should come from raft::handle_t auto n_streams = n_parts; cudaStream_t streams[n_streams]; for (std::uint32_t i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamCreate(&streams[i])); } transform_impl(handle, input_data, input_desc, components, trans_data, singular_vals, mu, prms, streams, n_streams, verbose); for (std::uint32_t i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamSynchronize(streams[i])); } for (std::uint32_t i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamDestroy(streams[i])); } } template <typename T> void inverse_transform_impl(raft::handle_t& handle, std::vector<Matrix::Data<T>*>& trans_input, Matrix::PartDescriptor trans_input_desc, T* components, std::vector<Matrix::Data<T>*>& input, T* singular_vals, T* mu, paramsPCAMG prms, cudaStream_t* streams, std::uint32_t n_streams, bool verbose) { std::vector<Matrix::RankSizePair*> local_blocks = trans_input_desc.partsToRanks; if (prms.whiten) { T scalar = T(1 / sqrt(prms.n_rows - 1)); raft::linalg::scalarMultiply( components, components, scalar, prms.n_rows * prms.n_components, streams[0]); raft::matrix::matrixVectorBinaryMultSkipZero( components, singular_vals, prms.n_rows, prms.n_components, true, true, streams[0]); } for (std::size_t i = 0; i < local_blocks.size(); i++) { auto si = i % n_streams; T alpha = T(1); T beta = T(0); raft::linalg::gemm(handle, trans_input[i]->ptr, local_blocks[i]->size, prms.n_components, components, input[i]->ptr, local_blocks[i]->size, prms.n_cols, CUBLAS_OP_N, CUBLAS_OP_N, alpha, beta, streams[si]); raft::stats::meanAdd(input[i]->ptr, input[i]->ptr, mu, prms.n_cols, local_blocks[i]->size, false, true, streams[si]); } if (prms.whiten) { raft::matrix::matrixVectorBinaryDivSkipZero( components, singular_vals, prms.n_rows, prms.n_components, true, true, streams[0]); T scalar = T(sqrt(prms.n_rows - 1)); raft::linalg::scalarMultiply( components, components, scalar, prms.n_rows * prms.n_components, streams[0]); } 
for (std::uint32_t i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamSynchronize(streams[i])); } } /** * @brief performs MNMG inverse transform operation for the pca. * @input param handle: the internal cuml handle object * @input param rank_sizes: includes all the partition size information for the rank * @input param n_parts: number of partitions * @input param trans_input: transformed input data * @input param components: principal components of the input data * @output param input: input data * @input param singular_vals: singular values of the data * @input param mu: mean of every column in input * @input param prms: data structure that includes all the parameters from input size to algorithm * @input param verbose */ template <typename T> void inverse_transform_impl(raft::handle_t& handle, Matrix::RankSizePair** rank_sizes, std::uint32_t n_parts, Matrix::Data<T>** trans_input, T* components, Matrix::Data<T>** input, T* singular_vals, T* mu, paramsPCAMG prms, bool verbose) { int rank = handle.get_comms().get_rank(); std::vector<Matrix::RankSizePair*> ranksAndSizes(rank_sizes, rank_sizes + n_parts); Matrix::PartDescriptor trans_desc(prms.n_rows, prms.n_components, ranksAndSizes, rank); std::vector<Matrix::Data<T>*> trans_data(trans_input, trans_input + n_parts); std::vector<Matrix::Data<T>*> input_data(input, input + n_parts); // TODO: These streams should come from raft::handle_t auto n_streams = n_parts; cudaStream_t streams[n_streams]; for (std::uint32_t i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamCreate(&streams[i])); } inverse_transform_impl(handle, trans_data, trans_desc, components, input_data, singular_vals, mu, prms, streams, n_streams, verbose); for (std::uint32_t i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamSynchronize(streams[i])); } for (std::uint32_t i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamDestroy(streams[i])); } } /** * @brief performs MNMG fit and transform operation for the pca. 
* @input param handle: the internal cuml handle object * @input param rank_sizes: includes all the partition size information for the rank * @input param n_parts: number of partitions * @input param input: input data * @output param trans_input: transformed input data * @output param components: principal components of the input data * @output param explained_var: explained var * @output param explained_var_ratio: the explained var ratio * @output param singular_vals: singular values of the data * @output param mu: mean of every column in input * @output param noise_vars: variance of the noise * @input param prms: data structure that includes all the parameters from input size to algorithm * @input param verbose */ template <typename T> void fit_transform_impl(raft::handle_t& handle, Matrix::RankSizePair** rank_sizes, std::uint32_t n_parts, Matrix::Data<T>** input, Matrix::Data<T>** trans_input, T* components, T* explained_var, T* explained_var_ratio, T* singular_vals, T* mu, T* noise_vars, paramsPCAMG prms, bool verbose) { int rank = handle.get_comms().get_rank(); std::vector<Matrix::RankSizePair*> ranksAndSizes(rank_sizes, rank_sizes + n_parts); std::vector<Matrix::Data<T>*> input_data(input, input + n_parts); Matrix::PartDescriptor input_desc(prms.n_rows, prms.n_cols, ranksAndSizes, rank); std::vector<Matrix::Data<T>*> trans_data(trans_input, trans_input + n_parts); // TODO: These streams should come from raft::handle_t auto n_streams = n_parts; cudaStream_t streams[n_streams]; for (std::uint32_t i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamCreate(&streams[i])); } fit_impl(handle, input_data, input_desc, components, explained_var, explained_var_ratio, singular_vals, mu, noise_vars, prms, streams, n_streams, verbose); transform_impl(handle, input_data, input_desc, components, trans_data, singular_vals, mu, prms, streams, n_streams, verbose); sign_flip(handle, trans_data, input_desc, components, prms.n_components, streams, n_streams); for (std::uint32_t i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamSynchronize(streams[i])); } for (std::uint32_t i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamDestroy(streams[i])); } } void fit(raft::handle_t& handle, std::vector<Matrix::Data<float>*>& input_data, Matrix::PartDescriptor& input_desc, float* components, float* explained_var, float* explained_var_ratio, float* singular_vals, float* mu, float* noise_vars, paramsPCAMG prms, bool verbose) { fit_impl(handle, input_data, input_desc, components, explained_var, explained_var_ratio, singular_vals, mu, noise_vars, prms, verbose); } void fit(raft::handle_t& handle, std::vector<Matrix::Data<double>*>& input_data, Matrix::PartDescriptor& input_desc, double* components, double* explained_var, double* explained_var_ratio, double* singular_vals, double* mu, double* noise_vars, paramsPCAMG prms, bool verbose) { fit_impl(handle, input_data, input_desc, components, explained_var, explained_var_ratio, singular_vals, mu, noise_vars, prms, verbose); } void fit_transform(raft::handle_t& handle, Matrix::RankSizePair** rank_sizes, std::uint32_t n_parts, Matrix::floatData_t** input, Matrix::floatData_t** trans_input, float* components, float* explained_var, float* explained_var_ratio, float* singular_vals, float* mu, float* noise_vars, paramsPCAMG prms, bool verbose) { fit_transform_impl(handle, rank_sizes, n_parts, input, trans_input, components, explained_var, explained_var_ratio, singular_vals, mu, noise_vars, prms, verbose); } void fit_transform(raft::handle_t& handle, Matrix::RankSizePair** rank_sizes, 
std::uint32_t n_parts, Matrix::doubleData_t** input, Matrix::doubleData_t** trans_input, double* components, double* explained_var, double* explained_var_ratio, double* singular_vals, double* mu, double* noise_vars, paramsPCAMG prms, bool verbose) { fit_transform_impl(handle, rank_sizes, n_parts, input, trans_input, components, explained_var, explained_var_ratio, singular_vals, mu, noise_vars, prms, verbose); } void transform(raft::handle_t& handle, Matrix::RankSizePair** rank_sizes, std::uint32_t n_parts, Matrix::Data<float>** input, float* components, Matrix::Data<float>** trans_input, float* singular_vals, float* mu, paramsPCAMG prms, bool verbose) { transform_impl( handle, rank_sizes, n_parts, input, components, trans_input, singular_vals, mu, prms, verbose); } void transform(raft::handle_t& handle, Matrix::RankSizePair** rank_sizes, std::uint32_t n_parts, Matrix::Data<double>** input, double* components, Matrix::Data<double>** trans_input, double* singular_vals, double* mu, paramsPCAMG prms, bool verbose) { transform_impl( handle, rank_sizes, n_parts, input, components, trans_input, singular_vals, mu, prms, verbose); } void inverse_transform(raft::handle_t& handle, Matrix::RankSizePair** rank_sizes, std::uint32_t n_parts, Matrix::Data<float>** trans_input, float* components, Matrix::Data<float>** input, float* singular_vals, float* mu, paramsPCAMG prms, bool verbose) { inverse_transform_impl( handle, rank_sizes, n_parts, trans_input, components, input, singular_vals, mu, prms, verbose); } void inverse_transform(raft::handle_t& handle, Matrix::RankSizePair** rank_sizes, std::uint32_t n_parts, Matrix::Data<double>** trans_input, double* components, Matrix::Data<double>** input, double* singular_vals, double* mu, paramsPCAMG prms, bool verbose) { inverse_transform_impl( handle, rank_sizes, n_parts, trans_input, components, input, singular_vals, mu, prms, verbose); } } // namespace opg } // namespace PCA } // namespace ML
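The fit/transform wrappers above create one CUDA stream per locally owned partition, dispatch the per-partition work round-robin onto those streams, then synchronize and destroy them (a TODO notes the streams should eventually come from raft::handle_t). The snippet below is a minimal, self-contained sketch of that create/launch/sync/destroy pattern outside of cuML; the dummy scale_kernel, n_parts, and part_rows are illustrative and not part of the library.

#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

// Hypothetical per-partition kernel; stands in for the per-block GEMM /
// mean-center work dispatched by transform_impl above.
__global__ void scale_kernel(float* data, int n, float alpha)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= alpha;
}

int main()
{
    const int n_parts   = 4;        // number of local partitions (illustrative)
    const int part_rows = 1 << 16;  // elements per partition (illustrative)

    // One stream per owned partition, as in fit_impl/transform_impl.
    std::vector<cudaStream_t> streams(n_parts);
    for (auto& s : streams) cudaStreamCreate(&s);

    std::vector<float*> parts(n_parts);
    for (int i = 0; i < n_parts; i++) {
        cudaMalloc(&parts[i], part_rows * sizeof(float));
        cudaMemsetAsync(parts[i], 0, part_rows * sizeof(float), streams[i]);
    }

    // Round-robin dispatch: partition i runs on stream i % n_streams.
    for (int i = 0; i < n_parts; i++) {
        const int si = i % n_parts;
        scale_kernel<<<(part_rows + 255) / 256, 256, 0, streams[si]>>>(
            parts[i], part_rows, 2.0f);
    }

    // Synchronize everything, release the buffers, then tear the streams down,
    // mirroring the cleanup loops in the wrappers above.
    for (auto& s : streams) cudaStreamSynchronize(s);
    for (int i = 0; i < n_parts; i++) cudaFree(parts[i]);
    for (auto& s : streams) cudaStreamDestroy(s);

    printf("done\n");
    return 0;
}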
namespace simtbx { namespace gpu { namespace af = scitbx::af; //refactor later into helper file static cudaError_t cudaMemcpyVectorDoubleToDevice(CUDAREAL *dst, const double *src, size_t vector_items) { CUDAREAL * temp = new CUDAREAL[vector_items]; for (size_t i = 0; i < vector_items; i++) { temp[i] = src[i]; } cudaError_t ret = cudaMemcpy(dst, temp, sizeof(*dst) * vector_items, cudaMemcpyHostToDevice); delete temp; return ret; } /* make a unit vector pointing in same direction and report magnitude (both args can be same vector) */ double cpu_unitize(const double * vector, double * new_unit_vector) { double v1 = vector[1]; double v2 = vector[2]; double v3 = vector[3]; double mag = sqrt(v1 * v1 + v2 * v2 + v3 * v3); if (mag != 0.0) { /* normalize it */ new_unit_vector[0] = mag; new_unit_vector[1] = v1 / mag; new_unit_vector[2] = v2 / mag; new_unit_vector[3] = v3 / mag; } else { /* can't normalize, report zero vector */ new_unit_vector[0] = 0.0; new_unit_vector[1] = 0.0; new_unit_vector[2] = 0.0; new_unit_vector[3] = 0.0; } return mag; } void exascale_api::show(){ SCITBX_EXAMINE(SIM.roi_xmin); SCITBX_EXAMINE(SIM.roi_xmax); SCITBX_EXAMINE(SIM.roi_ymin); SCITBX_EXAMINE(SIM.roi_ymax); SCITBX_EXAMINE(SIM.oversample); SCITBX_EXAMINE(SIM.point_pixel); SCITBX_EXAMINE(SIM.pixel_size); SCITBX_EXAMINE(cu_subpixel_size); SCITBX_EXAMINE(cu_steps); SCITBX_EXAMINE(SIM.detector_thickstep); SCITBX_EXAMINE(SIM.detector_thicksteps); SCITBX_EXAMINE(SIM.detector_thick); SCITBX_EXAMINE(SIM.detector_attnlen); SCITBX_EXAMINE(SIM.curved_detector); SCITBX_EXAMINE(SIM.distance); SCITBX_EXAMINE(SIM.close_distance); SCITBX_EXAMINE(SIM.dmin); SCITBX_EXAMINE(SIM.phi0); SCITBX_EXAMINE(SIM.phistep); SCITBX_EXAMINE(SIM.phisteps); SCITBX_EXAMINE(SIM.sources); SCITBX_EXAMINE(SIM.mosaic_spread); SCITBX_EXAMINE(SIM.mosaic_domains); SCITBX_EXAMINE(SIM.Na); SCITBX_EXAMINE(SIM.Nb); SCITBX_EXAMINE(SIM.Nc); SCITBX_EXAMINE(SIM.fluence); SCITBX_EXAMINE(SIM.spot_scale); SCITBX_EXAMINE(SIM.integral_form); SCITBX_EXAMINE(SIM.default_F); SCITBX_EXAMINE(SIM.interpolate); SCITBX_EXAMINE(SIM.nopolar); SCITBX_EXAMINE(SIM.polarization); SCITBX_EXAMINE(SIM.fudge); } void exascale_api::add_energy_channel_from_gpu_amplitudes( int const& ichannel, simtbx::gpu::gpu_energy_channels & gec, simtbx::gpu::gpu_detector & gdt ){ cudaSafeCall(cudaSetDevice(SIM.device_Id)); // transfer source_I, source_lambda // the int arguments are for sizes of the arrays cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_I, SIM.source_I, SIM.sources)); cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_lambda, SIM.source_lambda, SIM.sources)); // magic happens here: take pointer from singleton, temporarily use it for add Bragg iteration: cu_current_channel_Fhkl = gec.d_channel_Fhkl[ichannel]; cudaDeviceProp deviceProps = { 0 }; cudaSafeCall(cudaGetDeviceProperties(&deviceProps, SIM.device_Id)); int smCount = deviceProps.multiProcessorCount; dim3 threadsPerBlock(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y); dim3 numBlocks(smCount * 8, 1); std::size_t panel_size = gdt.cu_slow_pixels * gdt.cu_fast_pixels; const int vec_len = 4; // the for loop around panels. Offsets given. 
for (std::size_t idx_p = 0; idx_p < gdt.cu_n_panels; idx_p++){ // loop thru panels and increment the array ptrs nanoBraggSpotsCUDAKernel<<<numBlocks, threadsPerBlock>>>( gdt.cu_slow_pixels, gdt.cu_fast_pixels, SIM.roi_xmin, SIM.roi_xmax, SIM.roi_ymin, SIM.roi_ymax, SIM.oversample, SIM.point_pixel, SIM.pixel_size, cu_subpixel_size, cu_steps, SIM.detector_thickstep, SIM.detector_thicksteps, SIM.detector_thick, SIM.detector_attnlen, &(gdt.cu_sdet_vector[vec_len * idx_p]), &(gdt.cu_fdet_vector[vec_len * idx_p]), &(gdt.cu_odet_vector[vec_len * idx_p]), &(gdt.cu_pix0_vector[vec_len * idx_p]), SIM.curved_detector, gdt.metrology.dists[idx_p], gdt.metrology.dists[idx_p], cu_beam_vector, gdt.metrology.Xbeam[idx_p], gdt.metrology.Ybeam[idx_p], SIM.dmin, SIM.phi0, SIM.phistep, SIM.phisteps, cu_spindle_vector, SIM.sources, cu_source_X, cu_source_Y, cu_source_Z, cu_source_I, cu_source_lambda, cu_a0, cu_b0, cu_c0, SIM.xtal_shape, SIM.mosaic_spread, SIM.mosaic_domains, cu_mosaic_umats, SIM.Na, SIM.Nb, SIM.Nc, SIM.V_cell, cu_water_size, cu_water_F, cu_water_MW, simtbx::nanoBragg::r_e_sqr, SIM.fluence, simtbx::nanoBragg::Avogadro, SIM.spot_scale, SIM.integral_form, SIM.default_F, SIM.interpolate, cu_current_channel_Fhkl, gec.cu_FhklParams, SIM.nopolar, cu_polar_vector, SIM.polarization, SIM.fudge, /* &(gdt.cu_maskimage[panel_size * idx_p]), */ NULL, &(gdt.cu_floatimage[panel_size * idx_p]) /*out*/, &(gdt.cu_omega_reduction[panel_size * idx_p]) /*out*/, &(gdt.cu_max_I_x_reduction[panel_size * idx_p]) /*out*/, &(gdt.cu_max_I_y_reduction[panel_size * idx_p]) /*out*/, &(gdt.cu_rangemap[panel_size * idx_p]) /*out*/); cudaSafeCall(cudaPeekAtLastError()); } cudaSafeCall(cudaDeviceSynchronize()); //don't want to free the gec data when the nanoBragg goes out of scope, so switch the pointer cu_current_channel_Fhkl = NULL; add_array_CUDAKernel<<<numBlocks, threadsPerBlock>>>(gdt.cu_accumulate_floatimage, gdt.cu_floatimage, gdt.cu_n_panels * gdt.cu_slow_pixels * gdt.cu_fast_pixels); } void exascale_api::add_energy_channel_mask_allpanel( int const& ichannel, simtbx::gpu::gpu_energy_channels & gec, simtbx::gpu::gpu_detector & gdt, af::shared<bool> all_panel_mask ){ // here or there, need to convert the all_panel_mask (3D map) into a 1D list of accepted pixels // coordinates for the active pixel list are absolute offsets into the detector array af::shared<int> active_pixel_list; const bool* jptr = all_panel_mask.begin(); for (int j=0; j < all_panel_mask.size(); ++j){ if (jptr[j]) { active_pixel_list.push_back(j); } } add_energy_channel_mask_allpanel( ichannel, gec, gdt, active_pixel_list); } void exascale_api::add_energy_channel_mask_allpanel( int const& ichannel, simtbx::gpu::gpu_energy_channels & gec, simtbx::gpu::gpu_detector & gdt, af::shared<int> const active_pixel_list ){ cudaSafeCall(cudaSetDevice(SIM.device_Id)); gdt.set_active_pixels_on_GPU(active_pixel_list); // transfer source_I, source_lambda // the int arguments are for sizes of the arrays cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_I, SIM.source_I, SIM.sources)); cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_lambda, SIM.source_lambda, SIM.sources)); // magic happens here: take pointer from singleton, temporarily use it for add Bragg iteration: cu_current_channel_Fhkl = gec.d_channel_Fhkl[ichannel]; cudaDeviceProp deviceProps = { 0 }; cudaSafeCall(cudaGetDeviceProperties(&deviceProps, SIM.device_Id)); int smCount = deviceProps.multiProcessorCount; dim3 threadsPerBlock(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y); dim3 numBlocks(smCount * 8, 1); 
const int vec_len = 4; // for call for all panels at the same time debranch_maskall_CUDAKernel<<<numBlocks, threadsPerBlock>>>( gdt.cu_n_panels, gdt.cu_slow_pixels, gdt.cu_fast_pixels, active_pixel_list.size(), SIM.oversample, SIM.point_pixel, SIM.pixel_size, cu_subpixel_size, cu_steps, SIM.detector_thickstep, SIM.detector_thicksteps, SIM.detector_thick, SIM.detector_attnlen, vec_len, gdt.cu_sdet_vector, gdt.cu_fdet_vector, gdt.cu_odet_vector, gdt.cu_pix0_vector, gdt.cu_distance, gdt.cu_distance, cu_beam_vector, gdt.cu_Xbeam, gdt.cu_Ybeam, SIM.dmin, SIM.phi0, SIM.phistep, SIM.phisteps, cu_spindle_vector, SIM.sources, cu_source_X, cu_source_Y, cu_source_Z, cu_source_I, cu_source_lambda, cu_a0, cu_b0, cu_c0, SIM.xtal_shape, SIM.mosaic_domains, cu_mosaic_umats, SIM.Na, SIM.Nb, SIM.Nc, SIM.V_cell, cu_water_size, cu_water_F, cu_water_MW, simtbx::nanoBragg::r_e_sqr, SIM.fluence, simtbx::nanoBragg::Avogadro, SIM.spot_scale, SIM.integral_form, SIM.default_F, cu_current_channel_Fhkl, gec.cu_FhklParams, SIM.nopolar, cu_polar_vector, SIM.polarization, SIM.fudge, gdt.cu_active_pixel_list, gdt.cu_floatimage /*out*/, gdt.cu_omega_reduction /*out*/, gdt.cu_max_I_x_reduction /*out*/, gdt.cu_max_I_y_reduction /*out*/, gdt.cu_rangemap /*out*/); cudaSafeCall(cudaPeekAtLastError()); cudaSafeCall(cudaDeviceSynchronize()); //don't want to free the gec data when the nanoBragg goes out of scope, so switch the pointer cu_current_channel_Fhkl = NULL; add_array_CUDAKernel<<<numBlocks, threadsPerBlock>>>(gdt.cu_accumulate_floatimage, gdt.cu_floatimage, gdt.cu_n_panels * gdt.cu_slow_pixels * gdt.cu_fast_pixels); } void exascale_api::add_background(simtbx::gpu::gpu_detector & gdt, int const& override_source){ cudaSafeCall(cudaSetDevice(SIM.device_Id)); // transfer source_I, source_lambda // the int arguments are for sizes of the arrays cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_I, SIM.source_I, SIM.sources)); cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_lambda, SIM.source_lambda, SIM.sources)); CUDAREAL * cu_stol_of; cudaSafeCall(cudaMalloc((void ** )&cu_stol_of, sizeof(*cu_stol_of) * SIM.stols)); cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_stol_of, SIM.stol_of, SIM.stols)); CUDAREAL * cu_Fbg_of; cudaSafeCall(cudaMalloc((void ** )&cu_Fbg_of, sizeof(*cu_Fbg_of) * SIM.stols)); cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_Fbg_of, SIM.Fbg_of, SIM.stols)); cudaDeviceProp deviceProps = { 0 }; cudaSafeCall(cudaGetDeviceProperties(&deviceProps, SIM.device_Id)); int smCount = deviceProps.multiProcessorCount; dim3 threadsPerBlock(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y); dim3 numBlocks(smCount * 8, 1); // initialize the device memory within a kernel. // modify the arguments to initialize multipanel detector. nanoBraggSpotsInitCUDAKernel<<<numBlocks, threadsPerBlock>>>( gdt.cu_n_panels * gdt.cu_slow_pixels, gdt.cu_fast_pixels, gdt.cu_floatimage, gdt.cu_omega_reduction, gdt.cu_max_I_x_reduction, gdt.cu_max_I_y_reduction, gdt.cu_rangemap); cudaSafeCall(cudaPeekAtLastError()); cudaSafeCall(cudaDeviceSynchronize()); std::size_t panel_size = gdt.cu_slow_pixels * gdt.cu_fast_pixels; const int vec_len = 4; // the for loop around panels. Offsets given. 
for (std::size_t idx_p = 0; idx_p < gdt.cu_n_panels; idx_p++){ add_background_CUDAKernel<<<numBlocks, threadsPerBlock>>>(SIM.sources, SIM.oversample, override_source, SIM.pixel_size, gdt.cu_slow_pixels, gdt.cu_fast_pixels, SIM.detector_thicksteps, SIM.detector_thickstep, SIM.detector_attnlen, &(gdt.cu_sdet_vector[vec_len * idx_p]), &(gdt.cu_fdet_vector[vec_len * idx_p]), &(gdt.cu_odet_vector[vec_len * idx_p]), &(gdt.cu_pix0_vector[vec_len * idx_p]), gdt.metrology.dists[idx_p], SIM.point_pixel, SIM.detector_thick, cu_source_X, cu_source_Y, cu_source_Z, cu_source_lambda, cu_source_I, SIM.stols, cu_stol_of, cu_Fbg_of, SIM.nopolar, SIM.polarization, cu_polar_vector, simtbx::nanoBragg::r_e_sqr, SIM.fluence, SIM.amorphous_molecules, &(gdt.cu_floatimage[panel_size * idx_p]) /*out*/); cudaSafeCall(cudaPeekAtLastError()); } cudaSafeCall(cudaDeviceSynchronize()); add_array_CUDAKernel<<<numBlocks, threadsPerBlock>>>(gdt.cu_accumulate_floatimage, gdt.cu_floatimage, gdt.cu_n_panels * gdt.cu_slow_pixels * gdt.cu_fast_pixels); cudaSafeCall(cudaFree(cu_stol_of)); cudaSafeCall(cudaFree(cu_Fbg_of)); } void exascale_api::allocate(){ cudaSafeCall(cudaSetDevice(SIM.device_Id)); /* water_size not defined in class, CLI argument, defaults to 0 */ double water_size = 0.0; /* missing constants */ double water_F = 2.57; double water_MW = 18.0; /* make sure we are normalizing with the right number of sub-steps */ int nb_steps = SIM.phisteps*SIM.mosaic_domains*SIM.oversample*SIM.oversample; double nb_subpixel_size = SIM.pixel_size/SIM.oversample; /*create transfer arguments to device space*/ cu_subpixel_size = nb_subpixel_size; //check for conflict? cu_steps = nb_steps; //check for conflict? /* presumably thickness and attenuation can be migrated to the gpu detector class XXX FIXME*/ //cu_detector_thick = SIM.detector_thick; //cu_detector_mu = SIM.detector_attnlen; // synonyms //cu_distance = SIM.distance; /* distance and close distance, detector properties? XXX FIXME */ //cu_close_distance = SIM.close_distance; cu_water_size = water_size; cu_water_F = water_F; cu_water_MW = water_MW; const int vector_length = 4; int cu_sources = SIM.sources; int cu_mosaic_domains = SIM.mosaic_domains; cudaSafeCall(cudaMalloc((void ** )&cu_beam_vector, sizeof(*cu_beam_vector) * vector_length)); cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_beam_vector, SIM.beam_vector, vector_length)); cudaSafeCall(cudaMalloc((void ** )&cu_spindle_vector, sizeof(*cu_spindle_vector) * vector_length)); cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_spindle_vector, SIM.spindle_vector, vector_length)); cudaSafeCall(cudaMalloc((void ** )&cu_a0, sizeof(*cu_a0) * vector_length)); cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_a0, SIM.a0, vector_length)); cudaSafeCall(cudaMalloc((void ** )&cu_b0, sizeof(*cu_b0) * vector_length)); cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_b0, SIM.b0, vector_length)); cudaSafeCall(cudaMalloc((void ** )&cu_c0, sizeof(*cu_c0) * vector_length)); cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_c0, SIM.c0, vector_length)); // Unitize polar vector before sending it to the GPU. // Optimization do it only once here rather than multiple time per pixel in the GPU. 
double polar_vector_unitized[4]; cpu_unitize(SIM.polar_vector, polar_vector_unitized); cudaSafeCall(cudaMalloc((void ** )&cu_polar_vector, sizeof(*cu_polar_vector) * vector_length)); cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_polar_vector, polar_vector_unitized, vector_length)); cudaSafeCall(cudaMalloc((void ** )&cu_source_X, sizeof(*cu_source_X) * cu_sources)); cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_X, SIM.source_X, cu_sources)); cudaSafeCall(cudaMalloc((void ** )&cu_source_Y, sizeof(*cu_source_Y) * cu_sources)); cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_Y, SIM.source_Y, cu_sources)); cudaSafeCall(cudaMalloc((void ** )&cu_source_Z, sizeof(*cu_source_Z) * cu_sources)); cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_Z, SIM.source_Z, cu_sources)); cudaSafeCall(cudaMalloc((void ** )&cu_source_I, sizeof(*cu_source_I) * cu_sources)); cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_I, SIM.source_I, cu_sources)); cudaSafeCall(cudaMalloc((void ** )&cu_source_lambda, sizeof(*cu_source_lambda) * cu_sources)); cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_source_lambda, SIM.source_lambda, cu_sources)); cudaSafeCall(cudaMalloc((void ** )&cu_mosaic_umats, sizeof(*cu_mosaic_umats) * cu_mosaic_domains * 9)); cudaSafeCall(cudaMemcpyVectorDoubleToDevice(cu_mosaic_umats, SIM.mosaic_umats, cu_mosaic_domains * 9)); }; exascale_api::~exascale_api(){ cudaSafeCall(cudaSetDevice(SIM.device_Id)); cudaSafeCall(cudaFree(cu_beam_vector)); cudaSafeCall(cudaFree(cu_spindle_vector)); cudaSafeCall(cudaFree(cu_source_X)); cudaSafeCall(cudaFree(cu_source_Y)); cudaSafeCall(cudaFree(cu_source_Z)); cudaSafeCall(cudaFree(cu_source_I)); cudaSafeCall(cudaFree(cu_source_lambda)); cudaSafeCall(cudaFree(cu_a0)); cudaSafeCall(cudaFree(cu_b0)); cudaSafeCall(cudaFree(cu_c0)); cudaSafeCall(cudaFree(cu_mosaic_umats)); cudaSafeCall(cudaFree(cu_polar_vector)); } } // gpu } // simtbx
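The exascale_api helpers above stage every host-side double array through a temporary CUDAREAL buffer before copying it to device memory (cudaMemcpyVectorDoubleToDevice). Below is a minimal, self-contained sketch of that staging-copy pattern under the assumption that CUDAREAL is float; copy_doubles_to_device and the length-4 test vector are illustrative names, not part of simtbx.

#include <cstdio>
#include <cuda_runtime.h>

// Assumed here to be float, as in single-precision builds; the real typedef
// lives in the simtbx/nanoBragg headers.
typedef float CUDAREAL;

// Sketch of the staging pattern: narrow a host double array into a temporary
// CUDAREAL buffer, then push it to the device in one cudaMemcpy.
static cudaError_t copy_doubles_to_device(CUDAREAL* dst, const double* src,
                                          size_t n)
{
    CUDAREAL* temp = new CUDAREAL[n];
    for (size_t i = 0; i < n; i++) temp[i] = static_cast<CUDAREAL>(src[i]);
    cudaError_t ret = cudaMemcpy(dst, temp, sizeof(*dst) * n,
                                 cudaMemcpyHostToDevice);
    delete [] temp;  // buffer came from new[], so release it with delete[]
    return ret;
}

int main()
{
    // Length-4 vector, matching the vec_len = 4 convention used above.
    const size_t n = 4;
    double host_vec[n] = {0.0, 1.0, 2.0, 3.0};

    CUDAREAL* d_vec = NULL;
    cudaMalloc((void**)&d_vec, sizeof(*d_vec) * n);
    cudaError_t err = copy_doubles_to_device(d_vec, host_vec, n);
    printf("copy status: %s\n", cudaGetErrorString(err));
    cudaFree(d_vec);
    return 0;
}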
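The benchmark that follows times each unrolled kernel with CUDA events and reports the kernel's FLOP count divided by the elapsed time (scaled from milliseconds to nanoseconds) as GFLOPS. The sketch below shows that event-timing-to-GFLOPS pattern in isolation; dummy_fma_kernel, the iteration count, and the FLOP accounting are illustrative stand-ins for the generated Add/Mul/MAdd kernels, not code from the benchmark itself.

#include <cstdio>
#include <cuda_runtime.h>

// Illustrative stand-in for the unrolled kernels: each thread performs
// 2 FLOPs (one multiply, one add) per loop iteration.
__global__ void dummy_fma_kernel(float* data, int iters, float a, float b)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    float v = data[i];
    for (int k = 0; k < iters; k++) v = v * a + b;
    data[i] = v;
}

int main()
{
    const int n = 1 << 20, iters = 1000;
    float* d_mem = NULL;
    cudaMalloc((void**)&d_mem, n * sizeof(float));
    cudaMemset(d_mem, 0, n * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    dummy_fma_kernel<<<n / 256, 256>>>(d_mem, iters, 0.999f, 0.001f);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);   // elapsed time in milliseconds
    double ns = ms * 1.e6;                    // same 1e6 scaling as the benchmark: ms -> ns
    double flops = (double)n * 2.0 * iters;   // 2 FLOPs per element per iteration
    printf("%.2f GFLOPS\n", flops / ns);      // FLOPs per nanosecond == GFLOP/s

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_mem);
    return 0;
}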
// Forward Declarations for benchmark kernels __global__ void MAddU(float *target, float val1, float val2); __global__ void MulMAddU(float *target, float val1, float val2); __global__ void MAddU_DP(double *target, double val1, double val2); __global__ void MulMAddU_DP(double *target, double val1, double val2); // Add kernels template <class T> __global__ void Add1(T *data, int nIters, T v); template <class T> __global__ void Add2(T *data, int nIters, T v); template <class T> __global__ void Add4(T *data, int nIters, T v); template <class T> __global__ void Add8(T *data, int nIters, T v); // Mul kernels template <class T> __global__ void Mul1(T *data, int nIters, T v); template <class T> __global__ void Mul2(T *data, int nIters, T v); template <class T> __global__ void Mul4(T *data, int nIters, T v); template <class T> __global__ void Mul8(T *data, int nIters, T v); // MAdd kernels template <class T> __global__ void MAdd1(T *data, int nIters, T v1, T v2); template <class T> __global__ void MAdd2(T *data, int nIters, T v1, T v2); template <class T> __global__ void MAdd4(T *data, int nIters, T v1, T v2); template <class T> __global__ void MAdd8(T *data, int nIters, T v1, T v2); // MulMAdd kernels template <class T> __global__ void MulMAdd1(T *data, int nIters, T v1, T v2); template <class T> __global__ void MulMAdd2(T *data, int nIters, T v1, T v2); template <class T> __global__ void MulMAdd4(T *data, int nIters, T v1, T v2); template <class T> __global__ void MulMAdd8(T *data, int nIters, T v1, T v2); // Forward Declarations // execute simple precision and double precision versions of the benchmarks template <class T> void RunTest(ResultDatabase &resultDB, int npasses, int verbose, int quiet, float repeatF, ProgressBar &pb, const char* precision); // Block size to use in measurements #define BLOCK_SIZE_SP 256 #define BLOCK_SIZE_DP 128 // **************************************************************************** // Function: addBenchmarkSpecOptions // // Purpose: // Add benchmark specific options parsing // // Arguments: // op: the options parser / parameter database // // Returns: nothing // // Programmer: Kyle Spafford // Creation: December 11, 2009 // // Modifications: // // **************************************************************************** void addBenchmarkSpecOptions(OptionParser &op) { } // **************************************************************************** // Function: runBenchmark // // Purpose: // This benchmark measures the max floating point capability of a gpu using // a highly unrolled kernel with a large number of floating point operations. // // Arguments: // resultDB: the benchmark stores its results in this ResultDatabase // op: the options parser / parameter database // // Returns: nothing // // Programmer: Kyle Spafford // Creation: September 08, 2009 // // Modifications: // Jeremy Meredith, Fri May 14 11:23:10 EDT 2010 // Made double precision a copy of SP, with a few tweaks. // Allow any capability at least 1.3 or 2.0 to use double. // // Gabriel Marin, Thu Jan 13, 2010 // Add the auto-generated kernels from the OpenCL implementation. // DP / SP implemented as templates for the new kernels. // Add text progress bar. 
// // **************************************************************************** void RunBenchmark(ResultDatabase &resultDB, OptionParser &op) { bool verbose = op.getOptionBool("verbose"); bool quiet = op.getOptionBool("quiet"); const unsigned int passes = op.getOptionInt("passes"); // Test to see if this device supports double precision int device; cudaGetDevice(&device); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device); bool doDouble = false; if ((deviceProp.major == 1 && deviceProp.minor >= 3) || (deviceProp.major >= 2)) { doDouble = true; } // determine the speed of the device first. This determines the number of // iterations for all kernels. const unsigned int halfBufSize = 1024*1024; unsigned int halfNumFloats = halfBufSize / sizeof(float), numFloats = 2*halfNumFloats; float *gpu_mem, *hostMem; hostMem = new float[numFloats]; cudaMalloc((void**)&gpu_mem, halfBufSize*2); CHECK_CUDA_ERROR(); // Initialize host data, with the first half the same as the second for (int j=0; j<halfNumFloats; ++j) { hostMem[j] = hostMem[numFloats-j-1] = (float)(drand48()*10.0); } // Variables used for timing float t = 0; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); CHECK_CUDA_ERROR(); // copy host memory to GPU memory cudaEventRecord(start, 0); // do I even need this if I do not need the time? cudaMemcpy(gpu_mem, hostMem, halfBufSize*2, cudaMemcpyHostToDevice); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Thread block configuration dim3 threads(BLOCK_SIZE_SP,1,1); dim3 blocks((numFloats)/BLOCK_SIZE_SP,1,1); // Decrease block size for devices with lower compute // capability. Avoids an out of resources error if ((deviceProp.major == 1 && deviceProp.minor <= 2)) { threads.x = 128; blocks.x = (numFloats)/128; } // Benchmark the MulMAdd2 kernel to compute a scaling factor. t = 0.0f; cudaEventRecord(start, 0); MulMAdd2<float><<< blocks, threads >>>(gpu_mem, 10, 3.75, 0.355); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CHECK_CUDA_ERROR(); cudaEventElapsedTime(&t, start, stop); t *= 1.e6; double repeatF = 1.1e07 / (double)t; fprintf (stdout, "Adjust repeat factor = %lg\n", repeatF); delete[] hostMem; cudaFree((void*)gpu_mem); CHECK_CUDA_ERROR(); // Initialize progress bar. We have 16 generic kernels and 2 hand tuned kernels. // Each kernel is executed 'passes' number of times for each single precision and // double precision (if avaialble). 
int totalRuns = 18*passes; if (doDouble) totalRuns <<= 1; // multiply by 2 ProgressBar pb(totalRuns); if (!verbose && !quiet) pb.Show(stdout); // Run single precision kernels RunTest<float> (resultDB, passes, verbose, quiet, repeatF, pb, "-SP"); if (doDouble) RunTest<double> (resultDB, passes, verbose, quiet, repeatF, pb, "-DP"); else { const char atts[] = "DP_Not_Supported"; for (int pas=0 ; pas<passes ; ++pas) { resultDB.AddResult("Add1-DP", atts, "GFLOPS", FLT_MAX); resultDB.AddResult("Add2-DP", atts, "GFLOPS", FLT_MAX); resultDB.AddResult("Add4-DP", atts, "GFLOPS", FLT_MAX); resultDB.AddResult("Add8-DP", atts, "GFLOPS", FLT_MAX); resultDB.AddResult("Mul1-DP", atts, "GFLOPS", FLT_MAX); resultDB.AddResult("Mul2-DP", atts, "GFLOPS", FLT_MAX); resultDB.AddResult("Mul4-DP", atts, "GFLOPS", FLT_MAX); resultDB.AddResult("Mul8-DP", atts, "GFLOPS", FLT_MAX); resultDB.AddResult("MAdd1-DP", atts, "GFLOPS", FLT_MAX); resultDB.AddResult("MAdd2-DP", atts, "GFLOPS", FLT_MAX); resultDB.AddResult("MAdd4-DP", atts, "GFLOPS", FLT_MAX); resultDB.AddResult("MAdd8-DP", atts, "GFLOPS", FLT_MAX); resultDB.AddResult("MulMAdd1-DP", atts, "GFLOPS", FLT_MAX); resultDB.AddResult("MulMAdd2-DP", atts, "GFLOPS", FLT_MAX); resultDB.AddResult("MulMAdd4-DP", atts, "GFLOPS", FLT_MAX); resultDB.AddResult("MulMAdd8-DP", atts, "GFLOPS", FLT_MAX); // we deal with these separately //resultDB.AddResult("MulMAddU-DP", atts, "GFLOPS", FLT_MAX); //resultDB.AddResult("MAddU-DP", atts, "GFLOPS", FLT_MAX); } } // Problem Size int w = 2048, h = 2048; float root2 = 1.4142; if (repeatF<1) while (repeatF*root2<1) { repeatF*=2; if (w>h) w >>= 1; else h >>= 1; } /* When auto-scaling up, we must make sure that we do not exceed some device limit for block size. Disable for now. */ /* else while (repeatF>root2) { repeatF *= 0.5; if (w>h) h <<= 1; else w <<= 1; } */ const int nbytes_sp = w * h * sizeof(float); // Allocate gpu memory float *target_sp; cudaMalloc((void**)&target_sp, nbytes_sp); CHECK_CUDA_ERROR(); // Get a couple non-zero random numbers float val1 = 0, val2 = 0; while (val1==0 || val2==0) { val1 = drand48(); val2 = drand48(); } blocks.x = (w*h)/threads.x; for (int p = 0; p < passes; p++) { t = 0.0f; cudaEventRecord(start, 0); MAddU<<< blocks, threads >>>(target_sp, val1, val2); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CHECK_CUDA_ERROR(); cudaEventElapsedTime(&t, start, stop); t /= 1.e3; // Add result char atts[1024]; long int nflopsPerPixel = ((2*32)*10*10*5) + 61; sprintf(atts, "Size:%d", w*h); resultDB.AddResult("MAddU-SP", atts, "GFLOPS", (((double)nflopsPerPixel)*w*h) / (t*1.e9)); // update progress bar pb.addItersDone(); if (!verbose && !quiet) pb.Show(stdout); cudaEventRecord(start, 0); MulMAddU<<< blocks, threads >>>(target_sp, val1, val2); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CHECK_CUDA_ERROR(); cudaEventElapsedTime(&t, start, stop); t /= 1.e3; // Add result nflopsPerPixel = ((3*8)*10*10*5) + 13; sprintf(atts, "Size:%d",w*h); resultDB.AddResult("MulMAddU-SP", atts, "GFLOPS", (((double)nflopsPerPixel)*w*h) / (t*1.e9)); // update progress bar pb.addItersDone(); if (!verbose && !quiet) pb.Show(stdout); } cudaFree((void*)target_sp); CHECK_CUDA_ERROR(); if (doDouble) { const int nbytes_dp = w * h * sizeof(double); double *target_dp; cudaMalloc((void**)&target_dp, nbytes_dp); CHECK_CUDA_ERROR(); // Thread block configuration dim3 threads(BLOCK_SIZE_DP,1,1); dim3 blocks((w*h)/BLOCK_SIZE_DP,1,1); const unsigned int passes = op.getOptionInt("passes"); for (int p = 0; p < passes; p++) { 
cudaEventRecord(start, 0); MAddU_DP<<< blocks, threads >>>(target_dp, val1, val2); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CHECK_CUDA_ERROR(); cudaEventElapsedTime(&t, start, stop); t /= 1.e3; // Add result char atts[1024]; long int nflopsPerPixel = ((2*32)*10*10*5) + 61; sprintf(atts, "Size:%d", w*h); resultDB.AddResult("MAddU-DP", atts, "GFLOPS", (((double)nflopsPerPixel)*w*h) / (t*1.e9)); // update progress bar pb.addItersDone(); if (!verbose && !quiet) pb.Show(stdout); cudaEventRecord(start, 0); MulMAddU_DP<<< blocks, threads >>>(target_dp, val1, val2); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CHECK_CUDA_ERROR(); cudaEventElapsedTime(&t, start, stop); t /= 1.e3; // Add result nflopsPerPixel = ((3*8)*10*10*5) + 13; sprintf(atts, "Size:%d",w*h); resultDB.AddResult("MulMAddU-DP", atts, "GFLOPS", (((double)nflopsPerPixel)*w*h) / (t*1.e9)); // update progress bar pb.addItersDone(); if (!verbose && !quiet) pb.Show(stdout); } cudaFree((void*)target_dp); CHECK_CUDA_ERROR(); } else { // Add result char atts[1024]; sprintf(atts, "Size:%d", w * h); // resultDB requires neg entry for every possible result const unsigned int passes = op.getOptionInt("passes"); for (int p = 0; p < passes; p++) { resultDB.AddResult("MAddU-DP", atts, "GFLOPS", FLT_MAX); resultDB.AddResult("MulMAddU-DP", atts, "GFLOPS", FLT_MAX); } } if (!verbose) fprintf (stdout, "\n\n"); cudaEventDestroy(start); cudaEventDestroy(stop); } // **************************************************************************** // Function: RunTest // // Purpose: // Template function used for specializing the generic kernels for // single precision and double precision. // // Arguments: // resultDB: the benchmark stores its results in this ResultDatabase // // Returns: nothing // // Programmer: Gabriel Marin // Creation: January 13, 2010 // // **************************************************************************** template <class T> void RunTest(ResultDatabase &resultDB, int npasses, int verbose, int quiet, float repeatF, ProgressBar &pb, const char* precision) { T *gpu_mem; char sizeStr[128]; T *hostMem, *hostMem2; int realRepeats = (int)::round(repeatF*20); if (realRepeats < 2) realRepeats = 2; // Alloc host memory int halfNumFloats = 1024*1024; int numFloats = 2*halfNumFloats; hostMem = new T[numFloats]; hostMem2 = new T[numFloats]; cudaMalloc((void**)&gpu_mem, numFloats*sizeof(T)); CHECK_CUDA_ERROR(); // Variables used for timing float t = 0.0f; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); CHECK_CUDA_ERROR(); // Thread block configuration dim3 threads(128,1,1); dim3 blocks((numFloats)/128,1,1); for (int pass=0 ; pass<npasses ; ++pass) { // Benchmark each generic kernel. Generate new random numbers for each run. ////////// Add1 ////////// // Initialize host data, with the first half the same as the second for (int j=0; j<halfNumFloats; ++j) { hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0); } // copy host memory to GPU memory cudaEventRecord(start, 0); // do I even need this if I do not need the time? 
cudaMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), cudaMemcpyHostToDevice); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Execute the Add1 kernel t = 0.0f; cudaEventRecord(start, 0); Add1<T><<< blocks, threads >>>(gpu_mem, realRepeats, 10.0); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CHECK_CUDA_ERROR(); cudaEventElapsedTime(&t, start, stop); t *= 1.e6; // flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams double flopCount = (double)numFloats * 1 * realRepeats * 240 * 1; double gflop = flopCount / (double)(t); sprintf (sizeStr, "Size:%07d", numFloats); resultDB.AddResult(string("Add1")+precision, sizeStr, "GFLOPS", gflop); // Zero out the test host memory for (int j=0 ; j<numFloats ; ++j) hostMem2[j] = 0.0; // Read the result device memory back to the host cudaEventRecord(start, 0); // do I even need this if I do not need the time? cudaMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Check the result -- At a minimum the first half of memory // should match the second half exactly for (int j=0 ; j<halfNumFloats ; ++j) { if (hostMem2[j] != hostMem2[numFloats-j-1]) { cout << "Error; hostMem2[" << j << "]=" << hostMem2[j] << " is different from its twin element hostMem2[" << (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1] <<"; stopping check\n"; break; } } // update progress bar pb.addItersDone(); if (!verbose && !quiet) pb.Show(stdout); ////////// Add2 ////////// // Initialize host data, with the first half the same as the second for (int j=0; j<halfNumFloats; ++j) { hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0); } // copy host memory to GPU memory cudaEventRecord(start, 0); // do I even need this if I do not need the time? cudaMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), cudaMemcpyHostToDevice); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Execute the Add2 kernel t = 0.0f; cudaEventRecord(start, 0); Add2<T><<< blocks, threads >>>(gpu_mem, realRepeats, 10.0); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CHECK_CUDA_ERROR(); cudaEventElapsedTime(&t, start, stop); t *= 1.e6; // flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams flopCount = (double)numFloats * 1 * realRepeats * 120 * 2; gflop = flopCount / (double)(t); sprintf (sizeStr, "Size:%07d", numFloats); resultDB.AddResult(string("Add2")+precision, sizeStr, "GFLOPS", gflop); // Zero out the test host memory for (int j=0 ; j<numFloats ; ++j) hostMem2[j] = 0.0; // Read the result device memory back to the host cudaEventRecord(start, 0); // do I even need this if I do not need the time? 
cudaMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Check the result -- At a minimum the first half of memory // should match the second half exactly for (int j=0 ; j<halfNumFloats ; ++j) { if (hostMem2[j] != hostMem2[numFloats-j-1]) { cout << "Error; hostMem2[" << j << "]=" << hostMem2[j] << " is different from its twin element hostMem2[" << (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1] <<"; stopping check\n"; break; } } // update progress bar pb.addItersDone(); if (!verbose && !quiet) pb.Show(stdout); ////////// Add4 ////////// // Initialize host data, with the first half the same as the second for (int j=0; j<halfNumFloats; ++j) { hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0); } // copy host memory to GPU memory cudaEventRecord(start, 0); // do I even need this if I do not need the time? cudaMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), cudaMemcpyHostToDevice); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Execute the Add4 kernel t = 0.0f; cudaEventRecord(start, 0); Add4<T><<< blocks, threads >>>(gpu_mem, realRepeats, 10.0); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CHECK_CUDA_ERROR(); cudaEventElapsedTime(&t, start, stop); t *= 1.e6; // flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams flopCount = (double)numFloats * 1 * realRepeats * 60 * 4; gflop = flopCount / (double)(t); sprintf (sizeStr, "Size:%07d", numFloats); resultDB.AddResult(string("Add4")+precision, sizeStr, "GFLOPS", gflop); // Zero out the test host memory for (int j=0 ; j<numFloats ; ++j) hostMem2[j] = 0.0; // Read the result device memory back to the host cudaEventRecord(start, 0); // do I even need this if I do not need the time? cudaMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Check the result -- At a minimum the first half of memory // should match the second half exactly for (int j=0 ; j<halfNumFloats ; ++j) { if (hostMem2[j] != hostMem2[numFloats-j-1]) { cout << "Error; hostMem2[" << j << "]=" << hostMem2[j] << " is different from its twin element hostMem2[" << (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1] <<"; stopping check\n"; break; } } // update progress bar pb.addItersDone(); if (!verbose && !quiet) pb.Show(stdout); ////////// Add8 ////////// // Initialize host data, with the first half the same as the second for (int j=0; j<halfNumFloats; ++j) { hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0); } // copy host memory to GPU memory cudaEventRecord(start, 0); // do I even need this if I do not need the time? 
cudaMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), cudaMemcpyHostToDevice); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Execute the Add8 kernel t = 0.0f; cudaEventRecord(start, 0); Add8<T><<< blocks, threads >>>(gpu_mem, realRepeats, 10.0); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CHECK_CUDA_ERROR(); cudaEventElapsedTime(&t, start, stop); t *= 1.e6; // flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams flopCount = (double)numFloats * 1 * realRepeats * 30 * 8; gflop = flopCount / (double)(t); sprintf (sizeStr, "Size:%07d", numFloats); resultDB.AddResult(string("Add8")+precision, sizeStr, "GFLOPS", gflop); // Zero out the test host memory for (int j=0 ; j<numFloats ; ++j) hostMem2[j] = 0.0; // Read the result device memory back to the host cudaEventRecord(start, 0); // do I even need this if I do not need the time? cudaMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Check the result -- At a minimum the first half of memory // should match the second half exactly for (int j=0 ; j<halfNumFloats ; ++j) { if (hostMem2[j] != hostMem2[numFloats-j-1]) { cout << "Error; hostMem2[" << j << "]=" << hostMem2[j] << " is different from its twin element hostMem2[" << (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1] <<"; stopping check\n"; break; } } // update progress bar pb.addItersDone(); if (!verbose && !quiet) pb.Show(stdout); ////////// Mul1 ////////// // Initialize host data, with the first half the same as the second for (int j=0; j<halfNumFloats; ++j) { hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0); } // copy host memory to GPU memory cudaEventRecord(start, 0); // do I even need this if I do not need the time? cudaMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), cudaMemcpyHostToDevice); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Execute the Mul1 kernel t = 0.0f; cudaEventRecord(start, 0); Mul1<T><<< blocks, threads >>>(gpu_mem, realRepeats, 1.01); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CHECK_CUDA_ERROR(); cudaEventElapsedTime(&t, start, stop); t *= 1.e6; // flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams flopCount = (double)numFloats * 2 * realRepeats * 200 * 1; gflop = flopCount / (double)(t); sprintf (sizeStr, "Size:%07d", numFloats); resultDB.AddResult(string("Mul1")+precision, sizeStr, "GFLOPS", gflop); // Zero out the test host memory for (int j=0 ; j<numFloats ; ++j) hostMem2[j] = 0.0; // Read the result device memory back to the host cudaEventRecord(start, 0); // do I even need this if I do not need the time? 
cudaMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Check the result -- At a minimum the first half of memory // should match the second half exactly for (int j=0 ; j<halfNumFloats ; ++j) { if (hostMem2[j] != hostMem2[numFloats-j-1]) { cout << "Error; hostMem2[" << j << "]=" << hostMem2[j] << " is different from its twin element hostMem2[" << (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1] <<"; stopping check\n"; break; } } // update progress bar pb.addItersDone(); if (!verbose && !quiet) pb.Show(stdout); ////////// Mul2 ////////// // Initialize host data, with the first half the same as the second for (int j=0; j<halfNumFloats; ++j) { hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0); } // copy host memory to GPU memory cudaEventRecord(start, 0); // do I even need this if I do not need the time? cudaMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), cudaMemcpyHostToDevice); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Execute the Mul2 kernel t = 0.0f; cudaEventRecord(start, 0); Mul2<T><<< blocks, threads >>>(gpu_mem, realRepeats, 1.01); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CHECK_CUDA_ERROR(); cudaEventElapsedTime(&t, start, stop); t *= 1.e6; // flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams flopCount = (double)numFloats * 2 * realRepeats * 100 * 2; gflop = flopCount / (double)(t); sprintf (sizeStr, "Size:%07d", numFloats); resultDB.AddResult(string("Mul2")+precision, sizeStr, "GFLOPS", gflop); // Zero out the test host memory for (int j=0 ; j<numFloats ; ++j) hostMem2[j] = 0.0; // Read the result device memory back to the host cudaEventRecord(start, 0); // do I even need this if I do not need the time? cudaMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Check the result -- At a minimum the first half of memory // should match the second half exactly for (int j=0 ; j<halfNumFloats ; ++j) { if (hostMem2[j] != hostMem2[numFloats-j-1]) { cout << "Error; hostMem2[" << j << "]=" << hostMem2[j] << " is different from its twin element hostMem2[" << (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1] <<"; stopping check\n"; break; } } // update progress bar pb.addItersDone(); if (!verbose && !quiet) pb.Show(stdout); ////////// Mul4 ////////// // Initialize host data, with the first half the same as the second for (int j=0; j<halfNumFloats; ++j) { hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0); } // copy host memory to GPU memory cudaEventRecord(start, 0); // do I even need this if I do not need the time? 
cudaMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), cudaMemcpyHostToDevice); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Execute the Mul4 kernel t = 0.0f; cudaEventRecord(start, 0); Mul4<T><<< blocks, threads >>>(gpu_mem, realRepeats, 1.01); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CHECK_CUDA_ERROR(); cudaEventElapsedTime(&t, start, stop); t *= 1.e6; // flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams flopCount = (double)numFloats * 2 * realRepeats * 50 * 4; gflop = flopCount / (double)(t); sprintf (sizeStr, "Size:%07d", numFloats); resultDB.AddResult(string("Mul4")+precision, sizeStr, "GFLOPS", gflop); // Zero out the test host memory for (int j=0 ; j<numFloats ; ++j) hostMem2[j] = 0.0; // Read the result device memory back to the host cudaEventRecord(start, 0); // do I even need this if I do not need the time? cudaMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Check the result -- At a minimum the first half of memory // should match the second half exactly for (int j=0 ; j<halfNumFloats ; ++j) { if (hostMem2[j] != hostMem2[numFloats-j-1]) { cout << "Error; hostMem2[" << j << "]=" << hostMem2[j] << " is different from its twin element hostMem2[" << (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1] <<"; stopping check\n"; break; } } // update progress bar pb.addItersDone(); if (!verbose && !quiet) pb.Show(stdout); ////////// Mul8 ////////// // Initialize host data, with the first half the same as the second for (int j=0; j<halfNumFloats; ++j) { hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0); } // copy host memory to GPU memory cudaEventRecord(start, 0); // do I even need this if I do not need the time? cudaMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), cudaMemcpyHostToDevice); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Execute the Mul8 kernel t = 0.0f; cudaEventRecord(start, 0); Mul8<T><<< blocks, threads >>>(gpu_mem, realRepeats, 1.01); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CHECK_CUDA_ERROR(); cudaEventElapsedTime(&t, start, stop); t *= 1.e6; // flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams flopCount = (double)numFloats * 2 * realRepeats * 25 * 8; gflop = flopCount / (double)(t); sprintf (sizeStr, "Size:%07d", numFloats); resultDB.AddResult(string("Mul8")+precision, sizeStr, "GFLOPS", gflop); // Zero out the test host memory for (int j=0 ; j<numFloats ; ++j) hostMem2[j] = 0.0; // Read the result device memory back to the host cudaEventRecord(start, 0); // do I even need this if I do not need the time? 
cudaMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Check the result -- At a minimum the first half of memory // should match the second half exactly for (int j=0 ; j<halfNumFloats ; ++j) { if (hostMem2[j] != hostMem2[numFloats-j-1]) { cout << "Error; hostMem2[" << j << "]=" << hostMem2[j] << " is different from its twin element hostMem2[" << (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1] <<"; stopping check\n"; break; } } // update progress bar pb.addItersDone(); if (!verbose && !quiet) pb.Show(stdout); ////////// MAdd1 ////////// // Initialize host data, with the first half the same as the second for (int j=0; j<halfNumFloats; ++j) { hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0); } // copy host memory to GPU memory cudaEventRecord(start, 0); // do I even need this if I do not need the time? cudaMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), cudaMemcpyHostToDevice); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Execute the MAdd1 kernel t = 0.0f; cudaEventRecord(start, 0); MAdd1<T><<< blocks, threads >>>(gpu_mem, realRepeats, 10.0, 0.9899); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CHECK_CUDA_ERROR(); cudaEventElapsedTime(&t, start, stop); t *= 1.e6; // flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams flopCount = (double)numFloats * 2 * realRepeats * 240 * 1; gflop = flopCount / (double)(t); sprintf (sizeStr, "Size:%07d", numFloats); resultDB.AddResult(string("MAdd1")+precision, sizeStr, "GFLOPS", gflop); // Zero out the test host memory for (int j=0 ; j<numFloats ; ++j) hostMem2[j] = 0.0; // Read the result device memory back to the host cudaEventRecord(start, 0); // do I even need this if I do not need the time? cudaMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Check the result -- At a minimum the first half of memory // should match the second half exactly for (int j=0 ; j<halfNumFloats ; ++j) { if (hostMem2[j] != hostMem2[numFloats-j-1]) { cout << "Error; hostMem2[" << j << "]=" << hostMem2[j] << " is different from its twin element hostMem2[" << (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1] <<"; stopping check\n"; break; } } // update progress bar pb.addItersDone(); if (!verbose && !quiet) pb.Show(stdout); ////////// MAdd2 ////////// // Initialize host data, with the first half the same as the second for (int j=0; j<halfNumFloats; ++j) { hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0); } // copy host memory to GPU memory cudaEventRecord(start, 0); // do I even need this if I do not need the time? 
cudaMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), cudaMemcpyHostToDevice); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Execute the MAdd2 kernel t = 0.0f; cudaEventRecord(start, 0); MAdd2<T><<< blocks, threads >>>(gpu_mem, realRepeats, 10.0, 0.9899); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CHECK_CUDA_ERROR(); cudaEventElapsedTime(&t, start, stop); t *= 1.e6; // flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams flopCount = (double)numFloats * 2 * realRepeats * 120 * 2; gflop = flopCount / (double)(t); sprintf (sizeStr, "Size:%07d", numFloats); resultDB.AddResult(string("MAdd2")+precision, sizeStr, "GFLOPS", gflop); // Zero out the test host memory for (int j=0 ; j<numFloats ; ++j) hostMem2[j] = 0.0; // Read the result device memory back to the host cudaEventRecord(start, 0); // do I even need this if I do not need the time? cudaMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Check the result -- At a minimum the first half of memory // should match the second half exactly for (int j=0 ; j<halfNumFloats ; ++j) { if (hostMem2[j] != hostMem2[numFloats-j-1]) { cout << "Error; hostMem2[" << j << "]=" << hostMem2[j] << " is different from its twin element hostMem2[" << (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1] <<"; stopping check\n"; break; } } // update progress bar pb.addItersDone(); if (!verbose && !quiet) pb.Show(stdout); ////////// MAdd4 ////////// // Initialize host data, with the first half the same as the second for (int j=0; j<halfNumFloats; ++j) { hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0); } // copy host memory to GPU memory cudaEventRecord(start, 0); // do I even need this if I do not need the time? cudaMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), cudaMemcpyHostToDevice); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Execute the MAdd4 kernel t = 0.0f; cudaEventRecord(start, 0); MAdd4<T><<< blocks, threads >>>(gpu_mem, realRepeats, 10.0, 0.9899); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CHECK_CUDA_ERROR(); cudaEventElapsedTime(&t, start, stop); t *= 1.e6; // flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams flopCount = (double)numFloats * 2 * realRepeats * 60 * 4; gflop = flopCount / (double)(t); sprintf (sizeStr, "Size:%07d", numFloats); resultDB.AddResult(string("MAdd4")+precision, sizeStr, "GFLOPS", gflop); // Zero out the test host memory for (int j=0 ; j<numFloats ; ++j) hostMem2[j] = 0.0; // Read the result device memory back to the host cudaEventRecord(start, 0); // do I even need this if I do not need the time? 
cudaMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Check the result -- At a minimum the first half of memory // should match the second half exactly for (int j=0 ; j<halfNumFloats ; ++j) { if (hostMem2[j] != hostMem2[numFloats-j-1]) { cout << "Error; hostMem2[" << j << "]=" << hostMem2[j] << " is different from its twin element hostMem2[" << (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1] <<"; stopping check\n"; break; } } // update progress bar pb.addItersDone(); if (!verbose && !quiet) pb.Show(stdout); ////////// MAdd8 ////////// // Initialize host data, with the first half the same as the second for (int j=0; j<halfNumFloats; ++j) { hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0); } // copy host memory to GPU memory cudaEventRecord(start, 0); // do I even need this if I do not need the time? cudaMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), cudaMemcpyHostToDevice); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Execute the MAdd8 kernel t = 0.0f; cudaEventRecord(start, 0); MAdd8<T><<< blocks, threads >>>(gpu_mem, realRepeats, 10.0, 0.9899); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CHECK_CUDA_ERROR(); cudaEventElapsedTime(&t, start, stop); t *= 1.e6; // flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams flopCount = (double)numFloats * 2 * realRepeats * 30 * 8; gflop = flopCount / (double)(t); sprintf (sizeStr, "Size:%07d", numFloats); resultDB.AddResult(string("MAdd8")+precision, sizeStr, "GFLOPS", gflop); // Zero out the test host memory for (int j=0 ; j<numFloats ; ++j) hostMem2[j] = 0.0; // Read the result device memory back to the host cudaEventRecord(start, 0); // do I even need this if I do not need the time? cudaMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Check the result -- At a minimum the first half of memory // should match the second half exactly for (int j=0 ; j<halfNumFloats ; ++j) { if (hostMem2[j] != hostMem2[numFloats-j-1]) { cout << "Error; hostMem2[" << j << "]=" << hostMem2[j] << " is different from its twin element hostMem2[" << (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1] <<"; stopping check\n"; break; } } // update progress bar pb.addItersDone(); if (!verbose && !quiet) pb.Show(stdout); ////////// MulMAdd1 ////////// // Initialize host data, with the first half the same as the second for (int j=0; j<halfNumFloats; ++j) { hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0); } // copy host memory to GPU memory cudaEventRecord(start, 0); // do I even need this if I do not need the time? 
cudaMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), cudaMemcpyHostToDevice); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Execute the MulMAdd1 kernel t = 0.0f; cudaEventRecord(start, 0); MulMAdd1<T><<< blocks, threads >>>(gpu_mem, realRepeats, 3.75, 0.355); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CHECK_CUDA_ERROR(); cudaEventElapsedTime(&t, start, stop); t *= 1.e6; // flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams flopCount = (double)numFloats * 3 * realRepeats * 160 * 1; gflop = flopCount / (double)(t); sprintf (sizeStr, "Size:%07d", numFloats); resultDB.AddResult(string("MulMAdd1")+precision, sizeStr, "GFLOPS", gflop); // Zero out the test host memory for (int j=0 ; j<numFloats ; ++j) hostMem2[j] = 0.0; // Read the result device memory back to the host cudaEventRecord(start, 0); // do I even need this if I do not need the time? cudaMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Check the result -- At a minimum the first half of memory // should match the second half exactly for (int j=0 ; j<halfNumFloats ; ++j) { if (hostMem2[j] != hostMem2[numFloats-j-1]) { cout << "Error; hostMem2[" << j << "]=" << hostMem2[j] << " is different from its twin element hostMem2[" << (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1] <<"; stopping check\n"; break; } } // update progress bar pb.addItersDone(); if (!verbose && !quiet) pb.Show(stdout); ////////// MulMAdd2 ////////// // Initialize host data, with the first half the same as the second for (int j=0; j<halfNumFloats; ++j) { hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0); } // copy host memory to GPU memory cudaEventRecord(start, 0); // do I even need this if I do not need the time? cudaMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), cudaMemcpyHostToDevice); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Execute the MulMAdd2 kernel t = 0.0f; cudaEventRecord(start, 0); MulMAdd2<T><<< blocks, threads >>>(gpu_mem, realRepeats, 3.75, 0.355); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CHECK_CUDA_ERROR(); cudaEventElapsedTime(&t, start, stop); t *= 1.e6; // flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams flopCount = (double)numFloats * 3 * realRepeats * 80 * 2; gflop = flopCount / (double)(t); sprintf (sizeStr, "Size:%07d", numFloats); resultDB.AddResult(string("MulMAdd2")+precision, sizeStr, "GFLOPS", gflop); // Zero out the test host memory for (int j=0 ; j<numFloats ; ++j) hostMem2[j] = 0.0; // Read the result device memory back to the host cudaEventRecord(start, 0); // do I even need this if I do not need the time? 
cudaMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Check the result -- At a minimum the first half of memory // should match the second half exactly for (int j=0 ; j<halfNumFloats ; ++j) { if (hostMem2[j] != hostMem2[numFloats-j-1]) { cout << "Error; hostMem2[" << j << "]=" << hostMem2[j] << " is different from its twin element hostMem2[" << (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1] <<"; stopping check\n"; break; } } // update progress bar pb.addItersDone(); if (!verbose && !quiet) pb.Show(stdout); ////////// MulMAdd4 ////////// // Initialize host data, with the first half the same as the second for (int j=0; j<halfNumFloats; ++j) { hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0); } // copy host memory to GPU memory cudaEventRecord(start, 0); // do I even need this if I do not need the time? cudaMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), cudaMemcpyHostToDevice); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Execute the MulMAdd4 kernel t = 0.0f; cudaEventRecord(start, 0); MulMAdd4<T><<< blocks, threads >>>(gpu_mem, realRepeats, 3.75, 0.355); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CHECK_CUDA_ERROR(); cudaEventElapsedTime(&t, start, stop); t *= 1.e6; // flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams flopCount = (double)numFloats * 3 * realRepeats * 40 * 4; gflop = flopCount / (double)(t); sprintf (sizeStr, "Size:%07d", numFloats); resultDB.AddResult(string("MulMAdd4")+precision, sizeStr, "GFLOPS", gflop); // Zero out the test host memory for (int j=0 ; j<numFloats ; ++j) hostMem2[j] = 0.0; // Read the result device memory back to the host cudaEventRecord(start, 0); // do I even need this if I do not need the time? cudaMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Check the result -- At a minimum the first half of memory // should match the second half exactly for (int j=0 ; j<halfNumFloats ; ++j) { if (hostMem2[j] != hostMem2[numFloats-j-1]) { cout << "Error; hostMem2[" << j << "]=" << hostMem2[j] << " is different from its twin element hostMem2[" << (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1] <<"; stopping check\n"; break; } } // update progress bar pb.addItersDone(); if (!verbose && !quiet) pb.Show(stdout); ////////// MulMAdd8 ////////// // Initialize host data, with the first half the same as the second for (int j=0; j<halfNumFloats; ++j) { hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0); } // copy host memory to GPU memory cudaEventRecord(start, 0); // do I even need this if I do not need the time? 
cudaMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), cudaMemcpyHostToDevice); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Execute the MulMAdd8 kernel t = 0.0f; cudaEventRecord(start, 0); MulMAdd8<T><<< blocks, threads >>>(gpu_mem, realRepeats, 3.75, 0.355); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CHECK_CUDA_ERROR(); cudaEventElapsedTime(&t, start, stop); t *= 1.e6; // flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams flopCount = (double)numFloats * 3 * realRepeats * 20 * 8; gflop = flopCount / (double)(t); sprintf (sizeStr, "Size:%07d", numFloats); resultDB.AddResult(string("MulMAdd8")+precision, sizeStr, "GFLOPS", gflop); // Zero out the test host memory for (int j=0 ; j<numFloats ; ++j) hostMem2[j] = 0.0; // Read the result device memory back to the host cudaEventRecord(start, 0); // do I even need this if I do not need the time? cudaMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // Check the result -- At a minimum the first half of memory // should match the second half exactly for (int j=0 ; j<halfNumFloats ; ++j) { if (hostMem2[j] != hostMem2[numFloats-j-1]) { cout << "Error; hostMem2[" << j << "]=" << hostMem2[j] << " is different from its twin element hostMem2[" << (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1] <<"; stopping check\n"; break; } } // update progress bar pb.addItersDone(); if (!verbose && !quiet) pb.Show(stdout); } delete[] hostMem; delete[] hostMem2; cudaFree((void*)gpu_mem); CHECK_CUDA_ERROR(); cudaEventDestroy(start); cudaEventDestroy(stop); } // Macros used to construct MaxFlops kernels // Each mad OP is 32*2 = 64 FLOPS #define OP { \ s0 = s6*s5 + s28; \ s1 = s7*s6 + s29; \ s2 = s8*s7 + s30; \ s3 = s9*s8 + s31; \ s4 = s10*s9 + s0; \ s5 = s11*s10 + s1; \ s6 = s12*s11 + s2; \ s7 = s13*s12 + s3; \ s8 = s14*s13 + s4; \ s9 = s15*s14 + s5; \ s10 = s16*s15 + s6; \ s11 = s17*s16 + s7; \ s12 = s18*s17 + s8; \ s13 = s19*s18 + s9; \ s14 = s20*s19 + s10; \ s15 = s21*s20 + s11; \ s16 = s22*s21 + s12; \ s17 = s23*s22 + s13; \ s18 = s24*s23 + s14; \ s19 = s25*s24 + s15; \ s20 = s26*s25 + s16; \ s21 = s27*s26 + s17; \ s22 = s28*s27 + s18; \ s23 = s29*s28 + s19; \ s24 = s30*s29 + s20; \ s25 = s31*s30 + s21; \ s26 = s0*s31 + s22; \ s27 = s1*s0 + s23; \ s28 = s2*s1 + s24; \ s29 = s3*s2 + s25; \ s30 = s4*s3 + s26; \ s31 = s5*s4 + s27; \ } // so Each OP10 is 640 FLOPS #define OP10 { OP OP OP OP OP OP OP OP OP OP } // Each mad+mul MMOP is 8*3 = 24 FLOPS #define MMOP { \ s0 = s4*s4 + s4; \ s6 = s0*s5; \ s1 = s5*s5 + s5; \ s7 = s1*s6; \ s2 = s6*s6 + s6; \ s0 = s2*s7; \ s3 = s7*s7 + s7; \ s1 = s3*s0; \ s4 = s0*s0 + s0; \ s2 = s4*s1; \ s5 = s1*s1 + s1; \ s3 = s5*s2; \ s6 = s2*s2 + s2; \ s4 = s6*s3; \ s7 = s3*s3 + s3; \ s5 = s7*s4; \ } // So each OP10 is 240 FLOPS #define MMOP10 { MMOP MMOP MMOP MMOP MMOP MMOP MMOP MMOP MMOP MMOP } // Benchmark Kernels __global__ void MAddU(float *target, float val1, float val2) { int index = blockIdx.x*blockDim.x + threadIdx.x; // Create a bunch of local variables we can use up to 32 steps.. 
register float v0=val1, v1=val2, v2=v0+v1, v3=v0+v2; register float v4=v0+v3, v5=v0+v4, v6=v0+v5, v7=v0+v6; register float v8=v0+v7, v9=v0+v8, v10=v0+v9, v11=v0+v10; register float v12=v0+v11, v13=v0+v12, v14=v0+v13, v15=v0+v14; register float v16=v0+v15, v17=v16+v0, v18=v16+v1, v19=v16+v2; register float v20=v16+v3, v21=v16+v4, v22=v16+v5, v23=v16+v6; register float v24=v16+v7, v25=v16+v8, v26=v16+v9, v27=v16+v10; register float v28=v16+v11, v29=v16+v12, v30=v16+v13, v31=v16+v14; register float s0=v0, s1=v1, s2=v2, s3=v3; register float s4=v4, s5=v5, s6=v6, s7=v7; register float s8=v8, s9=v9, s10=v10, s11=v11; register float s12=v12, s13=v13, s14=v14, s15=v15; register float s16=v16, s17=v17, s18=v18, s19=v19; register float s20=v20, s21=v21, s22=v22, s23=v23; register float s24=v24, s25=v25, s26=v26, s27=v27; register float s28=v28, s29=v29, s30=v30, s31=v31; // 10 OP10s inside the loop = 6400 FLOPS in the .ptx code // and 5 loops of 10 OP10s = 32000 FLOPS per pixel total for (int i=0; i<5; i++) { OP10; OP10; OP10; OP10; OP10; OP10; OP10; OP10; OP10; OP10; } float result = (s0+s1+s2+s3+s4+s5+s6+s7+ s8+s9+s10+s11+s12+s13+s14+s15 + s16+s17+s18+s19+s20+s21+s22+s23+ s24+s25+s26+s27+s28+s29+s30+s31); target[index] = result; } __global__ void MAddU_DP(double *target, double val1, double val2) { int index = blockIdx.x*blockDim.x + threadIdx.x; register double v0=val1, v1=val2, v2=v0+v1, v3=v0+v2; register double v4=v0+v3, v5=v0+v4, v6=v0+v5, v7=v0+v6; register double v8=v0+v7, v9=v0+v8, v10=v0+v9, v11=v0+v10; register double v12=v0+v11, v13=v0+v12, v14=v0+v13, v15=v0+v14; register double v16=v0+v15, v17=v16+v0, v18=v16+v1, v19=v16+v2; register double v20=v16+v3, v21=v16+v4, v22=v16+v5, v23=v16+v6; register double v24=v16+v7, v25=v16+v8, v26=v16+v9, v27=v16+v10; register double v28=v16+v11, v29=v16+v12, v30=v16+v13, v31=v16+v14; register double s0=v0, s1=v1, s2=v2, s3=v3; register double s4=v4, s5=v5, s6=v6, s7=v7; register double s8=v8, s9=v9, s10=v10, s11=v11; register double s12=v12, s13=v13, s14=v14, s15=v15; register double s16=v16, s17=v17, s18=v18, s19=v19; register double s20=v20, s21=v21, s22=v22, s23=v23; register double s24=v24, s25=v25, s26=v26, s27=v27; register double s28=v28, s29=v29, s30=v30, s31=v31; // 10 OP10s inside the loop = 6400 FLOPS in the .ptx code // and 5 loops of 10 OP10s = 32000 FLOPS per pixel total for (int i=0; i<5; i++) { OP10; OP10; OP10; OP10; OP10; OP10; OP10; OP10; OP10; OP10; } double result = (s0+s1+s2+s3+s4+s5+s6+s7+ s8+s9+s10+s11+s12+s13+s14+s15 + s16+s17+s18+s19+s20+s21+s22+s23+ s24+s25+s26+s27+s28+s29+s30+s31); target[index] = result; } __global__ void MulMAddU(float *target, float val1, float val2) { int index = blockIdx.x*blockDim.x + threadIdx.x; register float v0=val1, v1=val2, v2=v0+v1, v3=v0+v2; register float v4=v0+v3, v5=v0+v4, v6=v0+v5, v7=v0+v6; register float v8=v0+v7, v9=v0+v8, v10=v0+v9, v11=v0+v10; register float v12=v0+v11, v13=v0+v12, v14=v0+v13, v15=v0+v14; register float v16=v0+v15, v17=v16+v0, v18=v16+v1, v19=v16+v2; register float v20=v16+v3, v21=v16+v4, v22=v16+v5, v23=v16+v6; register float v24=v16+v7, v25=v16+v8, v26=v16+v9, v27=v16+v10; register float v28=v16+v11, v29=v16+v12, v30=v16+v13, v31=v16+v14; register float s0=v0, s1=v1, s2=v2, s3=v3; register float s4=v4, s5=v5, s6=v6, s7=v7; register float s8=v8, s9=v9, s10=v10, s11=v11; register float s12=v12, s13=v13, s14=v14, s15=v15; register float s16=v16, s17=v17, s18=v18, s19=v19; register float s20=v20, s21=v21, s22=v22, s23=v23; register float s24=v24, s25=v25, s26=v26, 
s27=v27; register float s28=v28, s29=v29, s30=v30, s31=v31; // 10 OP10s inside the loop = 2400 FLOPS in the .ptx code // and 5 loops of 10 OP10s = 12000 FLOPS per pixel total for (int i=0; i<5; i++) { MMOP10; MMOP10; MMOP10; MMOP10; MMOP10; MMOP10; MMOP10; MMOP10; MMOP10; MMOP10; } float result = (s0+s1+s2+s3+s4+s5+s6+s7+ s8+s9+s10+s11+s12+s13+s14+s15 + s16+s17+s18+s19+s20+s21+s22+s23+ s24+s25+s26+s27+s28+s29+s30+s31); target[index] = result; } __global__ void MulMAddU_DP(double *target, double val1, double val2) { int index = blockIdx.x*blockDim.x + threadIdx.x; register double v0=val1, v1=val2, v2=v0+v1, v3=v0+v2; register double v4=v0+v3, v5=v0+v4, v6=v0+v5, v7=v0+v6; register double v8=v0+v7, v9=v0+v8, v10=v0+v9, v11=v0+v10; register double v12=v0+v11, v13=v0+v12, v14=v0+v13, v15=v0+v14; register double v16=v0+v15, v17=v16+v0, v18=v16+v1, v19=v16+v2; register double v20=v16+v3, v21=v16+v4, v22=v16+v5, v23=v16+v6; register double v24=v16+v7, v25=v16+v8, v26=v16+v9, v27=v16+v10; register double v28=v16+v11, v29=v16+v12, v30=v16+v13, v31=v16+v14; register double s0=v0, s1=v1, s2=v2, s3=v3; register double s4=v4, s5=v5, s6=v6, s7=v7; register double s8=v8, s9=v9, s10=v10, s11=v11; register double s12=v12, s13=v13, s14=v14, s15=v15; register double s16=v16, s17=v17, s18=v18, s19=v19; register double s20=v20, s21=v21, s22=v22, s23=v23; register double s24=v24, s25=v25, s26=v26, s27=v27; register double s28=v28, s29=v29, s30=v30, s31=v31; // 10 OP10s inside the loop = 2400 FLOPS in the .ptx code // and 5 loops of 10 OP10s = 12000 FLOPS per pixel total for (int i=0; i<5; i++) { MMOP10; MMOP10; MMOP10; MMOP10; MMOP10; MMOP10; MMOP10; MMOP10; MMOP10; MMOP10; } double result = (s0+s1+s2+s3+s4+s5+s6+s7+ s8+s9+s10+s11+s12+s13+s14+s15 + s16+s17+s18+s19+s20+s21+s22+s23+ s24+s25+s26+s27+s28+s29+s30+s31); target[index] = result; } // v = 10.0 #define ADD1_OP s=v-s; #define ADD2_OP ADD1_OP s2=v-s2; #define ADD4_OP ADD2_OP s3=v-s3; s4=v-s4; #define ADD8_OP ADD4_OP s5=v-s5; s6=v-s6; s7=v-s7; s8=v-s8; // v = 1.01 #define MUL1_OP s=s*s*v; #define MUL2_OP MUL1_OP s2=s2*s2*v; #define MUL4_OP MUL2_OP s3=s3*s3*v; s4=s4*s4*v; #define MUL8_OP MUL4_OP s5=s5*s5*v; s6=s6*s6*v; s7=s7*s7*v; s8=s8*s8*v; // v1 = 10.0, v2 = 0.9899 #define MADD1_OP s=v1-s*v2; #define MADD2_OP MADD1_OP s2=v1-s2*v2; #define MADD4_OP MADD2_OP s3=v1-s3*v2; s4=v1-s4*v2; #define MADD8_OP MADD4_OP s5=v1-s5*v2; s6=v1-s6*v2; s7=v1-s7*v2; s8=v1-s8*v2; // v1 = 3.75, v2 = 0.355 #define MULMADD1_OP s=(v1-v2*s)*s; #define MULMADD2_OP MULMADD1_OP s2=(v1-v2*s2)*s2; #define MULMADD4_OP MULMADD2_OP s3=(v1-v2*s3)*s3; s4=(v1-v2*s4)*s4; #define MULMADD8_OP MULMADD4_OP s5=(v1-v2*s5)*s5; s6=(v1-v2*s6)*s6; s7=(v1-v2*s7)*s7; s8=(v1-v2*s8)*s8; #define ADD1_MOP20 \ ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP \ ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP #define ADD2_MOP20 \ ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP \ ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP #define ADD4_MOP10 \ ADD4_OP ADD4_OP ADD4_OP ADD4_OP ADD4_OP \ ADD4_OP ADD4_OP ADD4_OP ADD4_OP ADD4_OP #define ADD8_MOP5 \ ADD8_OP ADD8_OP ADD8_OP ADD8_OP ADD8_OP #define MUL1_MOP20 \ MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP \ MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP #define MUL2_MOP20 \ MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP \ MUL2_OP MUL2_OP 
MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP #define MUL4_MOP10 \ MUL4_OP MUL4_OP MUL4_OP MUL4_OP MUL4_OP \ MUL4_OP MUL4_OP MUL4_OP MUL4_OP MUL4_OP #define MUL8_MOP5 \ MUL8_OP MUL8_OP MUL8_OP MUL8_OP MUL8_OP #define MADD1_MOP20 \ MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP \ MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP #define MADD2_MOP20 \ MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP \ MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP #define MADD4_MOP10 \ MADD4_OP MADD4_OP MADD4_OP MADD4_OP MADD4_OP \ MADD4_OP MADD4_OP MADD4_OP MADD4_OP MADD4_OP #define MADD8_MOP5 \ MADD8_OP MADD8_OP MADD8_OP MADD8_OP MADD8_OP #define MULMADD1_MOP20 \ MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP \ MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP #define MULMADD2_MOP20 \ MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP \ MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP #define MULMADD4_MOP10 \ MULMADD4_OP MULMADD4_OP MULMADD4_OP MULMADD4_OP MULMADD4_OP \ MULMADD4_OP MULMADD4_OP MULMADD4_OP MULMADD4_OP MULMADD4_OP #define MULMADD8_MOP5 \ MULMADD8_OP MULMADD8_OP MULMADD8_OP MULMADD8_OP MULMADD8_OP template <class T> __global__ void Add1(T *data, int nIters, T v) { int gid = blockIdx.x*blockDim.x + threadIdx.x; register T s = data[gid]; for (int j=0 ; j<nIters ; ++j) { /* Each macro op has 20 operations. Unroll 12 more times for 240 operations total. */ ADD1_MOP20 ADD1_MOP20 ADD1_MOP20 ADD1_MOP20 ADD1_MOP20 ADD1_MOP20 ADD1_MOP20 ADD1_MOP20 ADD1_MOP20 ADD1_MOP20 ADD1_MOP20 ADD1_MOP20 } data[gid] = s; } template <class T> __global__ void Add2(T *data, int nIters, T v) { int gid = blockIdx.x*blockDim.x + threadIdx.x; register T s = data[gid], s2=10.0f-s; for (int j=0 ; j<nIters ; ++j) { /* Each macro op has 20 operations. Unroll 6 more times for 120 operations total. */ ADD2_MOP20 ADD2_MOP20 ADD2_MOP20 ADD2_MOP20 ADD2_MOP20 ADD2_MOP20 } data[gid] = s+s2; } template <class T> __global__ void Add4(T *data, int nIters, T v) { int gid = blockIdx.x*blockDim.x + threadIdx.x; register T s = data[gid], s2=10.0f-s, s3=9.0f-s, s4=9.0f-s2; for (int j=0 ; j<nIters ; ++j) { /* Each macro op has 10 operations. Unroll 6 more times for 60 operations total. */ ADD4_MOP10 ADD4_MOP10 ADD4_MOP10 ADD4_MOP10 ADD4_MOP10 ADD4_MOP10 } data[gid] = (s+s2)+(s3+s4); } template <class T> __global__ void Add8(T *data, int nIters, T v) { int gid = blockIdx.x*blockDim.x + threadIdx.x; register T s = data[gid], s2=10.0f-s, s3=9.0f-s, s4=9.0f-s2, s5=8.0f-s, s6=8.0f-s2, s7=7.0f-s, s8=7.0f-s2; for (int j=0 ; j<nIters ; ++j) { /* Each macro op has 5 operations. Unroll 6 more times for 30 operations total. */ ADD8_MOP5 ADD8_MOP5 ADD8_MOP5 ADD8_MOP5 ADD8_MOP5 ADD8_MOP5 } data[gid] = ((s+s2)+(s3+s4))+((s5+s6)+(s7+s8)); } template <class T> __global__ void Mul1(T *data, int nIters, T v) { int gid = blockIdx.x*blockDim.x + threadIdx.x; register T s = data[gid]-data[gid]+0.999f; for (int j=0 ; j<nIters ; ++j) { /* Each macro op has 20 operations. Unroll 10 more times for 200 operations total. 
*/ MUL1_MOP20 MUL1_MOP20 MUL1_MOP20 MUL1_MOP20 MUL1_MOP20 MUL1_MOP20 MUL1_MOP20 MUL1_MOP20 MUL1_MOP20 MUL1_MOP20 } data[gid] = s; } template <class T> __global__ void Mul2(T *data, int nIters, T v) { int gid = blockIdx.x*blockDim.x + threadIdx.x; register T s = data[gid]-data[gid]+0.999f, s2=s-0.0001f; for (int j=0 ; j<nIters ; ++j) { /* Each macro op has 20 operations. Unroll 5 more times for 100 operations total. */ MUL2_MOP20 MUL2_MOP20 MUL2_MOP20 MUL2_MOP20 MUL2_MOP20 } data[gid] = s+s2; } template <class T> __global__ void Mul4(T *data, int nIters, T v) { int gid = blockIdx.x*blockDim.x + threadIdx.x; register T s = data[gid]-data[gid]+0.999f, s2=s-0.0001f, s3=s-0.0002f, s4=s-0.0003f; for (int j=0 ; j<nIters ; ++j) { /* Each macro op has 10 operations. Unroll 5 more times for 50 operations total. */ MUL4_MOP10 MUL4_MOP10 MUL4_MOP10 MUL4_MOP10 MUL4_MOP10 } data[gid] = (s+s2)+(s3+s4); } template <class T> __global__ void Mul8(T *data, int nIters, T v) { int gid = blockIdx.x*blockDim.x + threadIdx.x; register T s = data[gid]-data[gid]+0.999f, s2=s-0.0001f, s3=s-0.0002f, s4=s-0.0003f, s5=s-0.0004f, s6=s-0.0005f, s7=s-0.0006f, s8=s-0.0007f; for (int j=0 ; j<nIters ; ++j) { /* Each macro op has 5 operations. Unroll 5 more times for 25 operations total. */ MUL8_MOP5 MUL8_MOP5 MUL8_MOP5 MUL8_MOP5 MUL8_MOP5 } data[gid] = ((s+s2)+(s3+s4))+((s5+s6)+(s7+s8)); } template <class T> __global__ void MAdd1(T *data, int nIters, T v1, T v2) { int gid = blockIdx.x*blockDim.x + threadIdx.x; register T s = data[gid]; for (int j=0 ; j<nIters ; ++j) { /* Each macro op has 20 operations. Unroll 12 more times for 240 operations total. */ MADD1_MOP20 MADD1_MOP20 MADD1_MOP20 MADD1_MOP20 MADD1_MOP20 MADD1_MOP20 MADD1_MOP20 MADD1_MOP20 MADD1_MOP20 MADD1_MOP20 MADD1_MOP20 MADD1_MOP20 } data[gid] = s; } template <class T> __global__ void MAdd2(T *data, int nIters, T v1, T v2) { int gid = blockIdx.x*blockDim.x + threadIdx.x; register T s = data[gid], s2=10.0f-s; for (int j=0 ; j<nIters ; ++j) { /* Each macro op has 20 operations. Unroll 6 more times for 120 operations total. */ MADD2_MOP20 MADD2_MOP20 MADD2_MOP20 MADD2_MOP20 MADD2_MOP20 MADD2_MOP20 } data[gid] = s+s2; } template <class T> __global__ void MAdd4(T *data, int nIters, T v1, T v2) { int gid = blockIdx.x*blockDim.x + threadIdx.x; register T s = data[gid], s2=10.0f-s, s3=9.0f-s, s4=9.0f-s2; for (int j=0 ; j<nIters ; ++j) { /* Each macro op has 10 operations. Unroll 6 more times for 60 operations total. */ MADD4_MOP10 MADD4_MOP10 MADD4_MOP10 MADD4_MOP10 MADD4_MOP10 MADD4_MOP10 } data[gid] = (s+s2)+(s3+s4); } template <class T> __global__ void MAdd8(T *data, int nIters, T v1, T v2) { int gid = blockIdx.x*blockDim.x + threadIdx.x; register T s = data[gid], s2=10.0f-s, s3=9.0f-s, s4=9.0f-s2, s5=8.0f-s, s6=8.0f-s2, s7=7.0f-s, s8=7.0f-s2; for (int j=0 ; j<nIters ; ++j) { /* Each macro op has 5 operations. Unroll 6 more times for 30 operations total. */ MADD8_MOP5 MADD8_MOP5 MADD8_MOP5 MADD8_MOP5 MADD8_MOP5 MADD8_MOP5 } data[gid] = ((s+s2)+(s3+s4))+((s5+s6)+(s7+s8)); } template <class T> __global__ void MulMAdd1(T *data, int nIters, T v1, T v2) { int gid = blockIdx.x*blockDim.x + threadIdx.x; register T s = data[gid]; for (int j=0 ; j<nIters ; ++j) { /* Each macro op has 20 operations. Unroll 8 more times for 160 operations total. 
*/ MULMADD1_MOP20 MULMADD1_MOP20 MULMADD1_MOP20 MULMADD1_MOP20 MULMADD1_MOP20 MULMADD1_MOP20 MULMADD1_MOP20 MULMADD1_MOP20 } data[gid] = s; } template <class T> __global__ void MulMAdd2(T *data, int nIters, T v1, T v2) { int gid = blockIdx.x*blockDim.x + threadIdx.x; register T s = data[gid], s2=10.0f-s; for (int j=0 ; j<nIters ; ++j) { /* Each macro op has 20 operations. Unroll 4 more times for 80 operations total. */ MULMADD2_MOP20 MULMADD2_MOP20 MULMADD2_MOP20 MULMADD2_MOP20 } data[gid] = s+s2; } template <class T> __global__ void MulMAdd4(T *data, int nIters, T v1, T v2) { int gid = blockIdx.x*blockDim.x + threadIdx.x; register T s = data[gid], s2=10.0f-s, s3=9.0f-s, s4=9.0f-s2; for (int j=0 ; j<nIters ; ++j) { /* Each macro op has 10 operations. Unroll 4 more times for 40 operations total. */ MULMADD4_MOP10 MULMADD4_MOP10 MULMADD4_MOP10 MULMADD4_MOP10 } data[gid] = (s+s2)+(s3+s4); } template <class T> __global__ void MulMAdd8(T *data, int nIters, T v1, T v2) { int gid = blockIdx.x*blockDim.x + threadIdx.x; register T s = data[gid], s2=10.0f-s, s3=9.0f-s, s4=9.0f-s2, s5=8.0f-s, s6=8.0f-s2, s7=7.0f-s, s8=7.0f-s2; for (int j=0 ; j<nIters ; ++j) { /* Each macro op has 5 operations. Unroll 4 more times for 20 operations total. */ MULMADD8_MOP5 MULMADD8_MOP5 MULMADD8_MOP5 MULMADD8_MOP5 } data[gid] = ((s+s2)+(s3+s4))+((s5+s6)+(s7+s8)); }
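// Note on the throughput bookkeeping above: the host code converts the flop count and the event
// time into GFLOPS in a way that is easy to misread. The standalone sketch below reproduces just
// that arithmetic for the MAdd1 case; the array size, repeat count and elapsed time are
// hypothetical sample values, not measured results. cudaEventElapsedTime() returns milliseconds,
// the "t *= 1.e6" step turns that into nanoseconds, and FLOPs divided by nanoseconds is GFLOPS.
//
// // Illustrative only: GFLOPS bookkeeping for MAdd1 (12 x MADD1_MOP20 = 240 ops, 2 FLOPs each).
// #include <cstdio>
//
// int main()
// {
//     const double numFloats   = 1024 * 1024;  // elements per launch (example value)
//     const double realRepeats = 10;           // kernel loop iterations (example value)
//     const double msElapsed   = 5.0;          // cudaEventElapsedTime() result in ms (example value)
//
//     // flopCount = numFloats * flops/op * numLoopIters * unrollFactor * numStreams
//     const double flopCount = numFloats * 2 * realRepeats * 240 * 1;
//
//     const double t_ns  = msElapsed * 1.e6;   // ms -> ns, as in the benchmark
//     const double gflop = flopCount / t_ns;   // flops per nanosecond == GFLOPS
//
//     printf("MAdd1  Size:%07d  %.2f GFLOPS\n", (int)numFloats, gflop);
//     return 0;
// }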
#include <cugraph/detail/shuffle_wrappers.hpp> #include <cugraph/utilities/error.hpp> #include <raft/handle.hpp> #include <rmm/device_uvector.hpp> #include <thrust/copy.h> #include <thrust/distance.h> #include <thrust/for_each.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/iterator_traits.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/merge.h> #include <thrust/partition.h> #include <thrust/set_operations.h> #include <thrust/sort.h> #include <thrust/tuple.h> #include <algorithm> #include <optional> namespace cugraph { namespace { // compare after flipping major & minor template <typename vertex_t, typename weight_t> struct compare_upper_triangular_edges_as_lower_triangular_t { __device__ bool operator()(thrust::tuple<vertex_t, vertex_t, weight_t> const& lhs, thrust::tuple<vertex_t, vertex_t, weight_t> const& rhs) const { return thrust::make_tuple(thrust::get<1>(lhs), thrust::get<0>(lhs), thrust::get<2>(lhs)) < thrust::make_tuple(thrust::get<1>(rhs), thrust::get<0>(rhs), thrust::get<2>(rhs)); } }; // if in the upper triangular region, flip major & minor before comparison. // if major & minor coincide (after flip if upper triangular), lower triangular edges are less than // upper triangular edges template <typename vertex_t, typename weight_t> struct compare_lower_and_upper_triangular_edges_t { __device__ bool operator()(thrust::tuple<vertex_t, vertex_t, weight_t> const& lhs, thrust::tuple<vertex_t, vertex_t, weight_t> const& rhs) const { auto lhs_in_lower = thrust::get<0>(lhs) > thrust::get<1>(lhs); auto rhs_in_lower = thrust::get<0>(rhs) > thrust::get<1>(rhs); return thrust::make_tuple( lhs_in_lower ? thrust::get<0>(lhs) : thrust::get<1>(lhs), lhs_in_lower ? thrust::get<1>(lhs) : thrust::get<0>(lhs), !lhs_in_lower, // lower triangular edges comes before upper triangular edges thrust::get<2>(lhs)) < thrust::make_tuple( rhs_in_lower ? thrust::get<0>(rhs) : thrust::get<1>(rhs), rhs_in_lower ? thrust::get<1>(rhs) : thrust::get<0>(rhs), !rhs_in_lower, // lower triangular edges comes before upper triangular edges thrust::get<2>(rhs)); } }; template <typename EdgeIterator> struct symmetrize_op_t { bool reciprocal{false}; __device__ void operator()( EdgeIterator edge_first, size_t lower_run_length, size_t upper_run_length, uint8_t* include_first /* size = lower_run_length + upper_run_Length */) const { using weight_t = typename thrust:: tuple_element<2, typename thrust::iterator_traits<EdgeIterator>::value_type>::type; auto min_run_length = lower_run_length < upper_run_length ? lower_run_length : upper_run_length; auto max_run_length = lower_run_length < upper_run_length ? 
upper_run_length : lower_run_length; for (size_t i = 0; i < max_run_length; ++i) { if (i < min_run_length) { thrust::get<2>(*(edge_first + i)) = (thrust::get<2>(*(edge_first + i)) + thrust::get<2>(*(edge_first + lower_run_length + i))) / weight_t{2.0}; // average *(include_first + i) = true; *(include_first + lower_run_length + i) = false; } else { if (lower_run_length > upper_run_length) { *(include_first + i) = !reciprocal; } else { *(include_first + lower_run_length + i) = !reciprocal; } } } } }; template <typename EdgeIterator> struct update_edge_weights_and_flags_t { EdgeIterator edge_first{}; uint8_t* include_first{nullptr}; // 0: remove 1: include size_t num_edges{0}; symmetrize_op_t<EdgeIterator> op{}; __device__ void operator()(size_t i) const { bool first_in_run{}; if (i == 0) { first_in_run = true; } else { auto cur = *(edge_first + i); auto prev = *(edge_first + (i - 1)); auto cur_pair = thrust::get<0>(cur) > thrust::get<1>(cur) ? thrust::make_tuple(thrust::get<0>(cur), thrust::get<1>(cur)) : thrust::make_tuple(thrust::get<1>(cur), thrust::get<0>(cur)); auto prev_pair = thrust::get<0>(prev) > thrust::get<1>(prev) ? thrust::make_tuple(thrust::get<0>(prev), thrust::get<1>(prev)) : thrust::make_tuple(thrust::get<1>(prev), thrust::get<0>(prev)); first_in_run = cur_pair != prev_pair; } if (first_in_run) { auto first = *(edge_first + i); size_t lower_run_length{0}; size_t upper_run_length{0}; auto pair_first = thrust::get<0>(first) > thrust::get<1>(first) ? thrust::make_tuple(thrust::get<0>(first), thrust::get<1>(first)) : thrust::make_tuple(thrust::get<1>(first), thrust::get<0>(first)); while (i + lower_run_length < num_edges) { auto cur = *(edge_first + i + lower_run_length); if ((thrust::get<0>(cur) > thrust::get<1>(cur)) && (thrust::make_tuple(thrust::get<0>(cur), thrust::get<1>(cur)) == pair_first)) { ++lower_run_length; } else { break; } } while (i + lower_run_length + upper_run_length < num_edges) { auto cur = *(edge_first + i + lower_run_length + upper_run_length); if ((thrust::get<0>(cur) < thrust::get<1>(cur)) && (thrust::make_tuple(thrust::get<1>(cur), thrust::get<0>(cur)) == pair_first)) { ++upper_run_length; } else { break; } } op(edge_first + i, lower_run_length, upper_run_length, include_first + i); } } }; template <typename vertex_t> struct to_lower_triangular_t { __device__ thrust::tuple<vertex_t, vertex_t> operator()(thrust::tuple<vertex_t, vertex_t> e) const { return thrust::get<0>(e) > thrust::get<1>(e) ? e : thrust::make_tuple(thrust::get<1>(e), thrust::get<0>(e)); } }; } // namespace namespace detail { template <typename vertex_t, typename weight_t, bool multi_gpu> std::tuple<rmm::device_uvector<vertex_t>, rmm::device_uvector<vertex_t>, std::optional<rmm::device_uvector<weight_t>>> symmetrize_edgelist(raft::handle_t const& handle, rmm::device_uvector<vertex_t>&& edgelist_majors, rmm::device_uvector<vertex_t>&& edgelist_minors, std::optional<rmm::device_uvector<weight_t>>&& edgelist_weights, bool reciprocal) { // 1. 
separate lower triangular, diagonal (self-loop), and upper triangular edges size_t num_lower_triangular_edges{0}; size_t num_diagonal_edges{0}; if (edgelist_weights) { auto edge_first = thrust::make_zip_iterator(thrust::make_tuple( edgelist_majors.begin(), edgelist_minors.begin(), (*edgelist_weights).begin())); auto lower_triangular_last = thrust::partition(handle.get_thrust_policy(), edge_first, edge_first + edgelist_majors.size(), [] __device__(auto e) { auto major = thrust::get<0>(e); auto minor = thrust::get<1>(e); return major > minor; }); num_lower_triangular_edges = static_cast<size_t>(thrust::distance(edge_first, lower_triangular_last)); auto diagonal_last = thrust::partition(handle.get_thrust_policy(), edge_first + num_lower_triangular_edges, edge_first + edgelist_majors.size(), [] __device__(auto e) { auto major = thrust::get<0>(e); auto minor = thrust::get<1>(e); return major == minor; }); num_diagonal_edges = static_cast<size_t>(thrust::distance(lower_triangular_last, diagonal_last)); } else { auto edge_first = thrust::make_zip_iterator( thrust::make_tuple(edgelist_majors.begin(), edgelist_minors.begin())); auto lower_triangular_last = thrust::partition(handle.get_thrust_policy(), edge_first, edge_first + edgelist_majors.size(), [] __device__(auto e) { auto major = thrust::get<0>(e); auto minor = thrust::get<1>(e); return major > minor; }); num_lower_triangular_edges = static_cast<size_t>(thrust::distance(edge_first, lower_triangular_last)); auto diagonal_last = thrust::partition(handle.get_thrust_policy(), edge_first + num_lower_triangular_edges, edge_first + edgelist_majors.size(), [] __device__(auto e) { auto major = thrust::get<0>(e); auto minor = thrust::get<1>(e); return major == minor; }); num_diagonal_edges = static_cast<size_t>(thrust::distance(lower_triangular_last, diagonal_last)); } rmm::device_uvector<vertex_t> diagonal_majors(num_diagonal_edges, handle.get_stream()); rmm::device_uvector<vertex_t> upper_triangular_majors( edgelist_majors.size() - num_lower_triangular_edges - num_diagonal_edges, handle.get_stream()); thrust::copy(handle.get_thrust_policy(), edgelist_majors.begin() + num_lower_triangular_edges, edgelist_majors.begin() + num_lower_triangular_edges + num_diagonal_edges, diagonal_majors.begin()); thrust::copy(handle.get_thrust_policy(), edgelist_majors.begin() + num_lower_triangular_edges + num_diagonal_edges, edgelist_majors.end(), upper_triangular_majors.begin()); edgelist_majors.resize(num_lower_triangular_edges, handle.get_stream()); edgelist_majors.shrink_to_fit(handle.get_stream()); auto lower_triangular_majors = std::move(edgelist_majors); rmm::device_uvector<vertex_t> upper_triangular_minors(upper_triangular_majors.size(), handle.get_stream()); thrust::copy(handle.get_thrust_policy(), edgelist_minors.begin() + num_lower_triangular_edges + num_diagonal_edges, edgelist_minors.end(), upper_triangular_minors.begin()); edgelist_minors.resize(lower_triangular_majors.size(), handle.get_stream()); edgelist_minors.shrink_to_fit(handle.get_stream()); auto lower_triangular_minors = std::move(edgelist_minors); auto diagonal_weights = edgelist_weights ? std::make_optional<rmm::device_uvector<weight_t>>( diagonal_majors.size(), handle.get_stream()) : std::nullopt; auto upper_triangular_weights = edgelist_weights ? 
std::make_optional<rmm::device_uvector<weight_t>>( upper_triangular_majors.size(), handle.get_stream()) : std::nullopt; if (edgelist_weights) { thrust::copy(handle.get_thrust_policy(), (*edgelist_weights).begin() + num_lower_triangular_edges, (*edgelist_weights).begin() + num_lower_triangular_edges + num_diagonal_edges, (*diagonal_weights).begin()); thrust::copy(handle.get_thrust_policy(), (*edgelist_weights).begin() + num_lower_triangular_edges + num_diagonal_edges, (*edgelist_weights).end(), (*upper_triangular_weights).begin()); (*edgelist_weights).resize(lower_triangular_majors.size(), handle.get_stream()); (*edgelist_weights).shrink_to_fit(handle.get_stream()); } auto lower_triangular_weights = std::move(edgelist_weights); // 2. shuffle the (to-be-flipped) upper triangular edges if constexpr (multi_gpu) { std::tie(upper_triangular_minors, upper_triangular_majors, upper_triangular_weights) = detail::shuffle_edgelist_by_gpu_id(handle, std::move(upper_triangular_minors), std::move(upper_triangular_majors), std::move(upper_triangular_weights)); } // 3. merge the lower triangular and the (flipped) upper triangular edges rmm::device_uvector<vertex_t> merged_lower_triangular_majors(0, handle.get_stream()); rmm::device_uvector<vertex_t> merged_lower_triangular_minors(0, handle.get_stream()); auto merged_lower_triangular_weights = edgelist_weights ? std::make_optional<rmm::device_uvector<weight_t>>(0, handle.get_stream()) : std::nullopt; if (edgelist_weights) { auto lower_triangular_edge_first = thrust::make_zip_iterator(thrust::make_tuple(lower_triangular_majors.begin(), lower_triangular_minors.begin(), (*lower_triangular_weights).begin())); thrust::sort(handle.get_thrust_policy(), lower_triangular_edge_first, lower_triangular_edge_first + lower_triangular_majors.size()); auto upper_triangular_edge_first = thrust::make_zip_iterator(thrust::make_tuple( upper_triangular_majors.begin(), upper_triangular_minors.begin(), (*upper_triangular_weights) .begin())); // do not flip here to use "lower_triangular = major > minor" thrust::sort(handle.get_thrust_policy(), upper_triangular_edge_first, upper_triangular_edge_first + upper_triangular_majors.size(), compare_upper_triangular_edges_as_lower_triangular_t<vertex_t, weight_t>{}); merged_lower_triangular_majors.resize( lower_triangular_majors.size() + upper_triangular_majors.size(), handle.get_stream()); merged_lower_triangular_minors.resize(merged_lower_triangular_majors.size(), handle.get_stream()); (*merged_lower_triangular_weights) .resize(merged_lower_triangular_majors.size(), handle.get_stream()); auto merged_first = thrust::make_zip_iterator(thrust::make_tuple(merged_lower_triangular_majors.begin(), merged_lower_triangular_minors.begin(), (*merged_lower_triangular_weights).begin())); thrust::merge(handle.get_thrust_policy(), lower_triangular_edge_first, lower_triangular_edge_first + lower_triangular_majors.size(), upper_triangular_edge_first, upper_triangular_edge_first + upper_triangular_majors.size(), merged_first, compare_lower_and_upper_triangular_edges_t<vertex_t, weight_t>{}); lower_triangular_majors.resize(0, handle.get_stream()); lower_triangular_majors.shrink_to_fit(handle.get_stream()); lower_triangular_minors.resize(0, handle.get_stream()); lower_triangular_minors.shrink_to_fit(handle.get_stream()); (*lower_triangular_weights).resize(0, handle.get_stream()); (*lower_triangular_weights).shrink_to_fit(handle.get_stream()); upper_triangular_majors.resize(0, handle.get_stream()); upper_triangular_majors.shrink_to_fit(handle.get_stream()); 
upper_triangular_minors.resize(0, handle.get_stream()); upper_triangular_minors.shrink_to_fit(handle.get_stream()); (*upper_triangular_weights).resize(0, handle.get_stream()); (*upper_triangular_weights).shrink_to_fit(handle.get_stream()); rmm::device_uvector<uint8_t> includes(merged_lower_triangular_majors.size(), handle.get_stream()); symmetrize_op_t<decltype(merged_first)> symm_op{reciprocal}; thrust::for_each( handle.get_thrust_policy(), thrust::make_counting_iterator(size_t{0}), thrust::make_counting_iterator(merged_lower_triangular_majors.size()), update_edge_weights_and_flags_t<decltype(merged_first)>{ merged_first, includes.data(), merged_lower_triangular_majors.size(), symm_op}); auto merged_edge_and_flag_first = thrust::make_zip_iterator(thrust::make_tuple(merged_lower_triangular_majors.begin(), merged_lower_triangular_minors.begin(), (*merged_lower_triangular_weights).begin(), includes.begin())); merged_lower_triangular_majors.resize( thrust::distance( merged_edge_and_flag_first, thrust::remove_if(handle.get_thrust_policy(), merged_edge_and_flag_first, merged_edge_and_flag_first + merged_lower_triangular_majors.size(), [] __device__(auto t) { return !thrust::get<3>(t); })), handle.get_stream()); merged_lower_triangular_majors.shrink_to_fit(handle.get_stream()); merged_lower_triangular_minors.resize(merged_lower_triangular_majors.size(), handle.get_stream()); merged_lower_triangular_minors.shrink_to_fit(handle.get_stream()); (*merged_lower_triangular_weights) .resize(merged_lower_triangular_majors.size(), handle.get_stream()); (*merged_lower_triangular_weights).shrink_to_fit(handle.get_stream()); auto merged_major_minor_first = thrust::make_zip_iterator(thrust::make_tuple( merged_lower_triangular_majors.begin(), merged_lower_triangular_minors.begin())); thrust::transform(handle.get_thrust_policy(), merged_major_minor_first, merged_major_minor_first + merged_lower_triangular_majors.size(), merged_major_minor_first, to_lower_triangular_t<vertex_t>{}); } else { auto lower_triangular_edge_first = thrust::make_zip_iterator( thrust::make_tuple(lower_triangular_majors.begin(), lower_triangular_minors.begin())); thrust::sort(handle.get_thrust_policy(), lower_triangular_edge_first, lower_triangular_edge_first + lower_triangular_majors.size()); auto upper_triangular_edge_first = thrust::make_zip_iterator(thrust::make_tuple( upper_triangular_minors.begin(), upper_triangular_majors.begin())); // flip thrust::sort(handle.get_thrust_policy(), upper_triangular_edge_first, upper_triangular_edge_first + upper_triangular_majors.size()); merged_lower_triangular_majors.resize( reciprocal ? std::min(num_lower_triangular_edges, upper_triangular_majors.size()) : num_lower_triangular_edges + upper_triangular_majors.size(), handle.get_stream()); merged_lower_triangular_minors.resize(merged_lower_triangular_majors.size(), handle.get_stream()); auto merged_first = thrust::make_zip_iterator(thrust::make_tuple( merged_lower_triangular_majors.begin(), merged_lower_triangular_minors.begin())); auto merged_last = reciprocal ? 
thrust::set_intersection(handle.get_thrust_policy(), lower_triangular_edge_first, lower_triangular_edge_first + lower_triangular_majors.size(), upper_triangular_edge_first, upper_triangular_edge_first + upper_triangular_majors.size(), merged_first) : thrust::set_union(handle.get_thrust_policy(), lower_triangular_edge_first, lower_triangular_edge_first + lower_triangular_majors.size(), upper_triangular_edge_first, upper_triangular_edge_first + upper_triangular_majors.size(), merged_first); lower_triangular_majors.resize(0, handle.get_stream()); lower_triangular_majors.shrink_to_fit(handle.get_stream()); lower_triangular_minors.resize(0, handle.get_stream()); lower_triangular_minors.shrink_to_fit(handle.get_stream()); upper_triangular_majors.resize(0, handle.get_stream()); upper_triangular_majors.shrink_to_fit(handle.get_stream()); upper_triangular_minors.resize(0, handle.get_stream()); upper_triangular_minors.shrink_to_fit(handle.get_stream()); merged_lower_triangular_majors.resize(thrust::distance(merged_first, merged_last), handle.get_stream()); merged_lower_triangular_majors.shrink_to_fit(handle.get_stream()); merged_lower_triangular_minors.resize(merged_lower_triangular_majors.size(), handle.get_stream()); merged_lower_triangular_minors.shrink_to_fit(handle.get_stream()); } // 4. symmetrize from the merged lower triangular edges & diagonal edges upper_triangular_majors.resize(merged_lower_triangular_majors.size(), handle.get_stream()); thrust::copy(handle.get_thrust_policy(), merged_lower_triangular_minors.begin(), merged_lower_triangular_minors.end(), upper_triangular_majors.begin()); upper_triangular_minors.resize(upper_triangular_majors.size(), handle.get_stream()); thrust::copy(handle.get_thrust_policy(), merged_lower_triangular_majors.begin(), merged_lower_triangular_majors.end(), upper_triangular_minors.begin()); if (edgelist_weights) { (*upper_triangular_weights).resize(upper_triangular_majors.size(), handle.get_stream()); thrust::copy(handle.get_thrust_policy(), (*merged_lower_triangular_weights).begin(), (*merged_lower_triangular_weights).end(), (*upper_triangular_weights).begin()); } if constexpr (multi_gpu) { std::tie(upper_triangular_majors, upper_triangular_minors, upper_triangular_weights) = detail::shuffle_edgelist_by_gpu_id(handle, std::move(upper_triangular_majors), std::move(upper_triangular_minors), std::move(upper_triangular_weights)); } edgelist_majors = std::move(merged_lower_triangular_majors); edgelist_minors = std::move(merged_lower_triangular_minors); edgelist_weights = std::move(merged_lower_triangular_weights); edgelist_majors.resize( edgelist_majors.size() + diagonal_majors.size() + upper_triangular_majors.size(), handle.get_stream()); thrust::copy(handle.get_thrust_policy(), diagonal_majors.begin(), diagonal_majors.end(), edgelist_majors.end() - diagonal_majors.size() - upper_triangular_majors.size()); thrust::copy(handle.get_thrust_policy(), upper_triangular_majors.begin(), upper_triangular_majors.end(), edgelist_majors.end() - upper_triangular_majors.size()); upper_triangular_majors.resize(0, handle.get_stream()); upper_triangular_majors.shrink_to_fit(handle.get_stream()); edgelist_minors.resize(edgelist_majors.size(), handle.get_stream()); thrust::copy(handle.get_thrust_policy(), diagonal_majors.begin(), diagonal_majors.end(), edgelist_minors.end() - diagonal_majors.size() - upper_triangular_minors.size()); // minors == majors if diagonal thrust::copy(handle.get_thrust_policy(), upper_triangular_minors.begin(), upper_triangular_minors.end(), 
edgelist_minors.end() - upper_triangular_minors.size()); diagonal_majors.resize(0, handle.get_stream()); diagonal_majors.shrink_to_fit(handle.get_stream()); upper_triangular_minors.resize(0, handle.get_stream()); upper_triangular_minors.shrink_to_fit(handle.get_stream()); if (edgelist_weights) { (*edgelist_weights).resize(edgelist_majors.size(), handle.get_stream()); thrust::copy( handle.get_thrust_policy(), (*diagonal_weights).begin(), (*diagonal_weights).end(), (*edgelist_weights).end() - (*diagonal_weights).size() - (*upper_triangular_weights).size()); thrust::copy(handle.get_thrust_policy(), (*upper_triangular_weights).begin(), (*upper_triangular_weights).end(), (*edgelist_weights).end() - (*upper_triangular_weights).size()); (*diagonal_weights).resize(0, handle.get_stream()); (*diagonal_weights).shrink_to_fit(handle.get_stream()); (*upper_triangular_weights).resize(0, handle.get_stream()); (*upper_triangular_weights).shrink_to_fit(handle.get_stream()); } return std::make_tuple( std::move(edgelist_majors), std::move(edgelist_minors), std::move(edgelist_weights)); } } // namespace detail template <typename vertex_t, typename weight_t, bool store_transposed, bool multi_gpu> std::tuple<rmm::device_uvector<vertex_t>, rmm::device_uvector<vertex_t>, std::optional<rmm::device_uvector<weight_t>>> symmetrize_edgelist(raft::handle_t const& handle, rmm::device_uvector<vertex_t>&& edgelist_rows, rmm::device_uvector<vertex_t>&& edgelist_cols, std::optional<rmm::device_uvector<weight_t>>&& edgelist_weights, bool reciprocal) { rmm::device_uvector<vertex_t> edgelist_majors(0, handle.get_stream()); rmm::device_uvector<vertex_t> edgelist_minors(0, handle.get_stream()); std::tie(edgelist_majors, edgelist_minors, edgelist_weights) = detail::symmetrize_edgelist<vertex_t, weight_t, multi_gpu>( handle, store_transposed ? std::move(edgelist_cols) : std::move(edgelist_rows), store_transposed ? std::move(edgelist_rows) : std::move(edgelist_cols), std::move(edgelist_weights), reciprocal); return std::make_tuple(store_transposed ? std::move(edgelist_minors) : std::move(edgelist_majors), store_transposed ? std::move(edgelist_majors) : std::move(edgelist_minors), std::move(edgelist_weights)); } } // namespace cugraph
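// Usage sketch for the public symmetrize_edgelist wrapper defined above. Only the signature and
// the template-parameter order are taken from this file; the include path and the concrete
// int32/float single-GPU instantiation are assumptions for illustration.
//
// #include <cugraph/graph_functions.hpp>  // assumed header exposing symmetrize_edgelist
// #include <raft/handle.hpp>
// #include <rmm/device_uvector.hpp>
// #include <optional>
// #include <utility>
//
// void symmetrize_example(raft::handle_t const& handle,
//                         rmm::device_uvector<int32_t>&& rows,
//                         rmm::device_uvector<int32_t>&& cols,
//                         std::optional<rmm::device_uvector<float>>&& weights)
// {
//   // reciprocal == false keeps the union of (u,v) and (v,u); weights of edges present in both
//   // directions are averaged, as implemented by symmetrize_op_t above.
//   auto [sym_rows, sym_cols, sym_weights] =
//     cugraph::symmetrize_edgelist<int32_t, float, /* store_transposed = */ false,
//                                  /* multi_gpu = */ false>(
//       handle, std::move(rows), std::move(cols), std::move(weights),
//       /* reciprocal = */ false);
//
//   // sym_rows / sym_cols / sym_weights now hold the symmetrized edge list.
//   (void)sym_rows; (void)sym_cols; (void)sym_weights;
// }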
#include <torch/extension.h> #include <cstdint> #include "cuda_util.cuh" #include "render_util.cuh" #include "data_spec_packed.cuh" namespace { namespace device { // From old version (name is hacky whatever) struct BasicSingleRaySpec { __device__ BasicSingleRaySpec(const float* __restrict__ origin, const float* __restrict__ dir) : origin{origin[0], origin[1], origin[2]}, dir{dir[0], dir[1], dir[2]}, vdir(dir) {} float origin[3]; float dir[3]; const float* __restrict__ vdir; }; __device__ __inline__ float compute_skip_dist_nn( const BasicSingleRaySpec& __restrict__ ray, const float* __restrict__ invdir, const float* __restrict__ pos, const int32_t* __restrict__ l, int32_t link_val) { const uint32_t dist = -link_val; const uint32_t cell_ul_shift = (dist - 1); const uint32_t cell_side_len = (1 << cell_ul_shift); // AABB intersection // Consider caching the invdir for the ray float tmax = 1e9f; #pragma unroll for (int i = 0; i < 3; ++i) { int ul = (((l[i]) >> cell_ul_shift) << cell_ul_shift); ul -= l[i]; const float t1 = (ul - pos[i]) * invdir[i]; const float t2 = (ul + cell_side_len - pos[i]) * invdir[i]; if (ray.dir[i] != 0.f) { tmax = fminf(tmax, fmaxf(t1, t2)); } } return tmax; } __device__ __inline__ void trace_ray( const PackedSparseGridSpec& __restrict__ grid, BasicSingleRaySpec ray, RenderOptions& __restrict__ opt, float* __restrict__ out) { // Warning: modifies ray.origin transform_coord(ray.origin, grid._scaling, grid._offset); // Warning: modifies ray.dir const float delta_scale = _get_delta_scale(grid._scaling, ray.dir); float t, tmax; float invdir[3]; #pragma unroll 3 for (int i = 0; i < 3; ++i) { invdir[i] = 1.0 / ray.dir[i]; if (ray.dir[i] == 0.f) invdir[i] = 1e9f; } { float t1, t2; t = 0.0f; tmax = 1e9f; #pragma unroll 3 for (int i = 0; i < 3; ++i) { ray.origin[i] += 0.5f; // Fix offset of nn vs lerp t1 = (0.0f - ray.origin[i]) * invdir[i]; t2 = (grid.size[i] - 1.f - ray.origin[i]) * invdir[i]; t = fmaxf(t, fminf(t1, t2)); tmax = fminf(tmax, fmaxf(t1, t2)); } } if (t > tmax) { // Ray doesn't hit box #pragma unroll 3 for (int j = 0; j < 3; ++j) { out[j] = opt.background_brightness; } return; } else { #pragma unroll 3 for (int j = 0; j < 3; ++j) { out[j] = 0.f; } float pos[3]; int32_t l[3]; float basis_fn[9]; // vdir is unscaled unit dir in world space, for calculating spherical function calc_sh(grid.basis_dim, ray.vdir, basis_fn); float log_transmittance = 0.f; while (t < tmax) { #pragma unroll 3 for (int j = 0; j < 3; ++j) { pos[j] = ray.origin[j] + t * ray.dir[j]; pos[j] = fminf(fmaxf(pos[j], 0.f), grid.size[j] - 1.f); l[j] = min(static_cast<int32_t>(pos[j]), grid.size[j] - 1); pos[j] -= l[j]; } const int32_t link = grid.links[ (l[0] * grid.size[1] + l[1]) * grid.size[2] + l[2] ]; if (link >= 0) { const float delta_t = _intersect_aabb_unit(pos, invdir) + 1e-2f; t += delta_t; float sigma = grid.density_data[link]; if (opt.last_sample_opaque && t + opt.step_size > tmax) { sigma += 1e9; } if (sigma > opt.sigma_thresh) { const float* __restrict__ sample_val = &grid.sh_data[size_t(link) * grid.sh_data_dim]; const float log_transmit = -delta_t * delta_scale * sigma; const float transmittance = expf(log_transmittance); const float weight = transmittance * (1.f - expf(log_transmit)); #pragma unroll 3 for (int k = 0; k < 3; ++k) { const int off = k * grid.basis_dim; float tmp = 0.5f; for (int i = 0; i < grid.basis_dim; ++i) { tmp += basis_fn[i] * sample_val[off + i]; } out[k] += weight * fmaxf(tmp, 0.f); } log_transmittance += log_transmit; if (transmittance <= opt.stop_thresh) { // 
Full opacity, stop float scale = 1.0 / (1.0 - transmittance); for (int j = 0; j < 3; ++j) { out[j] *= scale; } return; } } } else { float skip = fmaxf(compute_skip_dist_nn(ray, invdir, pos, l, link), 0.f); t += skip + 1e-2f; } } #pragma unroll 3 for (int j = 0; j < 3; ++j) { out[j] += expf(log_transmittance) * opt.background_brightness; } } } __device__ __inline__ void trace_ray_backward( const PackedSparseGridSpec& __restrict__ grid, const float* __restrict__ grad_output, const float* __restrict__ color_cache, BasicSingleRaySpec ray, RenderOptions& __restrict__ opt, PackedGridOutputGrads& __restrict__ grads) { // Warning: modifies ray.origin transform_coord(ray.origin, grid._scaling, grid._offset); // Warning: modifies ray.dir const float delta_scale = _get_delta_scale(grid._scaling, ray.dir); float t, tmax; float invdir[3]; #pragma unroll for (int i = 0; i < 3; ++i) { invdir[i] = 1.0 / ray.dir[i]; if (ray.dir[i] == 0.0f) { invdir[i] = 1e9f; } } { float t1, t2; t = 0.0f; tmax = 1e9f; #pragma unroll 3 for (int i = 0; i < 3; ++i) { ray.origin[i] += 0.5f; // Fix offset of nn vs lerp t1 = (0.0f - ray.origin[i]) * invdir[i]; t2 = (grid.size[i] - 1.f - ray.origin[i]) * invdir[i]; if (ray.dir[i] != 0.0f) { t = fmaxf(t, fminf(t1, t2)); tmax = fminf(tmax, fmaxf(t1, t2)); } } } if (t > tmax) { // Ray doesn't hit box return; } else { float pos[3]; int32_t l[3]; float basis_fn[9]; calc_sh(grid.basis_dim, ray.vdir, basis_fn); float accum = color_cache[0] * grad_output[0] + color_cache[1] * grad_output[1] + color_cache[2] * grad_output[2]; float log_transmittance = 0.f; while (t < tmax) { #pragma unroll 3 for (int j = 0; j < 3; ++j) { pos[j] = ray.origin[j] + t * ray.dir[j]; pos[j] = fminf(fmaxf(pos[j], 0.f), grid.size[j] - 1.f); l[j] = min(static_cast<int32_t>(pos[j]), grid.size[j] - 1); pos[j] -= l[j]; } const int32_t link = grid.links[ (l[0] * grid.size[1] + l[1]) * grid.size[2] + l[2] ]; if (link >= 0) { float delta_t = _intersect_aabb_unit(pos, invdir) + 1e-2f; t += delta_t; float sigma = grid.density_data[link]; if (opt.last_sample_opaque && t + opt.step_size > tmax) { sigma += 1e9; } if (sigma > opt.sigma_thresh) { const float* __restrict__ sample_val = &grid.sh_data[size_t(link) * grid.sh_data_dim]; float* __restrict__ grad_sample_val = &grads.grad_sh_out[size_t(link) * grid.sh_data_dim]; delta_t *= delta_scale; const float log_transmit = -delta_t * sigma; const float weight = expf(log_transmittance) * (1.f - expf(log_transmit)); float total_color = 0.f; #pragma unroll 3 for (int k = 0; k < 3; ++ k) { const int off = k * grid.basis_dim; float tmp = 0.5f; for (int i = 0; i < grid.basis_dim; ++i) { tmp += basis_fn[i] * sample_val[off + i]; } if (tmp > 0.f) { total_color += tmp * grad_output[k]; tmp = weight * grad_output[k]; for (int i = 0; i < grid.basis_dim; ++i) { atomicAdd(&grad_sample_val[off + i], basis_fn[i] * tmp); } } } log_transmittance += log_transmit; accum -= weight * total_color; if (grads.mask_out != nullptr) { grads.mask_out[link] = true; } atomicAdd(&grads.grad_density_out[link], delta_t * (total_color * expf(log_transmittance) - accum)); if (expf(log_transmittance) <= opt.stop_thresh) { return; } } } else { t += fmaxf(compute_skip_dist_nn(ray, invdir, pos, l, link), 0.f) + 1e-2f; } } } } // trace_ray_backward // ** Kernels __global__ void render_ray_svox1_kernel( PackedSparseGridSpec grid, PackedRaysSpec rays, RenderOptions opt, torch::PackedTensorAccessor32<float, 2, torch::RestrictPtrTraits> out) { CUDA_GET_THREAD_ID(tid, rays.origins.size(0)); trace_ray( grid, 
BasicSingleRaySpec(&rays.origins[tid][0], &rays.dirs[tid][0]), opt, &out[tid][0]); } __global__ void render_ray_svox1_backward_kernel( PackedSparseGridSpec grid, const float* __restrict__ grad_output, const float* __restrict__ color_cache, PackedRaysSpec rays, RenderOptions opt, bool grad_out_is_rgb, PackedGridOutputGrads grads ) { CUDA_GET_THREAD_ID(tid, rays.origins.size(0)); float grad_out[3]; if (grad_out_is_rgb) { const float norm_factor = 2.f / (3 * int(rays.origins.size(0))); #pragma unroll 3 for (int i = 0; i < 3; ++i) { const float resid = color_cache[tid * 3 + i] - grad_output[tid * 3 + i]; grad_out[i] = resid * norm_factor; } } else { #pragma unroll 3 for (int i = 0; i < 3; ++i) { grad_out[i] = grad_output[tid * 3 + i]; } } trace_ray_backward( grid, grad_out, color_cache + tid * 3, BasicSingleRaySpec(&rays.origins[tid][0], &rays.dirs[tid][0]), opt, grads); } } // namespace device } // namespace torch::Tensor volume_render_svox1(SparseGridSpec& grid, RaysSpec& rays, RenderOptions& opt) { DEVICE_GUARD(grid.sh_data); TORCH_CHECK(grid.basis_type == BASIS_TYPE_SH); // Only supporting SH for now grid.check(); rays.check(); const auto Q = rays.origins.size(0); const int cuda_n_threads = 512; const int blocks = CUDA_N_BLOCKS_NEEDED(Q, cuda_n_threads); torch::Tensor result = torch::zeros({Q, 3}, rays.origins.options()); device::render_ray_svox1_kernel<<<blocks, cuda_n_threads>>>( grid, rays, opt, result.packed_accessor32<float, 2, torch::RestrictPtrTraits>()); CUDA_CHECK_ERRORS; return result; } void volume_render_svox1_backward( SparseGridSpec& grid, RaysSpec& rays, RenderOptions& opt, torch::Tensor grad_out, torch::Tensor color_cache, GridOutputGrads& grads) { DEVICE_GUARD(grid.sh_data); grid.check(); rays.check(); grads.check(); CHECK_INPUT(grad_out); CHECK_INPUT(color_cache); const int Q = rays.origins.size(0); const int cuda_n_threads = 512; const int blocks = CUDA_N_BLOCKS_NEEDED(Q, cuda_n_threads); device::render_ray_svox1_backward_kernel<<<blocks, cuda_n_threads>>>( grid, grad_out.data_ptr<float>(), color_cache.data_ptr<float>(), rays, opt, false, grads); CUDA_CHECK_ERRORS; } void volume_render_svox1_fused( SparseGridSpec& grid, RaysSpec& rays, RenderOptions& opt, torch::Tensor rgb_gt, float _, // not supported float _2, // not supported torch::Tensor rgb_out, GridOutputGrads& grads) { DEVICE_GUARD(grid.sh_data); CHECK_INPUT(rgb_gt); CHECK_INPUT(rgb_out); grid.check(); rays.check(); grads.check(); const auto Q = rays.origins.size(0); const int cuda_n_threads = 512; { const int blocks = CUDA_N_BLOCKS_NEEDED(Q, cuda_n_threads); device::render_ray_svox1_kernel<<<blocks, cuda_n_threads>>>( grid, rays, opt, // Output rgb_out.packed_accessor32<float, 2, torch::RestrictPtrTraits>()); CUDA_CHECK_ERRORS; } { const int blocks = CUDA_N_BLOCKS_NEEDED(Q, cuda_n_threads); device::render_ray_svox1_backward_kernel<<<blocks, cuda_n_threads>>>( grid, rgb_gt.data_ptr<float>(), rgb_out.data_ptr<float>(), rays, opt, true, // Output grads); CUDA_CHECK_ERRORS; } }
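// The forward pass above marches each ray voxel by voxel and composites samples with the usual
// emission-absorption weights. The standalone host-side sketch below isolates just that
// accumulation rule with hypothetical per-sample values; it omits the kernel's early-termination
// rescaling and simply blends the remaining transmittance into the background at the end.
//
// #include <cmath>
// #include <cstdio>
//
// int main()
// {
//     // Hypothetical densities and step lengths (already scaled by delta_scale).
//     const float sigma[4]        = {0.5f, 2.0f, 8.0f, 1.0f};
//     const float dt[4]           = {0.1f, 0.1f, 0.1f, 0.1f};
//     const float sample_color[4] = {0.2f, 0.6f, 0.9f, 0.4f};  // one channel for brevity
//     const float stop_thresh     = 1e-3f;
//     const float background      = 1.0f;
//
//     float log_transmittance = 0.f;
//     float out = 0.f;
//     for (int i = 0; i < 4; ++i) {
//         const float log_transmit  = -dt[i] * sigma[i];
//         const float transmittance = std::exp(log_transmittance);
//         const float weight        = transmittance * (1.f - std::exp(log_transmit));
//         out += weight * std::fmax(sample_color[i], 0.f);
//         log_transmittance += log_transmit;
//         if (std::exp(log_transmittance) <= stop_thresh) break;  // ray is effectively opaque
//     }
//     out += std::exp(log_transmittance) * background;  // remaining light reaches the background
//     printf("composited value: %f\n", out);
//     return 0;
// }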
#include <nbla/array.hpp> #include <nbla/logger.hpp> #include <nbla/variable.hpp> #include <nbla/cuda/array/cuda_array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/cudnn/cudnn.hpp> #include <nbla/cuda/cudnn/function/add2.hpp> #include <nbla/cuda/cudnn/function/batch_normalization.hpp> #include <nbla/cuda/cudnn/function/fused_batch_normalization.hpp> #include <nbla/cuda/cudnn/function/relu.hpp> #include <nbla/cuda/limits.hpp> // If you face any issue when executing fused BN without a residual input // (inputs[5]), try enabling the following macro to do a workaround. // #define WORKAROUND_FOR_BUG_OPS_BN_ACTIVATION namespace nbla { #if CUDNN_VERSION >= 7400 #define DRV_BN_T() get_dtype_by_cudnn_data_type(derived_bn_dtype_) template <typename T> void FusedBatchNormalizationCudaCudnn<T>::setup_impl(const Variables &inputs, const Variables &outputs) { FusedBatchNormalization<T>::setup_impl(inputs, outputs); NBLA_CHECK(this->axes_.size() == 1, error_code::value, "Axes on a single dimension only supported."); bool channel_last = this->axes_[0] == inputs[0]->ndim() - 1; auto inshape = inputs[0]->shape(); NBLA_CHECK(inputs[0]->ndim() >= 2, error_code::value, "Input dimensions must be >= 2."); int C = inshape[this->axes_[0]]; int N = inshape[0]; int H = inputs[0]->size() / (C * N); int W = 1; // Check if the confition we can use faster BN. bool can_use_bn_ex = channel_last && C % 4 == 0; #if _WIN32 // On windows, cudnnBatchNormalization*Ex with fused option raises error with // CUDNN_STATUS_NOT_SUPPORTED. // (The case when bnOps = {CUDNN_BATCHNORM_OPS_BN_ACTIVATION, // CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION}.) // Therefore, can_use_bn_ex is fored to be False and FusedBN fallbackes to the // composite one. if (can_use_bn_ex) { NBLA_LOG_WARN( "[FusedBatchNormalization] " "Currently cudnn doesn't support fusedBatchNormalization on windows. 
" "Fallbacks to a composite implementation.") can_use_bn_ex = false; } #endif // _WIN32 if (can_use_bn_ex) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, this->device_); if ((prop.major == 5) && (prop.minor == 3)) { NBLA_LOG_WARN("FusedBatchNormalization is not supported by CuDNN on " "compute archtitecture 5.3 - " "fallback to composite implementation.") can_use_bn_ex = false; } } if (!can_use_bn_ex || outputs.size() == 3) { this->fall_back_func_ = make_shared<FusedBatchNormalization<T>>( this->ctx_, this->axes_, this->decay_rate_, this->eps_, this->batch_stat_, this->nonlinearity_); this->fall_back_func_->setup(inputs, outputs); return; } mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; cudnn_handle_ = SingletonManager::get<CudnnHandleManager>()->handle(device_); NBLA_CUDNN_CHECK( cudnnSetTensor4dDescriptor(input_desc_.desc, CUDNN_TENSOR_NHWC, cudnn_data_type<T>::type(), N, C, H, W)); NBLA_CUDNN_CHECK(cudnnSetTensor4dDescriptor( z_desc_.desc, CUDNN_TENSOR_NHWC, cudnn_data_type<T>::type(), N, C, H, W)); NBLA_CUDNN_CHECK( cudnnSetTensor4dDescriptor(output_desc_.desc, CUDNN_TENSOR_NHWC, cudnn_data_type<T>::type(), N, C, H, W)); NBLA_CUDNN_CHECK(cudnnDeriveBNTensorDescriptor( bn_scale_bias_mean_var_desc_.desc, input_desc_.desc, mode_)); int n, c, h, w, sn, sc, sh, sw; // garbage NBLA_CUDNN_CHECK(cudnnGetTensor4dDescriptor(bn_scale_bias_mean_var_desc_.desc, &derived_bn_dtype_, &n, &c, &h, &w, &sn, &sc, &sh, &sw)); // TODO: CUDNN_BATCHNORM_OPS_BN_ACTIVATION cannot pass the unit test this->ops_ = CUDNN_BATCHNORM_OPS_BN_ADD_ACTIVATION; #if !defined(WORKAROUND_FOR_BUG_OPS_BN_ACTIVATION) if (inputs.size() != 6) { this->ops_ = CUDNN_BATCHNORM_OPS_BN_ACTIVATION; } #endif // workspace allocation NBLA_CUDNN_CHECK(cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize( this->cudnn_handle_, this->mode_, this->ops_, this->input_desc_.desc, /* x desc */ z_desc_.desc, /* z desc */ this->output_desc_.desc, /* y desc */ this->bn_scale_bias_mean_var_desc_.desc, this->act_desc_.desc, &forward_workspace_size_)); NBLA_CUDNN_CHECK(cudnnGetBatchNormalizationTrainingExReserveSpaceSize( this->cudnn_handle_, this->mode_, this->ops_, this->act_desc_.desc, this->input_desc_.desc, &reserve_size_)); NBLA_CUDNN_CHECK(cudnnGetBatchNormalizationBackwardExWorkspaceSize( this->cudnn_handle_, this->mode_, this->ops_, this->input_desc_.desc, /* x desc */ this->output_desc_.desc, /* y desc */ this->output_desc_.desc, /* dy desc */ this->z_desc_.desc, /*dz desc*/ this->input_desc_.desc, /* dx desc */ this->bn_scale_bias_mean_var_desc_.desc, this->act_desc_.desc, &backward_workspace_size_)); } template <class T> void FusedBatchNormalizationCudaCudnn<T>::fused_batch_norm_forward( const Variables &inputs, const Variables &outputs, const bool update_inputs) { NBLA_CHECK(this->batch_stat_, error_code::runtime, "If batch_stat is false, this function should not be called."); cuda_set_device(std::stoi(this->ctx_.device_id)); Variable *batch_mean = &this->mean_; Variable *batch_var = &this->var_; batch_mean->reshape(inputs[1]->shape(), true); batch_var->reshape(inputs[2]->shape(), true); // Inputs const Tw *x = inputs[0]->get_data_pointer<Tw>(this->ctx_); const void *beta = inputs[1]->data()->get(DRV_BN_T(), this->ctx_)->const_pointer(); const void *gamma = inputs[2]->data()->get(DRV_BN_T(), this->ctx_)->const_pointer(); const void *z = inputs.size() == 6 ? 
inputs[5]->get_data_pointer<Tw>(this->ctx_) : nullptr; #if defined(WORKAROUND_FOR_BUG_OPS_BN_ACTIVATION) NdArray z_tmp(inputs[0]->shape()); if (z == nullptr) { z_tmp.zero(); z = z_tmp.get(DRV_BN_T(), this->ctx_)->const_pointer(); } #endif // Output Tw *y = outputs[0]->cast_data_and_get_pointer<Tw>(this->ctx_, true); void *m = batch_mean->data() ->cast(DRV_BN_T(), this->ctx_, true) ->pointer(); // batch mean void *v = batch_var->data() ->cast(DRV_BN_T(), this->ctx_, true) ->pointer(); // batch var // Inputs/Outputs void *rm = !update_inputs ? nullptr : inputs[3] ->data() ->cast(DRV_BN_T(), this->ctx_) ->pointer(); // running mean void *rv = !update_inputs ? nullptr : inputs[4] ->data() ->cast(DRV_BN_T(), this->ctx_) ->pointer(); // running var auto a = get_cudnn_scalar_arg<T>(1); auto b = get_cudnn_scalar_arg<T>(0); // Get buffers. NdArray workspace(Shape_t{(Size_t)forward_workspace_size_}); reserve_ = make_shared<NdArray>(Shape_t{(Size_t)reserve_size_}); void *workspace_ptr = workspace.cast(DRV_BN_T(), this->ctx_, true)->pointer(); void *reserve_ptr = reserve_->cast(DRV_BN_T(), this->ctx_, true)->pointer(); // Execute forward. double eps = std::max((double)this->eps_, CUDNN_BN_MIN_EPSILON); NBLA_CUDNN_CHECK(cudnnBatchNormalizationForwardTrainingEx( this->cudnn_handle_, this->mode_, this->ops_, &a, &b, input_desc_.desc, x, /* x */ z_desc_.desc, z, /* z */ output_desc_.desc, y, /* y */ this->bn_scale_bias_mean_var_desc_.desc, gamma, beta, 1 - this->decay_rate_, rm, rv, eps, m, v, this->act_desc_.desc, /* activation descriptor */ workspace_ptr, /* workspace pointer */ forward_workspace_size_, /* workspace size */ reserve_ptr, /* reserve space pointer */ reserve_size_ /* reserve space size */ )); } template <class T> void FusedBatchNormalizationCudaCudnn<T>::forward_impl( const Variables &inputs, const Variables &outputs) { fused_batch_norm_forward(inputs, outputs, true /* update_inputs */); } template <class T> void FusedBatchNormalizationCudaCudnn<T>::recompute_impl( const Variables &inputs, const Variables &outputs) { fused_batch_norm_forward(inputs, outputs, false /* update_inputs */); } template <class T> void FusedBatchNormalizationCudaCudnn<T>::backward_impl( const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { NBLA_CHECK(this->batch_stat_, error_code::runtime, "If batch_stat is false, this function should not be called."); cuda_set_device(std::stoi(this->ctx_.device_id)); if (!(propagate_down[0] || propagate_down[1] || propagate_down[2] || (inputs.size() == 6 && propagate_down[5]))) { return; } // Check whether it outputs batch mean/var. Variable *batch_mean = &this->mean_; Variable *batch_var = &this->var_; // Common inputs wrt. gradient. const Tw *dy = outputs[0]->get_grad_pointer<Tw>(this->ctx_); const Tw *y = outputs[0]->get_data_pointer<Tw>(this->ctx_); const void *m = batch_mean->data()->get(DRV_BN_T(), this->ctx_)->const_pointer(); const void *v = batch_var->data()->get(DRV_BN_T(), this->ctx_)->const_pointer(); const Tw *x = inputs[0]->get_data_pointer<Tw>(this->ctx_); auto a_data = get_cudnn_scalar_arg<T>(propagate_down[0] ? 1 : 0); auto b_data = get_cudnn_scalar_arg<T>(accum[0] && propagate_down[0] ? 1 : 0); auto a_param = get_cudnn_scalar_arg<T>(propagate_down[1] || propagate_down[2] ? 
1 : 0); auto b_param = a_param; if (!(accum[1] || accum[2])) { b_param = 0; } size_t prop_down_workspace_size = 0; if (!propagate_down[0]) { prop_down_workspace_size = std::max( prop_down_workspace_size, inputs[0]->size() * sizeof_dtype(DRV_BN_T())); } if (!propagate_down[1] || !propagate_down[2]) { prop_down_workspace_size = std::max( prop_down_workspace_size, inputs[1]->size() * sizeof_dtype(DRV_BN_T())); } void *prop_down_buf = nullptr; NdArray prop_down_workspace; if (prop_down_workspace_size) { prop_down_workspace.reshape({static_cast<Size_t>(prop_down_workspace_size)}, true); prop_down_buf = prop_down_workspace.cast(dtypes::BYTE, this->ctx_, true) ->pointer<void>(); } Tw *dx = propagate_down[0] ? inputs[0]->cast_grad_and_get_pointer<Tw>(this->ctx_, !accum[0]) : (Tw *)prop_down_buf; const void *beta = inputs[1]->data()->get(DRV_BN_T(), this->ctx_)->const_pointer(); const void *gamma = inputs[2]->data()->get(DRV_BN_T(), this->ctx_)->const_pointer(); // Specify write only flag to prevent unnecessary memset. const bool param_diff_write = b_param == 0; void *db = propagate_down[1] ? inputs[1] ->grad() ->cast(DRV_BN_T(), this->ctx_, param_diff_write) ->pointer() : prop_down_buf; void *dg = propagate_down[2] ? inputs[2] ->grad() ->cast(DRV_BN_T(), this->ctx_, param_diff_write) ->pointer() : prop_down_buf; // Get buffers. NdArray workspace(Shape_t{(Size_t)backward_workspace_size_}); NBLA_CHECK(reserve_, error_code::value, "Forward is not called."); void *workspace_ptr = workspace.cast(DRV_BN_T(), this->ctx_, true)->pointer(); void *reserve_ptr = reserve_->cast(DRV_BN_T(), this->ctx_, false /* rw access */)->pointer(); void *dz = (inputs.size() == 6 && propagate_down[5]) ? inputs[5]->cast_grad_and_get_pointer<Tw>(this->ctx_, !accum[5]) : nullptr; // Just garbage NdArray prop_down_dz_buf(inputs[0]->shape()); if (inputs.size() == 6 && !propagate_down[5]) { dz = prop_down_dz_buf.cast(DRV_BN_T(), this->ctx_, true)->pointer(); } #if defined(WORKAROUND_FOR_BUG_OPS_BN_ACTIVATION) if (dz == nullptr) { dz = prop_down_dz_buf.cast(DRV_BN_T(), this->ctx_, true)->pointer(); } #endif // Execute backward. double eps = std::max((double)this->eps_, CUDNN_BN_MIN_EPSILON); NBLA_CUDNN_CHECK(cudnnBatchNormalizationBackwardEx( this->cudnn_handle_, this->mode_, this->ops_, &a_data, &b_data, &a_param, &b_param, input_desc_.desc, x, /* x */ output_desc_.desc, y, /* y */ output_desc_.desc, dy, /* dy */ z_desc_.desc, dz, /* dz */ input_desc_.desc, dx, /* dx */ this->bn_scale_bias_mean_var_desc_.desc, gamma, beta, dg, db, eps, m, v, this->act_desc_.desc, /* activation descriptor */ workspace_ptr, /* workspace pointer */ backward_workspace_size_, /* workspace size */ reserve_ptr, /* reserve space pointer */ reserve_size_ /* reserve space size */ )); // Clear reserved buffer for backward reserve_ = nullptr; } #endif } // namespace nbla
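// --- Hedged sketch (not part of the original sources) -----------------------
// Reference for what the fused cuDNN call above computes in training mode
// (batch norm -> optional residual add -> ReLU) for NHWC layout, written on
// the host. The shapes, the `decay` parameter, and the running-statistics
// update follow my reading of the composite fallback; treat this as an
// illustration of the math, not the library's implementation.
#include <cmath>
#include <vector>

static void fused_bn_add_relu_reference(const std::vector<float>& x,             // N*H*W*C input
                                        const float* gamma, const float* beta,   // C
                                        float* running_mean, float* running_var, // C, updated in place
                                        const float* z,                          // optional residual (N*H*W*C) or nullptr
                                        int N, int H, int W, int C,
                                        float eps, float decay,
                                        std::vector<float>& y) {
    const int spatial = N * H * W;
    y.resize(x.size());
    for (int c = 0; c < C; ++c) {
        double mean = 0.0, var = 0.0;
        for (int i = 0; i < spatial; ++i) mean += x[i * C + c];
        mean /= spatial;
        for (int i = 0; i < spatial; ++i) {
            const double d = x[i * C + c] - mean;
            var += d * d;
        }
        var /= spatial;                                            // batch variance
        running_mean[c] = decay * running_mean[c] + (1.f - decay) * (float)mean;
        running_var[c]  = decay * running_var[c]  + (1.f - decay) * (float)var;
        const float inv_std = 1.f / std::sqrt((float)var + eps);
        for (int i = 0; i < spatial; ++i) {
            float v = gamma[c] * (x[i * C + c] - (float)mean) * inv_std + beta[c];
            if (z) v += z[i * C + c];                              // BN_ADD_ACTIVATION path
            y[i * C + c] = v > 0.f ? v : 0.f;                      // ReLU
        }
    }
}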
using simtbx::nanoBragg::shapetype; using simtbx::nanoBragg::hklParams; using simtbx::nanoBragg::SQUARE; using simtbx::nanoBragg::ROUND; using simtbx::nanoBragg::GAUSS; using simtbx::nanoBragg::GAUSS_ARGCHK; using simtbx::nanoBragg::TOPHAT; static void CheckCudaErrorAux(const char *, unsigned, const char *, cudaError_t); #define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value) #define THREADS_PER_BLOCK_X 128 #define THREADS_PER_BLOCK_Y 1 #define THREADS_PER_BLOCK_TOTAL (THREADS_PER_BLOCK_X * THREADS_PER_BLOCK_Y) #define VECTOR_SIZE 4 /** * Check the return value of the CUDA runtime API call and exit * the application if the call has failed. */ static void CheckCudaErrorAux(const char *file, unsigned line, const char *statement, cudaError_t err) { if (err == cudaSuccess) return; std::cerr << statement << " returned " << cudaGetErrorString(err) << "(" << err << ") at " << file << ":" << line << std::endl; exit(1); } static cudaError_t cudaMemcpyVectorDoubleToDevice(CUDAREAL *dst, double *src, size_t vector_items) { CUDAREAL * temp = new CUDAREAL[vector_items]; for (size_t i = 0; i < vector_items; i++) { temp[i] = src[i]; } cudaError_t ret = cudaMemcpy(dst, temp, sizeof(*dst) * vector_items, cudaMemcpyHostToDevice); delete temp; return ret; } /* make a unit vector pointing in same direction and report magnitude (both args can be same vector) */ double cpu_unitize(double *vector, double *new_unit_vector); double cpu_unitize(double * vector, double * new_unit_vector) { double v1 = vector[1]; double v2 = vector[2]; double v3 = vector[3]; double mag = sqrt(v1 * v1 + v2 * v2 + v3 * v3); if (mag != 0.0) { /* normalize it */ new_unit_vector[0] = mag; new_unit_vector[1] = v1 / mag; new_unit_vector[2] = v2 / mag; new_unit_vector[3] = v3 / mag; } else { /* can't normalize, report zero vector */ new_unit_vector[0] = 0.0; new_unit_vector[1] = 0.0; new_unit_vector[2] = 0.0; new_unit_vector[3] = 0.0; } return mag; } __global__ void nanoBraggSpotsInitCUDAKernel(int spixels, int fpixesl, float * floatimage, float * omega_reduction, float * max_I_x_reduction, float * max_I_y_reduction, bool * rangemap); __global__ void nanoBraggSpotsCUDAKernel(int spixels, int fpixels, int roi_xmin, int roi_xmax, int roi_ymin, int roi_ymax, int oversample, int point_pixel, CUDAREAL pixel_size, CUDAREAL subpixel_size, int steps, CUDAREAL detector_thickstep, int detector_thicksteps, CUDAREAL detector_thick, CUDAREAL detector_mu, const CUDAREAL * __restrict__ sdet_vector, const CUDAREAL * __restrict__ fdet_vector, const CUDAREAL * __restrict__ odet_vector, const CUDAREAL * __restrict__ pix0_vector, int curved_detector, CUDAREAL distance, CUDAREAL close_distance, const CUDAREAL * __restrict__ beam_vector, CUDAREAL Xbeam, CUDAREAL Ybeam, CUDAREAL dmin, CUDAREAL phi0, CUDAREAL phistep, int phisteps, const CUDAREAL * __restrict__ spindle_vector, int sources, const CUDAREAL * __restrict__ source_X, const CUDAREAL * __restrict__ source_Y, const CUDAREAL * __restrict__ source_Z, const CUDAREAL * __restrict__ source_I, const CUDAREAL * __restrict__ source_lambda, const CUDAREAL * __restrict__ a0, const CUDAREAL * __restrict__ b0, const CUDAREAL * __restrict c0, shapetype xtal_shape, CUDAREAL mosaic_spread, int mosaic_domains, const CUDAREAL * __restrict__ mosaic_umats, CUDAREAL Na, CUDAREAL Nb, CUDAREAL Nc, CUDAREAL V_cell, CUDAREAL water_size, CUDAREAL water_F, CUDAREAL water_MW, CUDAREAL r_e_sqr, CUDAREAL fluence, CUDAREAL Avogadro, CUDAREAL spot_scale, int integral_form, CUDAREAL default_F, int 
interpolate, const CUDAREAL * __restrict__ Fhkl, const hklParams * __restrict__ Fhklparams, int nopolar, const CUDAREAL * __restrict__ polar_vector, CUDAREAL polarization, CUDAREAL fudge, const int unsigned short * __restrict__ maskimage, float * floatimage /*out*/, float * omega_reduction/*out*/, float * max_I_x_reduction/*out*/, float * max_I_y_reduction /*out*/, bool * rangemap); extern "C" void nanoBraggSpotsCUDA(int deviceId, int spixels, int fpixels, int roi_xmin, int roi_xmax, int roi_ymin, int roi_ymax, int oversample, int point_pixel, double pixel_size, double subpixel_size, int steps, double detector_thickstep, int detector_thicksteps, double detector_thick, double detector_mu, double sdet_vector[4], double fdet_vector[4], double odet_vector[4], double pix0_vector[4], int curved_detector, double distance, double close_distance, double beam_vector[4], double Xbeam, double Ybeam, double dmin, double phi0, double phistep, int phisteps, double spindle_vector[4], int sources, double *source_X, double *source_Y, double * source_Z, double * source_I, double * source_lambda, double a0[4], double b0[4], double c0[4], shapetype xtal_shape, double mosaic_spread, int mosaic_domains, double * mosaic_umats, double Na, double Nb, double Nc, double V_cell, double water_size, double water_F, double water_MW, double r_e_sqr, double fluence, double Avogadro, int integral_form, double default_F, int interpolate, double *** Fhkl, int h_min, int h_max, int h_range, int k_min, int k_max, int k_range, int l_min, int l_max, int l_range, int hkls, int nopolar, double polar_vector[4], double polarization, double fudge, int unsigned short * maskimage, float * floatimage /*out*/, double * omega_sum/*out*/, int * sumn /*out*/, double * sum /*out*/, double * sumsqr /*out*/, double * max_I/*out*/, double * max_I_x/*out*/, double * max_I_y /*out*/, double spot_scale) { int total_pixels = spixels * fpixels; cudaSetDevice(deviceId); /*allocate and zero reductions */ bool * rangemap = (bool*) calloc(total_pixels, sizeof(bool)); float * omega_reduction = (float*) calloc(total_pixels, sizeof(float)); float * max_I_x_reduction = (float*) calloc(total_pixels, sizeof(float)); float * max_I_y_reduction = (float*) calloc(total_pixels, sizeof(float)); /* clear memory (TODO: consider this being optional) */ memset(floatimage, 0, sizeof(typeof(*floatimage)) * total_pixels); /*create transfer arguments to device space*/ int cu_spixels = spixels, cu_fpixels = fpixels; int cu_roi_xmin = roi_xmin, cu_roi_xmax = roi_xmax, cu_roi_ymin = roi_ymin, cu_roi_ymax = roi_ymax; int cu_oversample = oversample; int cu_point_pixel = point_pixel; CUDAREAL cu_pixel_size = pixel_size, cu_subpixel_size = subpixel_size; int cu_steps = steps; CUDAREAL cu_detector_thickstep = detector_thickstep, cu_detector_thick = detector_thick, cu_detector_mu = detector_mu; int cu_detector_thicksteps = detector_thicksteps; int cu_curved_detector = curved_detector; CUDAREAL cu_distance = distance, cu_close_distance = close_distance; CUDAREAL cu_Xbeam = Xbeam, cu_Ybeam = Ybeam; CUDAREAL cu_dmin = dmin, cu_phi0 = phi0, cu_phistep = phistep; int cu_phisteps = phisteps; shapetype cu_xtal_shape = xtal_shape; int cu_sources = sources; CUDAREAL cu_mosaic_spread = mosaic_spread; int cu_mosaic_domains = mosaic_domains; CUDAREAL cu_Na = Na, cu_Nb = Nb, cu_Nc = Nc, cu_V_cell = V_cell, cu_water_size = water_size, cu_water_F = water_F, cu_water_MW = water_MW; CUDAREAL cu_r_e_sqr = r_e_sqr, cu_fluence = fluence, cu_Avogadro = Avogadro, cu_spot_scale = spot_scale; int 
cu_integral_form = integral_form; CUDAREAL cu_default_F = default_F; int cu_interpolate = interpolate; // int cu_h_min = h_min, cu_h_max = h_max, cu_h_range = h_range; // int cu_k_min = k_min, cu_k_max = k_max, cu_k_range = k_range; // int cu_l_min = l_min, cu_l_max = l_max, cu_l_range = l_range; // int cu_hkls = hkls; int cu_nopolar = nopolar; CUDAREAL cu_polarization = polarization, cu_fudge = fudge; hklParams FhklParams = { hkls, h_min, h_max, h_range, k_min, k_max, k_range, l_min, l_max, l_range }; hklParams * cu_FhklParams; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_FhklParams, sizeof(*cu_FhklParams))); CUDA_CHECK_RETURN(cudaMemcpy(cu_FhklParams, &FhklParams, sizeof(*cu_FhklParams), cudaMemcpyHostToDevice)); const int vector_length = 4; CUDAREAL * cu_sdet_vector; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_sdet_vector, sizeof(*cu_sdet_vector) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_sdet_vector, sdet_vector, vector_length)); CUDAREAL * cu_fdet_vector; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_fdet_vector, sizeof(*cu_fdet_vector) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_fdet_vector, fdet_vector, vector_length)); CUDAREAL * cu_odet_vector; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_odet_vector, sizeof(*cu_odet_vector) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_odet_vector, odet_vector, vector_length)); CUDAREAL * cu_pix0_vector; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_pix0_vector, sizeof(*cu_pix0_vector) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_pix0_vector, pix0_vector, vector_length)); CUDAREAL * cu_beam_vector; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_beam_vector, sizeof(*cu_beam_vector) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_beam_vector, beam_vector, vector_length)); CUDAREAL * cu_spindle_vector; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_spindle_vector, sizeof(*cu_spindle_vector) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_spindle_vector, spindle_vector, vector_length)); CUDAREAL * cu_a0; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_a0, sizeof(*cu_a0) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_a0, a0, vector_length)); CUDAREAL * cu_b0; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_b0, sizeof(*cu_b0) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_b0, b0, vector_length)); CUDAREAL * cu_c0; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_c0, sizeof(*cu_c0) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_c0, c0, vector_length)); // Unitize polar vector before sending it to the GPU. Optimization do it only once here rather than multiple time per pixel in the GPU. 
CUDAREAL * cu_polar_vector; double polar_vector_unitized[4]; cpu_unitize(polar_vector, polar_vector_unitized); CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_polar_vector, sizeof(*cu_polar_vector) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_polar_vector, polar_vector_unitized, vector_length)); CUDAREAL * cu_source_X = NULL; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_source_X, sizeof(*cu_source_X) * sources)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_source_X, source_X, sources)); CUDAREAL * cu_source_Y = NULL; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_source_Y, sizeof(*cu_source_Y) * sources)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_source_Y, source_Y, sources)); CUDAREAL * cu_source_Z = NULL; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_source_Z, sizeof(*cu_source_Z) * sources)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_source_Z, source_Z, sources)); CUDAREAL * cu_source_I = NULL; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_source_I, sizeof(*cu_source_I) * sources)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_source_I, source_I, sources)); CUDAREAL * cu_source_lambda = NULL; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_source_lambda, sizeof(*cu_source_lambda) * sources)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_source_lambda, source_lambda, sources)); CUDAREAL * cu_mosaic_umats = NULL; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_mosaic_umats, sizeof(*cu_mosaic_umats) * mosaic_domains * 9)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_mosaic_umats, mosaic_umats, mosaic_domains * 9)); float * cu_floatimage = NULL; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_floatimage, sizeof(*cu_floatimage) * total_pixels)); CUDA_CHECK_RETURN(cudaMemcpy(cu_floatimage, floatimage, sizeof(*cu_floatimage) * total_pixels, cudaMemcpyHostToDevice)); float * cu_omega_reduction = NULL; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_omega_reduction, sizeof(*cu_omega_reduction) * total_pixels)); CUDA_CHECK_RETURN(cudaMemcpy(cu_omega_reduction, omega_reduction, sizeof(*cu_omega_reduction) * total_pixels, cudaMemcpyHostToDevice)); float * cu_max_I_x_reduction = NULL; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_max_I_x_reduction, sizeof(*cu_max_I_x_reduction) * total_pixels)); CUDA_CHECK_RETURN(cudaMemcpy(cu_max_I_x_reduction, max_I_x_reduction, sizeof(*cu_max_I_x_reduction) * total_pixels, cudaMemcpyHostToDevice)); float * cu_max_I_y_reduction = NULL; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_max_I_y_reduction, sizeof(*cu_max_I_y_reduction) * total_pixels)); CUDA_CHECK_RETURN(cudaMemcpy(cu_max_I_y_reduction, max_I_y_reduction, sizeof(*cu_max_I_y_reduction) * total_pixels, cudaMemcpyHostToDevice)); bool * cu_rangemap = NULL; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_rangemap, sizeof(*cu_rangemap) * total_pixels)); CUDA_CHECK_RETURN(cudaMemcpy(cu_rangemap, rangemap, sizeof(*cu_rangemap) * total_pixels, cudaMemcpyHostToDevice)); int unsigned short * cu_maskimage = NULL; if (maskimage != NULL) { CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_maskimage, sizeof(*cu_maskimage) * total_pixels)); CUDA_CHECK_RETURN(cudaMemcpy(cu_maskimage, maskimage, sizeof(*cu_maskimage) * total_pixels, cudaMemcpyHostToDevice)); } int hklsize = h_range * k_range * l_range; CUDAREAL * FhklLinear = (CUDAREAL*) calloc(hklsize, sizeof(*FhklLinear)); for (int h = 0; h < h_range; h++) { for (int k = 0; k < k_range; k++) { // memcpy(FhklLinear + (h * k_range * l_range + k * l_range), Fhkl[h][k], sizeof(*FhklLinear) * l_range); for (int l = 0; l < l_range; l++) { // convert Fhkl 
double to CUDAREAL FhklLinear[h * k_range * l_range + k * l_range + l] = Fhkl[h][k][l]; } } } CUDAREAL * cu_Fhkl = NULL; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_Fhkl, sizeof(*cu_Fhkl) * hklsize)); CUDA_CHECK_RETURN(cudaMemcpy(cu_Fhkl, FhklLinear, sizeof(*cu_Fhkl) * hklsize, cudaMemcpyHostToDevice)); free(FhklLinear); //int deviceId = 0; CUDA_CHECK_RETURN(cudaGetDevice(&deviceId)); cudaDeviceProp deviceProps = { 0 }; CUDA_CHECK_RETURN(cudaGetDeviceProperties(&deviceProps, deviceId)); int smCount = deviceProps.multiProcessorCount; // CUDA_CHECK_RETURN(cudaFuncSetCacheConfig(nanoBraggSpotsCUDAKernel, cudaFuncCachePreferShared)); // CUDA_CHECK_RETURN(cudaFuncSetCacheConfig(nanoBraggSpotsCUDAKernel, cudaFuncCachePreferL1)); dim3 threadsPerBlock(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y); // dim3 numBlocks((spixels - 1) / threadsPerBlock.x + 1, (fpixels - 1) / threadsPerBlock.y + 1); dim3 numBlocks(smCount * 8, 1); // initialize the device memory within a kernel. // nanoBraggSpotsInitCUDAKernel<<<numBlocks, threadsPerBlock>>>(cu_spixels, cu_fpixels, cu_floatimage, cu_omega_reduction, cu_max_I_x_reduction, cu_max_I_y_reduction, cu_rangemap); // CUDA_CHECK_RETURN(cudaPeekAtLastError()); // CUDA_CHECK_RETURN(cudaDeviceSynchronize()); nanoBraggSpotsCUDAKernel<<<numBlocks, threadsPerBlock>>>(cu_spixels, cu_fpixels, cu_roi_xmin, cu_roi_xmax, cu_roi_ymin, cu_roi_ymax, cu_oversample, cu_point_pixel, cu_pixel_size, cu_subpixel_size, cu_steps, cu_detector_thickstep, cu_detector_thicksteps, cu_detector_thick, cu_detector_mu, cu_sdet_vector, cu_fdet_vector, cu_odet_vector, cu_pix0_vector, cu_curved_detector, cu_distance, cu_close_distance, cu_beam_vector, cu_Xbeam, cu_Ybeam, cu_dmin, cu_phi0, cu_phistep, cu_phisteps, cu_spindle_vector, cu_sources, cu_source_X, cu_source_Y, cu_source_Z, cu_source_I, cu_source_lambda, cu_a0, cu_b0, cu_c0, cu_xtal_shape, cu_mosaic_spread, cu_mosaic_domains, cu_mosaic_umats, cu_Na, cu_Nb, cu_Nc, cu_V_cell, cu_water_size, cu_water_F, cu_water_MW, cu_r_e_sqr, cu_fluence, cu_Avogadro, cu_spot_scale, cu_integral_form, cu_default_F, cu_interpolate, cu_Fhkl, cu_FhklParams, cu_nopolar, cu_polar_vector, cu_polarization, cu_fudge, cu_maskimage, cu_floatimage /*out*/, cu_omega_reduction/*out*/, cu_max_I_x_reduction/*out*/, cu_max_I_y_reduction /*out*/, cu_rangemap /*out*/); CUDA_CHECK_RETURN(cudaPeekAtLastError()); CUDA_CHECK_RETURN(cudaDeviceSynchronize()); CUDA_CHECK_RETURN(cudaMemcpy(floatimage, cu_floatimage, sizeof(*cu_floatimage) * total_pixels, cudaMemcpyDeviceToHost)); CUDA_CHECK_RETURN(cudaMemcpy(omega_reduction, cu_omega_reduction, sizeof(*cu_omega_reduction) * total_pixels, cudaMemcpyDeviceToHost)); CUDA_CHECK_RETURN(cudaMemcpy(max_I_x_reduction, cu_max_I_x_reduction, sizeof(*cu_max_I_x_reduction) * total_pixels, cudaMemcpyDeviceToHost)); CUDA_CHECK_RETURN(cudaMemcpy(max_I_y_reduction, cu_max_I_y_reduction, sizeof(*cu_max_I_y_reduction) * total_pixels, cudaMemcpyDeviceToHost)); CUDA_CHECK_RETURN(cudaMemcpy(rangemap, cu_rangemap, sizeof(*cu_rangemap) * total_pixels, cudaMemcpyDeviceToHost)); CUDA_CHECK_RETURN(cudaFree(cu_sdet_vector)); CUDA_CHECK_RETURN(cudaFree(cu_fdet_vector)); CUDA_CHECK_RETURN(cudaFree(cu_odet_vector)); CUDA_CHECK_RETURN(cudaFree(cu_pix0_vector)); CUDA_CHECK_RETURN(cudaFree(cu_beam_vector)); CUDA_CHECK_RETURN(cudaFree(cu_spindle_vector)); CUDA_CHECK_RETURN(cudaFree(cu_polar_vector)); CUDA_CHECK_RETURN(cudaFree(cu_a0)); CUDA_CHECK_RETURN(cudaFree(cu_b0)); CUDA_CHECK_RETURN(cudaFree(cu_c0)); CUDA_CHECK_RETURN(cudaFree(cu_source_X)); 
CUDA_CHECK_RETURN(cudaFree(cu_source_Y)); CUDA_CHECK_RETURN(cudaFree(cu_source_Z)); CUDA_CHECK_RETURN(cudaFree(cu_source_I)); CUDA_CHECK_RETURN(cudaFree(cu_source_lambda)); CUDA_CHECK_RETURN(cudaFree(cu_FhklParams)); CUDA_CHECK_RETURN(cudaFree(cu_mosaic_umats)); CUDA_CHECK_RETURN(cudaFree(cu_floatimage)); CUDA_CHECK_RETURN(cudaFree(cu_omega_reduction)); CUDA_CHECK_RETURN(cudaFree(cu_max_I_x_reduction)); CUDA_CHECK_RETURN(cudaFree(cu_max_I_y_reduction)); CUDA_CHECK_RETURN(cudaFree(cu_maskimage)); CUDA_CHECK_RETURN(cudaFree(cu_rangemap)); CUDA_CHECK_RETURN(cudaFree(cu_Fhkl)); *max_I = 0; *max_I_x = 0; *max_I_y = 0; *sum = 0.0; *sumsqr = 0.0; *sumn = 0; *omega_sum = 0.0; for (int i = 0; i < total_pixels; i++) { if (!rangemap[i]) { continue; } float pixel = floatimage[i]; if (pixel > (double) *max_I) { *max_I = pixel; *max_I_x = max_I_x_reduction[i]; *max_I_y = max_I_y_reduction[i]; } *sum += pixel; *sumsqr += pixel * pixel; ++(*sumn); *omega_sum += omega_reduction[i]; } free(rangemap); free(omega_reduction); free(max_I_x_reduction); free(max_I_y_reduction); } /* cubic spline interpolation functions */ __device__ static void polin2(CUDAREAL *x1a, CUDAREAL *x2a, CUDAREAL ya[4][4], CUDAREAL x1, CUDAREAL x2, CUDAREAL *y); __device__ static void polin3(CUDAREAL *x1a, CUDAREAL *x2a, CUDAREAL *x3a, CUDAREAL ya[4][4][4], CUDAREAL x1, CUDAREAL x2, CUDAREAL x3, CUDAREAL *y); /* rotate a 3-vector about a unit vector axis */ __device__ static CUDAREAL *rotate_axis(const CUDAREAL * __restrict__ v, CUDAREAL *newv, const CUDAREAL * __restrict__ axis, const CUDAREAL phi); /* scale the magnitude of a vector */ __device__ static CUDAREAL vector_scale(CUDAREAL *vector, CUDAREAL *new_vector, CUDAREAL scale); /* Fourier transform of a truncated lattice */ __device__ static CUDAREAL sincg(CUDAREAL x, CUDAREAL N); //__device__ static CUDAREAL sincgrad(CUDAREAL x, CUDAREAL N); /* Fourier transform of a sphere */ __device__ static CUDAREAL sinc3(CUDAREAL x); __device__ __inline__ static int flatten3dindex(int x, int y, int z, int x_range, int y_range, int z_range); __device__ __inline__ CUDAREAL quickFcell_ldg(int hkls, int h_max, int h_min, int k_max, int k_min, int l_min, int l_max, int h0, int k0, int l0, int h_range, int k_range, int l_range, CUDAREAL defaultF, const CUDAREAL * __restrict__ Fhkl); __global__ void nanoBraggSpotsInitCUDAKernel(int spixels, int fpixels, float * floatimage, float * omega_reduction, float * max_I_x_reduction, float * max_I_y_reduction, bool * rangemap) { const int total_pixels = spixels * fpixels; const int fstride = gridDim.x * blockDim.x; const int sstride = gridDim.y * blockDim.y; const int stride = fstride * sstride; for (int pixIdx = (blockDim.y * blockIdx.y + threadIdx.y) * fstride + blockDim.x * blockIdx.x + threadIdx.x; pixIdx < total_pixels; pixIdx += stride) { const int fpixel = pixIdx % fpixels; const int spixel = pixIdx / fpixels; /* position in pixel array */ int j = spixel * fpixels + fpixel; if (j < total_pixels) { floatimage[j] = 0; omega_reduction[j] = 0; max_I_x_reduction[j] = 0; max_I_y_reduction[j] = 0; rangemap[j] = false; } } } __global__ void nanoBraggSpotsCUDAKernel(int spixels, int fpixels, int roi_xmin, int roi_xmax, int roi_ymin, int roi_ymax, int oversample, int point_pixel, CUDAREAL pixel_size, CUDAREAL subpixel_size, int steps, CUDAREAL detector_thickstep, int detector_thicksteps, CUDAREAL detector_thick, CUDAREAL detector_mu, const CUDAREAL * __restrict__ sdet_vector, const CUDAREAL * __restrict__ fdet_vector, const CUDAREAL * __restrict__ odet_vector, 
const CUDAREAL * __restrict__ pix0_vector, int curved_detector, CUDAREAL distance, CUDAREAL close_distance, const CUDAREAL * __restrict__ beam_vector, CUDAREAL Xbeam, CUDAREAL Ybeam, CUDAREAL dmin, CUDAREAL phi0, CUDAREAL phistep, int phisteps, const CUDAREAL * __restrict__ spindle_vector, int sources, const CUDAREAL * __restrict__ source_X, const CUDAREAL * __restrict__ source_Y, const CUDAREAL * __restrict__ source_Z, const CUDAREAL * __restrict__ source_I, const CUDAREAL * __restrict__ source_lambda, const CUDAREAL * __restrict__ a0, const CUDAREAL * __restrict__ b0, const CUDAREAL * __restrict c0, shapetype xtal_shape, CUDAREAL mosaic_spread, int mosaic_domains, const CUDAREAL * __restrict__ mosaic_umats, CUDAREAL Na, CUDAREAL Nb, CUDAREAL Nc, CUDAREAL V_cell, CUDAREAL water_size, CUDAREAL water_F, CUDAREAL water_MW, CUDAREAL r_e_sqr, CUDAREAL fluence, CUDAREAL Avogadro, CUDAREAL spot_scale, int integral_form, CUDAREAL default_F, int interpolate, const CUDAREAL * __restrict__ Fhkl, const hklParams * __restrict__ FhklParams, int nopolar, const CUDAREAL * __restrict__ polar_vector, CUDAREAL polarization, CUDAREAL fudge, const int unsigned short * __restrict__ maskimage, float * floatimage /*out*/, float * omega_reduction/*out*/, float * max_I_x_reduction/*out*/, float * max_I_y_reduction /*out*/, bool * rangemap) { __shared__ CUDAREAL s_dmin; __shared__ bool s_nopolar; __shared__ int s_phisteps; __shared__ CUDAREAL s_phi0, s_phistep; __shared__ int s_mosaic_domains; __shared__ CUDAREAL s_mosaic_spread; __shared__ shapetype s_xtal_shape; __shared__ CUDAREAL s_Na, s_Nb, s_Nc; __shared__ bool s_interpolate; __shared__ int s_hkls, s_h_max, s_h_min, s_k_max, s_k_min, s_l_max, s_l_min, s_h_range, s_k_range, s_l_range; if (threadIdx.x == 0 && threadIdx.y == 0) { s_dmin = dmin; s_nopolar = nopolar; s_phisteps = phisteps; s_phi0 = phi0; s_phistep = phistep; s_mosaic_domains = mosaic_domains; s_mosaic_spread = mosaic_spread; s_xtal_shape = xtal_shape; s_Na = Na; s_Nb = Nb; s_Nc = Nc; s_interpolate = interpolate; s_hkls = FhklParams->hkls; s_h_max = FhklParams->h_max; s_h_min = FhklParams->h_min; s_k_max = FhklParams->k_max; s_k_min = FhklParams->k_min; s_l_max = FhklParams->l_max; s_l_min = FhklParams->l_min; s_h_range = FhklParams->h_range; s_k_range = FhklParams->k_range; s_l_range = FhklParams->l_range; } __syncthreads(); const int total_pixels = spixels * fpixels; const int fstride = gridDim.x * blockDim.x; const int sstride = gridDim.y * blockDim.y; const int stride = fstride * sstride; // const int tidx = blockDim.x * threadIdx.y * +threadIdx.x; // __shared__ int sharedVectors[THREADS_PER_BLOCK_TOTAL + 1][1][9]; // __shared__ CUDAREAL sharedVectors[THREADS_PER_BLOCK_TOTAL + 1][1][VECTOR_SIZE]; // CUDAREAL * tmpVector1 = sharedVectors[tidx][0]; // CUDAREAL * tmpVector2 = sharedVectors[tidx][1]; /* add background from something amorphous */ CUDAREAL F_bg = water_F; CUDAREAL I_bg = F_bg * F_bg * r_e_sqr * fluence * water_size * water_size * water_size * 1e6 * Avogadro / water_MW; // hklParams[0] = h_min; // hklParams[1] = h_max; // hklParams[2] = h_range; // hklParams[3] = k_min; // hklParams[4] = k_max; // hklParams[5] = k_range; // hklParams[6] = l_min; // hklParams[7] = l_max; // hklParams[8] = l_range; for (int pixIdx = (blockDim.y * blockIdx.y + threadIdx.y) * fstride + blockDim.x * blockIdx.x + threadIdx.x; pixIdx < total_pixels; pixIdx += stride) { const int fpixel = pixIdx % fpixels; const int spixel = pixIdx / fpixels; /* allow for just one part of detector to be rendered */ if (fpixel 
< roi_xmin || fpixel > roi_xmax || spixel < roi_ymin || spixel > roi_ymax) { //ROI region of interest continue; } /* position in pixel array */ const int j = pixIdx; /* allow for the use of a mask */ if (maskimage != NULL) { /* skip any flagged pixels in the mask */ if (maskimage[j] == 0) { continue; } } /* reset photon count for this pixel */ CUDAREAL I = I_bg; CUDAREAL omega_sub_reduction = 0.0; CUDAREAL max_I_x_sub_reduction = 0.0; CUDAREAL max_I_y_sub_reduction = 0.0; CUDAREAL polar = 0.0; if (s_nopolar) { polar = 1.0; } /* add this now to avoid problems with skipping later */ // move this to the bottom to avoid accessing global device memory. floatimage[j] = I_bg; /* loop over sub-pixels */ int subS, subF; for (subS = 0; subS < oversample; ++subS) { // Y voxel for (subF = 0; subF < oversample; ++subF) { // X voxel /* absolute mm position on detector (relative to its origin) */ CUDAREAL Fdet = subpixel_size * (fpixel * oversample + subF) + subpixel_size / 2.0; // X voxel CUDAREAL Sdet = subpixel_size * (spixel * oversample + subS) + subpixel_size / 2.0; // Y voxel // Fdet = pixel_size*fpixel; // Sdet = pixel_size*spixel; max_I_x_sub_reduction = Fdet; max_I_y_sub_reduction = Sdet; int thick_tic; for (thick_tic = 0; thick_tic < detector_thicksteps; ++thick_tic) { /* assume "distance" is to the front of the detector sensor layer */ CUDAREAL Odet = thick_tic * detector_thickstep; // Z Orthagonal voxel. /* construct detector subpixel position in 3D space */ // pixel_X = distance; // pixel_Y = Sdet-Ybeam; // pixel_Z = Fdet-Xbeam; //CUDAREAL * pixel_pos = tmpVector1; CUDAREAL pixel_pos[4]; pixel_pos[1] = Fdet * __ldg(&fdet_vector[1]) + Sdet * __ldg(&sdet_vector[1]) + Odet * __ldg(&odet_vector[1]) + __ldg(&pix0_vector[1]); // X pixel_pos[2] = Fdet * __ldg(&fdet_vector[2]) + Sdet * __ldg(&sdet_vector[2]) + Odet * __ldg(&odet_vector[2]) + __ldg(&pix0_vector[2]); // X pixel_pos[3] = Fdet * __ldg(&fdet_vector[3]) + Sdet * __ldg(&sdet_vector[3]) + Odet * __ldg(&odet_vector[3]) + __ldg(&pix0_vector[3]); // X // pixel_pos[1] = Fdet * fdet_vector[1] + Sdet * sdet_vector[1] + Odet * odet_vector[1] + pix0_vector[1]; // X // pixel_pos[2] = Fdet * fdet_vector[2] + Sdet * sdet_vector[2] + Odet * odet_vector[2] + pix0_vector[2]; // Y // pixel_pos[3] = Fdet * fdet_vector[3] + Sdet * sdet_vector[3] + Odet * odet_vector[3] + pix0_vector[3]; // Z if (curved_detector) { /* construct detector pixel that is always "distance" from the sample */ CUDAREAL dbvector[4]; dbvector[1] = distance * beam_vector[1]; dbvector[2] = distance * beam_vector[2]; dbvector[3] = distance * beam_vector[3]; /* treat detector pixel coordinates as radians */ CUDAREAL newvector[] = { 0.0, 0.0, 0.0, 0.0 }; rotate_axis(dbvector, newvector, sdet_vector, pixel_pos[2] / distance); rotate_axis(newvector, pixel_pos, fdet_vector, pixel_pos[3] / distance); // rotate(vector,pixel_pos,0,pixel_pos[3]/distance,pixel_pos[2]/distance); } /* construct the diffracted-beam unit vector to this sub-pixel */ //CUDAREAL * diffracted = tmpVector2; CUDAREAL diffracted[4]; CUDAREAL airpath = unitize(pixel_pos, diffracted); /* solid angle subtended by a pixel: (pix/airpath)^2*cos(2theta) */ CUDAREAL omega_pixel = pixel_size * pixel_size / airpath / airpath * close_distance / airpath; /* option to turn off obliquity effect, inverse-square-law only */ if (point_pixel) { omega_pixel = 1.0 / airpath / airpath; } /* now calculate detector thickness effects */ CUDAREAL capture_fraction = 1.0; if (detector_thick > 0.0 && detector_mu> 0.0) { /* inverse of effective 
thickness increase */ CUDAREAL parallax = dot_product_ldg(odet_vector, diffracted); capture_fraction = exp(-thick_tic * detector_thickstep / detector_mu / parallax) - exp(-(thick_tic + 1) * detector_thickstep / detector_mu / parallax); } /* loop over sources now */ int source; for (source = 0; source < sources; ++source) { /* retrieve stuff from cache */ //CUDAREAL * incident = tmpVector1; CUDAREAL incident[4]; incident[1] = -__ldg(&source_X[source]); incident[2] = -__ldg(&source_Y[source]); incident[3] = -__ldg(&source_Z[source]); CUDAREAL lambda = __ldg(&source_lambda[source]); CUDAREAL source_fraction = __ldg(&source_I[source]); /* construct the incident beam unit vector while recovering source distance */ // TODO[Giles]: Optimization! We can unitize the source vectors before passing them in. unitize(incident, incident); // CUDAREAL source_path = unitize(incident, incident); // CUDAREAL source_path = norm3d(incident[1], incident[2], incident[3]); // CUDAREAL * d = tmpVector2; // d[0] = diffracted[0]; // d[1] = diffracted[1]; // d[2] = diffracted[2]; // d[3] = diffracted[3]; /* construct the scattering vector for this pixel */ // CUDAREAL * scattering = tmpVector1; CUDAREAL scattering[4]; scattering[1] = (diffracted[1] - incident[1]) / lambda; scattering[2] = (diffracted[2] - incident[2]) / lambda; scattering[3] = (diffracted[3] - incident[3]) / lambda; // CUDAREAL scattering[] = { 0.0, (diffracted[1] - incident[1]) / lambda, (diffracted[2] - incident[2]) / lambda, (diffracted[3] // - incident[3]) / lambda }; /* sin(theta)/lambda is half the scattering vector length */ // magnitude(scattering); // CUDAREAL stol = 0.5 * scattering[0]; CUDAREAL stol = 0.5 * norm3d(scattering[1], scattering[2], scattering[3]); /* rough cut to speed things up when we aren't using whole detector */ if (s_dmin > 0.0 && stol > 0.0) { if (s_dmin > 0.5 / stol) { continue; } } /* polarization factor */ if (!s_nopolar) { /* need to compute polarization factor */ polar = polarization_factor(polarization, incident, diffracted, polar_vector); } else { polar = 1.0; } /* sweep over phi angles */ for (int phi_tic = 0; phi_tic < s_phisteps; ++phi_tic) { CUDAREAL phi = s_phistep * phi_tic + s_phi0; // CUDAREAL ap[] = { 0.0, 0.0, 0.0, 0.0 }; // CUDAREAL bp[] = { 0.0, 0.0, 0.0, 0.0 }; // CUDAREAL cp[] = { 0.0, 0.0, 0.0, 0.0 }; CUDAREAL ap[4]; CUDAREAL bp[4]; CUDAREAL cp[4]; /* rotate about spindle if necessary */ rotate_axis_ldg(a0, ap, spindle_vector, phi); rotate_axis_ldg(b0, bp, spindle_vector, phi); rotate_axis_ldg(c0, cp, spindle_vector, phi); /* enumerate mosaic domains */ for (int mos_tic = 0; mos_tic < s_mosaic_domains; ++mos_tic) { /* apply mosaic rotation after phi rotation */ CUDAREAL a[4]; CUDAREAL b[4]; CUDAREAL c[4]; if (s_mosaic_spread > 0.0) { rotate_umat_ldg(ap, a, &mosaic_umats[mos_tic * 9]); rotate_umat_ldg(bp, b, &mosaic_umats[mos_tic * 9]); rotate_umat_ldg(cp, c, &mosaic_umats[mos_tic * 9]); } else { a[1] = ap[1]; a[2] = ap[2]; a[3] = ap[3]; b[1] = bp[1]; b[2] = bp[2]; b[3] = bp[3]; c[1] = cp[1]; c[2] = cp[2]; c[3] = cp[3]; } // printf("%d %f %f %f\n",mos_tic,mosaic_umats[mos_tic*9+0],mosaic_umats[mos_tic*9+1],mosaic_umats[mos_tic*9+2]); // printf("%d %f %f %f\n",mos_tic,mosaic_umats[mos_tic*9+3],mosaic_umats[mos_tic*9+4],mosaic_umats[mos_tic*9+5]); // printf("%d %f %f %f\n",mos_tic,mosaic_umats[mos_tic*9+6],mosaic_umats[mos_tic*9+7],mosaic_umats[mos_tic*9+8]); /* construct fractional Miller indicies */ // CUDAREAL * scat_s = tmpVector2; // scat_s[0] = scattering[0]; // scat_s[1] = scattering[1]; // 
scat_s[2] = scattering[2]; // scat_s[3] = scattering[3]; // // CUDAREAL h = dot_product(a, scat_s); // CUDAREAL k = dot_product(b, scat_s); // CUDAREAL l = dot_product(c, scat_s); CUDAREAL h = dot_product(a, scattering); CUDAREAL k = dot_product(b, scattering); CUDAREAL l = dot_product(c, scattering); /* round off to nearest whole index */ int h0 = ceil(h - 0.5); int k0 = ceil(k - 0.5); int l0 = ceil(l - 0.5); /* structure factor of the lattice (paralelpiped crystal) F_latt = sin(M_PI*Na*h)*sin(M_PI*Nb*k)*sin(M_PI*Nc*l)/sin(M_PI*h)/sin(M_PI*k)/sin(M_PI*l); */ CUDAREAL F_latt = 1.0; // Shape transform for the crystal. CUDAREAL hrad_sqr = 0.0; if (s_xtal_shape == SQUARE) { /* xtal is a paralelpiped */ if (Na > 1) { // F_latt *= sincgrad(h, s_Na); F_latt *= sincg(M_PI * h, s_Na); } if (Nb > 1) { // F_latt *= sincgrad(k, s_Nb); F_latt *= sincg(M_PI * k, s_Nb); } if (Nc > 1) { // F_latt *= sincgrad(l, s_Nc); F_latt *= sincg(M_PI * l, s_Nc); } } else { /* handy radius in reciprocal space, squared */ hrad_sqr = (h - h0) * (h - h0) * Na * Na + (k - k0) * (k - k0) * Nb * Nb + (l - l0) * (l - l0) * Nc * Nc; } if (s_xtal_shape == ROUND) { /* use sinc3 for elliptical xtal shape, correcting for sqrt of volume ratio between cube and sphere */ F_latt = Na * Nb * Nc * 0.723601254558268 * sinc3(M_PI * sqrt(hrad_sqr * fudge)); } if (s_xtal_shape == GAUSS) { /* fudge the radius so that volume and FWHM are similar to square_xtal spots */ F_latt = Na * Nb * Nc * exp(-(hrad_sqr / 0.63 * fudge)); } if (s_xtal_shape == GAUSS_ARGCHK) { /* fudge the radius so that volume and FWHM are similar to square_xtal spots */ double my_arg = hrad_sqr / 0.63 * fudge; if (my_arg<35.){ F_latt = Na * Nb * Nc * exp(-(my_arg)); } else { F_latt = 0.; } // warps coalesce when blocks of 32 pixels have no Bragg signal } if (s_xtal_shape == TOPHAT) { /* make a flat-top spot of same height and volume as square_xtal spots */ F_latt = Na * Nb * Nc * (hrad_sqr * fudge < 0.3969); } /* no need to go further if result will be zero? */ if (F_latt == 0.0 && water_size == 0.0) continue; /* find nearest point on Ewald sphere surface? 
*/ if (integral_form) { /* need to calculate reciprocal matrix */ /* various cross products */ CUDAREAL a_cross_b[] = { 0.0, 0.0, 0.0, 0.0 }; CUDAREAL b_cross_c[] = { 0.0, 0.0, 0.0, 0.0 }; CUDAREAL c_cross_a[] = { 0.0, 0.0, 0.0, 0.0 }; cross_product(a, b, a_cross_b); cross_product(b, c, b_cross_c); cross_product(c, a, c_cross_a); /* new reciprocal-space cell vectors */ CUDAREAL a_star[] = { 0.0, 0.0, 0.0, 0.0 }; CUDAREAL b_star[] = { 0.0, 0.0, 0.0, 0.0 }; CUDAREAL c_star[] = { 0.0, 0.0, 0.0, 0.0 }; vector_scale(b_cross_c, a_star, 1e20 / V_cell); vector_scale(c_cross_a, b_star, 1e20 / V_cell); vector_scale(a_cross_b, c_star, 1e20 / V_cell); /* reciprocal-space coordinates of nearest relp */ CUDAREAL relp[] = { 0.0, 0.0, 0.0, 0.0 }; relp[1] = h0 * a_star[1] + k0 * b_star[1] + l0 * c_star[1]; relp[2] = h0 * a_star[2] + k0 * b_star[2] + l0 * c_star[2]; relp[3] = h0 * a_star[3] + k0 * b_star[3] + l0 * c_star[3]; // d_star = magnitude(relp) /* reciprocal-space coordinates of center of Ewald sphere */ CUDAREAL Ewald0[] = { 0.0, 0.0, 0.0, 0.0 }; Ewald0[1] = -incident[1] / lambda / 1e10; Ewald0[2] = -incident[2] / lambda / 1e10; Ewald0[3] = -incident[3] / lambda / 1e10; // 1/lambda = magnitude(Ewald0) /* distance from Ewald sphere in lambda=1 units */ CUDAREAL dEwald0[] = { 0.0, 0.0, 0.0, 0.0 }; dEwald0[1] = relp[1] - Ewald0[1]; dEwald0[2] = relp[2] - Ewald0[2]; dEwald0[3] = relp[3] - Ewald0[3]; magnitude(dEwald0); CUDAREAL d_r = dEwald0[0] - 1.0; /* unit vector of diffracted ray through relp */ CUDAREAL diffracted0[] = { 0.0, 0.0, 0.0, 0.0 }; unitize(dEwald0, diffracted0); /* intersection with detector plane */ CUDAREAL xd = dot_product_ldg(fdet_vector, diffracted0); CUDAREAL yd = dot_product_ldg(sdet_vector, diffracted0); CUDAREAL zd = dot_product_ldg(odet_vector, diffracted0); /* where does the central direct-beam hit */ CUDAREAL xd0 = dot_product_ldg(fdet_vector, incident); CUDAREAL yd0 = dot_product_ldg(sdet_vector, incident); CUDAREAL zd0 = dot_product_ldg(odet_vector, incident); /* convert to mm coordinates */ CUDAREAL Fdet0 = distance * (xd / zd) + Xbeam; CUDAREAL Sdet0 = distance * (yd / zd) + Ybeam; //printf("GOTHERE %g %g %g %g\n",Fdet,Sdet,Fdet0,Sdet0); CUDAREAL test = exp(-((Fdet - Fdet0) * (Fdet - Fdet0) + (Sdet - Sdet0) * (Sdet - Sdet0) + d_r * d_r) / 1e-8); } // end of integral form /* structure factor of the unit cell */ CUDAREAL F_cell = default_F; if (s_interpolate) { int h0_flr = floor(h); int k0_flr = floor(k); int l0_flr = floor(l); if (((h - s_h_min + 3) > s_h_range) || (h - 2 < s_h_min) || ((k - s_k_min + 3) > s_k_range) || (k - 2 < s_k_min) || ((l - s_l_min + 3) > s_l_range) || (l - 2 < s_l_min)) { // if (babble) { // babble = 0; // printf("WARNING: out of range for three point interpolation: h,k,l,h0,k0,l0: %g,%g,%g,%d,%d,%d \n", h, k, l, h0, // k0, l0); // printf("WARNING: further warnings will not be printed! 
"); // } F_cell = quickFcell_ldg(s_hkls, s_h_max, s_h_min, s_k_max, s_k_min, s_l_max, s_l_min, h0, k0, l0, s_h_range, s_k_range, s_l_range, default_F, Fhkl); } else { /* integer versions of nearest HKL indicies */ int h_interp[] = { 0, 0, 0, 0 }; int k_interp[] = { 0, 0, 0, 0 }; int l_interp[] = { 0, 0, 0, 0 }; h_interp[0] = h0_flr - 1; h_interp[1] = h0_flr; h_interp[2] = h0_flr + 1; h_interp[3] = h0_flr + 2; k_interp[0] = k0_flr - 1; k_interp[1] = k0_flr; k_interp[2] = k0_flr + 1; k_interp[3] = k0_flr + 2; l_interp[0] = l0_flr - 1; l_interp[1] = l0_flr; l_interp[2] = l0_flr + 1; l_interp[3] = l0_flr + 2; /* polin function needs doubles */ CUDAREAL h_interp_d[] = { 0.0, 0.0, 0.0, 0.0 }; CUDAREAL k_interp_d[] = { 0.0, 0.0, 0.0, 0.0 }; CUDAREAL l_interp_d[] = { 0.0, 0.0, 0.0, 0.0 }; h_interp_d[0] = (CUDAREAL) h_interp[0]; h_interp_d[1] = (CUDAREAL) h_interp[1]; h_interp_d[2] = (CUDAREAL) h_interp[2]; h_interp_d[3] = (CUDAREAL) h_interp[3]; k_interp_d[0] = (CUDAREAL) k_interp[0]; k_interp_d[1] = (CUDAREAL) k_interp[1]; k_interp_d[2] = (CUDAREAL) k_interp[2]; k_interp_d[3] = (CUDAREAL) k_interp[3]; l_interp_d[0] = (CUDAREAL) l_interp[0]; l_interp_d[1] = (CUDAREAL) l_interp[1]; l_interp_d[2] = (CUDAREAL) l_interp[2]; l_interp_d[3] = (CUDAREAL) l_interp[3]; /* now populate the "y" values (nearest four structure factors in each direction) */ CUDAREAL sub_Fhkl[4][4][4]; int i1, i2, i3; for (i1 = 0; i1 < 4; i1++) { for (i2 = 0; i2 < 4; i2++) { for (i3 = 0; i3 < 4; i3++) { sub_Fhkl[i1][i2][i3] = __ldg( &Fhkl[flatten3dindex(h_interp[i1] - s_h_min, k_interp[i2] - s_k_min, l_interp[i3] - s_l_min, s_h_range, s_k_range, s_l_range)]); } } } /* run the tricubic polynomial interpolation */ polin3(h_interp_d, k_interp_d, l_interp_d, sub_Fhkl, h, k, l, &F_cell); } } else { // if (!interpolate) { // if (hkls && (h0 <= hklParams[1]) && (h0 >= hklParams[0]) && (k0 <= hklParams[4]) && (k0 >= hklParams[3]) && (l0 <= hklParams[7]) && (l0 >= hklParams[6])) { // /* just take nearest-neighbor */ // F_cell = __ldg(&Fhkl[flatten3dindex(h0 - hklParams[0], k0 - hklParams[3], l0 - hklParams[6], hklParams[2], hklParams[5], hklParams[8])]); // } else { // F_cell = default_F; // usually zero // } // } F_cell = quickFcell_ldg(s_hkls, s_h_max, s_h_min, s_k_max, s_k_min, s_l_max, s_l_min, h0, k0, l0, s_h_range, s_k_range, s_l_range, default_F, Fhkl); // if (s_hkls && (h0 <= s_h_max) && (h0 >= s_h_min) && (k0 <= s_k_max) && (k0 >= s_k_min) && (l0 <= s_l_max) && (l0 >= s_l_min)) { // /* just take nearest-neighbor */ // F_cell = __ldg(&Fhkl[flatten3dindex(h0 - s_h_min, k0 - s_k_min, l0 - s_l_min, s_h_range, s_k_range, s_l_range)]); //// F_cell = __ldg(&Fhkl[flatten3dindex(h0 - __ldg(&FhklParams->h_min), k0 - __ldg(&FhklParams->k_min), l0 - __ldg(&FhklParams->l_min), s_h_range, s_k_range, s_l_range)]); //// F_cell = __ldg(&Fhkl[flatten3dindex(h0 - FhklParams->h_min, k0 - FhklParams->k_min, l0 - FhklParams->l_min, FhklParams->h_range, FhklParams->k_range, FhklParams->l_range)]); // } } /* now we have the structure factor for this pixel */ /* convert amplitudes into intensity (photons per steradian) */ I += F_cell * F_cell * F_latt * F_latt * source_fraction * capture_fraction * omega_pixel; omega_sub_reduction += omega_pixel; } /* end of mosaic loop */ } /* end of phi loop */ } /* end of source loop */ } /* end of detector thickness loop */ } /* end of sub-pixel y loop */ } /* end of sub-pixel x loop */ const double photons = I_bg + (r_e_sqr * spot_scale * fluence * polar * I) / steps; floatimage[j] = photons; omega_reduction[j] = 
omega_sub_reduction; // shared contention max_I_x_reduction[j] = max_I_x_sub_reduction; max_I_y_reduction[j] = max_I_y_sub_reduction; rangemap[j] = true; } } __device__ __inline__ CUDAREAL quickFcell_ldg(int hkls, int h_max, int h_min, int k_max, int k_min, int l_max, int l_min, int h0, int k0, int l0, int h_range, int k_range, int l_range, CUDAREAL defaultF, const CUDAREAL * __restrict__ Fhkl) { if (hkls && (h0 <= h_max) && (h0 >= h_min) && (k0 <= k_max) && (k0 >= k_min) && (l0 <= l_max) && (l0 >= l_min)) { /* just take nearest-neighbor */ // F_cell = __ldg(&Fhkl[flatten3dindex(h0 - s_h_min, k0 - s_k_min, l0 - s_l_min, s_h_range, s_k_range, s_l_range)]); return __ldg(&Fhkl[flatten3dindex(h0 - h_min, k0 - k_min, l0 - l_min, h_range, k_range, l_range)]); } else { return defaultF; // usually zero } } __device__ __inline__ int flatten3dindex(int x, int y, int z, int x_range, int y_range, int z_range) { return x * y_range * z_range + y * z_range + z; } /* rotate a point about a unit vector axis */ __device__ CUDAREAL *rotate_axis(const CUDAREAL * __restrict__ v, CUDAREAL * newv, const CUDAREAL * __restrict__ axis, const CUDAREAL phi) { const CUDAREAL sinphi = sin(phi); const CUDAREAL cosphi = cos(phi); const CUDAREAL a1 = axis[1]; const CUDAREAL a2 = axis[2]; const CUDAREAL a3 = axis[3]; const CUDAREAL v1 = v[1]; const CUDAREAL v2 = v[2]; const CUDAREAL v3 = v[3]; const CUDAREAL dot = (a1 * v1 + a2 * v2 + a3 * v3) * (1.0 - cosphi); newv[1] = a1 * dot + v1 * cosphi + (-a3 * v2 + a2 * v3) * sinphi; newv[2] = a2 * dot + v2 * cosphi + (+a3 * v1 - a1 * v3) * sinphi; newv[3] = a3 * dot + v3 * cosphi + (-a2 * v1 + a1 * v2) * sinphi; return newv; } /* scale magnitude of provided vector */ __device__ CUDAREAL vector_scale(CUDAREAL *vector, CUDAREAL *new_vector, CUDAREAL scale) { new_vector[1] = scale * vector[1]; new_vector[2] = scale * vector[2]; new_vector[3] = scale * vector[3]; magnitude(new_vector); return new_vector[0]; } /* Fourier transform of a grating */ __device__ CUDAREAL sincg(CUDAREAL x, CUDAREAL N) { if (x != 0.0) return sin(x * N) / sin(x); return N; } __device__ CUDAREAL sincgrad(CUDAREAL x, CUDAREAL N) { if (x != 0.0) return sinpi(x * N) / sinpi(x); return N; } /* Fourier transform of a sphere */ __device__ CUDAREAL sinc3(CUDAREAL x) { if (x != 0.0) return 3.0 * (sin(x) / x - cos(x)) / (x * x); return 1.0; } __device__ void polin2(CUDAREAL *x1a, CUDAREAL *x2a, CUDAREAL ya[4][4], CUDAREAL x1, CUDAREAL x2, CUDAREAL *y) { int j; CUDAREAL ymtmp[4]; for (j = 1; j <= 4; j++) { polint(x2a, ya[j - 1], x2, &ymtmp[j - 1]); } polint(x1a, ymtmp, x1, y); } __device__ void polin3(CUDAREAL *x1a, CUDAREAL *x2a, CUDAREAL *x3a, CUDAREAL ya[4][4][4], CUDAREAL x1, CUDAREAL x2, CUDAREAL x3, CUDAREAL *y) { int j; CUDAREAL ymtmp[4]; for (j = 1; j <= 4; j++) { polin2(x2a, x3a, &ya[j - 1][0], x2, x3, &ymtmp[j - 1]); } polint(x1a, ymtmp, x1, y); }
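// --- Hedged sketch (not part of the original sources) -----------------------
// Host-side illustration of the SQUARE shape transform used in the kernel:
// for a parallelepiped crystal of Na x Nb x Nc cells the lattice factor is
//   F_latt = sincg(pi*h, Na) * sincg(pi*k, Nb) * sincg(pi*l, Nc),
// with sincg(x, N) = sin(N*x) / sin(x) and the limit N at x = 0. The small
// main() only demonstrates that the factor is maximal on integer Miller
// indices and falls off between them; it is not part of the simulation code.
#include <cmath>
#include <cstdio>

static double sincg_ref(double x, double N) {
    return x != 0.0 ? std::sin(x * N) / std::sin(x) : N;
}

static double lattice_factor_ref(double h, double k, double l,
                                 double Na, double Nb, double Nc) {
    const double pi = 3.14159265358979323846;
    return sincg_ref(pi * h, Na) * sincg_ref(pi * k, Nb) * sincg_ref(pi * l, Nc);
}

int main() {
    // On a reflection the magnitude approaches Na*Nb*Nc (= 1000 here);
    // halfway between reflections it is negligible.
    std::printf("on reflection (1,2,3):     %g\n", lattice_factor_ref(1.0, 2.0, 3.0, 10, 10, 10));
    std::printf("between reflections (1.5): %g\n", lattice_factor_ref(1.5, 2.0, 3.0, 10, 10, 10));
    return 0;
}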
#include <iomanip> #include "caffe/FRCNN/frcnn_proposal_layer.hpp" #include "caffe/FRCNN/util/frcnn_utils.hpp" #include "caffe/FRCNN/util/frcnn_helper.hpp" #include "caffe/FRCNN/util/frcnn_param.hpp" #include "caffe/FRCNN/util/frcnn_gpu_nms.hpp" namespace caffe { namespace Frcnn { using std::vector; __global__ void GetIndex(const int n,int *indices){ CUDA_KERNEL_LOOP(index , n){ indices[index] = index; } } template <typename Dtype> __global__ void BBoxTransformInv(const int nthreads, const Dtype* const bottom_rpn_bbox, const int height, const int width, const int feat_stride, const int im_height, const int im_width, const int* sorted_indices, const float* anchors, float* const transform_bbox) { CUDA_KERNEL_LOOP(index , nthreads) { const int score_idx = sorted_indices[index]; const int i = score_idx % width; // width const int j = (score_idx % (width * height)) / width; // height const int k = score_idx / (width * height); // channel float *box = transform_bbox + index * 4; box[0] = anchors[k * 4 + 0] + i * feat_stride; box[1] = anchors[k * 4 + 1] + j * feat_stride; box[2] = anchors[k * 4 + 2] + i * feat_stride; box[3] = anchors[k * 4 + 3] + j * feat_stride; const Dtype det[4] = { bottom_rpn_bbox[(k * 4 + 0) * height * width + j * width + i], bottom_rpn_bbox[(k * 4 + 1) * height * width + j * width + i], bottom_rpn_bbox[(k * 4 + 2) * height * width + j * width + i], bottom_rpn_bbox[(k * 4 + 3) * height * width + j * width + i] }; float src_w = box[2] - box[0] + 1; float src_h = box[3] - box[1] + 1; float src_ctr_x = box[0] + 0.5 * src_w; float src_ctr_y = box[1] + 0.5 * src_h; float pred_ctr_x = det[0] * src_w + src_ctr_x; float pred_ctr_y = det[1] * src_h + src_ctr_y; float pred_w = exp(det[2]) * src_w; float pred_h = exp(det[3]) * src_h; box[0] = pred_ctr_x - 0.5 * pred_w; box[1] = pred_ctr_y - 0.5 * pred_h; box[2] = pred_ctr_x + 0.5 * pred_w; box[3] = pred_ctr_y + 0.5 * pred_h; box[0] = max(0.0f, min(box[0], im_width - 1.0)); box[1] = max(0.0f, min(box[1], im_height - 1.0)); box[2] = max(0.0f, min(box[2], im_width - 1.0)); box[3] = max(0.0f, min(box[3], im_height - 1.0)); } } __global__ void SelectBox(const int nthreads, const float *box, float min_size, int *flags) { CUDA_KERNEL_LOOP(index , nthreads) { if ((box[index * 4 + 2] - box[index * 4 + 0] < min_size) || (box[index * 4 + 3] - box[index * 4 + 1] < min_size)) { flags[index] = 0; } else { flags[index] = 1; } } } template <typename Dtype> __global__ void SelectBoxByIndices(const int nthreads, const float *in_box, int *selected_indices, float *out_box, const Dtype *in_score, Dtype *out_score) { CUDA_KERNEL_LOOP(index , nthreads) { if ((index == 0 && selected_indices[index] == 1) || (index > 0 && selected_indices[index] == selected_indices[index - 1] + 1)) { out_box[(selected_indices[index] - 1) * 4 + 0] = in_box[index * 4 + 0]; out_box[(selected_indices[index] - 1) * 4 + 1] = in_box[index * 4 + 1]; out_box[(selected_indices[index] - 1) * 4 + 2] = in_box[index * 4 + 2]; out_box[(selected_indices[index] - 1) * 4 + 3] = in_box[index * 4 + 3]; if (in_score!=NULL && out_score!=NULL) { out_score[selected_indices[index] - 1] = in_score[index]; } } } } template <typename Dtype> __global__ void SelectBoxAftNMS(const int nthreads, const float *in_box, int *keep_indices, Dtype *top_data, const Dtype *in_score, Dtype* top_score) { CUDA_KERNEL_LOOP(index , nthreads) { top_data[index * 5] = 0; int keep_idx = keep_indices[index]; for (int j = 1; j < 5; ++j) { top_data[index * 5 + j] = in_box[keep_idx * 4 + j - 1]; } if (top_score != NULL && 
in_score != NULL) { top_score[index] = in_score[keep_idx]; } } } template <typename Dtype> void FrcnnProposalLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) { // if(this->phase_ == TEST) { // this->use_gpu_nms_in_forward_cpu = true; // set flag to be used in forward_cpu Forward_cpu(bottom, top); // }else{ { #if 0 DLOG(ERROR) << "========== enter proposal layer"; const Dtype *bottom_rpn_score = bottom[0]->gpu_data(); const Dtype *bottom_rpn_bbox = bottom[1]->gpu_data(); // bottom data comes from host memory Dtype bottom_im_info[3]; CHECK_EQ(bottom[2]->count(), 3); CUDA_CHECK(cudaMemcpy(bottom_im_info, bottom[2]->gpu_data(), sizeof(Dtype) * 3, cudaMemcpyDeviceToHost)); const int num = bottom[1]->num(); const int channes = bottom[1]->channels(); const int height = bottom[1]->height(); const int width = bottom[1]->width(); CHECK(num == 1) << "only single item batches are supported"; CHECK(channes % 4 == 0) << "rpn bbox pred channels should be divided by 4"; const float im_height = bottom_im_info[0]; const float im_width = bottom_im_info[1]; int rpn_pre_nms_top_n; int rpn_post_nms_top_n; float rpn_nms_thresh; int rpn_min_size; if (this->phase_ == TRAIN) { rpn_pre_nms_top_n = FrcnnParam::rpn_pre_nms_top_n; rpn_post_nms_top_n = FrcnnParam::rpn_post_nms_top_n; rpn_nms_thresh = FrcnnParam::rpn_nms_thresh; rpn_min_size = FrcnnParam::rpn_min_size; } else { rpn_pre_nms_top_n = FrcnnParam::test_rpn_pre_nms_top_n; rpn_post_nms_top_n = FrcnnParam::test_rpn_post_nms_top_n; rpn_nms_thresh = FrcnnParam::test_rpn_nms_thresh; rpn_min_size = FrcnnParam::test_rpn_min_size; } LOG_IF(ERROR, rpn_pre_nms_top_n <= 0 ) << "rpn_pre_nms_top_n : " << rpn_pre_nms_top_n; LOG_IF(ERROR, rpn_post_nms_top_n <= 0 ) << "rpn_post_nms_top_n : " << rpn_post_nms_top_n; if (rpn_pre_nms_top_n <= 0 || rpn_post_nms_top_n <= 0 ) return; const int config_n_anchors = FrcnnParam::anchors.size() / 4; const int total_anchor_num = config_n_anchors * height * width; //Step 1. -------------------------------Sort the rpn result---------------------- // the first half of rpn_score is the bg score // Note that the sorting operator will change the order fg_scores (bottom_rpn_score) Dtype *fg_scores = (Dtype*)(&bottom_rpn_score[total_anchor_num]); Dtype *sorted_scores = NULL; CUDA_CHECK(cudaMalloc((void**)&sorted_scores, sizeof(Dtype) * total_anchor_num)); cub::DoubleBuffer<Dtype> d_keys(fg_scores, sorted_scores); int *indices = NULL; CUDA_CHECK(cudaMalloc((void**)&indices, sizeof(int) * total_anchor_num)); GetIndex<<<caffe::CAFFE_GET_BLOCKS(total_anchor_num), caffe::CAFFE_CUDA_NUM_THREADS>>>( total_anchor_num, indices); cudaDeviceSynchronize(); int *sorted_indices = NULL; CUDA_CHECK(cudaMalloc((void**)&sorted_indices, sizeof(int) * total_anchor_num)); cub::DoubleBuffer<int> d_values(indices, sorted_indices); void *sort_temp_storage_ = NULL; size_t sort_temp_storage_bytes_ = 0; // calculate the temp_storage_bytes cub::DeviceRadixSort::SortPairsDescending(sort_temp_storage_, sort_temp_storage_bytes_, d_keys, d_values, total_anchor_num); DLOG(ERROR) << "sort_temp_storage_bytes_ : " << sort_temp_storage_bytes_; CUDA_CHECK(cudaMalloc(&sort_temp_storage_, sort_temp_storage_bytes_)); // sorting cub::DeviceRadixSort::SortPairsDescending(sort_temp_storage_, sort_temp_storage_bytes_, d_keys, d_values, total_anchor_num); cudaDeviceSynchronize(); //Step 2. 
---------------------------bbox transform---------------------------- const int retained_anchor_num = std::min(total_anchor_num, rpn_pre_nms_top_n); // float *transform_bbox = NULL; // CUDA_CHECK(cudaMalloc(&transform_bbox, sizeof(float) * retained_anchor_num * 4)); BBoxTransformInv<Dtype><<<caffe::CAFFE_GET_BLOCKS(retained_anchor_num), caffe::CAFFE_CUDA_NUM_THREADS>>>( retained_anchor_num, bottom_rpn_bbox, height, width, FrcnnParam::feat_stride, im_height, im_width, sorted_indices, anchors_, transform_bbox_); cudaDeviceSynchronize(); //Step 3. -------------------------filter out small box----------------------- // select the box larger than min size // int *selected_flags = NULL; // CUDA_CHECK(cudaMalloc(&selected_flags, sizeof(int) * retained_anchor_num)); SelectBox<<<caffe::CAFFE_GET_BLOCKS(retained_anchor_num), caffe::CAFFE_CUDA_NUM_THREADS>>>( retained_anchor_num, transform_bbox_, bottom_im_info[2] * rpn_min_size, selected_flags_); cudaDeviceSynchronize(); // cumulative sum up the flags to get the copy index int *selected_indices_ = NULL; CUDA_CHECK(cudaMalloc((void**)&selected_indices_, sizeof(int) * retained_anchor_num)); void *cumsum_temp_storage_ = NULL; size_t cumsum_temp_storage_bytes_ = 0; cub::DeviceScan::InclusiveSum(cumsum_temp_storage_, cumsum_temp_storage_bytes_, selected_flags_, selected_indices_, retained_anchor_num); DLOG(ERROR) << "cumsum_temp_storage_bytes : " << cumsum_temp_storage_bytes_; CUDA_CHECK(cudaMalloc(&cumsum_temp_storage_, cumsum_temp_storage_bytes_)); cub::DeviceScan::InclusiveSum(sort_temp_storage_, cumsum_temp_storage_bytes_, selected_flags_, selected_indices_, retained_anchor_num); // CUDA_CHECK(cudaFree(cumsum_temp_storage)); int selected_num = -1; cudaMemcpy(&selected_num, &selected_indices_[retained_anchor_num - 1], sizeof(int), cudaMemcpyDeviceToHost); CHECK_GT(selected_num, 0); Dtype *bbox_score_ = NULL; if (top.size() == 2) CUDA_CHECK(cudaMalloc(&bbox_score_, sizeof(Dtype) * retained_anchor_num)); SelectBoxByIndices<<<caffe::CAFFE_GET_BLOCKS(selected_num), caffe::CAFFE_CUDA_NUM_THREADS>>>( selected_num, transform_bbox_, selected_indices_, transform_bbox_, sorted_scores, bbox_score_); cudaDeviceSynchronize(); //Step 4. 
-----------------------------apply nms------------------------------- DLOG(ERROR) << "========== apply nms with rpn_nms_thresh : " << rpn_nms_thresh; vector<int> keep_indices(selected_num); int keep_num = -1; gpu_nms(&keep_indices[0], &keep_num, transform_bbox_, selected_num, 4, rpn_nms_thresh); DLOG(ERROR) << "rpn num after gpu nms: " << keep_num; keep_num = std::min(keep_num, rpn_post_nms_top_n); DLOG(ERROR) << "========== copy to top"; cudaMemcpy(gpu_keep_indices_, &keep_indices[0], sizeof(int) * keep_num, cudaMemcpyHostToDevice); top[0]->Reshape(keep_num, 5, 1, 1); Dtype *top_data = top[0]->mutable_gpu_data(); Dtype *top_score = NULL; if (top.size() == 2) { top[1]->Reshape(keep_num, 1, 1, 1); top_score = top[1]->mutable_gpu_data(); } SelectBoxAftNMS<<<caffe::CAFFE_GET_BLOCKS(keep_num), caffe::CAFFE_CUDA_NUM_THREADS>>>( keep_num, transform_bbox_, gpu_keep_indices_, top_data, bbox_score_, top_score); DLOG(ERROR) << "========== exit proposal layer"; //////////////////////////////////// // do not forget to free the malloc memory CUDA_CHECK(cudaFree(sorted_scores)); CUDA_CHECK(cudaFree(indices)); CUDA_CHECK(cudaFree(sorted_indices)); CUDA_CHECK(cudaFree(sort_temp_storage_)); CUDA_CHECK(cudaFree(cumsum_temp_storage_)); CUDA_CHECK(cudaFree(selected_indices_)); if (bbox_score_!=NULL) CUDA_CHECK(cudaFree(bbox_score_)); #endif } } template <typename Dtype> void FrcnnProposalLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down, const vector<Blob<Dtype> *> &bottom) { for (int i = 0; i < propagate_down.size(); ++i) { if (propagate_down[i]) { NOT_IMPLEMENTED; } } } INSTANTIATE_LAYER_GPU_FUNCS(FrcnnProposalLayer); } // namespace frcnn } // namespace caffe
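
// A minimal host-side reference (a sketch, not part of the original layer) of the box
// decoding that BBoxTransformInv performs per anchor: recover center/size from the
// anchor, apply the predicted (dx, dy, dw, dh) deltas, and clip to the image bounds.
// Useful for spot-checking the GPU results on a handful of anchors.
#include <algorithm>
#include <cmath>

static void decode_box_cpu(const float anchor[4], const float delta[4],
                           float im_width, float im_height, float box[4]) {
    const float src_w = anchor[2] - anchor[0] + 1.0f;
    const float src_h = anchor[3] - anchor[1] + 1.0f;
    const float ctr_x = anchor[0] + 0.5f * src_w;
    const float ctr_y = anchor[1] + 0.5f * src_h;
    const float pred_ctr_x = delta[0] * src_w + ctr_x;
    const float pred_ctr_y = delta[1] * src_h + ctr_y;
    const float pred_w = std::exp(delta[2]) * src_w;
    const float pred_h = std::exp(delta[3]) * src_h;
    box[0] = std::max(0.0f, std::min(pred_ctr_x - 0.5f * pred_w, im_width  - 1.0f));
    box[1] = std::max(0.0f, std::min(pred_ctr_y - 0.5f * pred_h, im_height - 1.0f));
    box[2] = std::max(0.0f, std::min(pred_ctr_x + 0.5f * pred_w, im_width  - 1.0f));
    box[3] = std::max(0.0f, std::min(pred_ctr_y + 0.5f * pred_h, im_height - 1.0f));
}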
#include "correlation_cuda_kernel.cuh" #define CUDA_NUM_THREADS 1024 #define THREADS_PER_BLOCK 32 #define FULL_MASK 0xffffffff #include <ATen/ATen.h> #include <ATen/NativeFunctions.h> #include <ATen/Dispatch.h> #include <ATen/cuda/CUDAApplyUtils.cuh> using at::Half; template<typename scalar_t> __forceinline__ __device__ scalar_t warpReduceSum(scalar_t val) { for (int offset = 16; offset > 0; offset /= 2) val += __shfl_down_sync(FULL_MASK, val, offset); return val; } template<typename scalar_t> __forceinline__ __device__ scalar_t blockReduceSum(scalar_t val) { static __shared__ scalar_t shared[32]; int lane = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; val = warpReduceSum(val); if (lane == 0) shared[wid] = val; __syncthreads(); val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0; if (wid == 0) val = warpReduceSum(val); return val; } template <typename scalar_t> __global__ void channels_first(const scalar_t* __restrict__ input, scalar_t* rinput, int channels, int height, int width, int pad_size) { // n (batch size), c (num of channels), y (height), x (width) int n = blockIdx.x; int y = blockIdx.y; int x = blockIdx.z; int ch_off = threadIdx.x; scalar_t value; int dimcyx = channels * height * width; int dimyx = height * width; int p_dimx = (width + 2 * pad_size); int p_dimy = (height + 2 * pad_size); int p_dimyxc = channels * p_dimy * p_dimx; int p_dimxc = p_dimx * channels; for (int c = ch_off; c < channels; c += THREADS_PER_BLOCK) { value = input[n * dimcyx + c * dimyx + y * width + x]; rinput[n * p_dimyxc + (y + pad_size) * p_dimxc + (x + pad_size) * channels + c] = value; } } template<typename scalar_t> __global__ void correlation_forward(scalar_t* __restrict__ output, const int nOutputChannels, const int outputHeight, const int outputWidth, const scalar_t* __restrict__ rInput1, const int nInputChannels, const int inputHeight, const int inputWidth, const scalar_t* __restrict__ rInput2, const int pad_size, const int kernel_size, const int max_displacement, const int stride1, const int stride2) { int32_t pInputWidth = inputWidth + 2 * pad_size; int32_t pInputHeight = inputHeight + 2 * pad_size; int32_t kernel_rad = (kernel_size - 1) / 2; int32_t displacement_rad = max_displacement / stride2; int32_t displacement_size = 2 * displacement_rad + 1; int32_t n = blockIdx.x; int32_t y1 = blockIdx.y * stride1 + max_displacement; int32_t x1 = blockIdx.z * stride1 + max_displacement; int32_t c = threadIdx.x; int32_t pdimyxc = pInputHeight * pInputWidth * nInputChannels; int32_t pdimxc = pInputWidth * nInputChannels; int32_t pdimc = nInputChannels; int32_t tdimcyx = nOutputChannels * outputHeight * outputWidth; int32_t tdimyx = outputHeight * outputWidth; int32_t tdimx = outputWidth; int32_t nelems = kernel_size * kernel_size * pdimc; // element-wise product along channel axis for (int tj = -displacement_rad; tj <= displacement_rad; ++tj) { for (int ti = -displacement_rad; ti <= displacement_rad; ++ti) { int x2 = x1 + ti * stride2; int y2 = y1 + tj * stride2; float acc0 = 0.0f; for (int j = -kernel_rad; j <= kernel_rad; ++j) { for (int i = -kernel_rad; i <= kernel_rad; ++i) { // THREADS_PER_BLOCK #pragma unroll for (int ch = c; ch < pdimc; ch += blockDim.x) { int indx1 = n * pdimyxc + (y1 + j) * pdimxc + (x1 + i) * pdimc + ch; int indx2 = n * pdimyxc + (y2 + j) * pdimxc + (x2 + i) * pdimc + ch; acc0 += static_cast<float>(rInput1[indx1] * rInput2[indx2]); } } } if (blockDim.x == warpSize) { __syncwarp(); acc0 = warpReduceSum(acc0); } else { __syncthreads(); acc0 = 
blockReduceSum(acc0); } if (threadIdx.x == 0) { int tc = (tj + displacement_rad) * displacement_size + (ti + displacement_rad); const int tindx = n * tdimcyx + tc * tdimyx + blockIdx.y * tdimx + blockIdx.z; output[tindx] = static_cast<scalar_t>(acc0 / nelems); } } } } template <typename scalar_t> __global__ void correlation_backward_input1(int item, scalar_t* gradInput1, int nInputChannels, int inputHeight, int inputWidth, const scalar_t* __restrict__ gradOutput, int nOutputChannels, int outputHeight, int outputWidth, const scalar_t* __restrict__ rInput2, int pad_size, int kernel_size, int max_displacement, int stride1, int stride2) { // n (batch size), c (num of channels), y (height), x (width) int n = item; int y = blockIdx.x * stride1 + pad_size; int x = blockIdx.y * stride1 + pad_size; int c = blockIdx.z; int tch_off = threadIdx.x; int kernel_rad = (kernel_size - 1) / 2; int displacement_rad = max_displacement / stride2; int displacement_size = 2 * displacement_rad + 1; int xmin = (x - kernel_rad - max_displacement) / stride1; int ymin = (y - kernel_rad - max_displacement) / stride1; int xmax = (x + kernel_rad - max_displacement) / stride1; int ymax = (y + kernel_rad - max_displacement) / stride1; if (xmax < 0 || ymax < 0 || xmin >= outputWidth || ymin >= outputHeight) { // assumes gradInput1 is pre-allocated and zero filled return; } if (xmin > xmax || ymin > ymax) { // assumes gradInput1 is pre-allocated and zero filled return; } xmin = max(0,xmin); xmax = min(outputWidth-1,xmax); ymin = max(0,ymin); ymax = min(outputHeight-1,ymax); int pInputWidth = inputWidth + 2 * pad_size; int pInputHeight = inputHeight + 2 * pad_size; int pdimyxc = pInputHeight * pInputWidth * nInputChannels; int pdimxc = pInputWidth * nInputChannels; int pdimc = nInputChannels; int tdimcyx = nOutputChannels * outputHeight * outputWidth; int tdimyx = outputHeight * outputWidth; int tdimx = outputWidth; int odimcyx = nInputChannels * inputHeight* inputWidth; int odimyx = inputHeight * inputWidth; int odimx = inputWidth; scalar_t nelems = kernel_size * kernel_size * nInputChannels; __shared__ scalar_t prod_sum[THREADS_PER_BLOCK]; prod_sum[tch_off] = 0; for (int tc = tch_off; tc < nOutputChannels; tc += THREADS_PER_BLOCK) { int i2 = (tc % displacement_size - displacement_rad) * stride2; int j2 = (tc / displacement_size - displacement_rad) * stride2; int indx2 = n * pdimyxc + (y + j2)* pdimxc + (x + i2) * pdimc + c; scalar_t val2 = rInput2[indx2]; for (int j = ymin; j <= ymax; ++j) { for (int i = xmin; i <= xmax; ++i) { int tindx = n * tdimcyx + tc * tdimyx + j * tdimx + i; prod_sum[tch_off] += gradOutput[tindx] * val2; } } } __syncthreads(); if(tch_off == 0) { scalar_t reduce_sum = 0; for(int idx = 0; idx < THREADS_PER_BLOCK; idx++) { reduce_sum += prod_sum[idx]; } const int indx1 = n * odimcyx + c * odimyx + (y - pad_size) * odimx + (x - pad_size); gradInput1[indx1] = reduce_sum / nelems; } } template <typename scalar_t> __global__ void correlation_backward_input2(int item, scalar_t* gradInput2, int nInputChannels, int inputHeight, int inputWidth, const scalar_t* __restrict__ gradOutput, int nOutputChannels, int outputHeight, int outputWidth, const scalar_t* __restrict__ rInput1, int pad_size, int kernel_size, int max_displacement, int stride1, int stride2) { // n (batch size), c (num of channels), y (height), x (width) int n = item; int y = blockIdx.x * stride1 + pad_size; int x = blockIdx.y * stride1 + pad_size; int c = blockIdx.z; int tch_off = threadIdx.x; int kernel_rad = (kernel_size - 1) / 2; int 
displacement_rad = max_displacement / stride2; int displacement_size = 2 * displacement_rad + 1; int pInputWidth = inputWidth + 2 * pad_size; int pInputHeight = inputHeight + 2 * pad_size; int pdimyxc = pInputHeight * pInputWidth * nInputChannels; int pdimxc = pInputWidth * nInputChannels; int pdimc = nInputChannels; int tdimcyx = nOutputChannels * outputHeight * outputWidth; int tdimyx = outputHeight * outputWidth; int tdimx = outputWidth; int odimcyx = nInputChannels * inputHeight* inputWidth; int odimyx = inputHeight * inputWidth; int odimx = inputWidth; scalar_t nelems = kernel_size * kernel_size * nInputChannels; __shared__ scalar_t prod_sum[THREADS_PER_BLOCK]; prod_sum[tch_off] = 0; for (int tc = tch_off; tc < nOutputChannels; tc += THREADS_PER_BLOCK) { int i2 = (tc % displacement_size - displacement_rad) * stride2; int j2 = (tc / displacement_size - displacement_rad) * stride2; int xmin = (x - kernel_rad - max_displacement - i2) / stride1; int ymin = (y - kernel_rad - max_displacement - j2) / stride1; int xmax = (x + kernel_rad - max_displacement - i2) / stride1; int ymax = (y + kernel_rad - max_displacement - j2) / stride1; if (xmax < 0 || ymax < 0 || xmin >= outputWidth || ymin >= outputHeight) { // assumes gradInput2 is pre-allocated and zero filled continue; } if (xmin > xmax || ymin > ymax) { // assumes gradInput2 is pre-allocated and zero filled continue; } xmin = max(0,xmin); xmax = min(outputWidth-1,xmax); ymin = max(0,ymin); ymax = min(outputHeight-1,ymax); int indx1 = n * pdimyxc + (y - j2)* pdimxc + (x - i2) * pdimc + c; scalar_t val1 = rInput1[indx1]; for (int j = ymin; j <= ymax; ++j) { for (int i = xmin; i <= xmax; ++i) { int tindx = n * tdimcyx + tc * tdimyx + j * tdimx + i; prod_sum[tch_off] += gradOutput[tindx] * val1; } } } __syncthreads(); if(tch_off == 0) { scalar_t reduce_sum = 0; for(int idx = 0; idx < THREADS_PER_BLOCK; idx++) { reduce_sum += prod_sum[idx]; } const int indx2 = n * odimcyx + c * odimyx + (y - pad_size) * odimx + (x - pad_size); gradInput2[indx2] = reduce_sum / nelems; } } int correlation_forward_cuda_kernel(at::Tensor& output, int ob, int oc, int oh, int ow, int osb, int osc, int osh, int osw, at::Tensor& input1, int ic, int ih, int iw, int isb, int isc, int ish, int isw, at::Tensor& input2, int gc, int gsb, int gsc, int gsh, int gsw, at::Tensor& rInput1, at::Tensor& rInput2, int pad_size, int kernel_size, int max_displacement, int stride1, int stride2, int corr_type_multiply, cudaStream_t stream) { int batchSize = ob; int nInputChannels = ic; int inputWidth = iw; int inputHeight = ih; int nOutputChannels = oc; int outputWidth = ow; int outputHeight = oh; dim3 blocks_grid(batchSize, inputHeight, inputWidth); dim3 threads_block(THREADS_PER_BLOCK); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.scalar_type(), "channels_first_fwd_1", ([&] { channels_first<scalar_t><<<blocks_grid,threads_block, 0, stream>>>( input1.data<scalar_t>(), rInput1.data<scalar_t>(), nInputChannels, inputHeight, inputWidth, pad_size); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input2.scalar_type(), "channels_first_fwd_2", ([&] { channels_first<scalar_t><<<blocks_grid,threads_block, 0, stream>>> ( input2.data<scalar_t>(), rInput2.data<scalar_t>(), nInputChannels, inputHeight, inputWidth, pad_size); })); dim3 threadsPerBlock(THREADS_PER_BLOCK); dim3 totalBlocksCorr(batchSize, outputHeight, outputWidth); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.scalar_type(), "correlation_forward", ([&] { correlation_forward<scalar_t><<<totalBlocksCorr, threadsPerBlock, 0, stream>>> 
(output.data<scalar_t>(), nOutputChannels, outputHeight, outputWidth, rInput1.data<scalar_t>(), nInputChannels, inputHeight, inputWidth, rInput2.data<scalar_t>(), pad_size, kernel_size, max_displacement, stride1, stride2); })); cudaError_t err = cudaGetLastError(); // check for errors if (err != cudaSuccess) { printf("error in correlation_forward_cuda_kernel: %s\n", cudaGetErrorString(err)); return 0; } return 1; } int correlation_backward_cuda_kernel( at::Tensor& gradOutput, int gob, int goc, int goh, int gow, int gosb, int gosc, int gosh, int gosw, at::Tensor& input1, int ic, int ih, int iw, int isb, int isc, int ish, int isw, at::Tensor& input2, int gsb, int gsc, int gsh, int gsw, at::Tensor& gradInput1, int gisb, int gisc, int gish, int gisw, at::Tensor& gradInput2, int ggc, int ggsb, int ggsc, int ggsh, int ggsw, at::Tensor& rInput1, at::Tensor& rInput2, int pad_size, int kernel_size, int max_displacement, int stride1, int stride2, int corr_type_multiply, cudaStream_t stream) { int batchSize = gob; int num = batchSize; int nInputChannels = ic; int inputWidth = iw; int inputHeight = ih; int nOutputChannels = goc; int outputWidth = gow; int outputHeight = goh; dim3 blocks_grid(batchSize, inputHeight, inputWidth); dim3 threads_block(THREADS_PER_BLOCK); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.scalar_type(), "lltm_forward_cuda", ([&] { channels_first<scalar_t><<<blocks_grid, threads_block, 0, stream>>>( input1.data<scalar_t>(), rInput1.data<scalar_t>(), nInputChannels, inputHeight, inputWidth, pad_size ); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input2.scalar_type(), "lltm_forward_cuda", ([&] { channels_first<scalar_t><<<blocks_grid, threads_block, 0, stream>>>( input2.data<scalar_t>(), rInput2.data<scalar_t>(), nInputChannels, inputHeight, inputWidth, pad_size ); })); dim3 threadsPerBlock(THREADS_PER_BLOCK); dim3 totalBlocksCorr(inputHeight, inputWidth, nInputChannels); for (int n = 0; n < num; ++n) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input2.scalar_type(), "lltm_forward_cuda", ([&] { correlation_backward_input1<scalar_t><<<totalBlocksCorr, threadsPerBlock, 0, stream>>> ( n, gradInput1.data<scalar_t>(), nInputChannels, inputHeight, inputWidth, gradOutput.data<scalar_t>(), nOutputChannels, outputHeight, outputWidth, rInput2.data<scalar_t>(), pad_size, kernel_size, max_displacement, stride1, stride2); })); } for(int n = 0; n < batchSize; n++) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(rInput1.scalar_type(), "lltm_forward_cuda", ([&] { correlation_backward_input2<scalar_t><<<totalBlocksCorr, threadsPerBlock, 0, stream>>>( n, gradInput2.data<scalar_t>(), nInputChannels, inputHeight, inputWidth, gradOutput.data<scalar_t>(), nOutputChannels, outputHeight, outputWidth, rInput1.data<scalar_t>(), pad_size, kernel_size, max_displacement, stride1, stride2); })); } // check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in correlation_backward_cuda_kernel: %s\n", cudaGetErrorString(err)); return 0; } return 1; }
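
// The host-side shape computation for this correlation layer is not shown above. A
// common convention (an assumption here, stated in terms of the parameters the kernels
// take) derives the output extent from the padded input minus a border of kernel radius
// plus max_displacement, strided by stride1, with one output channel per displacement pair.
#include <cmath>

struct CorrelationShape { int channels, height, width; };

static CorrelationShape correlation_output_shape(int inputHeight, int inputWidth,
                                                 int pad_size, int kernel_size,
                                                 int max_displacement,
                                                 int stride1, int stride2) {
    const int displacement_size = 2 * (max_displacement / stride2) + 1;
    const int kernel_radius = (kernel_size - 1) / 2;
    const int border_radius = kernel_radius + max_displacement;
    const int paddedHeight = inputHeight + 2 * pad_size;
    const int paddedWidth  = inputWidth + 2 * pad_size;
    CorrelationShape s;
    s.channels = displacement_size * displacement_size;  // one channel per (ti, tj) offset
    s.height   = (int)std::ceil((float)(paddedHeight - 2 * border_radius) / (float)stride1);
    s.width    = (int)std::ceil((float)(paddedWidth  - 2 * border_radius) / (float)stride1);
    return s;
}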
#include <types.h> #include <my_errors.h> #include <kernel.h> #include <functions.h> #include <utilis.h> using namespace std; extern "C" HostError Hermite6th(const double TTIME, double* GTIME, double* ATIME, double* local_time, double* step, const unsigned int N, const unsigned int M, double4* pos_PH, float4* vel_PH, double4* pos_CH, double4* vel_CH, double4* a_H0, const unsigned int MAXDIM, unsigned int NGPU, unsigned int TPB, int rank, int size, unsigned int BFMAX, double ETA6, double ETA4, double DTMAX, double DTMIN, double EPS, double DTPRINT, unsigned int FMAX, const bool warm, double GTW, unsigned int GPUMINTHREADS, double plummer_core, double plummer_mass, double rscale, double mscale, vector<unsigned int> devices, bool *cleanstop, string path){ unsigned int ompthreads = 1; // N must be integer multiple of this number omp_set_num_threads( ompthreads ); unsigned int* vetint = new unsigned int [N]; unsigned int *counter = new unsigned int [ompthreads]; int *next = new int [N]; unsigned long nextsize = N; double NEXTOUT = DTPRINT; double4 **pos_PD = new double4* [NGPU]; float4 **vel_PD = new float4* [NGPU]; float4 **acc_PD = new float4* [NGPU]; double4 **pos_CD = new double4* [NGPU]; double4 **vel_CD = new double4* [NGPU]; int **next_D = new int* [NGPU]; double **loc_D = new double* [NGPU]; #ifdef APE double **step_D = new double* [NGPU]; #endif #ifdef GPUCORR double **step_D = new double* [NGPU]; #endif double4 **a_D = new double4* [NGPU]; double4 **a1_D = new double4* [NGPU]; double4 **a2_D = new double4* [NGPU]; double4 **a_tot_D = new double4* [NGPU]; double4 **a1_tot_D = new double4* [NGPU]; double4 **a2_tot_D = new double4* [NGPU]; double4 **a3_D = new double4* [NGPU]; double4 **p_v_a3_Dev = new double4* [NGPU]; double4 **a_temp_Dev = new double4* [NGPU]; unsigned int malloc_size = MAXDIM*sizeof(double4); //it contains a, adot, a2dots sequentially unsigned int malloc_db4 = nextsize*sizeof(double4); unsigned int malloc_fl4 = nextsize*sizeof(float4); unsigned int malloc_ui = nextsize*sizeof(unsigned int); unsigned int malloc_db = nextsize*sizeof(double); unsigned int malloc_db4_N = N*sizeof(double4); for(unsigned int i = 0; i < NGPU; i++){ DeviceSafeCall(cudaSetDevice(devices[i])); #ifdef GPUCORR DeviceSafeCall(cudaHostAlloc((void**)&step_D[i], malloc_db, cudaHostAllocMapped)); #endif DeviceSafeCall(cudaMalloc((void **)&a_D[i], malloc_size)); DeviceSafeCall(cudaMalloc((void **)&a1_D[i], malloc_size)); DeviceSafeCall(cudaMalloc((void **)&a2_D[i], malloc_size)); DeviceSafeCall(cudaMalloc((void **)&a_tot_D[i], malloc_db4_N)); DeviceSafeCall(cudaMalloc((void **)&a1_tot_D[i], malloc_db4_N)); DeviceSafeCall(cudaMalloc((void **)&a2_tot_D[i], malloc_db4_N)); DeviceSafeCall(cudaMalloc((void **)&pos_PD[i], malloc_db4)); DeviceSafeCall(cudaMalloc((void **)&pos_CD[i], malloc_db4)); DeviceSafeCall(cudaMalloc((void **)&vel_CD[i], malloc_db4)); DeviceSafeCall(cudaMalloc((void **)&vel_PD[i], malloc_fl4)); DeviceSafeCall(cudaMalloc((void **)&acc_PD[i], malloc_fl4)); DeviceSafeCall(cudaMalloc((void **)&next_D[i], malloc_ui)); DeviceSafeCall(cudaMalloc((void **)&loc_D[i], malloc_db)); #ifdef APE DeviceSafeCall(cudaMalloc((void **)&step_D[i], malloc_db)); #endif DeviceSafeCall(cudaMalloc((void **)&a3_D[i], malloc_db4_N)); DeviceSafeCall(cudaMalloc((void **)&p_v_a3_Dev[i], 3*malloc_db4_N)); DeviceSafeCall(cudaMalloc((void **)&a_temp_Dev[i], 3*malloc_db4_N)); DeviceSafeCall(cudaMemcpy( pos_PD[i], pos_PH, malloc_db4, cudaMemcpyHostToDevice )); DeviceSafeCall(cudaMemcpy( vel_PD[i], vel_PH, malloc_fl4, 
cudaMemcpyHostToDevice )); DeviceSafeCall(cudaMemcpy( pos_CD[i], pos_CH, malloc_db4, cudaMemcpyHostToDevice )); DeviceSafeCall(cudaMemcpy( vel_CD[i], vel_CH, malloc_db4, cudaMemcpyHostToDevice )); DeviceSafeCall(cudaMemcpy(a_tot_D[i], a_H0, malloc_db4_N, cudaMemcpyHostToDevice)); DeviceSafeCall(cudaMemcpy(a1_tot_D[i], &a_H0[N], malloc_db4_N, cudaMemcpyHostToDevice)); DeviceSafeCall(cudaMemcpy(a2_tot_D[i], &a_H0[2*N], malloc_db4_N, cudaMemcpyHostToDevice)); DeviceSafeCall(cudaMemcpy( loc_D[i], local_time, malloc_db, cudaMemcpyHostToDevice)); #ifdef APE DeviceSafeCall(cudaMemcpy( step_D[i], step, malloc_db, cudaMemcpyHostToDevice)); #endif #ifdef APE for(unsigned int j = 0; j < NGPU; j++){ if(j != i) DeviceSafeCall(cudaDeviceEnablePeerAccess(devices[j], 0)); } #endif } #ifdef GPUCORR for(unsigned int i = 0; i < NGPU; i++){ DeviceSafeCall(cudaSetDevice(devices[i])); for(unsigned int j = 0; j < N; j++) step_D[i][j] = step[j]; } #endif for(unsigned int i = 0; i < NGPU; i++){ DeviceSafeCall(cudaSetDevice(devices[i])); int BL = ceil((double)N/TPB); initvectors<<<BL, TPB>>>(a3_D[i], acc_PD[i]); } unsigned int ppG = N/(NGPU*size); unsigned int Bfactor; unsigned long BLOCKS; unsigned int DIMENSION, THREADS, bfmax, SHARED; double *mpi_red_aux = new double [3*N]; double *mpi_red = new double [3*N]; double4* a_H1 = new double4 [3*N]; double4* p_v_a3 = new double4 [3*N]; double4* a3_H = new double4 [N]; double4 *a_H = new double4 [NGPU*3*N]; for(unsigned int i = 0; i < 3*N; i++) a_H1[i] = a_H0[i]; double E0, kk0, pp0; int out_index = 1; ofstream stream; string temp; char *output_name; if(warm){ while(NEXTOUT <= GTW) NEXTOUT += DTPRINT; while(NEXTOUT <= *GTIME) NEXTOUT += DTPRINT; if(rank == 0) HostSafeCall(AcquireEnergy(&E0, path)); } else{ HostSafeCall(Calculate_Energy(pos_CD, vel_CD, N, EPS, TPB, NGPU, rank, ppG, &kk0, &pp0, plummer_core, plummer_mass, rscale, mscale, devices)); E0 = kk0 + pp0; temp = path + "energy.dat"; output_name = to_char(temp); stream.open(output_name, ios::app); stream<<scientific<<setprecision(16); stream<<0.0<<" "<<0.0<<" "<<kk0<<" "<<pp0<<" "<<2.*kk0/fabs(pp0)<<endl; stream.close(); } if(rank == 0){ temp = path + "HiGPUslog.dat"; output_name = to_char(temp); stream.open(output_name, ios::app); stream<<"==============================================="<<endl; stream<<scientific<<setprecision(16); stream<<"#Initial Total Energy : #"<<E0<<"#"<<endl; stream.close(); string str = to_string(FMAX) + ".dat"; stream.open(to_char(str), ios::out); for(unsigned int i = 0; i < M; i++) stream<<pos_CH[i].x<<" "<<pos_CH[i].y<<" "<<pos_CH[i].z<<" "<<vel_CH[i].x<<" "<<vel_CH[i].y<<" "<<vel_CH[i].z<<" "<<pos_CH[i].w<<endl; stream.close(); } MPISafeCall(MPI_Barrier(MPI_COMM_WORLD)); double start = 0.0; double end = 0.0; double start_program = 0.0; double end_program = 0.0; HiGPUsTimes *Times; Times = new HiGPUsTimes [N+1]; for(unsigned int i = 0; i <= N; i++){ Times[i].next_time = 0.0; Times[i].cpynext_time = 0.0; Times[i].predictor_time = 0.0; Times[i].evaluation_time = 0.0; Times[i].reduce_time = 0.0; Times[i].reposition_time = 0.0; Times[i].memcpy2_time = 0.0; Times[i].mpireduce_time = 0.0; Times[i].corrector_time = 0.0; Times[i].reconstruct_time = 0.0; Times[i].energy_time = 0.0; Times[i].rtime = 0.0; Times[i].thr = 0.0; Times[i].totthr = 0.0; Times[i].bfac = 0.0; } // HostSafeCall(GPU_memcheck(NGPU, devices, __FILE__, __LINE__)); HostSafeCall(CPU_memcheck(__FILE__, __LINE__, path)); struct timeval tv; gettimeofday(&tv, NULL); int sec = tv.tv_sec; int microsec = tv.tv_usec; start_program = 
sec + microsec * 0.000001; if(rank == 0) HostSafeCall(CheckHDDMemory(cleanstop, path)); MPISafeCall(MPI_Barrier(MPI_COMM_WORLD)); MPISafeCall(MPI_Bcast(cleanstop, 1, MPI_C_BOOL, 0, MPI_COMM_WORLD)); if(*cleanstop) return HNoError; #ifdef EXPANDING double plum0 = plummer_core; #endif do{ if(*GTIME >= TTIME){ //ClearAll(); gettimeofday(&tv, NULL); int sec = tv.tv_sec; int microsec = tv.tv_usec; end_program = sec + microsec * 0.000001; delete [] p_v_a3; delete [] a3_H; delete [] a_H; delete [] a_H1; delete [] mpi_red_aux; delete [] mpi_red; delete [] next; delete [] vetint; delete [] counter; delete [] Times; if(rank == 0){ temp = path + "HiGPUslog.dat"; output_name = to_char(temp); stream.open(output_name, ios::app); stream<<scientific<<setprecision(6); stream<<" \n Total integration time : "<<end_program-start_program<<" seconds "<<endl; stream.close(); } return HNoError; } get_times(&start); #ifdef GPUCORR HostSafeCall(NextParticles(N, ompthreads, counter, vetint, *ATIME, local_time, step_D[0], next, &nextsize)); #else HostSafeCall(NextParticles(N, ompthreads, counter, vetint, *ATIME, local_time, step, next, &nextsize)); #endif *GTIME = *ATIME; #ifdef EXPANDING plummer_core = plum0*exp(GTIME); #endif unsigned int dim2 = ceil((double)nextsize/TPB)*TPB; for(unsigned int i = nextsize; i < dim2; i++) next[i] = -1; get_times(&end); set_times(end-start, &(Times[nextsize].next_time)); get_times(&start); for(unsigned int i = 0; i < NGPU; i++){ DeviceSafeCall(cudaSetDevice(devices[i])); DeviceSafeCall(cudaMemcpy(next_D[i], next, dim2 * sizeof( int ), cudaMemcpyHostToDevice)); } get_times(&end); set_times(end-start, &(Times[nextsize].cpynext_time)); get_times(&start); for(unsigned int i = 0; i < NGPU; i++){ DeviceSafeCall(cudaSetDevice(devices[i])); int BL = ppG/TPB + ceil((double)nextsize/TPB); int istart = ppG*(i+rank*NGPU); Predictor <<<BL, TPB>>> (*GTIME, pos_PD[i], vel_PD[i], acc_PD[i], pos_CD[i], vel_CD[i], loc_D[i], a_tot_D[i], a1_tot_D[i], a2_tot_D[i], a3_D[i], istart, next_D[i], ppG, N); } get_times(&end); set_times(end-start, &(Times[nextsize].predictor_time)); THREADS = TPB; bfmax = BFMAX; *ATIME = 1.0e+10; Bfactor = 1; BLOCKS = ceil((double)nextsize/THREADS); while(THREADS*BLOCKS < GPUMINTHREADS && THREADS > 32){ THREADS /= 2; bfmax *= 2; BLOCKS = ceil((double)nextsize/THREADS); } DIMENSION = THREADS*BLOCKS; while(THREADS*BLOCKS < GPUMINTHREADS && Bfactor < bfmax){ BLOCKS *= 2; Bfactor *= 2; } SHARED = THREADS * (sizeof(double4) + 2 * sizeof(float4)); for(unsigned int i = 0; i < NGPU; i++){ DeviceSafeCall(cudaSetDevice(devices[i])); DeviceSafeCall(cudaThreadSynchronize()); } set_times((double)Bfactor, &(Times[nextsize].bfac)); set_times((double)THREADS, &(Times[nextsize].thr)); set_times((double)BLOCKS*THREADS, &(Times[nextsize].totthr)); get_times(&start); for(unsigned int i = 0; i < NGPU; i++){ DeviceSafeCall(cudaSetDevice(devices[i])); DeviceCheckErrors(); int istart = ppG*(i+rank*NGPU); evaluation<<< BLOCKS, THREADS, SHARED >>> ( N, pos_PD[i], vel_PD[i], acc_PD[i], a_D[i], a1_D[i], a2_D[i], istart, ppG, Bfactor, DIMENSION, next_D[i], loc_D[i], *GTIME, EPS, plummer_core, plummer_mass, rscale, mscale); } get_times(&end); set_times(end-start, &(Times[nextsize].evaluation_time)); int bl = BLOCKS; int bf = Bfactor; SHARED = THREADS * sizeof(double4); for(unsigned int i = 0; i < NGPU; i++){ DeviceSafeCall(cudaSetDevice(devices[i])); DeviceSafeCall(cudaThreadSynchronize()); DeviceCheckErrors(); } get_times(&start); while(bf != 1){ bl>>=1; bf>>=1; for(unsigned int i = 0; i < NGPU; i++){ 
DeviceSafeCall(cudaSetDevice(devices[i])); reduce<<< 3*bl, THREADS, SHARED>>>(a_D[i], a1_D[i], a2_D[i], bf, DIMENSION); } for(unsigned int i = 0; i < NGPU; i++){ DeviceSafeCall(cudaSetDevice(devices[i])); DeviceSafeCall(cudaThreadSynchronize()); DeviceCheckErrors(); } } get_times(&end); set_times(end-start, &(Times[nextsize].reduce_time)); get_times(&start); for(unsigned int i = 0; i < NGPU; i++){ DeviceSafeCall(cudaSetDevice(devices[i])); reposition<<<DIMENSION/THREADS, THREADS>>>(a_D[i], a1_D[i], a2_D[i], a_temp_Dev[i], nextsize); } get_times(&end); set_times(end-start, &(Times[nextsize].reposition_time)); unsigned int cpy_size = 3*nextsize; #ifdef APE for(unsigned int i = 1; i < NGPU; i++){ int SHRD = THREADS*sizeof(double4); DeviceSafeCall(cudaSetDevice(devices[0])); DeviceSafeCall(cudaMemcpyPeer(p_v_a3_Dev[0], devices[0], a_temp_Dev[i], devices[i], cpy_size*sizeof(double4))); sum_partial<<<3*DIMENSION/THREADS, THREADS, SHRD>>>(p_v_a3_Dev[0], a_temp_Dev[0], 3*nextsize); DeviceSafeCall(cudaThreadSynchronize()); } // QUI VA AGGIUNTA LA FUNZIONE CHE RIDUCE A_TEMP_DEV SU TUTTE LE GPU 0 E LO DAI POI A TUTTE LE GPU DI TUTTI I NODI : not yet implemented #else get_times(&start); for(unsigned int i = 0; i < NGPU; i++){ DeviceSafeCall(cudaSetDevice(devices[i])); DeviceCheckErrors(); DeviceSafeCall(cudaMemcpy(&a_H[i*cpy_size], a_temp_Dev[i], cpy_size*sizeof(double4), cudaMemcpyDeviceToHost)); } get_times(&end); set_times(end-start, &(Times[nextsize].memcpy2_time)); get_times(&start); HostSafeCall(ReduceAll(cpy_size, N, NGPU, nextsize, a_H, a_H1, mpi_red_aux, mpi_red, next)); get_times(&end); set_times(end-start, &(Times[nextsize].mpireduce_time)); #endif #ifdef GPUCORR get_times(&start); for(unsigned int i = 0; i < NGPU; i++){ DeviceSafeCall(cudaSetDevice(devices[i])); DeviceSafeCall(cudaMemcpy(a_temp_Dev[i], a_H, 3*nextsize*sizeof( double4 ), cudaMemcpyHostToDevice )); } for(unsigned int i = 0; i < NGPU; i++){ DeviceSafeCall(cudaSetDevice(devices[i])); Corrector_gpu<<<DIMENSION/THREADS, THREADS>>>(*GTIME, loc_D[i], step_D[i], next_D[i], nextsize, pos_CD[i], vel_CD[i], a_tot_D[i], a1_tot_D[i], a2_tot_D[i], a_temp_Dev[i], a3_D[i], ETA6, ETA4, DTMAX, DTMIN, N);// chiama direttamete corrector } DeviceSafeCall(cudaSetDevice(devices[0])); DeviceSafeCall(cudaThreadSynchronize()); for(unsigned int i = 0; i < nextsize; i++){ int who = next[i]; local_time[who] = *GTIME; *ATIME = min (local_time[who] + step_D[0][who], *ATIME); } get_times(&end); set_times(end-start, &(Times[nextsize].corrector_time)); #elif APE for(unsigned int i = 0; i < NGPU; i++){ DeviceSafeCall(cudaSetDevice(devices[i])); Corrector_gpu<<<DIMENSION/THREADS, THREADS>>>(*GTIME, loc_D[i], step_D[i], next_D[i], nextsize, pos_CD[i], vel_CD[i], a_tot_D[i], a1_tot_D[i], a2_tot_D[i], a_temp_Dev[i], a3_D[i], ETA6, ETA4, DTMAX, DTMIN, N);// chiama direttamete corrector } DeviceSafeCall(cudaSetDevice(devices[0])); DeviceSafeCall(cudaThreadSynchronize()); DeviceSafeCall(cudaMemcpy(step, step_D[0], N*sizeof(double), cudaMemcpyDeviceToHost)); for(unsigned int i = 0; i < nextsize; i++){ int who = next[i]; local_time[who] = *GTIME; *ATIME = min (local_time[who] + step[who], *ATIME); } #else // corrector su cpu for(unsigned int i = 0; i < NGPU; i++){ DeviceSafeCall(cudaSetDevice(devices[i])); update_local_time<<<DIMENSION/THREADS, THREADS>>>(next_D[i], loc_D[i], *GTIME); } get_times(&start); HostSafeCall(Corrector(GTIME, ATIME, local_time, step, next, nextsize, pos_CH, vel_CH, a_H0, a_H1, a3_H, p_v_a3, ETA6, ETA4, DTMAX, DTMIN, N)); get_times(&end); 
set_times(end-start, &(Times[nextsize].corrector_time)); #endif #ifndef APE #ifndef GPUCORR get_times(&start); for(unsigned int i = 0; i < NGPU; i++){ DeviceSafeCall(cudaSetDevice(devices[i])); DeviceSafeCall(cudaMemcpy(p_v_a3_Dev[i], p_v_a3, 3*nextsize*sizeof( double4 ), cudaMemcpyHostToDevice )); DeviceSafeCall(cudaMemcpy(a_temp_Dev[i], a_H, 3*nextsize*sizeof( double4 ), cudaMemcpyHostToDevice )); } get_times(&end); set_times(end-start, &(Times[nextsize].rtime)); for(unsigned int i = 0; i < NGPU; i++){ DeviceSafeCall(cudaSetDevice(devices[i])); int BB = 6*DIMENSION/THREADS; Reconstruct<<< BB, THREADS >>>(next_D[i], nextsize, pos_CD[i], vel_CD[i], a3_D[i], a_tot_D[i], a1_tot_D[i], a2_tot_D[i], p_v_a3_Dev[i], a_temp_Dev[i]); } get_times(&end); set_times(end-start, &(Times[nextsize].reconstruct_time)); #endif #endif if((*GTIME+GTW) >= NEXTOUT ){ DeviceSafeCall(cudaSetDevice(devices[0])); DeviceSafeCall(cudaMemcpy( pos_CH, pos_CD[0], malloc_db4, cudaMemcpyDeviceToHost )); DeviceSafeCall(cudaMemcpy( vel_CH, vel_CD[0], malloc_db4, cudaMemcpyDeviceToHost )); CheckBlocks(step, M, path); double kk,pp; get_times(&start); HostSafeCall(Calculate_Energy(pos_CD, vel_CD, N, EPS, TPB, NGPU, rank, ppG, &kk, &pp, plummer_core, plummer_mass, rscale, mscale, devices)); get_times(&end); set_times(end-start, &(Times[nextsize].energy_time)); if(rank == 0){ HostSafeCall(CheckHDDMemory(cleanstop, path)); #ifdef CHECK_TIMES string ffff = to_string(out_index + FMAX); ffff = path + "times_"+ffff+".dat"; stream.open(to_char(ffff), ios::out); stream<<scientific<<setprecision(6); stream<<"N "<<" NEXT "<<" CPY_NEXT"<<" PRED "<<" EVAL "<<" REDU "<<" REPOS "<<" CPY_ACC "<<" MPI "<<" CORR "<<" CPY_REC "<<" RECON "<<" THREADS "<<" TOTTHREAD "<<" BFACT "<<endl; for(unsigned int i = 1; i <= N; i++){ if(Times[i].next_time != 0.0) stream<<i<<" "<< Times[i].next_time<<" "<< Times[i].cpynext_time<<" "<< Times[i].predictor_time<<" "<< Times[i].evaluation_time<<" "<< Times[i].reduce_time<<" "<< Times[i].reposition_time<<" "<< Times[i].memcpy2_time<<" "<< Times[i].mpireduce_time<<" "<< Times[i].corrector_time<<" "<< Times[i].rtime<<" "<< Times[i].reconstruct_time<<" "<< Times[i].thr<<" "<< Times[i].totthr<<" "<< Times[i].bfac<<endl; Times[i].next_time = 0.0; } stream.close(); #endif double E = kk + pp; temp = path + "energy.dat"; output_name = to_char(temp); stream.open(output_name, ios::app); stream<<scientific<<setprecision(16); stream<<*GTIME+GTW<<" "<<fabs((E-E0)/E0)<<" "<<kk<<" "<<pp<<" "<<2.*kk/fabs(pp)<<endl; stream.close(); string file_name = path + to_string(out_index + FMAX); file_name += ".dat"; stream.open(to_char(file_name), ios::out); stream<<scientific<<setprecision(16); for(unsigned int i = 0; i < M; i++) stream<<pos_CH[i].x<<" "<<pos_CH[i].y<<" "<<pos_CH[i].z<<" "<<vel_CH[i].x<<" "<<vel_CH[i].y<<" "<<vel_CH[i].z<<" "<<pos_CH[i].w<<endl; stream.close(); out_index++; } MPISafeCall(MPI_Barrier(MPI_COMM_WORLD)); MPISafeCall(MPI_Bcast(cleanstop, 1, MPI_C_BOOL, 0, MPI_COMM_WORLD)); if(*cleanstop) return HNoError; NEXTOUT+=DTPRINT; } }while(1); }
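
// A compact sketch (hypothetical helper, not the original NextParticles) of the block
// time-step bookkeeping driving the loop above: the next global time ATIME is the
// minimum of local_time[i] + step[i], and the "next" set contains exactly the particles
// whose update falls on that time. Comparing doubles for equality is safe here only
// because the individual steps are typically kept as powers of two between DTMIN and DTMAX.
#include <algorithm>
#include <vector>

static std::vector<int> select_active_particles(const double* local_time, const double* step,
                                                unsigned int N, double* ATIME) {
    *ATIME = local_time[0] + step[0];
    for (unsigned int i = 1; i < N; ++i)
        *ATIME = std::min(*ATIME, local_time[i] + step[i]);
    std::vector<int> next;
    for (unsigned int i = 0; i < N; ++i)
        if (local_time[i] + step[i] == *ATIME)  // particle i is due at the new global time
            next.push_back((int)i);
    return next;
}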
#include <cassert> #include <cstring> #include <iostream> #include <tuple> #include <vector> namespace fastertransformer { static inline void set_alpha(uint32_t& alpha, float norm, Data_type dtype) { if (dtype == DATA_TYPE_FP16) { half2 h2 = __float2half2_rn(norm); alpha = reinterpret_cast<const uint32_t&>(h2); } else if (dtype == DATA_TYPE_FP32) { alpha = reinterpret_cast<const uint32_t&>(norm); } else if (dtype == DATA_TYPE_INT32) { int32_t inorm = static_cast<int32_t>(norm); alpha = reinterpret_cast<const uint32_t&>(inorm); } else { assert(false); } } class FusedMHARunnerFP16v2::mhaImpl { public: mhaImpl(FusedMHARunnerFP16v2* interface) : interface(interface) , sm(interface->mSm) , xmmaKernel(getXMMAKernelsV2(DATA_TYPE_FP16, sm)) { assert((sm == kSM_72 || sm == kSM_75 || sm == kSM_80 || sm == kSM_86) && "Unsupported architecture"); params.clear(); } ~mhaImpl() {} size_t getPackedMaskSizeInBytes() const { // check that we initialized assert(xmmas_m > 0); assert(threads_per_cta > 0); assert(interface->mB > 0); return interface->mB * xmmas_m * threads_per_cta * sizeof(uint32_t); } void setup(const int S, const int B) { // TODO these implementation details might be better centralized into the XMMA code, since they are needed in // several places (also outside of this plugin) size_t warps_m = 2, warps_n = 2, warps_k = 1; if (sm == 70) { if (S == 64 || S == 96) { warps_m = 2; warps_n = 2; } else if (S == 128) { warps_m = 1; warps_n = 4; } else if (S == 256 || S == 384) { warps_m = 1; warps_n = 8; } else { assert(false && "Unsupporte seqlen"); } } else { if (S == 64 || S == 96 || S == 128) { warps_m = 2; warps_n = 2; } else if (S == 256) { warps_m = 1; warps_n = 4; } else if (S == 384) { warps_m = 1; warps_n = 8; } else { assert(false && "Unsupporte seqlen"); } } // The number of threads per CTA. threads_per_cta = warps_m * warps_n * warps_k * 32; // The number of xmmas in the M dimension. We use one uint32_t per XMMA in the M dimension. xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m); // The number of xmmas in the N dimension. 
xmmas_n = (S + 16 * warps_n - 1) / (16 * warps_n); const float scale_bmm1 = interface->mRsqrtHeadSize; const float scale_softmax = 1.f; // Seems to be only required for int8 const float scale_bmm2 = 1.f; Data_type scale_type = DATA_TYPE_FP16; set_alpha(params.scale_bmm1, scale_bmm1, scale_type); set_alpha(params.scale_softmax, scale_softmax, scale_type); set_alpha(params.scale_bmm2, scale_bmm2, scale_type); params.b = B; params.h = interface->mNumHeads; params.s = S; params.d = interface->mHeadSize; // mLdQKV = 3 * B * mNumHeads * mHeadSize; // mLdOut = B * mNumHeads * mHeadSize; params.qkv_stride_in_bytes = 3 * interface->mNumHeads * interface->mHeadSize * sizeof(half); params.packed_mask_stride_in_bytes = xmmas_m * threads_per_cta * sizeof(uint32_t); params.o_stride_in_bytes = interface->mNumHeads * interface->mHeadSize * sizeof(half); } void run(const void* qkvPtr, const void* maskPtr, const void* cuSeqlenPtr, void* output, void* workspace, cudaStream_t stream) { params.qkv_ptr = const_cast<void*>(qkvPtr); params.packed_mask_ptr = const_cast<void*>(maskPtr); params.o_ptr = output; params.cu_seqlens = static_cast<int*>(const_cast<void*>(cuSeqlenPtr)); xmmaKernel->run(params, stream); check_cuda_error(cudaPeekAtLastError()); } bool isValid(int s) const { return xmmaKernel->isValid(s); } int getSFromMaxSeqLen(const int max_seq_len) { int S = 1024; if (max_seq_len <= 64) { S = 64; } else if (max_seq_len <= 96) { S = 96; } else if (max_seq_len <= 128) { S = 128; } else if (max_seq_len <= 256) { S = 256; } else if (max_seq_len <= 384) { S = 384; } else if (max_seq_len <= 512) { S = 512; } return S; } private: FusedMHARunnerFP16v2* interface; Fused_multihead_attention_params_v2 params; int sm; const FusedMultiHeadAttentionXMMAKernelV2* xmmaKernel; size_t xmmas_m; size_t xmmas_n; size_t threads_per_cta; }; FusedMHARunnerFP16v2::FusedMHARunnerFP16v2(const int numHeads, const int headSize, const int sm, const float q_scaling) : MHARunner(numHeads, headSize, 2, q_scaling) , mSm(sm) , pimpl(new mhaImpl(this)) { } void FusedMHARunnerFP16v2::setup(const int S, const int B) { MHARunner::setup(S, B); pimpl->setup(S, B); } size_t FusedMHARunnerFP16v2::getWorkspaceSize() const { return 0; } void FusedMHARunnerFP16v2::run(const void* input, const void* mask, void* workspace, void* output, cudaStream_t stream) { assert(false && "not implemented"); } void FusedMHARunnerFP16v2::run(const void* input, const void* mask, const void* seqlen, void* workspace, void* output, cudaStream_t stream) { pimpl->run(input, mask, seqlen, output, workspace, stream); } bool FusedMHARunnerFP16v2::isValid(int s) const { return pimpl->isValid(s); } void FusedMHARunnerFP16v2::setScaleList(const float scaleQkv, const float dqProbs, const float scaleCtx) { } int FusedMHARunnerFP16v2::getSFromMaxSeqLen(const int max_seq_len) { return pimpl->getSFromMaxSeqLen(max_seq_len); } // Int8 starts here: TODO refactor the duplicate stuff class FusedMHARunnerInt8v2::mhaImpl { public: mhaImpl(FusedMHARunnerInt8v2* interface) : interface(interface) , sm(interface->mSm) , xmmaKernel(getXMMAKernelsV2(DATA_TYPE_INT8, sm)) { assert((sm == kSM_72 || sm == kSM_75 || sm == kSM_80 || sm == kSM_86) && "Unsupported architecture"); params.clear(); } ~mhaImpl() {} size_t getPackedMaskSizeInBytes() const { assert(xmmas_m > 0); assert(threads_per_cta > 0); assert(interface->mB > 0); return interface->mB * xmmas_m * threads_per_cta * sizeof(uint32_t); } void setup(const int S, const int B) { size_t warps_m, warps_n, warps_k = 1; if ((sm == 75 || sm == 80) && S 
== 64) { warps_m = 2; warps_n = 2; } else if (S == 128) { warps_m = 2; warps_n = 2; } else if (S == 192 || S == 256) { warps_m = 1; warps_n = 4; } else if (S == 384) { warps_m = 1; warps_n = 8; } else { assert(false && "Unsupported seqlen."); } // The number of threads per CTA. threads_per_cta = warps_m * warps_n * warps_k * 32; // The number of xmmas in the M dimension. We use one uint32_t per XMMA in the M dimension. xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m); // The number of xmmas in the N dimension. xmmas_n = (S + 16 * warps_n - 1) / (16 * warps_n); params.b = B; params.h = interface->mNumHeads; params.s = S; params.d = interface->mHeadSize; params.use_int8_scale_max = true; params.packed_mask_stride_in_bytes = xmmas_m * threads_per_cta * sizeof(uint32_t); params.qkv_stride_in_bytes = 3 * interface->mNumHeads * interface->mHeadSize * sizeof(int8_t); params.o_stride_in_bytes = interface->mNumHeads * interface->mHeadSize * sizeof(int8_t); float scaleQkv = interface->mScaleQkv; float scaleCtx = interface->mScaleCtx; float scaleBmm1 = scaleQkv * scaleQkv * (1.f / sqrtf(interface->mHeadSize)); float scaleBmm2 = interface->mDqProbs * scaleQkv / scaleCtx; float scaleSoftmax = 1.f / interface->mDqProbs; params.scale_bmm1 = reinterpret_cast<const uint32_t&>(scaleBmm1); params.scale_bmm2 = reinterpret_cast<const uint32_t&>(scaleBmm2); params.scale_softmax = reinterpret_cast<const uint32_t&>(scaleSoftmax); params.enable_i2f_trick = -double(1 << 22) * double(scaleBmm2) <= -128.f && double(1 << 22) * double(scaleBmm2) >= 127.f; } void run(const void* qkvPtr, const void* maskPtr, const void* cuSeqlenPtr, void* output, void* workspace, cudaStream_t stream) { params.qkv_ptr = const_cast<void*>(qkvPtr); params.o_ptr = output; params.cu_seqlens = static_cast<int*>(const_cast<void*>(cuSeqlenPtr)); xmmaKernel->run(params, stream); } bool isValid(int s) const { return xmmaKernel->isValid(s); } int getSFromMaxSeqLen(const int max_seq_len) { int S = 1024; if (sm == 75 || sm == 80) { if (max_seq_len <= 64) { S = 64; } else if (max_seq_len <= 128) { S = 128; } else if (max_seq_len <= 192) { S = 192; } else if (max_seq_len <= 256) { S = 256; } else if (max_seq_len <= 384) { S = 384; } else if (max_seq_len <= 512) { S = 512; } } else { if (max_seq_len <= 128) { S = 128; } else if (max_seq_len <= 192) { S = 192; } else if (max_seq_len <= 256) { S = 256; } else if (max_seq_len <= 384) { S = 384; } else if (max_seq_len <= 512) { S = 512; } } return S; } private: FusedMHARunnerInt8v2* interface; Fused_multihead_attention_params_v2 params; int sm; const FusedMultiHeadAttentionXMMAKernelV2* xmmaKernel; size_t xmmas_m; size_t xmmas_n; size_t threads_per_cta; }; FusedMHARunnerInt8v2::FusedMHARunnerInt8v2(const int numHeads, const int headSize, const int sm) : MHARunner(numHeads, headSize, 1) , mSm(sm) , pimpl(new mhaImpl(this)) { } void FusedMHARunnerInt8v2::setScaleList(const float scaleQkv, const float dqProbs, const float scaleCtx) { mDqProbs = dqProbs; mScaleQkv = scaleQkv; mScaleCtx = scaleCtx; } void FusedMHARunnerInt8v2::setup(const int S, const int B) { pimpl->setup(S, B); } size_t FusedMHARunnerInt8v2::getWorkspaceSize() const { return 0; } void FusedMHARunnerInt8v2::run(const void* input, const void* mask, void* workspace, void* output, cudaStream_t stream) { assert(false && "Not implemented"); } void FusedMHARunnerInt8v2::run(const void* input, const void* mask, const void* seqlen, void* workspace, void* output, cudaStream_t stream) { pimpl->run(input, mask, seqlen, output, workspace, stream); } bool 
FusedMHARunnerInt8v2::isValid(int s) const { return pimpl->isValid(s); } int FusedMHARunnerInt8v2::getSFromMaxSeqLen(const int max_seq_len) { return pimpl->getSFromMaxSeqLen(max_seq_len); } } // namespace fastertransformer
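
// A small illustrative helper (an assumption, not part of FasterTransformer's API)
// showing the padding policy encoded by getSFromMaxSeqLen above for the FP16 path:
// the runtime max sequence length is rounded up to the next bucket the fused kernels
// were built for, falling back to 1024 beyond 512.
#include <array>

static int round_up_to_supported_seqlen_fp16(int max_seq_len) {
    static const std::array<int, 6> buckets = {64, 96, 128, 256, 384, 512};
    for (int s : buckets)
        if (max_seq_len <= s) return s;
    return 1024;  // default bucket when max_seq_len exceeds 512, matching the code above
}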
// layernorm code modified from Nvidia's DeepLearningExamples // https://github.com/NVIDIA/DeepLearningExamples/blob/master/FasterTransformer/v3.1/fastertransformer/cuda/open_decoder.cu#L1369-L81429 template <typename T> __global__ void add_bias_input_layernorm(T* out, const T* input, const T* bias, const T* gamma, const T* beta, int m, int n) { int tid = threadIdx.x; __shared__ float s_mean; __shared__ float s_variance; float mean = 0.0f; float variance = 0.0f; float local_out = 0.0f; local_out += (float)(out[blockIdx.x * n + tid] + input[blockIdx.x * n + tid] + __ldg(&bias[tid])); mean = blockReduceSum<float>(local_out); if(threadIdx.x == 0) s_mean = mean / n; __syncthreads(); variance = blockReduceSum<float>((local_out - s_mean) * (local_out - s_mean)); if(threadIdx.x == 0) s_variance = variance / n + 1e-6f; __syncthreads(); out[blockIdx.x * n + tid] = (T)(((local_out - s_mean) * rsqrtf(s_variance)) * (float)(__ldg(&gamma[tid])) + (float)(__ldg(&beta[tid]))); } template <> __global__ void add_bias_input_layernorm(half* out, const half* input, const half* bias, const half* gamma, const half* beta, int m, int n) { int tid = threadIdx.x; __shared__ float s_mean; __shared__ float s_variance; float mean = 0.0f; float variance = 0.0f; float2 local_out_fp2; half2* out_ptr = (half2*)out; const half2* input_ptr = (const half2*)input; const half2* bias_ptr = (const half2*)bias; const half2* gamma_ptr = (const half2*)gamma; const half2* beta_ptr = (const half2*)beta; float local_out = 0.0f; int id = blockIdx.x * n / 2 + tid; local_out_fp2 = __half22float2(__hadd2(__hadd2(out_ptr[id], input_ptr[id]), __ldg(&bias_ptr[tid]))); local_out += local_out_fp2.x; local_out += local_out_fp2.y; mean = blockReduceSum<float>(local_out); if(threadIdx.x == 0) s_mean = mean / n; __syncthreads(); variance = (local_out_fp2.x - s_mean) * (local_out_fp2.x - s_mean); variance += (local_out_fp2.y - s_mean) * (local_out_fp2.y - s_mean); variance = blockReduceSum<float>(variance); if(threadIdx.x == 0) s_variance = rsqrtf(variance / n + 1e-6f); __syncthreads(); float2 gamma_val = __half22float2(__ldg(&gamma_ptr[tid])); float2 beta_val = __half22float2(__ldg(&beta_ptr[tid])); local_out_fp2.x = (local_out_fp2.x - s_mean) * s_variance * gamma_val.x + beta_val.x; local_out_fp2.y = (local_out_fp2.y - s_mean) * s_variance * gamma_val.y + beta_val.y; out_ptr[id] = __float22half2_rn(local_out_fp2); } template <typename T> __global__ void add_bias_input_layernorm_v2(T *out, const T *__restrict input, const T *__restrict bias, const T *__restrict gamma, const T *__restrict beta, int n) { const int ite = 4; const int tid = threadIdx.x; const int bid = blockIdx.x; __shared__ float s_mean; __shared__ float s_variance; float mean = 0.0f; float variance = 0.0f; float local_out[ite]; float sum = 0.0f; #pragma unroll for (int i = 0; i < ite; i++) { int col_id = i * blockDim.x + tid; int id = bid * n + col_id; local_out[i] = (float)(out[id] + __ldg(&input[id]) + __ldg(&bias[col_id])); sum += local_out[i]; } mean = blockReduceSum<float>(sum); if (tid == 0) s_mean = mean / n; __syncthreads(); float var = 0.0f; #pragma unroll for (int i = 0; i < ite; i++) { float diff = local_out[i] - s_mean; var += diff * diff; } variance = blockReduceSum<float>(var); if (tid == 0) s_variance = rsqrtf(variance / n + 1e-6f); __syncthreads(); #pragma unroll for (int i = 0; i < ite; i++) { int col_id = i * blockDim.x + tid; int id = bid * n + col_id; out[id] = (T)((local_out[i] - s_mean) * s_variance * (float)__ldg(&gamma[col_id]) + (float)__ldg(&beta[col_id])); 
} } template <> __global__ void add_bias_input_layernorm_v2(half *out, const half *__restrict input, const half *__restrict bias, const half *__restrict gamma, const half *__restrict beta, int n) { const int ite = 4; const int tid = threadIdx.x; const int bid = blockIdx.x; __shared__ float s_mean; __shared__ float s_variance; float mean = 0.0f; float variance = 0.0f; half2 local_out_half2[ite]; half2 *out_ptr = (half2 *)out; const half2 *input_ptr = (const half2 *)input; const half2 *bias_ptr = (const half2 *)bias; const half2 *gamma_ptr = (const half2 *)gamma; const half2 *beta_ptr = (const half2 *)beta; // float sum = 0.0f; half2 sum = __float2half2_rn(0.0f); #pragma unroll for (int i = 0; i < ite; i++) { int col_id = i * blockDim.x + tid; int id = bid * n / 2 + col_id; local_out_half2[i] = out_ptr[id] + __ldg(&input_ptr[id]) + __ldg(&bias_ptr[col_id]); sum += local_out_half2[i]; } mean = blockReduceSum<float>((float)(sum.x + sum.y)); if (threadIdx.x == 0) s_mean = mean / n; __syncthreads(); float var = 0.0f; half2 s_mean_2 = __float2half2_rn(s_mean); #pragma unroll for (int i = 0; i < ite; i++) { local_out_half2[i] = local_out_half2[i] - s_mean_2; float v1 = (float)local_out_half2[i].x; float v2 = (float)local_out_half2[i].y; var += v1 * v1 + v2 * v2; } variance = blockReduceSum<float>(var); if (threadIdx.x == 0) s_variance = rsqrtf(variance / n + 1e-6f); __syncthreads(); half2 s_var_2 = __float2half2_rn(s_variance); #pragma unroll for (int i = 0; i < ite; i++) { int col_id = i * blockDim.x + tid; int id = bid * n / 2 + col_id; out_ptr[id] = local_out_half2[i] * s_var_2 * __ldg(&gamma_ptr[col_id]) + __ldg(&beta_ptr[col_id]); } } template <int item_per_thread> __global__ void decoder_norm1_kernel_opt(const float *__restrict input, const float *__restrict gamma, const float *__restrict beta, float *output, int m, int n) { int tid = threadIdx.x; __shared__ float s_mean; __shared__ float s_variance; float mean = 0.0f; float variance = 0.0f; //float local_out = tid < n ? (float)(__ldg(&input[blockIdx.x * n + tid])) : 0.0f; float local_out[item_per_thread]; for (int i = 0; i < item_per_thread; i++) { local_out[i] = (tid * item_per_thread + i) < n ? (float)(__ldg(&input[blockIdx.x * n + tid * item_per_thread + i])) : 0.0f; } mean = blockReduceSum_opt<float, item_per_thread>(local_out); if (threadIdx.x == 0) s_mean = mean / n; __syncthreads(); float tmp[item_per_thread]; for (int i = 0; i < item_per_thread; i++) { tmp[i] = (tid * item_per_thread + i) < n ? 
(local_out[i] - s_mean) * (local_out[i] - s_mean) : 0.0f; } //要保证第二次归约能把所有的和算出来,这个item_per_thread需要设置的足够大 variance = blockReduceSum_opt<float, item_per_thread>(tmp); if (threadIdx.x == 0) s_variance = rsqrtf(variance / n + 1e-6); __syncthreads(); for (int i = 0; i < item_per_thread; i++) { if (tid * item_per_thread + i < n) { output[blockIdx.x * n + tid * item_per_thread + i] = (float)(((local_out[i] - s_mean) * s_variance) * (float)(__ldg(&gamma[tid * item_per_thread + i])) + (float)(__ldg(&beta[tid * item_per_thread + i]))); } } } template <int item_per_thread> __global__ void decoder_norm1_kernel_opt(const half *__restrict input, const half *__restrict gamma, const half *__restrict beta, half *output, int m, int n) { const int tid = threadIdx.x; __shared__ float s_mean; __shared__ float s_variance; float mean = 0.0f; float variance = 0.0f; float2 local_out_fp2[item_per_thread]; const half2 *input_ptr = (const half2 *)input; const half2 *gamma_ptr = (const half2 *)gamma; const half2 *beta_ptr = (const half2 *)beta; half2 *output_ptr = (half2 *)output; float local_out[item_per_thread]; for (int i = 0; i < item_per_thread; i++) { local_out[i] = 0.0f; } for (int i = 0; i < item_per_thread; i++) { local_out_fp2[i] = (tid * item_per_thread + i) < n ? __half22float2((__ldg(&input_ptr[blockIdx.x * (n >> 1) + tid * item_per_thread + i]))) : make_float2(0.0f, 0.0f); local_out[i] += local_out_fp2[i].x; local_out[i] += local_out_fp2[i].y; } mean = blockReduceSum_opt<float, item_per_thread>(local_out); if (tid == 0) s_mean = mean / n; __syncthreads(); float tmp[item_per_thread]; for (int i = 0; i < item_per_thread; i++) { tmp[i] = (tid * item_per_thread + i) < n ? (local_out_fp2[i].x - s_mean) * (local_out_fp2[i].x - s_mean) + (local_out_fp2[i].y - s_mean) * (local_out_fp2[i].y - s_mean) : 0.0f; } variance = blockReduceSum_opt<float, item_per_thread>(tmp); if (tid == 0) s_variance = rsqrtf(variance / n + 1e-6); __syncthreads(); for (int i = 0; i < item_per_thread; i++) { if (tid * item_per_thread + i < n) { float2 gamma_val = __half22float2(__ldg(&gamma_ptr[tid * item_per_thread + i])); float2 beta_val = __half22float2(__ldg(&beta_ptr[tid * item_per_thread + i])); local_out_fp2[i].x = (local_out_fp2[i].x - s_mean) * s_variance * gamma_val.x + beta_val.x; local_out_fp2[i].y = (local_out_fp2[i].y - s_mean) * s_variance * gamma_val.y + beta_val.y; output_ptr[blockIdx.x * (n >> 1) + tid * item_per_thread + i] = __float22half2_rn(local_out_fp2[i]); } } } template <typename T> void layernorm(const void *input, const void *gamma, const void *beta, void *output, const int& m, const int& n, const cudaStream_t stream) { dim3 grid(m); dim3 block; //n mast be less than 16384 assert(n <= 16384); if (n <= 1024) { block.x = ceil(n / (32.0 * 1)) * 32; // item_per_thread = 1 block.x = block.x / (4 / sizeof(T)); // if using half, only need half of block.x decoder_norm1_kernel_opt<1><<<grid, block, 0, stream>>>((T*)input, (T*)gamma, (T*)beta, (T*)output, m, n); } else if (n <= 2048) { block.x = ceil(n / (32.0 * 2)) * 32; // item_per_thread = 2 block.x = block.x / (4 / sizeof(T)); // if using half, only need half of block.x decoder_norm1_kernel_opt<2><<<grid, block, 0, stream>>>((T*)input, (T*)gamma, (T*)beta, (T*)output, m, n); } else if (n <= 4096) { block.x = ceil(n / (32.0 * 4)) * 32; // item_per_thread = 4 block.x = block.x / (4 / sizeof(T)); // if using half, only need half of block.x decoder_norm1_kernel_opt<4><<<grid, block, 0, stream>>>((T*)input, (T*)gamma, (T*)beta, (T*)output, m, n); } else if (n <= 8192) { 
block.x = ceil(n / (32.0 * 8)) * 32; // item_per_thread = 8 block.x = block.x / (4 / sizeof(T)); // if using half, only need half of block.x decoder_norm1_kernel_opt<8><<<grid, block, 0, stream>>>((T*)input, (T*)gamma, (T*)beta, (T*)output, m, n); } else if (n <= 16384) { block.x = ceil(n / (32.0 * 16)) * 32; // item_per_thread = 16 block.x = block.x / (4 / sizeof(T)); // if using half, only need half of block.x decoder_norm1_kernel_opt<16><<<grid, block, 0, stream>>>((T*)input, (T*)gamma, (T*)beta, (T*)output, m, n); } else { std::cout << "unsupported size for layernorm (n must be <= 16384)" << std::endl; } } template <class T> void add_bias_input_layernorm_kernel(void *output, const void *input, const void *bias, const void *gamma, const void *beta, const int& m, int& n, const cudaStream_t stream) { if (sizeof(T) == sizeof(float)) { dim3 grid(m); dim3 block(n); assert(n <= 1024); if (n == 768 || n == 1024) add_bias_input_layernorm_v2<<<grid, n / 4, 0, stream>>>((T*)output, (T*)input, (T*)bias, (T*)gamma, (T*)beta, n); else add_bias_input_layernorm<<<grid, block, 0, stream>>>((T*)output, (T*)input, (T*)bias, (T*)gamma, (T*)beta, m, n); } else { dim3 grid(m); dim3 block(n / 2); assert(n / 2 <= 1024); if (m >= 512 && (n == 768 || n == 1024)) add_bias_input_layernorm_v2<<<grid, n / 8, 0, stream>>>((T*)output, (T*)input, (T*)bias, (T*)gamma, (T*)beta, n); else add_bias_input_layernorm<<<grid, block, 0, stream>>>((T*)output, (T*)input, (T*)bias, (T*)gamma, (T*)beta, m, n); } } template void add_bias_input_layernorm_kernel<float>(void *output, const void *input, const void *bias, const void *gamma, const void *beta, const int& m, int& n, const cudaStream_t stream); template void add_bias_input_layernorm_kernel<half>(void *output, const void *input, const void *bias, const void *gamma, const void *beta, const int& m, int& n, const cudaStream_t stream); template void layernorm<float>(const void *input, const void *gamma, const void *beta, void *output, const int& m, const int& n, const cudaStream_t stream); template void layernorm<half>(const void *input, const void *gamma, const void *beta, void *output, const int& m, const int& n, const cudaStream_t stream);
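// --- Usage sketch (illustrative, not part of the original source) ---
// A minimal, hedged example of driving the layernorm<T> launcher above from the
// host. It assumes this translation unit also contains the kernels and
// blockReduceSum_opt; the buffer names, sizes (m rows of hidden size n), and the
// error-checking macro are illustrative additions, not taken from the original.
#include <cuda_runtime.h>
#include <vector>
#include <cstdio>

#define CHECK_CUDA_CALL(call)                                              \
  do {                                                                     \
    cudaError_t _e = (call);                                               \
    if (_e != cudaSuccess)                                                 \
      printf("CUDA error %s at %s:%d\n", cudaGetErrorString(_e),           \
             __FILE__, __LINE__);                                          \
  } while (0)

void layernorm_float_example() {
  const int m = 8;     // number of rows (e.g. tokens)
  const int n = 1024;  // hidden size, must be <= 16384
  std::vector<float> h_in(m * n, 1.0f), h_gamma(n, 1.0f), h_beta(n, 0.0f), h_out(m * n);

  float *d_in = nullptr, *d_gamma = nullptr, *d_beta = nullptr, *d_out = nullptr;
  CHECK_CUDA_CALL(cudaMalloc(&d_in, sizeof(float) * m * n));
  CHECK_CUDA_CALL(cudaMalloc(&d_gamma, sizeof(float) * n));
  CHECK_CUDA_CALL(cudaMalloc(&d_beta, sizeof(float) * n));
  CHECK_CUDA_CALL(cudaMalloc(&d_out, sizeof(float) * m * n));
  CHECK_CUDA_CALL(cudaMemcpy(d_in, h_in.data(), sizeof(float) * m * n, cudaMemcpyHostToDevice));
  CHECK_CUDA_CALL(cudaMemcpy(d_gamma, h_gamma.data(), sizeof(float) * n, cudaMemcpyHostToDevice));
  CHECK_CUDA_CALL(cudaMemcpy(d_beta, h_beta.data(), sizeof(float) * n, cudaMemcpyHostToDevice));

  cudaStream_t stream;
  CHECK_CUDA_CALL(cudaStreamCreate(&stream));
  layernorm<float>(d_in, d_gamma, d_beta, d_out, m, n, stream);  // dispatches on n internally
  CHECK_CUDA_CALL(cudaStreamSynchronize(stream));

  CHECK_CUDA_CALL(cudaMemcpy(h_out.data(), d_out, sizeof(float) * m * n, cudaMemcpyDeviceToHost));
  // With constant input, gamma = 1 and beta = 0 every normalized output should be ~0.
  printf("out[0] = %f\n", h_out[0]);

  CHECK_CUDA_CALL(cudaStreamDestroy(stream));
  cudaFree(d_in); cudaFree(d_gamma); cudaFree(d_beta); cudaFree(d_out);
}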
__global__ void sum_cols_%float_type%(%float_type% *A, %float_type% *out, const int increment, const int a0, const int a1) { const int t_i = threadIdx.y; const int t_j = threadIdx.x; const int dim_i = blockDim.y; const int dim_j = blockDim.x; const int col = dim_j*blockIdx.x + t_j; const int A_offset = t_i*a1 + col; const int data_offset = t_i*dim_j + t_j; extern __shared__ float shared_data[]; %float_type%* data = (%float_type%*)shared_data; // stage 1: loop threads across A to reduce to shared memory block const int step = dim_i*a1; const int limit = a0*a1; %float_type% sum = 0; int index = A_offset; for (int i=0; i < limit; i += step) { if (index < limit) sum += A[index]; index += step; } data[data_offset] = sum; // stage 2: reduction within block // note: assumes that dim_i is divisible by 2 for (int s=dim_i/2; s > 0; s>>=1) { __syncthreads(); /* if (t_i == 0 && t_j == 0) { printf("data: "); for (int i=0; i < blockDim.x*blockDim.y; i++) printf("%f ", data[i]); printf("\n"); } */ if (t_i < s) data[data_offset] += data[data_offset + s*dim_j]; } if (t_i == 0) { if (increment) out[col] += data[t_j]; else out[col] = data[t_j]; } } __global__ void iadd_%float_type%(%float_type% *A, %float_type% *v, const int a0, const int a1) { // in-place addition with broadcasting along first axis // (adding vector v to matrix A) const int row = blockDim.y*blockIdx.y + threadIdx.y; const int col = blockDim.x*blockIdx.x + threadIdx.x; if (row >= a0 || col >= a1) return; // load the appropriate part of v for this block into shared memory __shared__ %float_type% v_share[32]; if (threadIdx.y == 0) v_share[threadIdx.x] = v[col]; __syncthreads(); // add v to A A[row*a1 + col] += v_share[threadIdx.x]; } __global__ void multiply_%float_type%(%float_type% *A, %float_type% *B, %float_type% *out, const int size, const int increment) { // TODO: would it be faster to have each thread compute a couple entries? const int index = blockDim.x*blockIdx.x + threadIdx.x; if (index >= size) return; if (increment) out[index] += A[index] * B[index]; else out[index] = A[index] * B[index]; } __global__ void arange(long long *out, const int size, const long long start0, const int inc0, const long long start1, const int inc1, const long long start2, const int inc2) { const int index = blockDim.x*blockIdx.x + threadIdx.x; if (index >= size) return; if (blockIdx.y == 0) out[index] = start0 + index*inc0; else if(blockIdx.y == 1) out[index + size] = start1 + index*inc1; else out[index + (size<<1)] = start2 + index*inc2; } __global__ void shared_m_dot_%float_type%_%transpose_a%_%transpose_b%( %float_type% *A, %float_type% *B, %float_type% *C, const int a0, const int a1, const int b1, const int increment) { // multiplying an [a0,a1] matrix by an [a1,b1] matrix. each thread will // compute one element of c, which is a[i,:] * b[:,j]. however, we can // make this more efficient by e.g. loading a[i,:] into shared memory, and // then computing all the c[i,:] elements off that. we probably can't fit // all the necessary data into shared memory, so we load in a[i,:tile_len] // and b[:tile_len, j] sized chunks one at a time. // this doesn't necessarily need to be the case, but it seems to be most // efficient to make the tiles square (tile_len = blockDim.x = blockDim.y) // each thread is responsible for loading one cell from a and one cell // from b into shared memory. 
however, note that the cells a thread // loads into memory are not necessarily tied to the rows/cols it needs // to multiply to compute c[i,j] (they are disconnected so that we can // make sure that the memory loading is always occurring in the most // efficient way possible). // side length of square tile const int tile_len = blockDim.x; // thread variables // note: we use x to index cols and y to index rows because warps will be // clustered along x, which we want to be memory aligned to promote // coalescing const int t_i = threadIdx.y; const int t_j = threadIdx.x; // row/col for this block const int block_i = blockIdx.y*blockDim.y; const int block_j = blockIdx.x*blockDim.x; // row/col for this thread const int a_i = block_i + t_i; const int b_j = block_j + t_j; // if this thread is involved in computing a c[i,j] entry (it can still be // involved in loading data even if this is false) const bool active_c = a_i < a0 && b_j < b1; #if %transpose_a% // whether this thread is involved in loading data for the A tile const bool active_a = block_i + t_j < a0; // the index where this thread will load from const int A_off = block_i + t_i*a0 + t_j; // the distance along the tile axis that this thread loads from const int A_axis_off = t_i; #else const bool active_a = block_i + t_i < a0; const int A_off = (block_i + t_i)*a1 + t_j; const int A_axis_off = t_j; #endif #if %transpose_b% const bool active_b = block_j + t_i < b1; const int B_off = (block_j + t_i)*a1 + t_j; const int B_axis_off = t_j; #else const bool active_b = block_j + t_j < b1; const int B_off = block_j + t_i*b1 + t_j; const int B_axis_off = t_i; #endif // the index where this thread puts its data in the tile const int tile_off = t_i*(tile_len+1) + t_j; #if %transpose_a% // loop variables for outer loop (across tiles) const int outer_A_step = a0*tile_len; // loop variables for inner loop (within tile) const int inner_A_start = t_i; const int inner_A_step = tile_len+1; #else const int outer_A_step = tile_len; const int inner_A_start = t_i*(tile_len+1); const int inner_A_step = 1; #endif #if %transpose_b% const int outer_B_step = tile_len; const int inner_B_start = t_j*(tile_len+1); const int inner_B_step = 1; #else const int outer_B_step = b1*tile_len; const int inner_B_start = t_j; const int inner_B_step = tile_len+1; #endif // c will accumulate the value of c[i,j] across tiles const int C_off = a_i*b1 + b_j; %float_type% c = 0; /* if (a_i == 1 && b_j == 1) { printf("a_i %d, b_j %d \n", a_i, b_j); printf("transpose_b %d, increment %d \n", transpose_b, increment); printf("A_block_off %d, A_limit %d, outer_A_step %d \n", A_block_off, A_limit, tile_len); printf("A_off %d \n", A_off); printf("B_block_off %d, B_limit %d, outer_B_step %d \n", B_block_off, B_limit, outer_B_step); printf("B_off %d \n", B_off); printf("tile_off %d \n", tile_off); printf("inner_A_start %d, inner_A_end %d \n", inner_A_start, inner_A_end); printf("inner_B_start %d, inner_B_step %d \n", inner_B_start, inner_B_step); } */ // create the tiles // note: we add an extra column (which will just be zero) so that // when we are writing data in by column the writes are all offset, // reducing bank conflicts extern __shared__ float shared_data[]; %float_type%* A_tile = (%float_type%*)shared_data; %float_type%* B_tile = A_tile + (tile_len+1)*blockDim.y; // loop over the tiles // tile_i and tile_j point to the top left corner of the A/B tiles int tile_i = A_off; int tile_j = B_off; for (int tile=0; tile < a1; tile+=tile_len) { // each thread loads in its part of A/B // we 
need to check whether the location of this thread in the current // tile extends past the rows/cols of A and B, for the case when those // rows/cols are not evenly divisible by block len // TODO: we could create an "unsafe" version of this kernel that // assumes everything is evenly divisible by block len if (active_a && tile + A_axis_off < a1) A_tile[tile_off] = A[tile_i]; else A_tile[tile_off] = 0; if (active_b && tile + B_axis_off < a1) B_tile[tile_off] = B[tile_j]; else B_tile[tile_off] = 0; // wait for all threads to finish loading in their data __syncthreads(); /* if (a_i == 1 && b_j == 1) { printf("tile_i %d, tile_j %d \n", tile_i, tile_j); printf("tile_off %d \n", tile_off); printf(" %f %f \n", A[tile_i + A_off], B[tile_j + B_off]); printf(" %f %f \n", A_tile[tile_off], B_tile[tile_off]); for(int i=0; i < blockDim.x * tile_len; i++) printf("%f ", A_tile[i]); printf("\n"); for(int i=0; i < blockDim.y * tile_len; i++) printf("%f ", B_tile[i]); printf("\n"); printf("\n"); } */ // accumulate the product for this thread if (active_c) { int A_index = inner_A_start; int B_index = inner_B_start; for (int i = 0; i < tile_len; i++) { c += A_tile[A_index] * B_tile[B_index]; /* if (a_i == 1 && b_j == 1) { printf("%d %d \n", i, j); printf("%f * %f, %f \n", A_tile[i], B_tile[j], c); } */ A_index += inner_A_step; B_index += inner_B_step; } } tile_i += outer_A_step; tile_j += outer_B_step; // wait for all threads to finish their computation before loading // the next tile __syncthreads(); } if (active_c && increment) C[C_off] += c; else if (active_c) C[C_off] = c; } __global__ void mv_batched_%float_type%_%transpose_a%( %float_type% *A, %float_type% *v, %float_type% *out, const int a0, const int a1, const int increment) { // batched matrix-vector product const int t_i = threadIdx.y; const int t_j = threadIdx.x; const int dim_i = blockDim.y; const int dim_j = blockDim.x; // note: right now this code assumes that dim_i == dim_j // it also assumes that dim_i is evenly divisible by 2 // batch offset A += blockIdx.y * a0 * a1; v += blockIdx.y * a1; out += blockIdx.y * a0; extern __shared__ float shared_data[]; %float_type%* data = (%float_type%*)shared_data; %float_type%* v_share = data + dim_i*(dim_j+1); #if %transpose_a% const int start = dim_j*blockIdx.x; const int step = dim_i; const int offset_step = step*a0; const int limit = a0*a1; const int v_index = t_i; const int data_offset = t_i*(dim_j+1) + t_j; const bool active = dim_j*blockIdx.x + t_j < a0; const int block_offset = t_i*a0 + t_j; #else const int start = dim_i*blockIdx.x*a1; const int step = dim_j; const int offset_step = step; const int limit = start + (t_i+1)*a1; const int v_index = t_j; const int data_offset = t_j*(dim_j+1) + t_i; const bool active = dim_i*blockIdx.x + t_i < a0; const int block_offset = t_i*a1 + t_j; #endif %float_type% sum = 0; int block_index = start + block_offset; for (int i=0; i < a1; i+=step) { if (t_i == 0 && i + t_j < a1) v_share[t_j] = v[i + t_j]; __syncthreads(); if (active && block_index < limit) sum += A[block_index] * v_share[v_index]; block_index += offset_step; __syncthreads(); } data[data_offset] = sum; /* if (blockIdx.y == 3 && t_i == 15 && t_j == 0) { printf("v:"); for (int i=0; i < 32; i++) printf(" %f", v[i]); printf("\n"); printf("v_share:"); for (int i=0; i < 32; i++) printf(" %f", v_share[i]); printf("\n"); printf("data:"); for (int i=0; i < 1024; i++) printf(" %f", data[i]); printf("\n"); } */ // stage 2: reduction within block // note: we always order things in the block such that we can do a // 
column-wise reduction (to keep warps intact) const int out_offset = blockIdx.x*dim_j + t_j; if (out_offset >= a0) return; const int reduction_offset = t_i*(dim_j+1) + t_j; for (int s=dim_i/2; s > 0; s>>=1) { __syncthreads(); /* if (t_i == 0 && t_j == 0) { printf("data:"); for (int i=0; i < 32; i++) printf(" %f", data[i]); printf("\n"); } */ if (t_i < s) data[reduction_offset] += data[reduction_offset + s*(dim_j+1)]; } if (increment && t_i == 0) out[out_offset] += data[t_j]; else if (t_i == 0) out[out_offset] = data[t_j]; }
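// --- Launch-configuration sketch (illustrative, not part of the original source) ---
// The %float_type% / %transpose_a% / %transpose_b% placeholders above are presumably
// substituted by the host framework before compilation; the helper below only shows,
// under that assumption, how the tiled shared_m_dot kernel would be configured for a
// float instantiation. From the kernel code: blocks are square (tile_len x tile_len),
// grid.x covers the b1 columns of C, grid.y covers the a0 rows, and dynamic shared
// memory holds A_tile and B_tile, each padded by one column to reduce bank conflicts.
// The kernel symbol name in the comment at the bottom is hypothetical.
#include <cuda_runtime.h>

struct MDotLaunch {
  dim3 grid;
  dim3 block;
  size_t shared_bytes;
};

inline MDotLaunch shared_m_dot_launch_config(int a0, int b1, int tile_len = 32) {
  MDotLaunch cfg;
  cfg.block = dim3(tile_len, tile_len);                                   // square tile
  cfg.grid = dim3((b1 + tile_len - 1) / tile_len,                          // columns of C
                  (a0 + tile_len - 1) / tile_len);                         // rows of C
  cfg.shared_bytes = 2u * (size_t)(tile_len + 1) * tile_len * sizeof(float); // A_tile + B_tile
  return cfg;
}

// Hypothetical use, assuming a substituted kernel symbol shared_m_dot_float_0_0 exists:
//   MDotLaunch cfg = shared_m_dot_launch_config(a0, b1, 32);
//   shared_m_dot_float_0_0<<<cfg.grid, cfg.block, cfg.shared_bytes>>>(
//       d_A, d_B, d_C, a0, a1, b1, /*increment=*/0);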
typedef enum { SUM = 0, MEAN = 1, MAX = 2 } reduce_t; #define CHECK_CUDA(x) \ TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor") #define CHECK_CONTIGUOUS(x) \ TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") #define CHECK_INPUT(x) \ CHECK_CUDA(x); \ CHECK_CONTIGUOUS(x) namespace { int const threadsPerBlock = 512; int const maxGridDim = 50000; } // namespace __device__ __forceinline__ static void reduceMax(float *address, float val) { int *address_as_i = reinterpret_cast<int *>(address); int old = *address_as_i, assumed; do { assumed = old; old = atomicCAS(address_as_i, assumed, __float_as_int(fmaxf(val, __int_as_float(assumed)))); } while (assumed != old || __int_as_float(old) < val); } __device__ __forceinline__ static void reduceMax(double *address, double val) { unsigned long long *address_as_ull = reinterpret_cast<unsigned long long *>(address); unsigned long long old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS( address_as_ull, assumed, __double_as_longlong(fmax(val, __longlong_as_double(assumed)))); } while (assumed != old || __longlong_as_double(old) < val); } // get rid of meaningless warnings when compiling host code #ifdef __CUDA_ARCH__ __device__ __forceinline__ static void reduceAdd(float *address, float val) { #if (__CUDA_ARCH__ < 200) #warning \ "compute capability lower than 2.x. fall back to use CAS version of atomicAdd for float32" int *address_as_i = reinterpret_cast<int *>(address); int old = *address_as_i, assumed; do { assumed = old; old = atomicCAS(address_as_i, assumed, __float_as_int(val + __int_as_float(assumed))); } while (assumed != old); #else atomicAdd(address, val); #endif } __device__ __forceinline__ static void reduceAdd(double *address, double val) { #if (__CUDA_ARCH__ < 600) #warning \ "compute capability lower than 6.x. fall back to use CAS version of atomicAdd for float64" unsigned long long *address_as_ull = reinterpret_cast<unsigned long long *>(address); unsigned long long old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); #else atomicAdd(address, val); #endif } #endif template <typename T_int> __global__ void coors_id_kernel(const T_int *coors, const T_int *dim, int64_t *coors_id, const int num_input, const int NDim) { for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < num_input; x += gridDim.x * blockDim.x) { const T_int *coor_x = coors + x * NDim; auto coor_id = 0; for (int i = 0; i < NDim && coor_id != -1; i++) { coor_id *= dim[i]; auto t = static_cast<int64_t>(coor_x[i]); coor_id = (t < 0) ? -1 : coor_id + t; } coors_id[x] = coor_id; } } template <typename T_int> __global__ void coors_map_init_kernel(const int64_t *coors_id, const T_int *coors_id_argsort, int32_t *coors_map, const int num_input) { for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < num_input; x += gridDim.x * blockDim.x) { auto here = coors_id[coors_id_argsort[x]]; if (x == 0) { if (here == -1) { // there is invalid points coors_map[0] = -1; } else { coors_map[0] = 0; } continue; } auto left = coors_id[coors_id_argsort[x - 1]]; coors_map[x] = (left < here) ? 
1 : 0; } } template <typename T, typename T_int> __global__ void feats_reduce_kernel(const T *feats, const T_int *coors, int32_t *coors_map, int32_t *reduce_count, // shall be 0 at initialization T *reduced_feats, // shall be 0 at initialization T_int *out_coors, const int num_input, const int num_feats, const int NDim, const reduce_t reduce_type) { for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < num_input; x += gridDim.x * blockDim.x) { int32_t reduce_to = coors_map[x]; if (reduce_to == -1) continue; const T_int *coors_offset = coors + x * NDim; T_int *out_coors_offset = out_coors + reduce_to * NDim; for (int i = 0; i < NDim; i++) { out_coors_offset[i] = coors_offset[i]; } const T *feats_offset = feats + x * num_feats; T *reduced_feats_offset = reduced_feats + reduce_to * num_feats; if (reduce_type == reduce_t::MAX) { for (int i = 0; i < num_feats; i++) { reduceMax(&reduced_feats_offset[i], feats_offset[i]); } } else { if (reduce_type == reduce_t::MEAN) { atomicAdd(&reduce_count[reduce_to], static_cast<int32_t>(1)); } for (int i = 0; i < num_feats; i++) { reduceAdd(&reduced_feats_offset[i], feats_offset[i]); } } } } template <typename T> __global__ void add_reduce_traceback_grad_kernel( T *grad_feats, const T *grad_reduced_feats, const int32_t *coors_map, const int32_t *reduce_count, const int num_input, const int num_feats, const reduce_t reduce_type) { for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < num_input; x += gridDim.x * blockDim.x) { int32_t reduce_to = coors_map[x]; if (reduce_to == -1) { continue; } const int input_offset = x * num_feats; T *grad_feats_offset = grad_feats + input_offset; const int reduced_offset = reduce_to * num_feats; const T *grad_reduced_feats_offset = grad_reduced_feats + reduced_offset; if (reduce_type == reduce_t::SUM) { for (int i = 0; i < num_feats; i++) { grad_feats_offset[i] = grad_reduced_feats_offset[i]; } } else if (reduce_type == reduce_t::MEAN) { for (int i = 0; i < num_feats; i++) { grad_feats_offset[i] = grad_reduced_feats_offset[i] / static_cast<T>(reduce_count[reduce_to]); } } } } template <typename T> __global__ void max_reduce_traceback_scatter_idx_kernel( const T *feats, const T *reduced_feats, int32_t *reduce_from, const int32_t *coors_map, const int num_input, const int num_feats) { for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < num_input; x += gridDim.x * blockDim.x) { int32_t reduce_to = coors_map[x]; const int input_offset = x * num_feats; const T *feats_offset = feats + input_offset; if (reduce_to == -1) { continue; } const int reduced_offset = reduce_to * num_feats; const T *reduced_feats_offset = reduced_feats + reduced_offset; int32_t *reduce_from_offset = reduce_from + reduced_offset; for (int i = 0; i < num_feats; i++) { if (feats_offset[i] == reduced_feats_offset[i]) { atomicMin(&reduce_from_offset[i], static_cast<int32_t>(x)); } } } } template <typename T> __global__ void max_reduce_scatter_grad_kernel(T *grad_feats, const T *grad_reduced_feats, const int32_t *reduce_from, const int num_reduced, const int num_feats) { for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < num_reduced; x += gridDim.x * blockDim.x) { const int reduced_offset = x * num_feats; const int32_t *scatter_to_offset = reduce_from + reduced_offset; const T *grad_reduced_feats_offset = grad_reduced_feats + reduced_offset; for (int i = 0; i < num_feats; i++) { grad_feats[scatter_to_offset[i] * num_feats + i] = grad_reduced_feats_offset[i]; } } } namespace voxelization { std::vector<at::Tensor> 
dynamic_point_to_voxel_forward_gpu(const at::Tensor &feats, const at::Tensor &coors, const reduce_t reduce_type) { CHECK_INPUT(feats); CHECK_INPUT(coors); const int NDim = coors.size(1); const int num_input = feats.size(0); const int num_feats = feats.size(1); auto coors_id = at::empty({num_input}, coors.options().dtype(torch::kInt64)); auto coor_space_dim = coors.max_values(0) + 1; auto coors_map_sorted = at::empty({num_input}, coors.options().dtype(torch::kInt32)); auto coors_map = at::empty({num_input}, coors.options().dtype(torch::kInt32)); auto num_coors = at::zeros({1}, coors.options().dtype(torch::kInt32)); AT_DISPATCH_INTEGRAL_TYPES( coors.scalar_type(), "coors_id_kernel", ([&] { dim3 blocks(std::min(at::cuda::ATenCeilDiv(num_input, threadsPerBlock), maxGridDim)); dim3 threads(threadsPerBlock); coors_id_kernel<<<blocks, threads>>>( coors.data_ptr<scalar_t>(), coor_space_dim.data_ptr<scalar_t>(), coors_id.data_ptr<int64_t>(), num_input, NDim); })); AT_CUDA_CHECK(cudaGetLastError()); auto coors_id_argsort = coors_id.argsort(); AT_DISPATCH_INTEGRAL_TYPES( coors_id_argsort.scalar_type(), "coors_map_init_kernel", ([&] { dim3 blocks(std::min(at::cuda::ATenCeilDiv(num_input, threadsPerBlock), maxGridDim)); dim3 threads(threadsPerBlock); coors_map_init_kernel<<<blocks, threads>>>( coors_id.data_ptr<int64_t>(), coors_id_argsort.data_ptr<scalar_t>(), coors_map_sorted.data_ptr<int32_t>(), num_input); })); AT_CUDA_CHECK(cudaGetLastError()); coors_map_sorted = coors_map_sorted.cumsum(0, torch::kInt32); coors_map.index_put_(coors_id_argsort, coors_map_sorted); const int num_coors_cpu = coors_map_sorted[-1].cpu().data_ptr<int32_t>()[0] + 1; auto out_coors = at::empty({num_coors_cpu, NDim}, coors.options()); auto reduced_feats = at::empty({num_coors_cpu, num_feats}, feats.options()); auto reduce_count = at::zeros({num_coors_cpu}, coors.options().dtype(torch::kInt32)); AT_DISPATCH_FLOATING_TYPES( feats.scalar_type(), "feats_reduce_kernel", ([&] { using F_t = scalar_t; AT_DISPATCH_INTEGRAL_TYPES( coors.scalar_type(), "feats_reduce_kernel", ([&] { using I_t = scalar_t; if (reduce_type == reduce_t::MAX) reduced_feats.fill_(-std::numeric_limits<F_t>::infinity()); else reduced_feats.fill_(static_cast<F_t>(0)); dim3 blocks( std::min(at::cuda::ATenCeilDiv(num_input, threadsPerBlock), maxGridDim)); dim3 threads(threadsPerBlock); feats_reduce_kernel<<<blocks, threads>>>( feats.data_ptr<F_t>(), coors.data_ptr<I_t>(), coors_map.data_ptr<int32_t>(), reduce_count.data_ptr<int32_t>(), reduced_feats.data_ptr<F_t>(), out_coors.data_ptr<I_t>(), num_input, num_feats, NDim, reduce_type); if (reduce_type == reduce_t::MEAN) reduced_feats /= reduce_count.unsqueeze(-1).to(reduced_feats.dtype()); })); })); AT_CUDA_CHECK(cudaGetLastError()); return {reduced_feats, out_coors, coors_map, reduce_count}; } void dynamic_point_to_voxel_backward_gpu( at::Tensor &grad_feats, const at::Tensor &grad_reduced_feats, const at::Tensor &feats, const at::Tensor &reduced_feats, const at::Tensor &coors_map, const at::Tensor &reduce_count, const reduce_t reduce_type) { CHECK_INPUT(grad_feats); CHECK_INPUT(grad_reduced_feats); CHECK_INPUT(feats); CHECK_INPUT(reduced_feats); CHECK_INPUT(coors_map); CHECK_INPUT(reduce_count); const int num_input = feats.size(0); const int num_reduced = reduced_feats.size(0); const int num_feats = feats.size(1); grad_feats.fill_(0); // copy voxel grad to points if (reduce_type == reduce_t::MEAN || reduce_type == reduce_t::SUM) { AT_DISPATCH_FLOATING_TYPES( grad_reduced_feats.scalar_type(), 
"add_reduce_traceback_grad_kernel", ([&] { dim3 blocks (std::min(at::cuda::ATenCeilDiv(num_input, threadsPerBlock), maxGridDim)); dim3 threads(threadsPerBlock); add_reduce_traceback_grad_kernel<<<blocks, threads>>>( grad_feats.data_ptr<scalar_t>(), grad_reduced_feats.data_ptr<scalar_t>(), coors_map.data_ptr<int32_t>(), reduce_count.data_ptr<int32_t>(), num_input, num_feats, reduce_type); })); AT_CUDA_CHECK(cudaGetLastError()); } else { auto reduce_from = at::full({num_reduced, num_feats}, num_input, coors_map.options().dtype(torch::kInt32)); AT_DISPATCH_FLOATING_TYPES( grad_reduced_feats.scalar_type(), "max_reduce_traceback_scatter_idx_kernel", ([&] { dim3 blocks (std::min(at::cuda::ATenCeilDiv(num_input, threadsPerBlock), maxGridDim)); dim3 threads(threadsPerBlock); max_reduce_traceback_scatter_idx_kernel<<<blocks, threads>>>( feats.data_ptr<scalar_t>(), reduced_feats.data_ptr<scalar_t>(), reduce_from.data_ptr<int32_t>(), coors_map.data_ptr<int32_t>(), num_input, num_feats); })); AT_CUDA_CHECK(cudaGetLastError()); AT_DISPATCH_FLOATING_TYPES( grad_reduced_feats.scalar_type(), "max_reduce_traceback_scatter_idx_kernel", ([&] { dim3 blocks( std::min(at::cuda::ATenCeilDiv(num_reduced, threadsPerBlock), maxGridDim)); dim3 threads(threadsPerBlock); max_reduce_scatter_grad_kernel<<<blocks, threads>>>( grad_feats.data_ptr<scalar_t>(), grad_reduced_feats.data_ptr<scalar_t>(), reduce_from.data_ptr<int32_t>(), num_reduced, num_feats); })); AT_CUDA_CHECK(cudaGetLastError()); } return; } } // namespace voxelization
#include <math.h> // required for fabs() #include <float.h> // required for DBL_EPSILON // Externally Defined Routines // extern "C" __host__ __device__ double xChebyshev_Tn_Series(double x, const double a[], int degree); // Internally Defined Routines // __host__ __device__ double Fresnel_Auxiliary_Sine_Integral( double x ); __host__ __device__ double xFresnel_Auxiliary_Sine_Integral( double x ); __host__ __device__ static double Chebyshev_Expansion_0_1(double x); __host__ __device__ static double Chebyshev_Expansion_1_3(double x); __host__ __device__ static double Chebyshev_Expansion_3_5(double x); __host__ __device__ static double Chebyshev_Expansion_5_7(double x); __host__ __device__ static double Asymptotic_Series( double x ); // Internally Defined Constants // static double const sqrt_2pi = 2.506628274631000502415765284811045253006; //////////////////////////////////////////////////////////////////////////////// // double xFresnel_Auxiliary_Sine_Integral( double x ) // // // // Description: // // The Fresnel auxiliary sine integral, g(x), is the integral from 0 to // // infinity of the integrand // // sqrt(2/pi) exp(-2xt) sin(t^2) dt // // where x >= 0. // // // // Arguments: // // double x The argument of the Fresnel auxiliary sine integral // // g() where x >= 0. // // // // Return Value: // // The value of the Fresnel auxiliary sine integral g evaluated at // // x >= 0. // // // // Example: // // double y, x; // // // // ( code to initialize x ) // // // // y = xFresnel_Auxiliary_Sine_Integral( x ); // //////////////////////////////////////////////////////////////////////////////// __host__ __device__ double xFresnel_Auxiliary_Sine_Integral( double x ) { if (x == 0.0) return 0.5; if (x <= 1.0) return Chebyshev_Expansion_0_1(x); if (x <= 3.0) return Chebyshev_Expansion_1_3(x); if (x <= 5.0) return Chebyshev_Expansion_3_5(x); if (x <= 7.0) return Chebyshev_Expansion_5_7(x); return Asymptotic_Series( x ); } //////////////////////////////////////////////////////////////////////////////// // static double Chebyshev_Expansion_0_1( double x ) // // // // Description: // // Evaluate the Fresnel auxiliary sine integral, g(x), on the interval // // 0 < x <= 1 using the Chebyshev interpolation formula. // // // // Arguments: // // double x The argument of the Fresnel auxiliary sine integral // // where 0 < x <= 1. // // // // Return Value: // // The value of the Fresnel auxiliary sine integral g evaluated at // // x where 0 < x <= 1. 
// // // // Example: // // double y, x; // // // // ( code to initialize x ) // // // // y = Chebyshev_Expansion_0_1(x); // //////////////////////////////////////////////////////////////////////////////// __host__ __device__ static double Chebyshev_Expansion_0_1( double x ) { static double const c[] = { +2.560134650043040830997e-1, -1.993005146464943284549e-1, +4.025503636721387266117e-2, -4.459600454502960250729e-3, +6.447097305145147224459e-5, +7.544218493763717599380e-5, -1.580422720690700333493e-5, +1.755845848573471891519e-6, -9.289769688468301734718e-8, -5.624033192624251079833e-9, +1.854740406702369495830e-9, -2.174644768724492443378e-10, +1.392899828133395918767e-11, -6.989216003725983789869e-14, -9.959396121060010838331e-14, +1.312085140393647257714e-14, -9.240470383522792593305e-16, +2.472168944148817385152e-17, +2.834615576069400293894e-18, -4.650983461314449088349e-19, +3.544083040732391556797e-20 }; static const int degree = sizeof(c) / sizeof(double) - 1; static const double midpoint = 0.5; static const double scale = 0.5; return xChebyshev_Tn_Series( (x - midpoint) / scale, c, degree ); } //////////////////////////////////////////////////////////////////////////////// // static double Chebyshev_Expansion_1_3( double x ) // // // // Description: // // Evaluate the Fresnel auxiliary sine integral, g(x), on the interval // // 1 < x <= 3 using the Chebyshev interpolation formula. // // // // Arguments: // // double x The argument of the Fresnel auxiliary sine integral // // where 1 < x <= 3. // // // // Return Value: // // The value of the Fresnel auxiliary sine integral g evaluated at // // x where 1 < x <= 3. // // // // Example: // // double y, x; // // // // ( code to initialize x ) // // // // y = Chebyshev_Expansion_1_3(x); // //////////////////////////////////////////////////////////////////////////////// __host__ __device__ static double Chebyshev_Expansion_1_3( double x ) { static double const c[] = { +3.470341566046115476477e-2, -3.855580521778624043304e-2, +1.420604309383996764083e-2, -4.037349972538938202143e-3, +9.292478174580997778194e-4, -1.742730601244797978044e-4, +2.563352976720387343201e-5, -2.498437524746606551732e-6, -1.334367201897140224779e-8, +7.436854728157752667212e-8, -2.059620371321272169176e-8, +3.753674773239250330547e-9, -5.052913010605479996432e-10, +4.580877371233042345794e-11, -7.664740716178066564952e-13, -7.200170736686941995387e-13, +1.812701686438975518372e-13, -2.799876487275995466163e-14, +3.048940815174731772007e-15, -1.936754063718089166725e-16, -7.653673328908379651914e-18, +4.534308864750374603371e-18, -8.011054486030591219007e-19, +9.374587915222218230337e-20, -7.144943099280650363024e-21, +1.105276695821552769144e-22, +6.989334213887669628647e-23 }; static const int degree = sizeof(c) / sizeof(double) - 1; static const double midpoint = 2.0; return xChebyshev_Tn_Series( (x - midpoint), c, degree ); } //////////////////////////////////////////////////////////////////////////////// // static double Chebyshev_Expansion_3_5( double x ) // // // // Description: // // Evaluate the Fresnel auxiliary sine integral, g(x), on the interval // // 3 < x <= 5 using the Chebyshev interpolation formula. // // // // Arguments: // // double x The argument of the Fresnel auxiliary sine integral // // where 3 < x <= 5. // // // // Return Value: // // The value of the Fresnel auxiliary sine integral g evaluated at // // x where 3 < x <= 5. 
// // // // Example: // // double y, x; // // // // ( code to initialize x ) // // // // y = Chebyshev_Expansion_3_5(x); // //////////////////////////////////////////////////////////////////////////////// __host__ __device__ static double Chebyshev_Expansion_3_5( double x ) { static double const c[] = { +3.684922395955255848372e-3, -2.624595437764014386717e-3, +6.329162500611499391493e-4, -1.258275676151483358569e-4, +2.207375763252044217165e-5, -3.521929664607266176132e-6, +5.186211398012883705616e-7, -7.095056569102400546407e-8, +9.030550018646936241849e-9, -1.066057806832232908641e-9, +1.157128073917012957550e-10, -1.133877461819345992066e-11, +9.633572308791154852278e-13, -6.336675771012312827721e-14, +1.634407356931822107368e-15, +3.944542177576016972249e-16, -9.577486627424256130607e-17, +1.428772744117447206807e-17, -1.715342656474756703926e-18, +1.753564314320837957805e-19, -1.526125102356904908532e-20, +1.070275366865736879194e-21, -4.783978662888842165071e-23 }; static const int degree = sizeof(c) / sizeof(double) - 1; static const double midpoint = 4.0; return xChebyshev_Tn_Series( (x - midpoint), c, degree ); } //////////////////////////////////////////////////////////////////////////////// // static double Chebyshev_Expansion_5_7( double x ) // // // // Description: // // Evaluate the Fresnel auxiliary sine integral, g(x), on the interval // // 5 < x <= 7 using the Chebyshev interpolation formula. // // // // Arguments: // // double x The argument of the Fresnel auxiliary sine integral // // where 5 < x <= 7. // // // // Return Value: // // The value of the Fresnel auxiliary sine integral g evaluated at // // x where 5 < x <= 7. // // // // Example: // // double y, x; // // // // ( code to initialize x ) // // // // y = Chebyshev_Expansion_5_7(x); // //////////////////////////////////////////////////////////////////////////////// __host__ __device__ static double Chebyshev_Expansion_5_7( double x ) { static double const c[] = { +1.000801217561417083840e-3, -4.915205279689293180607e-4, +8.133163567827942356534e-5, -1.120758739236976144656e-5, +1.384441872281356422699e-6, -1.586485067224130537823e-7, +1.717840749804993618997e-8, -1.776373217323590289701e-9, +1.765399783094380160549e-10, -1.692470022450343343158e-11, +1.568238301528778401489e-12, -1.405356860742769958771e-13, +1.217377701691787512346e-14, -1.017697418261094517680e-15, +8.186068056719295045596e-17, -6.305153620995673221364e-18, +4.614110100197028845266e-19, -3.165914620159266813849e-20, +1.986716456911232767045e-21, -1.078418278174434671506e-22, +4.255983404468350776788e-24 }; static const int degree = sizeof(c) / sizeof(double) - 1; static const double midpoint = 6.0; return xChebyshev_Tn_Series( (x - midpoint), c, degree ); } //////////////////////////////////////////////////////////////////////////////// // static double Asymptotic_Series( double x ) // // // // Description: // // For a large argument x, the auxiliary Fresnel sine integral, g(x), // // can be expressed as the asymptotic series // // g(x) ~ 1/(x^3 * sqrt(8pi))[1 - 15/4x^4 + 945/16x^8 + ... + // // (4j+1)!!/(-4x^4)^j + ... ] // // // // Arguments: // // double x The argument of the Fresnel auxiliary sine integral // // where x > 7. // // // // Return Value: // // The value of the Fresnel auxiliary sine integral g evaluated at // // x where x > 7. 
// // // // Example: // // double y, x; // // // // ( code to initialize x ) // // // // y = Asymptotic_Series( x ); // //////////////////////////////////////////////////////////////////////////////// #define NUM_ASYMPTOTIC_TERMS 35 __host__ __device__ static double Asymptotic_Series( double x ) { double x2 = x * x; double x4 = -4.0 * x2 * x2; double xn = 1.0; double factorial = 1.0; double g = 0.0; double term[NUM_ASYMPTOTIC_TERMS + 1]; double epsilon = DBL_EPSILON / 4.0; int j = 5; int i = 0; term[0] = 1.0; term[NUM_ASYMPTOTIC_TERMS] = 0.0; for (i = 1; i < NUM_ASYMPTOTIC_TERMS; i++) { factorial *= ( (double)j * (double)(j - 2)); xn *= x4; term[i] = factorial / xn; j += 4; if (fabs(term[i]) >= fabs(term[i-1])) { i--; break; } if (fabs(term[i]) <= epsilon) break; } for (; i >= 0; i--) g += term[i]; g /= ( x * sqrt_2pi); return g / (x2 + x2); }
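// --- Sanity-check sketch (illustrative, not part of the original source) ---
// Evaluates the auxiliary sine integral on the host (the routine is declared
// __host__ __device__). g(0) = 0.5 by the special case above, values should stay
// positive, and the neighbouring expansions should agree at the switch points
// 1, 3, 5 and 7. The step and tolerance below are assumptions, not taken from
// the original source.
#include <stdio.h>
#include <math.h>

int fresnel_auxiliary_sine_selfcheck(void)
{
    const double switch_points[] = { 1.0, 3.0, 5.0, 7.0 };
    const double eps = 1.0e-12;   /* step across an interval boundary (assumed) */
    const double tol = 1.0e-9;    /* acceptable mismatch at a boundary (assumed) */
    int ok = (xFresnel_Auxiliary_Sine_Integral(0.0) == 0.5);

    for (int i = 0; i < 4; i++) {
        const double x = switch_points[i];
        const double lower = xFresnel_Auxiliary_Sine_Integral(x);        /* lower expansion */
        const double upper = xFresnel_Auxiliary_Sine_Integral(x + eps);  /* upper expansion */
        printf("g(%4.1f) = %.15e   g(%4.1f+) = %.15e\n", x, lower, x, upper);
        if (lower <= 0.0 || upper <= 0.0 || fabs(lower - upper) > tol) ok = 0;
    }
    return ok;
}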
* Test of BlockRadixSort utilities ******************************************************************************/ // Ensure printing of CUDA runtime errors to console #define CUB_STDERR #include <stdio.h> #include <algorithm> #include <iostream> #include <cub/block/block_radix_sort.cuh> #include <cub/block/block_load.cuh> #include <cub/block/block_store.cuh> #include <cub/util_allocator.cuh> #include "test_util.h" using namespace cub; //--------------------------------------------------------------------- // Globals, constants and typedefs //--------------------------------------------------------------------- bool g_verbose = false; CachingDeviceAllocator g_allocator(true); //--------------------------------------------------------------------- // Test kernels //--------------------------------------------------------------------- /// Specialized descending, blocked -> blocked template <int BLOCK_THREADS, typename BlockRadixSort, int ITEMS_PER_THREAD, typename Key, typename Value> __device__ __forceinline__ void TestBlockSort( typename BlockRadixSort::TempStorage &temp_storage, Key (&keys)[ITEMS_PER_THREAD], Value (&values)[ITEMS_PER_THREAD], Key *d_keys, Value *d_values, int begin_bit, int end_bit, clock_t &stop, Int2Type<true> is_descending, Int2Type<true> is_blocked_output) { BlockRadixSort(temp_storage).SortDescending(keys, values, begin_bit, end_bit); stop = clock(); StoreDirectBlocked(threadIdx.x, d_keys, keys); StoreDirectBlocked(threadIdx.x, d_values, values); } /// Specialized descending, blocked -> striped template <int BLOCK_THREADS, typename BlockRadixSort, int ITEMS_PER_THREAD, typename Key, typename Value> __device__ __forceinline__ void TestBlockSort( typename BlockRadixSort::TempStorage &temp_storage, Key (&keys)[ITEMS_PER_THREAD], Value (&values)[ITEMS_PER_THREAD], Key *d_keys, Value *d_values, int begin_bit, int end_bit, clock_t &stop, Int2Type<true> is_descending, Int2Type<false> is_blocked_output) { BlockRadixSort(temp_storage).SortDescendingBlockedToStriped(keys, values, begin_bit, end_bit); stop = clock(); StoreDirectStriped<BLOCK_THREADS>(threadIdx.x, d_keys, keys); StoreDirectStriped<BLOCK_THREADS>(threadIdx.x, d_values, values); } /// Specialized ascending, blocked -> blocked template <int BLOCK_THREADS, typename BlockRadixSort, int ITEMS_PER_THREAD, typename Key, typename Value> __device__ __forceinline__ void TestBlockSort( typename BlockRadixSort::TempStorage &temp_storage, Key (&keys)[ITEMS_PER_THREAD], Value (&values)[ITEMS_PER_THREAD], Key *d_keys, Value *d_values, int begin_bit, int end_bit, clock_t &stop, Int2Type<false> is_descending, Int2Type<true> is_blocked_output) { BlockRadixSort(temp_storage).Sort(keys, values, begin_bit, end_bit); stop = clock(); StoreDirectBlocked(threadIdx.x, d_keys, keys); StoreDirectBlocked(threadIdx.x, d_values, values); } /// Specialized ascending, blocked -> striped template <int BLOCK_THREADS, typename BlockRadixSort, int ITEMS_PER_THREAD, typename Key, typename Value> __device__ __forceinline__ void TestBlockSort( typename BlockRadixSort::TempStorage &temp_storage, Key (&keys)[ITEMS_PER_THREAD], Value (&values)[ITEMS_PER_THREAD], Key *d_keys, Value *d_values, int begin_bit, int end_bit, clock_t &stop, Int2Type<false> is_descending, Int2Type<false> is_blocked_output) { BlockRadixSort(temp_storage).SortBlockedToStriped(keys, values, begin_bit, end_bit); stop = clock(); StoreDirectStriped<BLOCK_THREADS>(threadIdx.x, d_keys, keys); StoreDirectStriped<BLOCK_THREADS>(threadIdx.x, d_values, values); } /** * BlockRadixSort kernel 
*/ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, int RADIX_BITS, bool MEMOIZE_OUTER_SCAN, BlockScanAlgorithm INNER_SCAN_ALGORITHM, cudaSharedMemConfig SMEM_CONFIG, int DESCENDING, int BLOCKED_OUTPUT, typename Key, typename Value> __launch_bounds__ (BLOCK_THREADS, 1) __global__ void Kernel( Key *d_keys, Value *d_values, int begin_bit, int end_bit, clock_t *d_elapsed) { // Threadblock load/store abstraction types typedef BlockRadixSort< Key, BLOCK_THREADS, ITEMS_PER_THREAD, Value, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG> BlockRadixSortT; // Allocate temp storage in shared memory __shared__ typename BlockRadixSortT::TempStorage temp_storage; // Items per thread Key keys[ITEMS_PER_THREAD]; Value values[ITEMS_PER_THREAD]; LoadDirectBlocked(threadIdx.x, d_keys, keys); LoadDirectBlocked(threadIdx.x, d_values, values); // Start cycle timer clock_t stop; clock_t start = clock(); TestBlockSort<BLOCK_THREADS, BlockRadixSortT>( temp_storage, keys, values, d_keys, d_values, begin_bit, end_bit, stop, Int2Type<DESCENDING>(), Int2Type<BLOCKED_OUTPUT>()); // Store time if (threadIdx.x == 0) *d_elapsed = (start > stop) ? start - stop : stop - start; } //--------------------------------------------------------------------- // Host testing subroutines //--------------------------------------------------------------------- /** * Simple key-value pairing */ template < typename Key, typename Value, bool IS_FLOAT = (Traits<Key>::CATEGORY == FLOATING_POINT)> struct Pair { Key key; Value value; bool operator<(const Pair &b) const { return (key < b.key); } }; /** * Simple key-value pairing (specialized for floating point types) */ template <typename Key, typename Value> struct Pair<Key, Value, true> { Key key; Value value; bool operator<(const Pair &b) const { if (key < b.key) return true; if (key > b.key) return false; // Key in unsigned bits typedef typename Traits<Key>::UnsignedBits UnsignedBits; // Return true if key is negative zero and b.key is positive zero UnsignedBits key_bits = *reinterpret_cast<UnsignedBits*>(const_cast<Key*>(&key)); UnsignedBits b_key_bits = *reinterpret_cast<UnsignedBits*>(const_cast<Key*>(&b.key)); UnsignedBits HIGH_BIT = Traits<Key>::HIGH_BIT; return ((key_bits & HIGH_BIT) != 0) && ((b_key_bits & HIGH_BIT) == 0); } }; /** * Initialize key-value sorting problem. 
*/ template <bool DESCENDING, typename Key, typename Value> void Initialize( GenMode gen_mode, Key *h_keys, Value *h_values, Key *h_reference_keys, Value *h_reference_values, int num_items, int entropy_reduction, int begin_bit, int end_bit) { Pair<Key, Value> *h_pairs = new Pair<Key, Value>[num_items]; for (int i = 0; i < num_items; ++i) { InitValue(gen_mode, h_keys[i], i); RandomBits(h_values[i]); // Mask off unwanted portions int num_bits = end_bit - begin_bit; if ((begin_bit > 0) || (end_bit < sizeof(Key) * 8)) { unsigned long long base = 0; memcpy(&base, &h_keys[i], sizeof(Key)); base &= ((1ull << num_bits) - 1) << begin_bit; memcpy(&h_keys[i], &base, sizeof(Key)); } h_pairs[i].key = h_keys[i]; h_pairs[i].value = h_values[i]; } if (DESCENDING) std::reverse(h_pairs, h_pairs + num_items); std::stable_sort(h_pairs, h_pairs + num_items); if (DESCENDING) std::reverse(h_pairs, h_pairs + num_items); for (int i = 0; i < num_items; ++i) { h_reference_keys[i] = h_pairs[i].key; h_reference_values[i] = h_pairs[i].value; } delete[] h_pairs; } /** * Test BlockRadixSort kernel */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, int RADIX_BITS, bool MEMOIZE_OUTER_SCAN, BlockScanAlgorithm INNER_SCAN_ALGORITHM, cudaSharedMemConfig SMEM_CONFIG, bool DESCENDING, bool BLOCKED_OUTPUT, typename Key, typename Value> void TestDriver( GenMode gen_mode, int entropy_reduction, int begin_bit, int end_bit) { enum { TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD, KEYS_ONLY = Equals<Value, NullType>::VALUE, }; // Allocate host arrays Key *h_keys = new Key[TILE_SIZE]; Key *h_reference_keys = new Key[TILE_SIZE]; Value *h_values = new Value[TILE_SIZE]; Value *h_reference_values = new Value[TILE_SIZE]; // Allocate device arrays Key *d_keys = NULL; Value *d_values = NULL; clock_t *d_elapsed = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_keys, sizeof(Key) * TILE_SIZE)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_values, sizeof(Value) * TILE_SIZE)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(clock_t))); // Initialize problem and solution on host Initialize<DESCENDING>(gen_mode, h_keys, h_values, h_reference_keys, h_reference_values, TILE_SIZE, entropy_reduction, begin_bit, end_bit); // Copy problem to device CubDebugExit(cudaMemcpy(d_keys, h_keys, sizeof(Key) * TILE_SIZE, cudaMemcpyHostToDevice)); CubDebugExit(cudaMemcpy(d_values, h_values, sizeof(Value) * TILE_SIZE, cudaMemcpyHostToDevice)); printf("%s " "BLOCK_THREADS(%d) " "ITEMS_PER_THREAD(%d) " "RADIX_BITS(%d) " "MEMOIZE_OUTER_SCAN(%d) " "INNER_SCAN_ALGORITHM(%d) " "SMEM_CONFIG(%d) " "DESCENDING(%d) " "BLOCKED_OUTPUT(%d) " "sizeof(Key)(%d) " "sizeof(Value)(%d) " "gen_mode(%d), " "entropy_reduction(%d) " "begin_bit(%d) " "end_bit(%d), " "samples(%d)\n", ((KEYS_ONLY) ? 
"Keys-only" : "Key-value"), BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, DESCENDING, BLOCKED_OUTPUT, (int) sizeof(Key), (int) sizeof(Value), gen_mode, entropy_reduction, begin_bit, end_bit, g_num_rand_samples); // Set shared memory config cudaDeviceSetSharedMemConfig(SMEM_CONFIG); // Run kernel Kernel<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, DESCENDING, BLOCKED_OUTPUT><<<1, BLOCK_THREADS>>>( d_keys, d_values, begin_bit, end_bit, d_elapsed); // Flush kernel output / errors CubDebugExit(cudaPeekAtLastError()); CubDebugExit(cudaDeviceSynchronize()); // Check keys results printf("\tKeys: "); int compare = CompareDeviceResults(h_reference_keys, d_keys, TILE_SIZE, g_verbose, g_verbose); printf("%s\n", compare ? "FAIL" : "PASS"); AssertEquals(0, compare); // Check value results if (!KEYS_ONLY) { printf("\tValues: "); int compare = CompareDeviceResults(h_reference_values, d_values, TILE_SIZE, g_verbose, g_verbose); printf("%s\n", compare ? "FAIL" : "PASS"); AssertEquals(0, compare); } printf("\n"); printf("\tElapsed clocks: "); DisplayDeviceResults(d_elapsed, 1); printf("\n"); // Cleanup if (h_keys) delete[] h_keys; if (h_reference_keys) delete[] h_reference_keys; if (h_values) delete[] h_values; if (h_reference_values) delete[] h_reference_values; if (d_keys) CubDebugExit(g_allocator.DeviceFree(d_keys)); if (d_values) CubDebugExit(g_allocator.DeviceFree(d_values)); if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed)); } /** * Test driver (valid tile size <= MAX_SMEM_BYTES) */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, int RADIX_BITS, bool MEMOIZE_OUTER_SCAN, BlockScanAlgorithm INNER_SCAN_ALGORITHM, cudaSharedMemConfig SMEM_CONFIG, bool DESCENDING, bool BLOCKED_OUTPUT, typename Key, typename Value> void TestValid(Int2Type<true> fits_smem_capacity) { // Iterate begin_bit for (int begin_bit = 0; begin_bit <= 1; begin_bit++) { // Iterate end bit for (int end_bit = begin_bit + 1; end_bit <= sizeof(Key) * 8; end_bit = end_bit * 2 + begin_bit) { // Uniform key distribution TestDriver<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, DESCENDING, BLOCKED_OUTPUT, Key, Value>( UNIFORM, 0, begin_bit, end_bit); // Sequential key distribution TestDriver<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, DESCENDING, BLOCKED_OUTPUT, Key, Value>( INTEGER_SEED, 0, begin_bit, end_bit); // Iterate random with entropy_reduction for (int entropy_reduction = 0; entropy_reduction <= 9; entropy_reduction += 3) { TestDriver<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, DESCENDING, BLOCKED_OUTPUT, Key, Value>( RANDOM, entropy_reduction, begin_bit, end_bit); } } } } /** * Test driver (invalid tile size) */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, int RADIX_BITS, bool MEMOIZE_OUTER_SCAN, BlockScanAlgorithm INNER_SCAN_ALGORITHM, cudaSharedMemConfig SMEM_CONFIG, bool DESCENDING, bool BLOCKED_OUTPUT, typename Key, typename Value> void TestValid(Int2Type<false> fits_smem_capacity) {} /** * Test ascending/descending and to-blocked/to-striped */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, int RADIX_BITS, bool MEMOIZE_OUTER_SCAN, BlockScanAlgorithm INNER_SCAN_ALGORITHM, cudaSharedMemConfig SMEM_CONFIG, typename Key, typename Value> void Test() { // Check size of smem storage for the target arch to make sure it will fit typedef 
BlockRadixSort<Key, BLOCK_THREADS, ITEMS_PER_THREAD, Value, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG> BlockRadixSortT; #if defined(SM100) || defined(SM110) || defined(SM130) Int2Type<sizeof(typename BlockRadixSortT::TempStorage) <= 16 * 1024> fits_smem_capacity; #else Int2Type<(sizeof(typename BlockRadixSortT::TempStorage) <= 48 * 1024)> fits_smem_capacity; #endif // Sort-ascending, to-striped TestValid<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, true, false, Key, Value>(fits_smem_capacity); // Sort-descending, to-blocked TestValid<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, false, true, Key, Value>(fits_smem_capacity); // Not necessary // TestValid<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, false, false, Key, Value>(fits_smem_capacity); // TestValid<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, SMEM_CONFIG, true, true, Key, Value>(fits_smem_capacity); } /** * Test value type and smem config */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, int RADIX_BITS, bool MEMOIZE_OUTER_SCAN, BlockScanAlgorithm INNER_SCAN_ALGORITHM, typename Key> void TestKeys() { // Test keys-only sorting with both smem configs Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, cudaSharedMemBankSizeFourByte, Key, NullType>(); // Keys-only (4-byte smem bank config) #if !defined(SM100) && !defined(SM110) && !defined(SM130) && !defined(SM200) Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, cudaSharedMemBankSizeEightByte, Key, NullType>(); // Keys-only (8-byte smem bank config) #endif } /** * Test value type and smem config */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, int RADIX_BITS, bool MEMOIZE_OUTER_SCAN, BlockScanAlgorithm INNER_SCAN_ALGORITHM, typename Key> void TestKeysAndPairs() { // Test pairs sorting with only 4-byte configs Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, cudaSharedMemBankSizeFourByte, Key, char>(); // With small-values Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, cudaSharedMemBankSizeFourByte, Key, Key>(); // With same-values Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, cudaSharedMemBankSizeFourByte, Key, TestFoo>(); // With large values } /** * Test key type */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, int RADIX_BITS, bool MEMOIZE_OUTER_SCAN, BlockScanAlgorithm INNER_SCAN_ALGORITHM> void Test() { // Get ptx version int ptx_version; CubDebugExit(PtxVersion(ptx_version)); #ifdef TEST_KEYS_ONLY // Test unsigned types with keys-only TestKeys<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, unsigned char>(); TestKeys<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, unsigned short>(); TestKeys<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, unsigned int>(); TestKeys<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, unsigned long>(); TestKeys<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, unsigned long long>(); #else // Test signed and fp types with paired values TestKeysAndPairs<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, 
MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, char>(); TestKeysAndPairs<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, short>(); TestKeysAndPairs<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, int>(); TestKeysAndPairs<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, long>(); TestKeysAndPairs<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, long long>(); TestKeysAndPairs<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, float>(); if (ptx_version > 120) { // Don't check doubles on PTX120 or below because they're down-converted TestKeysAndPairs<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, INNER_SCAN_ALGORITHM, double>(); } #endif } /** * Test inner scan algorithm */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, int RADIX_BITS, bool MEMOIZE_OUTER_SCAN> void Test() { Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, BLOCK_SCAN_RAKING>(); Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, MEMOIZE_OUTER_SCAN, BLOCK_SCAN_WARP_SCANS>(); } /** * Test outer scan algorithm */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, int RADIX_BITS> void Test() { Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, true>(); Test<BLOCK_THREADS, ITEMS_PER_THREAD, RADIX_BITS, false>(); } /** * Test radix bits */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD> void Test() { Test<BLOCK_THREADS, ITEMS_PER_THREAD, 1>(); Test<BLOCK_THREADS, ITEMS_PER_THREAD, 2>(); Test<BLOCK_THREADS, ITEMS_PER_THREAD, 5>(); } /** * Test items per thread */ template <int BLOCK_THREADS> void Test() { Test<BLOCK_THREADS, 1>(); #if defined(SM100) || defined(SM110) || defined(SM130) // Open64 compiler can't handle the number of test cases #else Test<BLOCK_THREADS, 4>(); #endif Test<BLOCK_THREADS, 11>(); } /** * Main */ int main(int argc, char** argv) { // Initialize command line CommandLineArgs args(argc, argv); g_verbose = args.CheckCmdLineFlag("v"); // Print usage if (args.CheckCmdLineFlag("help")) { printf("%s " "[--device=<device-id>] " "[--v] " "\n", argv[0]); exit(0); } // Initialize device CubDebugExit(args.DeviceInit()); #ifdef QUICK_TEST { typedef float T; TestDriver<32, 4, 4, true, BLOCK_SCAN_WARP_SCANS, cudaSharedMemBankSizeFourByte, false, false, T, NullType>(INTEGER_SEED, 0, 0, sizeof(T) * 8); } /* // Compile/run quick tests typedef unsigned int T; TestDriver<64, 17, 4, true, BLOCK_SCAN_WARP_SCANS, cudaSharedMemBankSizeFourByte, false, false, T, NullType>(RANDOM, 0, 0, sizeof(T) * 8); TestDriver<96, 8, 4, true, BLOCK_SCAN_WARP_SCANS, cudaSharedMemBankSizeFourByte, false, false, T, NullType>(RANDOM, 0, 0, sizeof(T) * 8); TestDriver<128, 2, 4, true, BLOCK_SCAN_WARP_SCANS, cudaSharedMemBankSizeFourByte, false, false, T, NullType>(RANDOM, 0, 0, sizeof(T) * 8); */ #else // Compile/run thorough tests Test<32>(); Test<64>(); Test<160>(); #endif // QUICK_TEST return 0; }
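// --- Minimal usage sketch (illustrative, not part of the test harness above) ---
// The drivers above sweep many configurations; this is the smallest keys-only
// pattern a caller typically needs: one block of 128 threads sorting a tile of
// 128 * 4 int keys held in registers. The kernel name and the d_keys buffer are
// illustrative; BlockRadixSort, LoadDirectBlocked and StoreDirectBlocked come
// from the cub headers already included above.
template <int BLOCK_THREADS_EX, int ITEMS_PER_THREAD_EX>
__global__ void ExampleSortKernel(int *d_keys)
{
    typedef BlockRadixSort<int, BLOCK_THREADS_EX, ITEMS_PER_THREAD_EX> ExampleSortT;

    // Shared memory needed by the collective
    __shared__ typename ExampleSortT::TempStorage temp_storage;

    // Load a blocked arrangement of keys into registers
    int thread_keys[ITEMS_PER_THREAD_EX];
    LoadDirectBlocked(threadIdx.x, d_keys, thread_keys);

    // Collectively sort the tile (ascending, full key width)
    ExampleSortT(temp_storage).Sort(thread_keys);

    // Write the sorted tile back out in blocked order
    StoreDirectBlocked(threadIdx.x, d_keys, thread_keys);
}

// Host-side call for a single 512-key tile (assumes d_keys holds 512 ints):
//   ExampleSortKernel<128, 4><<<1, 128>>>(d_keys);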
#include <algorithm> #include <cmath> #include "amir_cuda_util/cuda_util.h" using namespace amirstan::cuda; template <typename scalar_t> __device__ scalar_t deformable_im2col_bilinear(const scalar_t *bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { const int h_low = floor(h); const int w_low = floor(w); const int h_high = h_low + 1; const int w_high = w_low + 1; const scalar_t lh = h - h_low; const scalar_t lw = w - w_low; const scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __global__ void deformable_im2col_gpu_kernel( const int n, const scalar_t *data_im, const scalar_t *data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; // const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * // height + h_in) * width + w_in; const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { // const scalar_t map_h = i * dilation_h + offset_h; // const scalar_t map_w = j * dilation_w + offset_w; // const int cur_height = height - h_in; // const int cur_width = width - w_in; // val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, // cur_width, map_h, map_w); val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = 
val; data_col_ptr += batch_size * height_col * width_col; } } } } template <typename scalar_t> __device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __global__ void modulated_deformable_im2col_gpu_kernel( const int n, const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; // const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * // height + h_in) * width + w_in; const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; // if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { // const float map_h = i * dilation_h + offset_h; // const float map_w = j * 
dilation_w + offset_w; // const int cur_height = height - h_in; // const int cur_width = width - w_in; // val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, // cur_width, map_h, map_w); val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val * mask; data_col_ptr += batch_size * height_col * width_col; // data_col_ptr += height_col * width_col; } } } } template <typename scalar_t> __global__ void output_add_bias_kernel(scalar_t *output, scalar_t *bias, size_t step_batch, size_t step_channel, size_t n) { CUDA_KERNEL_LOOP(index, n) { output[index] += bias[(index % step_batch) / step_channel]; } } void deformable_im2col(float *data_input, float *data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, float *data_col, cudaStream_t stream) { // num_axes should be smaller than block size // todo: check parallel_imgs is correctly passed in int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; deformable_im2col_gpu_kernel<float> <<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_input, data_offset, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, channels, deformable_group, height_col, width_col, data_col); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in deformable_im2col: %s\n", cudaGetErrorString(err)); } } void modulated_deformable_im2col_cuda( const float *data_im_, const float *data_offset_, const float *data_mask_, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, float *data_col_, cudaStream_t stream) { // num_axes should be smaller than block size const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * batch_size * height_col * width_col; modulated_deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, channels, deformable_group, height_col, width_col, data_col_); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); } } template <typename scalar_t> void output_add_bias(scalar_t *output, scalar_t *bias, size_t batch, size_t channel, size_t height, size_t width, cudaStream_t stream) { size_t step_channel = height * width; size_t step_batch = step_channel * channel; size_t n = step_batch * batch; output_add_bias_kernel<<<GET_BLOCKS(n), CUDA_NUM_THREADS, 0, stream>>>( output, bias, step_batch, step_channel, n); } template void output_add_bias<float>(float *output, float *bias, size_t batch_size, size_t channel, size_t 
height, size_t width, cudaStream_t stream);

void tensorPermute(float *dst, float *src, int *src_size, int *permute,
                   int src_dim, cudaStream_t stream) {
  amirstan::cuda::memcpyPermute(dst, src, src_size, permute, src_dim, stream);
}
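// The device routine deformable_im2col_bilinear above samples the input map at a
// fractional (h, w) produced by the learned offsets, zeroing any corner that falls
// outside the image. Below is a minimal host-side reference of the same weighting
// rule, handy for unit-testing a single sample; `bilinear_ref` and its signature
// are ours (an illustrative sketch), not part of this extension.
#include <cmath>

static float bilinear_ref(const float *data, int data_width, int height, int width,
                          float h, float w) {
  const int h_low = (int)std::floor(h), w_low = (int)std::floor(w);
  const int h_high = h_low + 1, w_high = w_low + 1;
  const float lh = h - h_low, lw = w - w_low;    // fractional parts
  const float hh = 1.f - lh, hw = 1.f - lw;      // complementary weights
  float v1 = 0.f, v2 = 0.f, v3 = 0.f, v4 = 0.f;  // out-of-range corners stay 0
  if (h_low >= 0 && w_low >= 0)                     v1 = data[h_low * data_width + w_low];
  if (h_low >= 0 && w_high <= width - 1)            v2 = data[h_low * data_width + w_high];
  if (h_high <= height - 1 && w_low >= 0)           v3 = data[h_high * data_width + w_low];
  if (h_high <= height - 1 && w_high <= width - 1)  v4 = data[h_high * data_width + w_high];
  return hh * hw * v1 + hh * lw * v2 + lh * hw * v3 + lh * lw * v4;
}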
/* ==== the_stack ==== */
#if defined(THC_REAL_IS_HALF) #define _REAL(val) THC_float2half(val) #else #define _REAL(val) (val) #endif static int nn_(StepLSTM_updateOutput)(lua_State *L) { THCState *state = getCudaState(L); THCTensor *weight = (THCTensor *)luaT_checkudata(L, 1, torch_Tensor); THCTensor *bias = (THCTensor *)luaT_checkudata(L, 2, torch_Tensor); THCTensor *gates = (THCTensor *)luaT_checkudata(L, 3, torch_Tensor); THCTensor *cur_x = (THCTensor *)luaT_checkudata(L, 4, torch_Tensor); THCTensor *prev_h = (THCTensor *)luaT_checkudata(L, 5, torch_Tensor); THCTensor *prev_c = (THCTensor *)luaT_checkudata(L, 6, torch_Tensor); int inputsize = luaL_checkinteger(L, 7); int hiddensize = luaL_checkinteger(L, 8); int outputsize = luaL_checkinteger(L, 9); THCTensor *next_h = (THCTensor *)luaT_checkudata(L, 10, torch_Tensor); // when LSTMP pass hidden[t] THCTensor *next_c = (THCTensor *)luaT_checkudata(L, 11, torch_Tensor); int batchsize = THCTensor_(size)(state, cur_x, 0); if (THCTensor_(size)(state, cur_x, 1) != inputsize) return LUA_HANDLE_ERROR_STR(L, "expected input[1]:size(2) == inputsize"); THLongStorage* size = THLongStorage_newWithSize2(1, 4 * hiddensize); THCTensor *buffer = THCTensor_(newView)(state, bias, size); buffer->stride[0] = 0; buffer->size[0] = batchsize; THCTensor *Wx = THCTensor_(newNarrow)(state, weight, 0, 0, inputsize); THCTensor *Wh = THCTensor_(newNarrow)(state, weight, 0, inputsize, outputsize); THCTensor_(resize2d)(state, next_h, batchsize, hiddensize); THCTensor_(resize2d)(state, next_c, batchsize, hiddensize); long nElement = THCTensor_(nElement)(state, gates); THCTensor_(resize2d)(state, gates, batchsize, 4 * hiddensize); if (nElement != batchsize * 4 * hiddensize) THCTensor_(fill)(state, gates, _REAL(0)); // forward THCTensor_(addmm)(state, gates, _REAL(1), buffer, _REAL(1), cur_x, Wx); THCTensor_(addmm)(state, gates, _REAL(1), gates, _REAL(1), prev_h, Wh); THCTensor_(narrow)(state, buffer, gates, 1, 0, 3 * hiddensize); THCTensor_(sigmoid)(state, buffer, buffer); THCTensor_(narrow)(state, buffer, gates, 1, 3 * hiddensize, hiddensize); THCTensor_(tanh)(state, buffer, buffer); THCTensor *input_gate = THCTensor_(newNarrow)(state, gates, 1, 0, hiddensize); THCTensor *forget_gate = THCTensor_(newNarrow)(state, gates, 1, hiddensize, hiddensize); THCTensor *output_gate = THCTensor_(newNarrow)(state, gates, 1, 2*hiddensize, hiddensize); THCTensor *input_transform = THCTensor_(newNarrow)(state, gates, 1, 3*hiddensize, hiddensize); THCTensor_(cmul)(state, next_h, input_gate, input_transform); THCTensor_(cmul)(state, next_c, forget_gate, prev_c); THCTensor_(cadd)(state, next_c, next_c, _REAL(1), next_h); THCTensor_(tanh)(state, next_h, next_c); THCTensor_(cmul)(state, next_h, next_h, output_gate); THCTensor_(free)(state, Wx); THCTensor_(free)(state, Wh); THCTensor_(free)(state, buffer); THCTensor_(free)(state, input_gate); THCTensor_(free)(state, forget_gate); THCTensor_(free)(state, output_gate); THCTensor_(free)(state, input_transform); THLongStorage_free(size); if (lua_gettop(L) > 11) // implements LSTMP (P stands for projection layer) { THCTensor *hidden = next_h; THCTensor *weightO = (THCTensor *)luaT_checkudata(L, 12, torch_Tensor); next_h = (THCTensor *)luaT_checkudata(L, 13, torch_Tensor); THCTensor_(resize2d)(state, next_h, batchsize, outputsize); THCTensor_(addmm)(state, next_h, _REAL(0), next_h, _REAL(1), hidden, weightO); // push results onto stack luaT_pushudata(L, next_c, torch_Tensor); } return 2; } static int nn_(StepLSTM_backward)(lua_State *L) { THCState *state = getCudaState(L); 
THCTensor *weight = (THCTensor *)luaT_checkudata(L, 1, torch_Tensor); THCTensor *gates = (THCTensor *)luaT_checkudata(L, 2, torch_Tensor); THCTensor *gradWeight = (THCTensor *)luaT_checkudata(L, 3, torch_Tensor); THCTensor *grad_b = (THCTensor *)luaT_checkudata(L, 4, torch_Tensor); THCTensor *grad_gates = (THCTensor *)luaT_checkudata(L, 5, torch_Tensor); THCTensor *grad_gates_sum = (THCTensor *)luaT_checkudata(L, 6, torch_Tensor); THCTensor *cur_x = (THCTensor *)luaT_checkudata(L, 7, torch_Tensor); THCTensor *prev_h = (THCTensor *)luaT_checkudata(L, 8, torch_Tensor); THCTensor *prev_c = (THCTensor *)luaT_checkudata(L, 9, torch_Tensor); THCTensor *next_c = (THCTensor *)luaT_checkudata(L, 10, torch_Tensor); THCTensor *grad_next_h = (THCTensor *)luaT_checkudata(L, 11, torch_Tensor); THCTensor *grad_next_c = (THCTensor *)luaT_checkudata(L, 12, torch_Tensor); lua_Number scale = luaL_checknumber(L, 13); int inputsize = luaL_checkinteger(L, 14); int hiddensize = luaL_checkinteger(L, 15); int outputsize = luaL_checkinteger(L, 16); THCTensor *grad_cur_x = (THCTensor *)luaT_checkudata(L, 17, torch_Tensor); THCTensor *grad_prev_h = (THCTensor *)luaT_checkudata(L, 18, torch_Tensor); THCTensor *grad_prev_c = (THCTensor *)luaT_checkudata(L, 19, torch_Tensor); int batchsize = THCTensor_(size)(state, cur_x, 0); if (THCTensor_(size)(state, cur_x, 1) != inputsize) return LUA_HANDLE_ERROR_STR(L, "expected input[1]:size(2) == inputsize"); if (THCTensor_(size)(state, grad_next_h, 1) != outputsize) return LUA_HANDLE_ERROR_STR(L, "expected gradOutput[1]:size(2) == outputsize"); if (lua_gettop(L) > 19) // LSTMP { THCTensor *weightO = (THCTensor *)luaT_checkudata(L, 20, torch_Tensor); THCTensor *hidden = (THCTensor *)luaT_checkudata(L, 21, torch_Tensor); THCTensor *gradWeightO = (THCTensor *)luaT_checkudata(L, 22, torch_Tensor); THCTensor *grad_hidden = (THCTensor *)luaT_checkudata(L, 23, torch_Tensor); THCTensor *hidden_t = THCTensor_(newTranspose)(state, hidden, 0, 1); THCTensor *weightO_t = THCTensor_(newTranspose)(state, weightO, 0, 1); THCTensor_(addmm)(state, gradWeightO, _REAL(scale), gradWeightO, _REAL(1), hidden_t, grad_next_h); THCTensor_(resize2d)(state, grad_hidden, batchsize, hiddensize); THCTensor_(addmm)(state, grad_hidden, _REAL(0), grad_hidden, _REAL(1), grad_next_h, weightO_t); grad_next_h = grad_hidden; THCTensor_(free)(state, hidden_t); THCTensor_(free)(state, weightO_t); // push results to top of stack luaT_pushudata(L, grad_cur_x, torch_Tensor); luaT_pushudata(L, grad_prev_h, torch_Tensor); luaT_pushudata(L, grad_prev_c, torch_Tensor); } THCTensor_(resize2d)(state, grad_cur_x, batchsize, inputsize); THCTensor_(resize2d)(state, grad_prev_h, batchsize, outputsize); THCTensor_(resize2d)(state, grad_prev_c, batchsize, hiddensize); // these tensors were set-up in updateOutput THCTensor *Wx = THCTensor_(newNarrow)(state, weight, 0, 0, inputsize); THCTensor *Wh = THCTensor_(newNarrow)(state, weight, 0, inputsize, outputsize); THCTensor *input_gate = THCTensor_(newNarrow)(state, gates, 1, 0, hiddensize); THCTensor *forget_gate = THCTensor_(newNarrow)(state, gates, 1, hiddensize, hiddensize); THCTensor *output_gate = THCTensor_(newNarrow)(state, gates, 1, 2*hiddensize, hiddensize); THCTensor *input_transform = THCTensor_(newNarrow)(state, gates, 1, 3*hiddensize, hiddensize); // set-up grad tensors THCTensor *grad_Wx = THCTensor_(newNarrow)(state, gradWeight, 0, 0, inputsize); THCTensor *grad_Wh = THCTensor_(newNarrow)(state, gradWeight, 0, inputsize, outputsize); THCTensor_(resize2d)(state, 
grad_gates, batchsize, 4 * hiddensize); THCTensor *grad_input_gate = THCTensor_(newNarrow)(state, grad_gates, 1, 0, hiddensize); THCTensor *grad_forget_gate = THCTensor_(newNarrow)(state, grad_gates, 1, hiddensize, hiddensize); THCTensor *grad_output_gate = THCTensor_(newNarrow)(state, grad_gates, 1, 2*hiddensize, hiddensize); THCTensor *grad_input_transform = THCTensor_(newNarrow)(state, grad_gates, 1, 3*hiddensize, hiddensize); // backward // we use grad_[input,forget,output]_gate as temporary buffers to compute grad_prev_c. THCTensor_(tanh)(state, grad_input_gate, next_c); THCTensor_(cmul)(state, grad_forget_gate, grad_input_gate, grad_input_gate); THCTensor_(fill)(state, grad_output_gate, _REAL(1)); THCTensor_(cadd)(state, grad_output_gate, grad_output_gate, _REAL(-1), grad_forget_gate); THCTensor_(cmul)(state, grad_output_gate, grad_output_gate, output_gate); THCTensor_(cmul)(state, grad_output_gate, grad_output_gate, grad_next_h); THCTensor_(cadd)(state, grad_prev_c, grad_next_c, _REAL(1), grad_output_gate); // we use above grad_input_gate to compute grad_output_gate THCTensor_(fill)(state, grad_output_gate, _REAL(1)); THCTensor_(cadd)(state, grad_output_gate, grad_output_gate, _REAL(-1), output_gate); THCTensor_(cmul)(state, grad_output_gate, grad_output_gate, output_gate); THCTensor_(cmul)(state, grad_output_gate, grad_output_gate, grad_input_gate); THCTensor_(cmul)(state, grad_output_gate, grad_output_gate, grad_next_h); // Use grad_input_gate as a temporary buffer for computing grad_input_transform THCTensor_(cmul)(state, grad_input_gate, input_transform, input_transform); THCTensor_(fill)(state, grad_input_transform, _REAL(1)); THCTensor_(cadd)(state, grad_input_transform, grad_input_transform, _REAL(-1), grad_input_gate); THCTensor_(cmul)(state, grad_input_transform, grad_input_transform, input_gate); THCTensor_(cmul)(state, grad_input_transform, grad_input_transform, grad_prev_c); // We don't need any temporary storage for these so do them last THCTensor_(fill)(state, grad_input_gate, _REAL(1)); THCTensor_(cadd)(state, grad_input_gate, grad_input_gate, _REAL(-1), input_gate); THCTensor_(cmul)(state, grad_input_gate, grad_input_gate, input_gate); THCTensor_(cmul)(state, grad_input_gate, grad_input_gate, input_transform); THCTensor_(cmul)(state, grad_input_gate, grad_input_gate, grad_prev_c); THCTensor_(fill)(state, grad_forget_gate, _REAL(1)); THCTensor_(cadd)(state, grad_forget_gate, grad_forget_gate, _REAL(-1), forget_gate); THCTensor_(cmul)(state, grad_forget_gate, grad_forget_gate, forget_gate); THCTensor_(cmul)(state, grad_forget_gate, grad_forget_gate, prev_c); THCTensor_(cmul)(state, grad_forget_gate, grad_forget_gate, grad_prev_c); // now for the main dish THCTensor *Wx_t = THCTensor_(newTranspose)(state, Wx, 0, 1); THCTensor *Wh_t = THCTensor_(newTranspose)(state, Wh, 0, 1); THCTensor *cur_x_t = THCTensor_(newTranspose)(state, cur_x, 0, 1); THCTensor *prev_h_t = THCTensor_(newTranspose)(state, prev_h, 0, 1); THCTensor_(addmm)(state, grad_cur_x, _REAL(0), grad_cur_x, _REAL(1), grad_gates, Wx_t); THCTensor_(addmm)(state, grad_Wx, _REAL(1), grad_Wx, _REAL(scale), cur_x_t, grad_gates); THCTensor_(addmm)(state, grad_Wh, _REAL(1), grad_Wh, _REAL(scale), prev_h_t, grad_gates); THCTensor_(resize2d)(state, grad_gates_sum, 1, 4 * hiddensize); THCTensor_(sum)(state, grad_gates_sum, grad_gates, 0, 0); THCTensor_(cadd)(state, grad_b, grad_b, _REAL(scale), grad_gates_sum); THCTensor_(addmm)(state, grad_prev_h, _REAL(0), grad_prev_h, _REAL(1), grad_gates, Wh_t); 
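/* At this point grad_gates holds dL/d(gate pre-activations) for i, f, o and g, and the
   addmm/sum calls above have propagated it into grad_cur_x, grad_Wx, grad_Wh, grad_b and
   grad_prev_h. grad_prev_c still holds dL/d(next_c); the single cmul below finishes the
   cell-state chain: dL/d(prev_c) = dL/d(next_c) * forget_gate. */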
THCTensor_(cmul)(state, grad_prev_c, grad_prev_c, forget_gate);

  THCTensor_(free)(state, Wx);
  THCTensor_(free)(state, Wh);
  THCTensor_(free)(state, input_gate);
  THCTensor_(free)(state, forget_gate);
  THCTensor_(free)(state, output_gate);
  THCTensor_(free)(state, input_transform);
  THCTensor_(free)(state, grad_Wx);
  THCTensor_(free)(state, grad_Wh);
  THCTensor_(free)(state, grad_input_gate);
  THCTensor_(free)(state, grad_forget_gate);
  THCTensor_(free)(state, grad_output_gate);
  THCTensor_(free)(state, grad_input_transform);
  THCTensor_(free)(state, Wx_t);
  THCTensor_(free)(state, Wh_t);
  THCTensor_(free)(state, cur_x_t);
  THCTensor_(free)(state, prev_h_t);

  return 3;
}

static const struct luaL_Reg nn_(StepLSTM__) [] = {
  {"StepLSTM_updateOutput", nn_(StepLSTM_updateOutput)},
  {"StepLSTM_backward", nn_(StepLSTM_backward)},
  {NULL, NULL}
};

static void nn_(StepLSTM_init)(lua_State *L)
{
  luaT_pushmetatable(L, torch_Tensor);
  luaT_registeratname(L, nn_(StepLSTM__), "nn");
  lua_pop(L,1);
}

#undef _REAL

#endif
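// For reference, the element-wise recurrence that StepLSTM_updateOutput expresses with
// batched THC tensor ops (gates = cur_x*Wx + prev_h*Wh + bias, split into four chunks of
// size hiddensize: sigmoid on i/f/o, tanh on the "input_transform" g). A scalar sketch,
// using our own helper names (sigmoidf, lstm_cell_ref) -- not part of this file.
#include <cmath>

static inline float sigmoidf(float x) { return 1.f / (1.f + std::exp(-x)); }

static void lstm_cell_ref(float i_pre, float f_pre, float o_pre, float g_pre,
                          float prev_c, float *next_c, float *next_h) {
  const float i = sigmoidf(i_pre);   // input gate
  const float f = sigmoidf(f_pre);   // forget gate
  const float o = sigmoidf(o_pre);   // output gate
  const float g = std::tanh(g_pre);  // candidate update ("input_transform")
  *next_c = f * prev_c + i * g;
  *next_h = o * std::tanh(*next_c);
}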
/* ==== the_stack ==== */
#define TPB80 384 #define TPB64 384 extern "C" { #include <sph/sph_whirlpool.h> #include <miner.h> } #include <cuda_helper.h> #include <cuda_vector_uint2x4.h> #include <cuda_vectors.h> #define xor3x(a,b,c) (a^b^c) #include "cuda_whirlpool_tables.cuh" __device__ static uint64_t b0[256]; __device__ static uint64_t b7[256]; __constant__ static uint2 precomputed_round_key_64[72]; __constant__ static uint2 precomputed_round_key_80[80]; __device__ static uint2 c_PaddedMessage80[16]; /** * Round constants. */ __device__ uint2 InitVector_RC[10]; static uint32_t *d_resNonce[MAX_GPUS] = { 0 }; //--------START OF WHIRLPOOL DEVICE MACROS--------------------------------------------------------------------------- __device__ __forceinline__ void static TRANSFER(uint2 *const __restrict__ dst,const uint2 *const __restrict__ src){ dst[0] = src[ 0]; dst[1] = src[ 1]; dst[2] = src[ 2]; dst[3] = src[ 3]; dst[4] = src[ 4]; dst[5] = src[ 5]; dst[6] = src[ 6]; dst[7] = src[ 7]; } __device__ __forceinline__ static uint2 d_ROUND_ELT_LDG(const uint2 sharedMemory[7][256],const uint2 *const __restrict__ in,const int i0, const int i1, const int i2, const int i3, const int i4, const int i5, const int i6, const int i7){ uint2 ret = __ldg((uint2*)&b0[__byte_perm(in[i0].x, 0, 0x4440)]); ret ^= sharedMemory[1][__byte_perm(in[i1].x, 0, 0x4441)]; ret ^= sharedMemory[2][__byte_perm(in[i2].x, 0, 0x4442)]; ret ^= sharedMemory[3][__byte_perm(in[i3].x, 0, 0x4443)]; ret ^= sharedMemory[4][__byte_perm(in[i4].y, 0, 0x4440)]; ret ^= ROR24(__ldg((uint2*)&b0[__byte_perm(in[i5].y, 0, 0x4441)])); ret ^= ROR8(__ldg((uint2*)&b7[__byte_perm(in[i6].y, 0, 0x4442)])); ret ^= __ldg((uint2*)&b7[__byte_perm(in[i7].y, 0, 0x4443)]); return ret; } __device__ __forceinline__ static uint2 d_ROUND_ELT(const uint2 sharedMemory[7][256],const uint2 *const __restrict__ in,const int i0, const int i1, const int i2, const int i3, const int i4, const int i5, const int i6, const int i7){ uint2 ret = __ldg((uint2*)&b0[__byte_perm(in[i0].x, 0, 0x4440)]); ret ^= sharedMemory[1][__byte_perm(in[i1].x, 0, 0x4441)]; ret ^= sharedMemory[2][__byte_perm(in[i2].x, 0, 0x4442)]; ret ^= sharedMemory[3][__byte_perm(in[i3].x, 0, 0x4443)]; ret ^= sharedMemory[4][__byte_perm(in[i4].y, 0, 0x4440)]; ret ^= sharedMemory[5][__byte_perm(in[i5].y, 0, 0x4441)]; ret ^= ROR8(__ldg((uint2*)&b7[__byte_perm(in[i6].y, 0, 0x4442)])); ret ^= __ldg((uint2*)&b7[__byte_perm(in[i7].y, 0, 0x4443)]); return ret; } __device__ __forceinline__ static uint2 d_ROUND_ELT1_LDG(const uint2 sharedMemory[7][256],const uint2 *const __restrict__ in,const int i0, const int i1, const int i2, const int i3, const int i4, const int i5, const int i6, const int i7, const uint2 c0){ uint2 ret = __ldg((uint2*)&b0[__byte_perm(in[i0].x, 0, 0x4440)]); ret ^= sharedMemory[1][__byte_perm(in[i1].x, 0, 0x4441)]; ret ^= sharedMemory[2][__byte_perm(in[i2].x, 0, 0x4442)]; ret ^= sharedMemory[3][__byte_perm(in[i3].x, 0, 0x4443)]; ret ^= sharedMemory[4][__byte_perm(in[i4].y, 0, 0x4440)]; ret ^= ROR24(__ldg((uint2*)&b0[__byte_perm(in[i5].y, 0, 0x4441)])); ret ^= ROR8(__ldg((uint2*)&b7[__byte_perm(in[i6].y, 0, 0x4442)])); ret ^= __ldg((uint2*)&b7[__byte_perm(in[i7].y, 0, 0x4443)]); ret ^= c0; return ret; } __device__ __forceinline__ static uint2 d_ROUND_ELT1(const uint2 sharedMemory[7][256],const uint2 *const __restrict__ in,const int i0, const int i1, const int i2, const int i3, const int i4, const int i5, const int i6, const int i7, const uint2 c0){ uint2 ret = __ldg((uint2*)&b0[__byte_perm(in[i0].x, 0, 0x4440)]); ret ^= 
sharedMemory[1][__byte_perm(in[i1].x, 0, 0x4441)]; ret ^= sharedMemory[2][__byte_perm(in[i2].x, 0, 0x4442)]; ret ^= sharedMemory[3][__byte_perm(in[i3].x, 0, 0x4443)]; ret ^= sharedMemory[4][__byte_perm(in[i4].y, 0, 0x4440)]; ret ^= sharedMemory[5][__byte_perm(in[i5].y, 0, 0x4441)]; ret ^= ROR8(__ldg((uint2*)&b7[__byte_perm(in[i6].y, 0, 0x4442)]));//sharedMemory[6][__byte_perm(in[i6].y, 0, 0x4442)] ret ^= __ldg((uint2*)&b7[__byte_perm(in[i7].y, 0, 0x4443)]);//sharedMemory[7][__byte_perm(in[i7].y, 0, 0x4443)] ret ^= c0; return ret; } //--------END OF WHIRLPOOL DEVICE MACROS----------------------------------------------------------------------------- //--------START OF WHIRLPOOL HOST MACROS----------------------------------------------------------------------------- #define table_skew(val,num) SPH_ROTL64(val,8*num) #define BYTE(x, n) ((unsigned)((x) >> (8 * (n))) & 0xFF) #define ROUND_ELT(table, in, i0, i1, i2, i3, i4, i5, i6, i7) \ (table[BYTE(in[i0], 0)] \ ^ table_skew(table[BYTE(in[i1], 1)], 1) \ ^ table_skew(table[BYTE(in[i2], 2)], 2) \ ^ table_skew(table[BYTE(in[i3], 3)], 3) \ ^ table_skew(table[BYTE(in[i4], 4)], 4) \ ^ table_skew(table[BYTE(in[i5], 5)], 5) \ ^ table_skew(table[BYTE(in[i6], 6)], 6) \ ^ table_skew(table[BYTE(in[i7], 7)], 7)) #define ROUND(table, in, out, c0, c1, c2, c3, c4, c5, c6, c7) do { \ out[0] = ROUND_ELT(table, in, 0, 7, 6, 5, 4, 3, 2, 1) ^ c0; \ out[1] = ROUND_ELT(table, in, 1, 0, 7, 6, 5, 4, 3, 2) ^ c1; \ out[2] = ROUND_ELT(table, in, 2, 1, 0, 7, 6, 5, 4, 3) ^ c2; \ out[3] = ROUND_ELT(table, in, 3, 2, 1, 0, 7, 6, 5, 4) ^ c3; \ out[4] = ROUND_ELT(table, in, 4, 3, 2, 1, 0, 7, 6, 5) ^ c4; \ out[5] = ROUND_ELT(table, in, 5, 4, 3, 2, 1, 0, 7, 6) ^ c5; \ out[6] = ROUND_ELT(table, in, 6, 5, 4, 3, 2, 1, 0, 7) ^ c6; \ out[7] = ROUND_ELT(table, in, 7, 6, 5, 4, 3, 2, 1, 0) ^ c7; \ } while (0) __host__ static void ROUND_KSCHED(const uint64_t *in,uint64_t *out,const uint64_t c){ const uint64_t *a = in; uint64_t *b = out; ROUND(old1_T0, a, b, c, 0, 0, 0, 0, 0, 0, 0); } //--------END OF WHIRLPOOL HOST MACROS------------------------------------------------------------------------------- __host__ void x15_whirlpool_cpu_init(int thr_id, uint32_t threads, int mode) { uint64_t* table0 = NULL; switch (mode) { case 0: /* x15 with rotated T1-T7 (based on T0) */ table0 = (uint64_t*)plain_T0; cudaMemcpyToSymbol(InitVector_RC, plain_RC, 10*sizeof(uint64_t),0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(precomputed_round_key_64, plain_precomputed_round_key_64, 72*sizeof(uint64_t),0, cudaMemcpyHostToDevice); break; case 1: /* old whirlpool */ table0 = (uint64_t*)old1_T0; cudaMemcpyToSymbol(InitVector_RC, old1_RC, 10*sizeof(uint64_t),0,cudaMemcpyHostToDevice); cudaMemcpyToSymbol(precomputed_round_key_64, old1_precomputed_round_key_64, 72*sizeof(uint64_t),0, cudaMemcpyHostToDevice); break; default: applog(LOG_ERR,"Bad whirlpool mode"); exit(0); } cudaMemcpyToSymbol(b0, table0, 256*sizeof(uint64_t),0, cudaMemcpyHostToDevice); uint64_t table7[256]; for(int i=0;i<256;i++){ table7[i] = ROTR64(table0[i],8); } cudaMemcpyToSymbol(b7, table7, 256*sizeof(uint64_t),0, cudaMemcpyHostToDevice); CUDA_SAFE_CALL(cudaMalloc(&d_resNonce[thr_id], 2 * sizeof(uint32_t))); cuda_get_arch(thr_id); } __host__ static void whirl_midstate(void *state, const void *input) { sph_whirlpool_context ctx; sph_whirlpool1_init(&ctx); sph_whirlpool1(&ctx, input, 64); memcpy(state, ctx.state, 64); } __host__ void whirlpool512_setBlock_80(void *pdata, const void *ptarget) { uint64_t PaddedMessage[16]; memcpy(PaddedMessage, pdata, 
80); memset(((uint8_t*)&PaddedMessage)+80, 0, 48); ((uint8_t*)&PaddedMessage)[80] = 0x80; /* ending */ // compute constant first block uint64_t midstate[16] = { 0 }; whirl_midstate(midstate, pdata); memcpy(PaddedMessage, midstate, 64); uint64_t round_constants[80]; uint64_t n[8]; n[0] = PaddedMessage[0] ^ PaddedMessage[8]; //read data n[1] = PaddedMessage[1] ^ PaddedMessage[9]; n[2] = PaddedMessage[2] ^ 0x0000000000000080; //whirlpool n[3] = PaddedMessage[3]; n[4] = PaddedMessage[4]; n[5] = PaddedMessage[5]; n[6] = PaddedMessage[6]; n[7] = PaddedMessage[7] ^ 0x8002000000000000; ROUND_KSCHED(PaddedMessage,round_constants,old1_RC[0]); for(int i=1;i<10;i++){ ROUND_KSCHED(&round_constants[8*(i-1)],&round_constants[8*i],old1_RC[i]); } //USE the same memory place to store keys and state round_constants[ 0]^= old1_T0[BYTE(n[0], 0)] ^ table_skew(old1_T0[BYTE(n[7], 1)], 1) ^ table_skew(old1_T0[BYTE(n[6], 2)], 2) ^ table_skew(old1_T0[BYTE(n[5], 3)], 3) ^ table_skew(old1_T0[BYTE(n[4], 4)], 4) ^ table_skew(old1_T0[BYTE(n[3], 5)], 5) ^ table_skew(old1_T0[BYTE(n[2], 6)], 6); round_constants[ 1]^= old1_T0[BYTE(n[1], 0)] ^ table_skew(old1_T0[BYTE(n[0], 1)], 1) ^ table_skew(old1_T0[BYTE(n[7], 2)], 2) ^ table_skew(old1_T0[BYTE(n[6], 3)], 3) ^ table_skew(old1_T0[BYTE(n[5], 4)], 4) ^ table_skew(old1_T0[BYTE(n[4], 5)], 5) ^ table_skew(old1_T0[BYTE(n[3], 6)], 6) ^ table_skew(old1_T0[BYTE(n[2], 7)], 7); round_constants[ 2]^= old1_T0[BYTE(n[2], 0)] ^ table_skew(old1_T0[BYTE(n[1], 1)], 1) ^ table_skew(old1_T0[BYTE(n[0], 2)], 2) ^ table_skew(old1_T0[BYTE(n[7], 3)], 3) ^ table_skew(old1_T0[BYTE(n[6], 4)], 4) ^ table_skew(old1_T0[BYTE(n[5], 5)], 5) ^ table_skew(old1_T0[BYTE(n[4], 6)], 6) ^ table_skew(old1_T0[BYTE(n[3], 7)], 7); round_constants[ 3]^= old1_T0[BYTE(n[3], 0)] ^ table_skew(old1_T0[BYTE(n[2], 1)], 1) ^ table_skew(old1_T0[BYTE(n[1], 2)], 2) ^ table_skew(old1_T0[BYTE(n[0], 3)], 3) ^ table_skew(old1_T0[BYTE(n[7], 4)], 4) ^ table_skew(old1_T0[BYTE(n[6], 5)], 5) ^ table_skew(old1_T0[BYTE(n[5], 6)], 6) ^ table_skew(old1_T0[BYTE(n[4], 7)], 7); round_constants[ 4]^= old1_T0[BYTE(n[4], 0)] ^ table_skew(old1_T0[BYTE(n[3], 1)], 1) ^ table_skew(old1_T0[BYTE(n[2], 2)], 2) ^ table_skew(old1_T0[BYTE(n[1], 3)], 3) ^ table_skew(old1_T0[BYTE(n[0], 4)], 4) ^ table_skew(old1_T0[BYTE(n[7], 5)], 5) ^ table_skew(old1_T0[BYTE(n[6], 6)], 6) ^ table_skew(old1_T0[BYTE(n[5], 7)], 7); round_constants[ 5]^= old1_T0[BYTE(n[5], 0)] ^ table_skew(old1_T0[BYTE(n[4], 1)], 1) ^ table_skew(old1_T0[BYTE(n[3], 2)], 2) ^ table_skew(old1_T0[BYTE(n[2], 3)], 3) ^ table_skew(old1_T0[BYTE(n[0], 5)], 5) ^ table_skew(old1_T0[BYTE(n[7], 6)], 6) ^ table_skew(old1_T0[BYTE(n[6], 7)], 7); round_constants[ 6]^= old1_T0[BYTE(n[6], 0)] ^ table_skew(old1_T0[BYTE(n[5], 1)], 1) ^ table_skew(old1_T0[BYTE(n[4], 2)], 2) ^ table_skew(old1_T0[BYTE(n[3], 3)], 3) ^ table_skew(old1_T0[BYTE(n[2], 4)], 4) ^ table_skew(old1_T0[BYTE(n[0], 6)], 6) ^ table_skew(old1_T0[BYTE(n[7], 7)], 7); round_constants[ 7]^= old1_T0[BYTE(n[7], 0)] ^ table_skew(old1_T0[BYTE(n[6], 1)], 1) ^ table_skew(old1_T0[BYTE(n[5], 2)], 2) ^ table_skew(old1_T0[BYTE(n[4], 3)], 3) ^ table_skew(old1_T0[BYTE(n[3], 4)], 4) ^ table_skew(old1_T0[BYTE(n[2], 5)], 5) ^ table_skew(old1_T0[BYTE(n[0], 7)], 7); for(int i=1;i<5;i++) n[i] = round_constants[i]; round_constants[ 8]^= table_skew(old1_T0[BYTE(n[4], 4)], 4) ^ table_skew(old1_T0[BYTE(n[3], 5)], 5) ^ table_skew(old1_T0[BYTE(n[2], 6)], 6) ^ table_skew(old1_T0[BYTE(n[1], 7)], 7); round_constants[ 9]^= old1_T0[BYTE(n[1], 0)] ^ table_skew(old1_T0[BYTE(n[4], 5)], 5) 
^ table_skew(old1_T0[BYTE(n[3], 6)], 6) ^ table_skew(old1_T0[BYTE(n[2], 7)], 7); round_constants[10]^= old1_T0[BYTE(n[2], 0)] ^ table_skew(old1_T0[BYTE(n[1], 1)], 1) ^ table_skew(old1_T0[BYTE(n[4], 6)], 6) ^ table_skew(old1_T0[BYTE(n[3], 7)], 7); round_constants[11]^= old1_T0[BYTE(n[3], 0)] ^ table_skew(old1_T0[BYTE(n[2], 1)], 1) ^ table_skew(old1_T0[BYTE(n[1], 2)], 2) ^ table_skew(old1_T0[BYTE(n[4], 7)], 7); round_constants[12]^= old1_T0[BYTE(n[4], 0)] ^ table_skew(old1_T0[BYTE(n[3], 1)], 1) ^ table_skew(old1_T0[BYTE(n[2], 2)], 2) ^ table_skew(old1_T0[BYTE(n[1], 3)], 3); round_constants[13]^= table_skew(old1_T0[BYTE(n[4], 1)], 1) ^ table_skew(old1_T0[BYTE(n[3], 2)], 2) ^ table_skew(old1_T0[BYTE(n[2], 3)], 3) ^ table_skew(old1_T0[BYTE(n[1], 4)], 4); round_constants[14]^= table_skew(old1_T0[BYTE(n[4], 2)], 2) ^ table_skew(old1_T0[BYTE(n[3], 3)], 3) ^ table_skew(old1_T0[BYTE(n[2], 4)], 4) ^ table_skew(old1_T0[BYTE(n[1], 5)], 5); round_constants[15]^= table_skew(old1_T0[BYTE(n[4], 3)], 3) ^ table_skew(old1_T0[BYTE(n[3], 4)], 4) ^ table_skew(old1_T0[BYTE(n[2], 5)], 5) ^ table_skew(old1_T0[BYTE(n[1], 6)], 6); PaddedMessage[0] ^= PaddedMessage[8]; cudaMemcpyToSymbol(c_PaddedMessage80, PaddedMessage, 128, 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(precomputed_round_key_80, round_constants, 80*sizeof(uint64_t), 0, cudaMemcpyHostToDevice); } __host__ extern void x15_whirlpool_cpu_free(int thr_id) { if (d_resNonce[thr_id]) cudaFree(d_resNonce[thr_id]); } __global__ __launch_bounds__(TPB80,2) void oldwhirlpool_gpu_hash_80(uint32_t threads, uint32_t startNounce, uint32_t* resNonce, const uint64_t target) { __shared__ uint2 sharedMemory[7][256]; if (threadIdx.x < 256) { const uint2 tmp = __ldg((uint2*)&b0[threadIdx.x]); sharedMemory[0][threadIdx.x] = tmp; sharedMemory[1][threadIdx.x] = ROL8(tmp); sharedMemory[2][threadIdx.x] = ROL16(tmp); sharedMemory[3][threadIdx.x] = ROL24(tmp); sharedMemory[4][threadIdx.x] = SWAPUINT2(tmp); sharedMemory[5][threadIdx.x] = ROR24(tmp); sharedMemory[6][threadIdx.x] = ROR16(tmp); } __syncthreads(); const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); if (thread < threads){ uint2 hash[8], state[8],n[8], tmp[8]; uint32_t nonce = cuda_swab32(startNounce + thread); uint2 temp = c_PaddedMessage80[9]; temp.y = nonce; /// round 2 /////// ////////////////////////////////// temp = temp ^ c_PaddedMessage80[1]; *(uint2x4*)&n[ 0] = *(uint2x4*)&precomputed_round_key_80[ 0]; *(uint2x4*)&n[ 4] = *(uint2x4*)&precomputed_round_key_80[ 4]; *(uint2x4*)&tmp[ 0] = *(uint2x4*)&precomputed_round_key_80[ 8]; *(uint2x4*)&tmp[ 4] = *(uint2x4*)&precomputed_round_key_80[12]; n[ 0]^= __ldg((uint2*)&b7[__byte_perm(temp.y, 0, 0x4443)]); n[ 5]^= sharedMemory[4][__byte_perm(temp.y, 0, 0x4440)]; n[ 6]^= sharedMemory[5][__byte_perm(temp.y, 0, 0x4441)]; n[ 7]^= sharedMemory[6][__byte_perm(temp.y, 0, 0x4442)]; tmp[ 0]^= __ldg((uint2*)&b0[__byte_perm(n[0].x, 0, 0x4440)]); tmp[ 0]^= sharedMemory[1][__byte_perm(n[7].x, 0, 0x4441)]; tmp[ 0]^= sharedMemory[2][__byte_perm(n[6].x, 0, 0x4442)]; tmp[ 0]^= sharedMemory[3][__byte_perm(n[5].x, 0, 0x4443)]; tmp[ 1]^= sharedMemory[1][__byte_perm(n[0].x, 0, 0x4441)]; tmp[ 1]^= sharedMemory[2][__byte_perm(n[7].x, 0, 0x4442)]; tmp[ 1]^= sharedMemory[3][__byte_perm(n[6].x, 0, 0x4443)]; tmp[ 1]^= sharedMemory[4][__byte_perm(n[5].y, 0, 0x4440)]; tmp[ 2]^= sharedMemory[2][__byte_perm(n[0].x, 0, 0x4442)]; tmp[ 2]^= sharedMemory[3][__byte_perm(n[7].x, 0, 0x4443)]; tmp[ 2]^= sharedMemory[4][__byte_perm(n[6].y, 0, 0x4440)]; tmp[ 2]^= 
sharedMemory[5][__byte_perm(n[5].y, 0, 0x4441)]; tmp[ 3]^= sharedMemory[3][__byte_perm(n[0].x, 0, 0x4443)]; tmp[ 3]^= sharedMemory[4][__byte_perm(n[7].y, 0, 0x4440)]; tmp[ 3]^= ROR24(__ldg((uint2*)&b0[__byte_perm(n[6].y, 0, 0x4441)])); tmp[ 3]^= ROR8(__ldg((uint2*)&b7[__byte_perm(n[5].y, 0, 0x4442)])); tmp[ 4]^= sharedMemory[4][__byte_perm(n[0].y, 0, 0x4440)]; tmp[ 4]^= sharedMemory[5][__byte_perm(n[7].y, 0, 0x4441)]; tmp[ 4]^= ROR8(__ldg((uint2*)&b7[__byte_perm(n[6].y, 0, 0x4442)])); tmp[ 4]^= __ldg((uint2*)&b7[__byte_perm(n[5].y, 0, 0x4443)]); tmp[ 5]^= __ldg((uint2*)&b0[__byte_perm(n[5].x, 0, 0x4440)]); tmp[ 5]^= sharedMemory[5][__byte_perm(n[0].y, 0, 0x4441)]; tmp[ 5]^= sharedMemory[6][__byte_perm(n[7].y, 0, 0x4442)]; tmp[ 5]^= __ldg((uint2*)&b7[__byte_perm(n[6].y, 0, 0x4443)]); tmp[ 6]^= __ldg((uint2*)&b0[__byte_perm(n[6].x, 0, 0x4440)]); tmp[ 6]^= sharedMemory[1][__byte_perm(n[5].x, 0, 0x4441)]; tmp[ 6]^= sharedMemory[6][__byte_perm(n[0].y, 0, 0x4442)]; tmp[ 6]^= __ldg((uint2*)&b7[__byte_perm(n[7].y, 0, 0x4443)]); tmp[ 7]^= __ldg((uint2*)&b0[__byte_perm(n[7].x, 0, 0x4440)]); tmp[ 7]^= sharedMemory[1][__byte_perm(n[6].x, 0, 0x4441)]; tmp[ 7]^= sharedMemory[2][__byte_perm(n[5].x, 0, 0x4442)]; tmp[ 7]^= __ldg((uint2*)&b7[__byte_perm(n[0].y, 0, 0x4443)]); TRANSFER(n, tmp); for (int i=2; i<10; i++) { tmp[ 0] = d_ROUND_ELT1_LDG(sharedMemory,n, 0, 7, 6, 5, 4, 3, 2, 1, precomputed_round_key_80[i*8+0]); tmp[ 1] = d_ROUND_ELT1( sharedMemory,n, 1, 0, 7, 6, 5, 4, 3, 2, precomputed_round_key_80[i*8+1]); tmp[ 2] = d_ROUND_ELT1( sharedMemory,n, 2, 1, 0, 7, 6, 5, 4, 3, precomputed_round_key_80[i*8+2]); tmp[ 3] = d_ROUND_ELT1_LDG(sharedMemory,n, 3, 2, 1, 0, 7, 6, 5, 4, precomputed_round_key_80[i*8+3]); tmp[ 4] = d_ROUND_ELT1_LDG(sharedMemory,n, 4, 3, 2, 1, 0, 7, 6, 5, precomputed_round_key_80[i*8+4]); tmp[ 5] = d_ROUND_ELT1( sharedMemory,n, 5, 4, 3, 2, 1, 0, 7, 6, precomputed_round_key_80[i*8+5]); tmp[ 6] = d_ROUND_ELT1( sharedMemory,n, 6, 5, 4, 3, 2, 1, 0, 7, precomputed_round_key_80[i*8+6]); tmp[ 7] = d_ROUND_ELT1_LDG(sharedMemory,n, 7, 6, 5, 4, 3, 2, 1, 0, precomputed_round_key_80[i*8+7]); TRANSFER(n, tmp); } state[0] = c_PaddedMessage80[0] ^ n[0]; state[1] = c_PaddedMessage80[1] ^ n[1] ^ vectorize(REPLACE_HIDWORD(devectorize(c_PaddedMessage80[9]),nonce)); state[2] = c_PaddedMessage80[2] ^ n[2] ^ vectorize(0x0000000000000080); state[3] = c_PaddedMessage80[3] ^ n[3]; state[4] = c_PaddedMessage80[4] ^ n[4]; state[5] = c_PaddedMessage80[5] ^ n[5]; state[6] = c_PaddedMessage80[6] ^ n[6]; state[7] = c_PaddedMessage80[7] ^ n[7] ^ vectorize(0x8002000000000000); #pragma unroll 2 for(int r=0;r<2;r++){ #pragma unroll 8 for(int i=0;i<8;i++) hash[ i] = n[ i] = state[ i]; uint2 h[8] = { {0xC0EE0B30,0x672990AF},{0x28282828,0x28282828},{0x28282828,0x28282828},{0x28282828,0x28282828}, {0x28282828,0x28282828},{0x28282828,0x28282828},{0x28282828,0x28282828},{0x28282828,0x28282828} }; tmp[ 0] = d_ROUND_ELT1_LDG(sharedMemory,n, 0, 7, 6, 5, 4, 3, 2, 1, h[0]); tmp[ 1] = d_ROUND_ELT1(sharedMemory,n, 1, 0, 7, 6, 5, 4, 3, 2, h[1]); tmp[ 2] = d_ROUND_ELT1(sharedMemory,n, 2, 1, 0, 7, 6, 5, 4, 3, h[2]); tmp[ 3] = d_ROUND_ELT1_LDG(sharedMemory,n, 3, 2, 1, 0, 7, 6, 5, 4, h[3]); tmp[ 4] = d_ROUND_ELT1(sharedMemory,n, 4, 3, 2, 1, 0, 7, 6, 5, h[4]); tmp[ 5] = d_ROUND_ELT1_LDG(sharedMemory,n, 5, 4, 3, 2, 1, 0, 7, 6, h[5]); tmp[ 6] = d_ROUND_ELT1(sharedMemory,n, 6, 5, 4, 3, 2, 1, 0, 7, h[6]); tmp[ 7] = d_ROUND_ELT1_LDG(sharedMemory,n, 7, 6, 5, 4, 3, 2, 1, 0, h[7]); TRANSFER(n, tmp); // #pragma unroll 10 for (int i=1; i <10; i++){ 
tmp[ 0] = d_ROUND_ELT1_LDG(sharedMemory,n, 0, 7, 6, 5, 4, 3, 2, 1, precomputed_round_key_64[(i-1)*8+0]); tmp[ 1] = d_ROUND_ELT1( sharedMemory,n, 1, 0, 7, 6, 5, 4, 3, 2, precomputed_round_key_64[(i-1)*8+1]); tmp[ 2] = d_ROUND_ELT1( sharedMemory,n, 2, 1, 0, 7, 6, 5, 4, 3, precomputed_round_key_64[(i-1)*8+2]); tmp[ 3] = d_ROUND_ELT1_LDG(sharedMemory,n, 3, 2, 1, 0, 7, 6, 5, 4, precomputed_round_key_64[(i-1)*8+3]); tmp[ 4] = d_ROUND_ELT1( sharedMemory,n, 4, 3, 2, 1, 0, 7, 6, 5, precomputed_round_key_64[(i-1)*8+4]); tmp[ 5] = d_ROUND_ELT1( sharedMemory,n, 5, 4, 3, 2, 1, 0, 7, 6, precomputed_round_key_64[(i-1)*8+5]); tmp[ 6] = d_ROUND_ELT1( sharedMemory,n, 6, 5, 4, 3, 2, 1, 0, 7, precomputed_round_key_64[(i-1)*8+6]); tmp[ 7] = d_ROUND_ELT1_LDG(sharedMemory,n, 7, 6, 5, 4, 3, 2, 1, 0, precomputed_round_key_64[(i-1)*8+7]); TRANSFER(n, tmp); } #pragma unroll 8 for (int i=0; i<8; i++) state[i] = n[i] ^ hash[i]; #pragma unroll 6 for (int i=1; i<7; i++) n[i]=vectorize(0); n[0] = vectorize(0x80); n[7] = vectorize(0x2000000000000); #pragma unroll 8 for (int i=0; i < 8; i++) { h[i] = state[i]; n[i] = n[i] ^ h[i]; } // #pragma unroll 10 for (int i=0; i < 10; i++) { tmp[ 0] = d_ROUND_ELT1(sharedMemory, h, 0, 7, 6, 5, 4, 3, 2, 1, InitVector_RC[i]); tmp[ 1] = d_ROUND_ELT(sharedMemory, h, 1, 0, 7, 6, 5, 4, 3, 2); tmp[ 2] = d_ROUND_ELT_LDG(sharedMemory, h, 2, 1, 0, 7, 6, 5, 4, 3); tmp[ 3] = d_ROUND_ELT(sharedMemory, h, 3, 2, 1, 0, 7, 6, 5, 4); tmp[ 4] = d_ROUND_ELT_LDG(sharedMemory, h, 4, 3, 2, 1, 0, 7, 6, 5); tmp[ 5] = d_ROUND_ELT(sharedMemory, h, 5, 4, 3, 2, 1, 0, 7, 6); tmp[ 6] = d_ROUND_ELT_LDG(sharedMemory, h, 6, 5, 4, 3, 2, 1, 0, 7); tmp[ 7] = d_ROUND_ELT(sharedMemory, h, 7, 6, 5, 4, 3, 2, 1, 0); TRANSFER(h, tmp); tmp[ 0] = d_ROUND_ELT1(sharedMemory,n, 0, 7, 6, 5, 4, 3, 2, 1, tmp[0]); tmp[ 1] = d_ROUND_ELT1(sharedMemory,n, 1, 0, 7, 6, 5, 4, 3, 2, tmp[1]); tmp[ 2] = d_ROUND_ELT1_LDG(sharedMemory,n, 2, 1, 0, 7, 6, 5, 4, 3, tmp[2]); tmp[ 3] = d_ROUND_ELT1(sharedMemory,n, 3, 2, 1, 0, 7, 6, 5, 4, tmp[3]); tmp[ 4] = d_ROUND_ELT1(sharedMemory,n, 4, 3, 2, 1, 0, 7, 6, 5, tmp[4]); tmp[ 5] = d_ROUND_ELT1(sharedMemory,n, 5, 4, 3, 2, 1, 0, 7, 6, tmp[5]); tmp[ 6] = d_ROUND_ELT1(sharedMemory,n, 6, 5, 4, 3, 2, 1, 0, 7, tmp[6]); tmp[ 7] = d_ROUND_ELT1_LDG(sharedMemory,n, 7, 6, 5, 4, 3, 2, 1, 0, tmp[7]); TRANSFER(n, tmp); } state[0] = xor3x(state[0], n[0], vectorize(0x80)); state[1] = state[1]^ n[1]; state[2] = state[2]^ n[2]; state[3] = state[3]^ n[3]; state[4] = state[4]^ n[4]; state[5] = state[5]^ n[5]; state[6] = state[6]^ n[6]; state[7] = xor3x(state[7], n[7], vectorize(0x2000000000000)); } uint2 h[8] = { {0xC0EE0B30,0x672990AF},{0x28282828,0x28282828},{0x28282828,0x28282828},{0x28282828,0x28282828}, {0x28282828,0x28282828},{0x28282828,0x28282828},{0x28282828,0x28282828},{0x28282828,0x28282828} }; #pragma unroll 8 for(int i=0;i<8;i++) n[i]=hash[i] = state[ i]; tmp[ 0] = d_ROUND_ELT1(sharedMemory,n, 0, 7, 6, 5, 4, 3, 2, 1, h[0]); tmp[ 1] = d_ROUND_ELT1_LDG(sharedMemory,n, 1, 0, 7, 6, 5, 4, 3, 2, h[1]); tmp[ 2] = d_ROUND_ELT1(sharedMemory,n, 2, 1, 0, 7, 6, 5, 4, 3, h[2]); tmp[ 3] = d_ROUND_ELT1_LDG(sharedMemory,n, 3, 2, 1, 0, 7, 6, 5, 4, h[3]); tmp[ 4] = d_ROUND_ELT1(sharedMemory,n, 4, 3, 2, 1, 0, 7, 6, 5, h[4]); tmp[ 5] = d_ROUND_ELT1_LDG(sharedMemory,n, 5, 4, 3, 2, 1, 0, 7, 6, h[5]); tmp[ 6] = d_ROUND_ELT1(sharedMemory,n, 6, 5, 4, 3, 2, 1, 0, 7, h[6]); tmp[ 7] = d_ROUND_ELT1_LDG(sharedMemory,n, 7, 6, 5, 4, 3, 2, 1, 0, h[7]); TRANSFER(n, tmp); // #pragma unroll 10 for (int i=1; i <10; i++){ tmp[ 0] = 
d_ROUND_ELT1_LDG(sharedMemory,n, 0, 7, 6, 5, 4, 3, 2, 1, precomputed_round_key_64[(i-1)*8+0]); tmp[ 1] = d_ROUND_ELT1( sharedMemory,n, 1, 0, 7, 6, 5, 4, 3, 2, precomputed_round_key_64[(i-1)*8+1]); tmp[ 2] = d_ROUND_ELT1( sharedMemory,n, 2, 1, 0, 7, 6, 5, 4, 3, precomputed_round_key_64[(i-1)*8+2]); tmp[ 3] = d_ROUND_ELT1_LDG(sharedMemory,n, 3, 2, 1, 0, 7, 6, 5, 4, precomputed_round_key_64[(i-1)*8+3]); tmp[ 4] = d_ROUND_ELT1( sharedMemory,n, 4, 3, 2, 1, 0, 7, 6, 5, precomputed_round_key_64[(i-1)*8+4]); tmp[ 5] = d_ROUND_ELT1( sharedMemory,n, 5, 4, 3, 2, 1, 0, 7, 6, precomputed_round_key_64[(i-1)*8+5]); tmp[ 6] = d_ROUND_ELT1( sharedMemory,n, 6, 5, 4, 3, 2, 1, 0, 7, precomputed_round_key_64[(i-1)*8+6]); tmp[ 7] = d_ROUND_ELT1_LDG(sharedMemory,n, 7, 6, 5, 4, 3, 2, 1, 0, precomputed_round_key_64[(i-1)*8+7]); TRANSFER(n, tmp); } #pragma unroll 8 for (int i=0; i<8; i++) n[ i] = h[i] = n[i] ^ hash[i]; uint2 backup = h[ 3]; n[0]^= vectorize(0x80); n[7]^= vectorize(0x2000000000000); // #pragma unroll 8 for (int i=0; i < 8; i++) { tmp[ 0] = d_ROUND_ELT1(sharedMemory, h, 0, 7, 6, 5, 4, 3, 2, 1, InitVector_RC[i]); tmp[ 1] = d_ROUND_ELT(sharedMemory, h, 1, 0, 7, 6, 5, 4, 3, 2); tmp[ 2] = d_ROUND_ELT_LDG(sharedMemory, h, 2, 1, 0, 7, 6, 5, 4, 3); tmp[ 3] = d_ROUND_ELT(sharedMemory, h, 3, 2, 1, 0, 7, 6, 5, 4); tmp[ 4] = d_ROUND_ELT_LDG(sharedMemory, h, 4, 3, 2, 1, 0, 7, 6, 5); tmp[ 5] = d_ROUND_ELT(sharedMemory, h, 5, 4, 3, 2, 1, 0, 7, 6); tmp[ 6] = d_ROUND_ELT_LDG(sharedMemory, h, 6, 5, 4, 3, 2, 1, 0, 7); tmp[ 7] = d_ROUND_ELT(sharedMemory, h, 7, 6, 5, 4, 3, 2, 1, 0); TRANSFER(h, tmp); tmp[ 0] = d_ROUND_ELT1(sharedMemory,n, 0, 7, 6, 5, 4, 3, 2, 1, tmp[0]); tmp[ 1] = d_ROUND_ELT1(sharedMemory,n, 1, 0, 7, 6, 5, 4, 3, 2, tmp[1]); tmp[ 2] = d_ROUND_ELT1_LDG(sharedMemory,n, 2, 1, 0, 7, 6, 5, 4, 3, tmp[2]); tmp[ 3] = d_ROUND_ELT1(sharedMemory,n, 3, 2, 1, 0, 7, 6, 5, 4, tmp[3]); tmp[ 4] = d_ROUND_ELT1(sharedMemory,n, 4, 3, 2, 1, 0, 7, 6, 5, tmp[4]); tmp[ 5] = d_ROUND_ELT1(sharedMemory,n, 5, 4, 3, 2, 1, 0, 7, 6, tmp[5]); tmp[ 6] = d_ROUND_ELT1(sharedMemory,n, 6, 5, 4, 3, 2, 1, 0, 7, tmp[6]); tmp[ 7] = d_ROUND_ELT1_LDG(sharedMemory,n, 7, 6, 5, 4, 3, 2, 1, 0, tmp[7]); TRANSFER(n, tmp); } tmp[ 0] = d_ROUND_ELT1(sharedMemory, h, 0, 7, 6, 5, 4, 3, 2, 1, InitVector_RC[8]); tmp[ 1] = d_ROUND_ELT(sharedMemory, h, 1, 0, 7, 6, 5, 4, 3, 2); tmp[ 2] = d_ROUND_ELT_LDG(sharedMemory, h, 2, 1, 0, 7, 6, 5, 4, 3); tmp[ 3] = d_ROUND_ELT(sharedMemory, h, 3, 2, 1, 0, 7, 6, 5, 4); tmp[ 4] = d_ROUND_ELT_LDG(sharedMemory, h, 4, 3, 2, 1, 0, 7, 6, 5); tmp[ 5] = d_ROUND_ELT(sharedMemory, h, 5, 4, 3, 2, 1, 0, 7, 6); tmp[ 6] = d_ROUND_ELT(sharedMemory, h, 6, 5, 4, 3, 2, 1, 0, 7); tmp[ 7] = d_ROUND_ELT(sharedMemory, h, 7, 6, 5, 4, 3, 2, 1, 0); TRANSFER(h, tmp); tmp[ 0] = d_ROUND_ELT1(sharedMemory,n, 0, 7, 6, 5, 4, 3, 2, 1, tmp[0]); tmp[ 1] = d_ROUND_ELT1(sharedMemory,n, 1, 0, 7, 6, 5, 4, 3, 2, tmp[1]); tmp[ 2] = d_ROUND_ELT1(sharedMemory,n, 2, 1, 0, 7, 6, 5, 4, 3, tmp[2]); tmp[ 3] = d_ROUND_ELT1(sharedMemory,n, 3, 2, 1, 0, 7, 6, 5, 4, tmp[3]); tmp[ 4] = d_ROUND_ELT1(sharedMemory,n, 4, 3, 2, 1, 0, 7, 6, 5, tmp[4]); tmp[ 5] = d_ROUND_ELT1(sharedMemory,n, 5, 4, 3, 2, 1, 0, 7, 6, tmp[5]); tmp[ 6] = d_ROUND_ELT1_LDG(sharedMemory,n, 6, 5, 4, 3, 2, 1, 0, 7, tmp[6]); tmp[ 7] = d_ROUND_ELT1(sharedMemory,n, 7, 6, 5, 4, 3, 2, 1, 0, tmp[7]); n[ 3] = backup ^ d_ROUND_ELT(sharedMemory, h, 3, 2, 1, 0, 7, 6, 5, 4) ^ d_ROUND_ELT(sharedMemory,tmp, 3, 2, 1, 0, 7, 6, 5, 4); if(devectorize(n[3]) <= target) { uint32_t tmp = atomicExch(&resNonce[0], thread); if 
(tmp != UINT32_MAX) resNonce[1] = tmp; } } // thread < threads } /* only for whirlpool algo, no data out!! */ __host__ void whirlpool512_cpu_hash_80(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *h_resNonces, const uint64_t target) { dim3 grid((threads + TPB80-1) / TPB80); dim3 block(TPB80); cudaMemset(d_resNonce[thr_id], 0xff, 2*sizeof(uint32_t)); oldwhirlpool_gpu_hash_80<<<grid, block>>>(threads, startNounce, d_resNonce[thr_id], target); cudaMemcpy(h_resNonces, d_resNonce[thr_id], 2*sizeof(uint32_t), cudaMemcpyDeviceToHost); if (h_resNonces[0] != UINT32_MAX) h_resNonces[0] += startNounce; if (h_resNonces[1] != UINT32_MAX) h_resNonces[1] += startNounce; } __global__ __launch_bounds__(TPB64,2) void x15_whirlpool_gpu_hash_64(uint32_t threads, uint64_t *g_hash) { __shared__ uint2 sharedMemory[7][256]; if (threadIdx.x < 256) { const uint2 tmp = __ldg((uint2*)&b0[threadIdx.x]); sharedMemory[0][threadIdx.x] = tmp; sharedMemory[1][threadIdx.x] = ROL8(tmp); sharedMemory[2][threadIdx.x] = ROL16(tmp); sharedMemory[3][threadIdx.x] = ROL24(tmp); sharedMemory[4][threadIdx.x] = SWAPUINT2(tmp); sharedMemory[5][threadIdx.x] = ROR24(tmp); sharedMemory[6][threadIdx.x] = ROR16(tmp); } const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); if (thread < threads){ uint2 hash[8], n[8], h[ 8]; uint2 tmp[8] = { {0xC0EE0B30,0x672990AF},{0x28282828,0x28282828},{0x28282828,0x28282828},{0x28282828,0x28282828}, {0x28282828,0x28282828},{0x28282828,0x28282828},{0x28282828,0x28282828},{0x28282828,0x28282828} }; *(uint2x4*)&hash[ 0] = __ldg4((uint2x4*)&g_hash[(thread<<3) + 0]); *(uint2x4*)&hash[ 4] = __ldg4((uint2x4*)&g_hash[(thread<<3) + 4]); __syncthreads(); #pragma unroll 8 for(int i=0;i<8;i++) n[i]=hash[i]; tmp[ 0]^= d_ROUND_ELT(sharedMemory,n, 0, 7, 6, 5, 4, 3, 2, 1); tmp[ 1]^= d_ROUND_ELT_LDG(sharedMemory,n, 1, 0, 7, 6, 5, 4, 3, 2); tmp[ 2]^= d_ROUND_ELT(sharedMemory,n, 2, 1, 0, 7, 6, 5, 4, 3); tmp[ 3]^= d_ROUND_ELT_LDG(sharedMemory,n, 3, 2, 1, 0, 7, 6, 5, 4); tmp[ 4]^= d_ROUND_ELT(sharedMemory,n, 4, 3, 2, 1, 0, 7, 6, 5); tmp[ 5]^= d_ROUND_ELT_LDG(sharedMemory,n, 5, 4, 3, 2, 1, 0, 7, 6); tmp[ 6]^= d_ROUND_ELT(sharedMemory,n, 6, 5, 4, 3, 2, 1, 0, 7); tmp[ 7]^= d_ROUND_ELT_LDG(sharedMemory,n, 7, 6, 5, 4, 3, 2, 1, 0); for (int i=1; i <10; i++){ TRANSFER(n, tmp); tmp[ 0] = d_ROUND_ELT1_LDG(sharedMemory,n, 0, 7, 6, 5, 4, 3, 2, 1, precomputed_round_key_64[(i-1)*8+0]); tmp[ 1] = d_ROUND_ELT1( sharedMemory,n, 1, 0, 7, 6, 5, 4, 3, 2, precomputed_round_key_64[(i-1)*8+1]); tmp[ 2] = d_ROUND_ELT1( sharedMemory,n, 2, 1, 0, 7, 6, 5, 4, 3, precomputed_round_key_64[(i-1)*8+2]); tmp[ 3] = d_ROUND_ELT1_LDG(sharedMemory,n, 3, 2, 1, 0, 7, 6, 5, 4, precomputed_round_key_64[(i-1)*8+3]); tmp[ 4] = d_ROUND_ELT1( sharedMemory,n, 4, 3, 2, 1, 0, 7, 6, 5, precomputed_round_key_64[(i-1)*8+4]); tmp[ 5] = d_ROUND_ELT1( sharedMemory,n, 5, 4, 3, 2, 1, 0, 7, 6, precomputed_round_key_64[(i-1)*8+5]); tmp[ 6] = d_ROUND_ELT1( sharedMemory,n, 6, 5, 4, 3, 2, 1, 0, 7, precomputed_round_key_64[(i-1)*8+6]); tmp[ 7] = d_ROUND_ELT1_LDG(sharedMemory,n, 7, 6, 5, 4, 3, 2, 1, 0, precomputed_round_key_64[(i-1)*8+7]); } TRANSFER(h, tmp); #pragma unroll 8 for (int i=0; i<8; i++) hash[ i] = h[i] = h[i] ^ hash[i]; #pragma unroll 6 for (int i=1; i<7; i++) n[i]=vectorize(0); n[0] = vectorize(0x80); n[7] = vectorize(0x2000000000000); #pragma unroll 8 for (int i=0; i < 8; i++) { n[i] = n[i] ^ h[i]; } // #pragma unroll 10 for (int i=0; i < 10; i++) { tmp[ 0] = InitVector_RC[i]; tmp[ 0]^= d_ROUND_ELT(sharedMemory, h, 0, 7, 6, 5, 4, 3, 2, 1); tmp[ 1] 
= d_ROUND_ELT(sharedMemory, h, 1, 0, 7, 6, 5, 4, 3, 2); tmp[ 2] = d_ROUND_ELT_LDG(sharedMemory, h, 2, 1, 0, 7, 6, 5, 4, 3); tmp[ 3] = d_ROUND_ELT(sharedMemory, h, 3, 2, 1, 0, 7, 6, 5, 4); tmp[ 4] = d_ROUND_ELT_LDG(sharedMemory, h, 4, 3, 2, 1, 0, 7, 6, 5); tmp[ 5] = d_ROUND_ELT(sharedMemory, h, 5, 4, 3, 2, 1, 0, 7, 6); tmp[ 6] = d_ROUND_ELT(sharedMemory, h, 6, 5, 4, 3, 2, 1, 0, 7); tmp[ 7] = d_ROUND_ELT(sharedMemory, h, 7, 6, 5, 4, 3, 2, 1, 0); TRANSFER(h, tmp); tmp[ 0] = d_ROUND_ELT1(sharedMemory,n, 0, 7, 6, 5, 4, 3, 2, 1, tmp[0]); tmp[ 1] = d_ROUND_ELT1_LDG(sharedMemory,n, 1, 0, 7, 6, 5, 4, 3, 2, tmp[1]); tmp[ 2] = d_ROUND_ELT1(sharedMemory,n, 2, 1, 0, 7, 6, 5, 4, 3, tmp[2]); tmp[ 3] = d_ROUND_ELT1(sharedMemory,n, 3, 2, 1, 0, 7, 6, 5, 4, tmp[3]); tmp[ 4] = d_ROUND_ELT1_LDG(sharedMemory,n, 4, 3, 2, 1, 0, 7, 6, 5, tmp[4]); tmp[ 5] = d_ROUND_ELT1(sharedMemory,n, 5, 4, 3, 2, 1, 0, 7, 6, tmp[5]); tmp[ 6] = d_ROUND_ELT1_LDG(sharedMemory,n, 6, 5, 4, 3, 2, 1, 0, 7, tmp[6]); tmp[ 7] = d_ROUND_ELT1(sharedMemory,n, 7, 6, 5, 4, 3, 2, 1, 0, tmp[7]); TRANSFER(n, tmp); } hash[0] = xor3x(hash[0], n[0], vectorize(0x80)); hash[1] = hash[1]^ n[1]; hash[2] = hash[2]^ n[2]; hash[3] = hash[3]^ n[3]; hash[4] = hash[4]^ n[4]; hash[5] = hash[5]^ n[5]; hash[6] = hash[6]^ n[6]; hash[7] = xor3x(hash[7], n[7], vectorize(0x2000000000000)); *(uint2x4*)&g_hash[(thread<<3)+ 0] = *(uint2x4*)&hash[ 0]; *(uint2x4*)&g_hash[(thread<<3)+ 4] = *(uint2x4*)&hash[ 4]; } } __host__ static void x15_whirlpool_cpu_hash_64(int thr_id, uint32_t threads, uint32_t *d_hash) { dim3 grid((threads + TPB64-1) / TPB64); dim3 block(TPB64); x15_whirlpool_gpu_hash_64 <<<grid, block>>> (threads, (uint64_t*)d_hash); } __host__ void x15_whirlpool_cpu_hash_64(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order) { x15_whirlpool_cpu_hash_64(thr_id, threads, d_hash); }
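// Host-side sketch of why the kernels above keep only T0 (b0) and T7 (b7) in device
// memory and rebuild the intermediate tables in shared memory by rotating each 64-bit
// entry: Whirlpool's eight lookup tables are byte-rotations of one another,
// T_k[i] = ROTL64(T0[i], 8*k), so T7 = ROTL64(T0, 56) = ROTR64(T0, 8), which is exactly
// the table7[] uploaded in x15_whirlpool_cpu_init. `build_tables` and `rotl64` are our
// illustrative helpers, not part of this file.
#include <stdint.h>

static inline uint64_t rotl64(uint64_t x, unsigned n) {
  return n ? (x << n) | (x >> (64u - n)) : x;
}

static void build_tables(const uint64_t T0[256], uint64_t T[8][256]) {
  for (int i = 0; i < 256; ++i)
    for (unsigned k = 0; k < 8; ++k)
      T[k][i] = rotl64(T0[i], 8u * k);  // k = 0 is T0 itself, k = 7 matches b7
}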
/* ==== the_stack ==== */
#include <ATen/ATen.h> #include <cuda.h> #include <cuda_runtime.h> #include <thrust/device_vector.h> #include <vector> #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) #define POSE_CHANNELS 9 // poses_src: (batch_size, 9) template <typename Dtype> __global__ void AveragedistanceForward(const int nthreads, const Dtype* bottom_rotations, const Dtype* bottom_translations, const Dtype* poses_src, const Dtype* poses_tgt, const Dtype* extents, const Dtype* points, const int batch_size, const int num_classes, const int num_points, Dtype* rotations, Dtype* losses, Dtype* diffs_rotation, Dtype* diffs_translation) { CUDA_1D_KERNEL_LOOP(index_thread, nthreads) { // batch index int n = index_thread / num_points; int p = index_thread % num_points; // find the class label and pose of this object int index_cls = int(poses_src[n * POSE_CHANNELS + 1]); int ind; Dtype s, u, v, w; // gt quaternion target int index = n * POSE_CHANNELS + 2; s = poses_tgt[index + 0]; u = poses_tgt[index + 1]; v = poses_tgt[index + 2]; w = poses_tgt[index + 3]; // gt rotation matrix target ind = n * num_points * 7 * 9 + p * 7 * 9 + 0; rotations[ind + 0] = s * s + u * u - v * v - w * w; rotations[ind + 1] = 2 * (u * v - s * w); rotations[ind + 2] = 2 * (u * w + s * v); rotations[ind + 3] = 2 * (u * v + s * w); rotations[ind + 4] = s * s - u * u + v * v - w * w; rotations[ind + 5] = 2 * (v * w - s * u); rotations[ind + 6] = 2 * (u * w - s * v); rotations[ind + 7] = 2 * (v * w + s * u); rotations[ind + 8] = s * s - u * u - v * v + w * w; // gt quaternion source s = poses_src[index + 0]; u = poses_src[index + 1]; v = poses_src[index + 2]; w = poses_src[index + 3]; // gt rotation matrix source ind = n * num_points * 7 * 9 + p * 7 * 9 + 9; rotations[ind + 0] = s * s + u * u - v * v - w * w; rotations[ind + 1] = 2 * (u * v - s * w); rotations[ind + 2] = 2 * (u * w + s * v); rotations[ind + 3] = 2 * (u * v + s * w); rotations[ind + 4] = s * s - u * u + v * v - w * w; rotations[ind + 5] = 2 * (v * w - s * u); rotations[ind + 6] = 2 * (u * w - s * v); rotations[ind + 7] = 2 * (v * w + s * u); rotations[ind + 8] = s * s - u * u - v * v + w * w; // predicted quaternion index = n * 4 * num_classes + 4 * index_cls; s = bottom_rotations[index + 0]; u = bottom_rotations[index + 1]; v = bottom_rotations[index + 2]; w = bottom_rotations[index + 3]; // predicted rotation matrix ind = n * num_points * 7 * 9 + p * 7 * 9 + 18; rotations[ind + 0] = s * s + u * u - v * v - w * w; rotations[ind + 1] = 2 * (u * v - s * w); rotations[ind + 2] = 2 * (u * w + s * v); rotations[ind + 3] = 2 * (u * v + s * w); rotations[ind + 4] = s * s - u * u + v * v - w * w; rotations[ind + 5] = 2 * (v * w - s * u); rotations[ind + 6] = 2 * (u * w - s * v); rotations[ind + 7] = 2 * (v * w + s * u); rotations[ind + 8] = s * s - u * u - v * v + w * w; // derivatives of Ru to quaternion ind = n * num_points * 7 * 9 + p * 7 * 9 + 27; rotations[ind + 0] = 2 * s; rotations[ind + 1] = -2 * w; rotations[ind + 2] = 2 * v; rotations[ind + 3] = 2 * w; rotations[ind + 4] = 2 * s; rotations[ind + 5] = -2 * u; rotations[ind + 6] = -2 * v; rotations[ind + 7] = 2 * u; rotations[ind + 8] = 2 * s; ind = n * num_points * 7 * 9 + p * 7 * 9 + 36; rotations[ind + 0] = 2 * u; rotations[ind + 1] = 2 * v; rotations[ind + 2] = 2 * w; rotations[ind + 3] = 2 * v; rotations[ind + 4] = -2 * u; rotations[ind + 5] = -2 * s; rotations[ind + 6] = 2 * w; rotations[ind + 7] = 2 * s; rotations[ind + 8] = -2 * u; ind = n * num_points 
* 7 * 9 + p * 7 * 9 + 45; rotations[ind + 0] = -2 * v; rotations[ind + 1] = 2 * u; rotations[ind + 2] = 2 * s; rotations[ind + 3] = 2 * u; rotations[ind + 4] = 2 * v; rotations[ind + 5] = 2 * w; rotations[ind + 6] = -2 * s; rotations[ind + 7] = 2 * w; rotations[ind + 8] = -2 * v; ind = n * num_points * 7 * 9 + p * 7 * 9 + 54; rotations[ind + 0] = -2 * w; rotations[ind + 1] = -2 * s; rotations[ind + 2] = 2 * u; rotations[ind + 3] = 2 * s; rotations[ind + 4] = -2 * w; rotations[ind + 5] = 2 * v; rotations[ind + 6] = 2 * u; rotations[ind + 7] = 2 * v; rotations[ind + 8] = 2 * w; // for the point index = index_cls * num_points * 3 + p * 3; ind = n * num_points * 7 * 9 + p * 7 * 9; // weight for the point Dtype weight = -1; for (int j = 0; j < 3; j++) { if (extents[index_cls * 3 + j] > weight) weight = extents[index_cls * 3 + j]; } weight = 10.0 / weight; // rotate the point using the source pose Dtype x1_0 = rotations[ind + 9 + 0] * points[index + 0] + rotations[ind + 9 + 1] * points[index + 1] + rotations[ind + 9 + 2] * points[index + 2]; Dtype y1_0 = rotations[ind + 9 + 3] * points[index + 0] + rotations[ind + 9 + 4] * points[index + 1] + rotations[ind + 9 + 5] * points[index + 2]; Dtype z1_0 = rotations[ind + 9 + 6] * points[index + 0] + rotations[ind + 9 + 7] * points[index + 1] + rotations[ind + 9 + 8] * points[index + 2]; x1_0 *= weight; y1_0 *= weight; z1_0 *= weight; // rotate the point again using the estimated delta rotation int index_tran = n * 3 * num_classes + 3 * index_cls; Dtype x1 = rotations[ind + 18 + 0] * x1_0 + rotations[ind + 18 + 1] * y1_0 + rotations[ind + 18 + 2] * z1_0 + weight * bottom_translations[index_tran + 0]; Dtype y1 = rotations[ind + 18 + 3] * x1_0 + rotations[ind + 18 + 4] * y1_0 + rotations[ind + 18 + 5] * z1_0 + weight * bottom_translations[index_tran + 1]; Dtype z1 = rotations[ind + 18 + 6] * x1_0 + rotations[ind + 18 + 7] * y1_0 + rotations[ind + 18 + 8] * z1_0 + weight * bottom_translations[index_tran + 2]; // rotate and translate the point using the target pose Dtype x2 = weight * (rotations[ind + 0] * points[index + 0] + rotations[ind + 1] * points[index + 1] + rotations[ind + 2] * points[index + 2]) + weight * poses_tgt[n * POSE_CHANNELS + 6]; Dtype y2 = weight * (rotations[ind + 3] * points[index + 0] + rotations[ind + 4] * points[index + 1] + rotations[ind + 5] * points[index + 2]) + weight * poses_tgt[n * POSE_CHANNELS + 7]; Dtype z2 = weight * (rotations[ind + 6] * points[index + 0] + rotations[ind + 7] * points[index + 1] + rotations[ind + 8] * points[index + 2]) + weight * poses_tgt[n * POSE_CHANNELS + 8]; // smooth l1 loss Dtype distance = 0; int index_diff = n * num_points * 4 * num_classes + p * 4 * num_classes + 4 * index_cls; int index_diff_t = n * num_points * 3 * num_classes + p * 3 * num_classes + 3 * index_cls; for (int j = 0; j < 3; j++) { Dtype diff, df; if (j == 0) diff = x1 - x2; else if (j == 1) diff = y1 - y2; else diff = z1 - z2; if (fabs(diff) < 1) { distance += 0.5 * diff * diff; df = diff; } else { distance += fabs(diff) - 0.5; if (diff > 0) df = 1.0; else df = -1.0; } for (int k = 0; k < 3; k++) { Dtype dp; if (k == 0) dp = x1_0; else if (k == 1) dp = y1_0; else dp = z1_0; ind = n * num_points * 7 * 9 + p * 7 * 9 + 27; diffs_rotation[index_diff + 0] += df * dp * rotations[ind + j * 3 + k] / (batch_size * num_points); ind = n * num_points * 7 * 9 + p * 7 * 9 + 36; diffs_rotation[index_diff + 1] += df * dp * rotations[ind + j * 3 + k] / (batch_size * num_points); ind = n * num_points * 7 * 9 + p * 7 * 9 + 45; 
diffs_rotation[index_diff + 2] += df * dp * rotations[ind + j * 3 + k] / (batch_size * num_points); ind = n * num_points * 7 * 9 + p * 7 * 9 + 54; diffs_rotation[index_diff + 3] += df * dp * rotations[ind + j * 3 + k] / (batch_size * num_points); } diffs_translation[index_diff_t + j] = weight * df / (batch_size * num_points); } losses[index_thread] = distance / (batch_size * num_points); } } template <typename Dtype> __global__ void sum_losses_gradients(const int nthreads, const Dtype* losses, const Dtype* poses_src, const Dtype* diffs_rotation, const Dtype* diffs_translation, const int num_classes, const int num_points, Dtype* losses_batch, Dtype* bottom_diff_rotation, Dtype* bottom_diff_translation) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int index_cls = int(poses_src[index * POSE_CHANNELS + 1]); losses_batch[index] = 0; bottom_diff_rotation[4 * index * num_classes + 4 * index_cls + 0] = 0; bottom_diff_rotation[4 * index * num_classes + 4 * index_cls + 1] = 0; bottom_diff_rotation[4 * index * num_classes + 4 * index_cls + 2] = 0; bottom_diff_rotation[4 * index * num_classes + 4 * index_cls + 3] = 0; bottom_diff_translation[3 * index * num_classes + 3 * index_cls + 0] = 0; bottom_diff_translation[3 * index * num_classes + 3 * index_cls + 1] = 0; bottom_diff_translation[3 * index * num_classes + 3 * index_cls + 2] = 0; for (int p = 0; p < num_points; p++) { losses_batch[index] += losses[index * num_points + p]; int index_diff = index * num_points * 4 * num_classes + p * 4 * num_classes + 4 * index_cls; bottom_diff_rotation[4 * index * num_classes + 4 * index_cls + 0] += diffs_rotation[index_diff + 0]; bottom_diff_rotation[4 * index * num_classes + 4 * index_cls + 1] += diffs_rotation[index_diff + 1]; bottom_diff_rotation[4 * index * num_classes + 4 * index_cls + 2] += diffs_rotation[index_diff + 2]; bottom_diff_rotation[4 * index * num_classes + 4 * index_cls + 3] += diffs_rotation[index_diff + 3]; index_diff = index * num_points * 3 * num_classes + p * 3 * num_classes + 3 * index_cls; bottom_diff_translation[3 * index * num_classes + 3 * index_cls + 0] += diffs_translation[index_diff + 0]; bottom_diff_translation[3 * index * num_classes + 3 * index_cls + 1] += diffs_translation[index_diff + 1]; bottom_diff_translation[3 * index * num_classes + 3 * index_cls + 2] += diffs_translation[index_diff + 2]; } } } std::vector<at::Tensor> pml_cuda_forward( at::Tensor bottom_rotations, at::Tensor bottom_translations, at::Tensor poses_src, at::Tensor poses_tgt, at::Tensor extents, at::Tensor points) { // run kernels const int kThreadsPerBlock = 512; int output_size; // temp losses const int batch_size = bottom_rotations.size(0); const int num_classes = points.size(1); const int num_points = points.size(2); auto losses = at::zeros({batch_size, num_points}, points.options()); auto losses_batch = at::zeros({batch_size}, points.options()); auto loss = at::zeros({1}, points.options()); // temp diffs auto diffs_rotation = at::zeros({batch_size, num_points, 4 * num_classes}, points.options()); auto bottom_diff_rotation = at::zeros({batch_size, 4 * num_classes}, points.options()); auto diffs_translation = at::zeros({batch_size, num_points, 3 * num_classes}, points.options()); auto bottom_diff_translation = at::zeros({batch_size, 3 * num_classes}, points.options()); // temp rotations auto rots = at::zeros({batch_size, num_points, 7 * 9}, points.options()); AT_DISPATCH_FLOATING_TYPES(points.type(), "pml_forward_cuda", ([&] { // compute the losses and gradients output_size = batch_size * num_points; 
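// One thread per (sample, point) pair: each thread writes its own slot of `losses`,
// `diffs_rotation` and `diffs_translation` (indexed by n, p and the sample's class),
// so this pass needs no atomics; the per-sample reduction over points happens in
// sum_losses_gradients launched below.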
AveragedistanceForward<scalar_t><<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>( output_size, bottom_rotations.data<scalar_t>(), bottom_translations.data<scalar_t>(), poses_src.data<scalar_t>(), poses_tgt.data<scalar_t>(), extents.data<scalar_t>(), points.data<scalar_t>(), batch_size, num_classes, num_points, rots.data<scalar_t>(), losses.data<scalar_t>(), diffs_rotation.data<scalar_t>(), diffs_translation.data<scalar_t>()); // sum the diffs output_size = batch_size; sum_losses_gradients<scalar_t><<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>( output_size, losses.data<scalar_t>(), poses_src.data<scalar_t>(), diffs_rotation.data<scalar_t>(), diffs_translation.data<scalar_t>(), num_classes, num_points, losses_batch.data<scalar_t>(), bottom_diff_rotation.data<scalar_t>(), bottom_diff_translation.data<scalar_t>()); // sum the loss thrust::device_ptr<float> losses_ptr(losses_batch.data<float>()); float loss_value = thrust::reduce(losses_ptr, losses_ptr + batch_size); cudaMemcpy(loss.data<float>(), &loss_value, sizeof(float), cudaMemcpyHostToDevice); })); return {loss, bottom_diff_rotation, bottom_diff_translation}; } template <typename Dtype> __global__ void AveragedistanceBackward(const int nthreads, const Dtype* top_diff, const Dtype* bottom_diff, Dtype* output) { CUDA_1D_KERNEL_LOOP(index, nthreads) { output[index] = top_diff[0] * bottom_diff[index]; } } std::vector<at::Tensor> pml_cuda_backward( at::Tensor grad_loss, at::Tensor bottom_diff_rotation, at::Tensor bottom_diff_translation) { const int kThreadsPerBlock = 512; int output_size; const int batch_size = bottom_diff_rotation.size(0); const int num_classes = bottom_diff_rotation.size(1) / 4; auto grad_rotation = at::zeros({batch_size, 4 * num_classes}, bottom_diff_rotation.options()); auto grad_translation = at::zeros({batch_size, 3 * num_classes}, bottom_diff_translation.options()); AT_DISPATCH_FLOATING_TYPES(grad_loss.type(), "pml_backward_cuda", ([&] { output_size = batch_size * 4 * num_classes; AveragedistanceBackward<scalar_t><<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>( output_size, grad_loss.data<scalar_t>(), bottom_diff_rotation.data<scalar_t>(), grad_rotation.data<scalar_t>()); output_size = batch_size * 3 * num_classes; AveragedistanceBackward<scalar_t><<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>( output_size, grad_loss.data<scalar_t>(), bottom_diff_translation.data<scalar_t>(), grad_translation.data<scalar_t>()); })); return {grad_rotation, grad_translation}; }
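// ---------------------------------------------------------------------------
// Hedged sketch (added for illustration, not part of the original source):
// pml_cuda_forward / pml_cuda_backward above are written as PyTorch C++/CUDA
// extension entry points (at::Tensor in/out, AT_DISPATCH dispatch), so one
// plausible way to expose them to Python is a pybind11 module like the one
// below. The exported names "forward"/"backward" are assumptions, and a real
// build would normally keep this binding in a separate host-compiled .cpp.
// ---------------------------------------------------------------------------
#include <torch/extension.h>

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
  m.def("forward",  &pml_cuda_forward,  "point matching loss forward (CUDA)");
  m.def("backward", &pml_cuda_backward, "point matching loss backward (CUDA)");
}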
void kernel_sizing(CSRGraph &, dim3 &, dim3 &); #define TB_SIZE 256 const char *GGC_OPTIONS = "coop_conv=False $ outline_iterate_gb=False $ backoff_blocking_factor=4 $ parcomb=True $ np_schedulers=set(['fg', 'tb', 'wp']) $ cc_disable=set([]) $ tb_lb=False $ hacks=set([]) $ np_factor=8 $ instrument=set([]) $ unroll=[] $ instrument_mode=None $ read_props=None $ outline_iterate=False $ ignore_nested_errors=False $ np=True $ write_props=None $ quiet_cgen=True $ dyn_lb=False $ retry_backoff=True $ cuda.graph_type=basic $ cuda.use_worklist_slots=True $ cuda.worklist_type=basic"; bool enable_lb = false; #include "bc_level_cuda.cuh" __global__ void InitializeGraph(CSRGraph graph, unsigned int __begin, unsigned int __end, float * p_betweeness_centrality, float * p_dependency, ShortPathType * p_num_shortest_paths) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; index_type src_end; // FP: "1 -> 2; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { bool pop = src < __end; if (pop) { p_betweeness_centrality[src] = 0; p_num_shortest_paths[src] = 0; p_dependency[src] = 0; } } // FP: "9 -> 10; } __global__ void InitializeIteration(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint64_t local_current_src_node, const uint32_t local_infinity, uint32_t * p_current_length, float * p_dependency, ShortPathType * p_num_shortest_paths) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; bool is_source; index_type src_end; // FP: "1 -> 2; // FP: "2 -> 3; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { bool pop = src < __end; if (pop) { is_source = graph.node_data[src] == local_current_src_node; if (!is_source) { p_current_length[src] = local_infinity; p_num_shortest_paths[src] = 0; } else { p_current_length[src] = 0; p_num_shortest_paths[src] = 1; } p_dependency[src] = 0; } } // FP: "15 -> 16; } __global__ void ForwardPass(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t local_r, uint32_t * p_current_length, ShortPathType * p_num_shortest_paths, DynamicBitset& bitset_current_length, DynamicBitset& bitset_num_shortest_paths, HGAccumulator<uint32_t> dga) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; __shared__ cub::BlockReduce<uint32_t, TB_SIZE>::TempStorage dga_ts; index_type src_end; // FP: "1 -> 2; // FP: "2 -> 3; dga.thread_entry(); // FP: "3 -> 4; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { index_type current_edge_end; bool pop = src < __end; if (pop) { if (p_current_length[src] == local_r) { } else { pop = false; } } if (!pop) { continue; } current_edge_end = (graph).getFirstEdge((src) + 1); for (index_type current_edge = (graph).getFirstEdge(src) + 0; current_edge < current_edge_end; current_edge += 1) { index_type dst; uint32_t new_dist; uint32_t old; dst = graph.getAbsDestination(current_edge); new_dist = 1 + p_current_length[src]; old = atomicTestMin(&p_current_length[dst], new_dist); if (old > new_dist) { double nsp; bitset_current_length.set(dst); nsp = p_num_shortest_paths[src]; atomicTestAdd(&p_num_shortest_paths[dst], nsp); bitset_num_shortest_paths.set(dst); dga.reduce( 1); } else { if (old == new_dist) { double nsp; nsp = p_num_shortest_paths[src]; atomicTestAdd(&p_num_shortest_paths[dst], nsp); bitset_num_shortest_paths.set(dst); dga.reduce( 1); } } } } // FP: "37 -> 38; 
dga.thread_exit<cub::BlockReduce<uint32_t, TB_SIZE> >(dga_ts); // FP: "38 -> 39; } __global__ void MiddleSync(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t local_infinity, uint32_t * p_current_length, DynamicBitset& bitset_num_shortest_paths) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; index_type src_end; // FP: "1 -> 2; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { bool pop = src < __end; if (pop) { if (p_current_length[src] != local_infinity) { bitset_num_shortest_paths.set(src); } } } // FP: "9 -> 10; } __global__ void BackwardPass(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t local_r, uint32_t * p_current_length, float * p_dependency, ShortPathType * p_num_shortest_paths, DynamicBitset& bitset_dependency) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; uint32_t dest_to_find; index_type src_end; // FP: "1 -> 2; // FP: "2 -> 3; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { index_type current_edge_end; bool pop = src < __end; if (pop) { if (p_current_length[src] == local_r) { dest_to_find = p_current_length[src] + 1; } else { pop = false; } } if (!pop) { continue; } current_edge_end = (graph).getFirstEdge((src) + 1); for (index_type current_edge = (graph).getFirstEdge(src) + 0; current_edge < current_edge_end; current_edge += 1) { index_type dst; dst = graph.getAbsDestination(current_edge); if (dest_to_find == p_current_length[dst]) { float contrib; contrib = ((float)1 + p_dependency[dst]) / p_num_shortest_paths[dst]; p_dependency[src] = p_dependency[src] + contrib; bitset_dependency.set(src); } } p_dependency[src] *= p_num_shortest_paths[src]; } // FP: "25 -> 26; } __global__ void BC(CSRGraph graph, unsigned int __begin, unsigned int __end, float * p_betweeness_centrality, float * p_dependency) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; index_type src_end; // FP: "1 -> 2; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { bool pop = src < __end; if (pop) { if (p_dependency[src] > 0) { p_betweeness_centrality[src] += p_dependency[src]; } } } // FP: "9 -> 10; } __global__ void Sanity(CSRGraph graph, unsigned int __begin, unsigned int __end, float * p_betweeness_centrality, HGAccumulator<float> DGAccumulator_sum, HGReduceMax<float> DGAccumulator_max, HGReduceMin<float> DGAccumulator_min) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; __shared__ cub::BlockReduce<float, TB_SIZE>::TempStorage DGAccumulator_sum_ts; __shared__ cub::BlockReduce<float, TB_SIZE>::TempStorage DGAccumulator_max_ts; __shared__ cub::BlockReduce<float, TB_SIZE>::TempStorage DGAccumulator_min_ts; index_type src_end; // FP: "1 -> 2; // FP: "2 -> 3; DGAccumulator_sum.thread_entry(); // FP: "3 -> 4; // FP: "4 -> 5; DGAccumulator_max.thread_entry(); // FP: "5 -> 6; // FP: "6 -> 7; DGAccumulator_min.thread_entry(); // FP: "7 -> 8; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { bool pop = src < __end; if (pop) { DGAccumulator_max.reduce(p_betweeness_centrality[src]); DGAccumulator_min.reduce(p_betweeness_centrality[src]); DGAccumulator_sum.reduce( p_betweeness_centrality[src]); } } // FP: "15 -> 16; DGAccumulator_sum.thread_exit<cub::BlockReduce<float, TB_SIZE> >(DGAccumulator_sum_ts); // 
FP: "16 -> 17; DGAccumulator_max.thread_exit<cub::BlockReduce<float, TB_SIZE> >(DGAccumulator_max_ts); // FP: "17 -> 18; DGAccumulator_min.thread_exit<cub::BlockReduce<float, TB_SIZE> >(DGAccumulator_min_ts); // FP: "18 -> 19; } void InitializeGraph_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; InitializeGraph <<<blocks, threads>>>(ctx->gg, __begin, __end, ctx->betweeness_centrality.data.gpu_wr_ptr(), ctx->dependency.data.gpu_wr_ptr(), ctx->num_shortest_paths.data.gpu_wr_ptr()); cudaDeviceSynchronize(); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; } void InitializeGraph_allNodes_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; InitializeGraph_cuda(0, ctx->gg.nnodes, ctx); // FP: "2 -> 3; } void InitializeGraph_masterNodes_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; InitializeGraph_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx); // FP: "2 -> 3; } void InitializeGraph_nodesWithEdges_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; InitializeGraph_cuda(0, ctx->numNodesWithEdges, ctx); // FP: "2 -> 3; } void InitializeIteration_cuda(unsigned int __begin, unsigned int __end, const uint32_t & local_infinity, const uint64_t & local_current_src_node, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; InitializeIteration <<<blocks, threads>>>(ctx->gg, __begin, __end, local_current_src_node, local_infinity, ctx->current_length.data.gpu_wr_ptr(), ctx->dependency.data.gpu_wr_ptr(), ctx->num_shortest_paths.data.gpu_wr_ptr()); cudaDeviceSynchronize(); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; } void InitializeIteration_allNodes_cuda(const uint32_t & local_infinity, const uint64_t & local_current_src_node, struct CUDA_Context* ctx) { // FP: "1 -> 2; InitializeIteration_cuda(0, ctx->gg.nnodes, local_infinity, local_current_src_node, ctx); // FP: "2 -> 3; } void InitializeIteration_masterNodes_cuda(const uint32_t & local_infinity, const uint64_t & local_current_src_node, struct CUDA_Context* ctx) { // FP: "1 -> 2; InitializeIteration_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_infinity, local_current_src_node, ctx); // FP: "2 -> 3; } void InitializeIteration_nodesWithEdges_cuda(const uint32_t & local_infinity, const uint64_t & local_current_src_node, struct CUDA_Context* ctx) { // FP: "1 -> 2; InitializeIteration_cuda(0, ctx->numNodesWithEdges, local_infinity, local_current_src_node, ctx); // FP: "2 -> 3; } void ForwardPass_cuda(unsigned int __begin, unsigned int __end, uint32_t & dga, uint32_t local_r, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; HGAccumulator<uint32_t> _dga; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; Shared<uint32_t> dgaval = Shared<uint32_t>(1); // FP: "5 -> 6; // FP: "6 -> 7; *(dgaval.cpu_wr_ptr()) = 0; // FP: "7 -> 8; _dga.rv = dgaval.gpu_wr_ptr(); // FP: "8 -> 9; ForwardPass <<<blocks, threads>>>(ctx->gg, __begin, __end, local_r, ctx->current_length.data.gpu_wr_ptr(), ctx->num_shortest_paths.data.gpu_wr_ptr(), *(ctx->current_length.is_updated.gpu_rd_ptr()), *(ctx->num_shortest_paths.is_updated.gpu_rd_ptr()), _dga); cudaDeviceSynchronize(); // FP: "9 -> 10; check_cuda_kernel; // FP: "10 -> 11; dga = *(dgaval.cpu_rd_ptr()); // FP: "11 -> 12; } void ForwardPass_allNodes_cuda(uint32_t & dga, uint32_t local_r, struct 
CUDA_Context* ctx) { // FP: "1 -> 2; ForwardPass_cuda(0, ctx->gg.nnodes, dga, local_r, ctx); // FP: "2 -> 3; } void ForwardPass_masterNodes_cuda(uint32_t & dga, uint32_t local_r, struct CUDA_Context* ctx) { // FP: "1 -> 2; ForwardPass_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, dga, local_r, ctx); // FP: "2 -> 3; } void ForwardPass_nodesWithEdges_cuda(uint32_t & dga, uint32_t local_r, struct CUDA_Context* ctx) { // FP: "1 -> 2; ForwardPass_cuda(0, ctx->numNodesWithEdges, dga, local_r, ctx); // FP: "2 -> 3; } void MiddleSync_cuda(unsigned int __begin, unsigned int __end, const uint32_t local_infinity, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; MiddleSync <<<blocks, threads>>>(ctx->gg, __begin, __end, local_infinity, ctx->current_length.data.gpu_wr_ptr(), *(ctx->num_shortest_paths.is_updated.gpu_rd_ptr())); cudaDeviceSynchronize(); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; } void MiddleSync_allNodes_cuda(const uint32_t local_infinity, struct CUDA_Context* ctx) { // FP: "1 -> 2; MiddleSync_cuda(0, ctx->gg.nnodes, local_infinity, ctx); // FP: "2 -> 3; } void MiddleSync_masterNodes_cuda(const uint32_t local_infinity, struct CUDA_Context* ctx) { // FP: "1 -> 2; MiddleSync_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_infinity, ctx); // FP: "2 -> 3; } void MiddleSync_nodesWithEdges_cuda(const uint32_t local_infinity, struct CUDA_Context* ctx) { // FP: "1 -> 2; MiddleSync_cuda(0, ctx->numNodesWithEdges, local_infinity, ctx); // FP: "2 -> 3; } void BackwardPass_cuda(unsigned int __begin, unsigned int __end, uint32_t local_r, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; BackwardPass <<<blocks, threads>>>(ctx->gg, __begin, __end, local_r, ctx->current_length.data.gpu_wr_ptr(), ctx->dependency.data.gpu_wr_ptr(), ctx->num_shortest_paths.data.gpu_wr_ptr(), *(ctx->dependency.is_updated.gpu_rd_ptr())); cudaDeviceSynchronize(); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; } void BackwardPass_allNodes_cuda(uint32_t local_r, struct CUDA_Context* ctx) { // FP: "1 -> 2; BackwardPass_cuda(0, ctx->gg.nnodes, local_r, ctx); // FP: "2 -> 3; } void BackwardPass_masterNodes_cuda(uint32_t local_r, struct CUDA_Context* ctx) { // FP: "1 -> 2; BackwardPass_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_r, ctx); // FP: "2 -> 3; } void BackwardPass_nodesWithEdges_cuda(uint32_t local_r, struct CUDA_Context* ctx) { // FP: "1 -> 2; BackwardPass_cuda(0, ctx->numNodesWithEdges, local_r, ctx); // FP: "2 -> 3; } void BC_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; BC <<<blocks, threads>>>(ctx->gg, __begin, __end, ctx->betweeness_centrality.data.gpu_wr_ptr(), ctx->dependency.data.gpu_wr_ptr()); cudaDeviceSynchronize(); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; } void BC_allNodes_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; BC_cuda(0, ctx->gg.nnodes, ctx); // FP: "2 -> 3; } void BC_masterNodes_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; BC_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx); // FP: "2 -> 3; } void BC_nodesWithEdges_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; BC_cuda(0, ctx->numNodesWithEdges, ctx); // FP: "2 -> 3; } void Sanity_cuda(unsigned int __begin, 
unsigned int __end, float & DGAccumulator_sum, float & DGAccumulator_max, float & DGAccumulator_min, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; HGAccumulator<float> _DGAccumulator_sum; HGReduceMax<float> _DGAccumulator_max; HGReduceMin<float> _DGAccumulator_min; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; Shared<float> DGAccumulator_sumval = Shared<float>(1); // FP: "5 -> 6; // FP: "6 -> 7; *(DGAccumulator_sumval.cpu_wr_ptr()) = 0; // FP: "7 -> 8; _DGAccumulator_sum.rv = DGAccumulator_sumval.gpu_wr_ptr(); // FP: "8 -> 9; Shared<float> DGAccumulator_maxval = Shared<float>(1); // FP: "9 -> 10; // FP: "10 -> 11; *(DGAccumulator_maxval.cpu_wr_ptr()) = 0; // FP: "11 -> 12; _DGAccumulator_max.rv = DGAccumulator_maxval.gpu_wr_ptr(); // FP: "12 -> 13; Shared<float> DGAccumulator_minval = Shared<float>(1); // FP: "13 -> 14; // FP: "14 -> 15; *(DGAccumulator_minval.cpu_wr_ptr()) = 1073741823; // FP: "15 -> 16; _DGAccumulator_min.rv = DGAccumulator_minval.gpu_wr_ptr(); // FP: "16 -> 17; Sanity <<<blocks, threads>>>(ctx->gg, __begin, __end, ctx->betweeness_centrality.data.gpu_wr_ptr(), _DGAccumulator_sum, _DGAccumulator_max, _DGAccumulator_min); cudaDeviceSynchronize(); // FP: "17 -> 18; check_cuda_kernel; // FP: "18 -> 19; DGAccumulator_sum = *(DGAccumulator_sumval.cpu_rd_ptr()); // FP: "19 -> 20; DGAccumulator_max = *(DGAccumulator_maxval.cpu_rd_ptr()); // FP: "20 -> 21; DGAccumulator_min = *(DGAccumulator_minval.cpu_rd_ptr()); // FP: "21 -> 22; } void Sanity_allNodes_cuda(float & DGAccumulator_sum, float & DGAccumulator_max, float & DGAccumulator_min, struct CUDA_Context* ctx) { // FP: "1 -> 2; Sanity_cuda(0, ctx->gg.nnodes, DGAccumulator_sum, DGAccumulator_max, DGAccumulator_min, ctx); // FP: "2 -> 3; } void Sanity_masterNodes_cuda(float & DGAccumulator_sum, float & DGAccumulator_max, float & DGAccumulator_min, struct CUDA_Context* ctx) { // FP: "1 -> 2; Sanity_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_sum, DGAccumulator_max, DGAccumulator_min, ctx); // FP: "2 -> 3; } void Sanity_nodesWithEdges_cuda(float & DGAccumulator_sum, float & DGAccumulator_max, float & DGAccumulator_min, struct CUDA_Context* ctx) { // FP: "1 -> 2; Sanity_cuda(0, ctx->numNodesWithEdges, DGAccumulator_sum, DGAccumulator_max, DGAccumulator_min, ctx); // FP: "2 -> 3; }
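// ---------------------------------------------------------------------------
// Hedged sketch (illustration only, not generated code): the *_allNodes_cuda
// wrappers above are normally driven by the distributed Galois/Gluon host
// program, which also synchronizes the updated bitsets between rounds. Leaving
// that synchronization out, a minimal single-GPU call sequence for one source
// node looks roughly like this. InitializeGraph_allNodes_cuda(ctx) is assumed
// to have been called once beforehand.
// ---------------------------------------------------------------------------
void bc_level_single_source_sketch(struct CUDA_Context* ctx,
                                   const uint32_t local_infinity,
                                   const uint64_t src_node)
{
  InitializeIteration_allNodes_cuda(local_infinity, src_node, ctx);

  // Forward phase: level-synchronous shortest-path counting; stop once a
  // round makes no distance updates.
  uint32_t round = 0;
  uint32_t work_done;
  do
  {
    work_done = 0;
    ForwardPass_allNodes_cuda(work_done, round, ctx);
    ++round;
  } while (work_done > 0);

  MiddleSync_allNodes_cuda(local_infinity, ctx);

  // Backward phase: accumulate dependencies level by level, deepest first.
  for (uint32_t r = round; r-- > 0; )
    BackwardPass_allNodes_cuda(r, ctx);

  // Fold the accumulated dependencies into the betweenness-centrality scores.
  BC_allNodes_cuda(ctx);
}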
using namespace nvinfer1; namespace bert { inline __device__ void res_add( float (&hdata)[4], const uint32_t idata, const uint32_t ires, const float dqData, const float dqRes) { char4 ires4 = reinterpret_cast<const char4&>(ires); char4 idata4 = reinterpret_cast<const char4&>(idata); hdata[0] = float(idata4.x) * dqData + float(ires4.x) * dqRes; hdata[1] = float(idata4.y) * dqData + float(ires4.y) * dqRes; hdata[2] = float(idata4.z) * dqData + float(ires4.z) * dqRes; hdata[3] = float(idata4.w) * dqData + float(ires4.w) * dqRes; } template <int32_t WARPS, int32_t HEADS, int32_t THREADS_PER_ROW> __global__ void skipln_vec32_hface(const int8_t* input, const int8_t* skip, int8_t* output, const half* beta, const half* gamma, const float dqScaleIn, const float dqScaleSkip, const float qScale, const int32_t total) { // clang-format off enum { HEAD_SIZE = 64 }; enum { BYTES_PER_LDG = 16 }; enum { THREADS_PER_CTA = WARPS * 32 }; enum { ROWS_PER_LDG = THREADS_PER_CTA / THREADS_PER_ROW }; enum { VECS_PER_CTA = THREADS_PER_ROW / 2 }; enum { PARAM_BYTES = HEADS * HEAD_SIZE * 2 }; enum { PARAM_LDGS = PARAM_BYTES / (THREADS_PER_CTA * BYTES_PER_LDG) }; enum { LDGS = HEADS * 2 / ROWS_PER_LDG }; // clang-format on static_assert(VECS_PER_CTA == 4, ""); static_assert(PARAM_LDGS == 1, ""); static_assert(ROWS_PER_LDG == HEADS , ""); static_assert(LDGS == 2, ""); static_assert(LDGS * ROWS_PER_LDG == HEADS * 2, ""); static_assert(THREADS_PER_CTA * BYTES_PER_LDG == PARAM_BYTES, ""); static_assert(PARAM_LDGS == 1, ""); extern __shared__ char smem_[]; // space for CTA-wide reduction __shared__ half2 smem_red[VECS_PER_CTA][WARPS]; constexpr float rld = 1.f / (float(HEADS) * float(HEAD_SIZE)); const int32_t bidx = blockIdx.x; const int32_t tidx = threadIdx.x; const int32_t row = tidx / THREADS_PER_ROW; const int32_t col = tidx % THREADS_PER_ROW; const int32_t lane = tidx % 32; const int32_t warp = tidx / 32; const bool is_warp_lead = (lane < THREADS_PER_ROW) && ((lane & 1) == 0); const bool is_cta_lead = (tidx < THREADS_PER_ROW) && ((tidx & 1) == 0); // token position: every two threads load together the 32B at one token // position const int32_t pos = col / 2; const int32_t pos_offset = bidx * VECS_PER_CTA + pos; // for token positions per block, disabling 2 threads per pos const bool my_pred = pos_offset < total; const int32_t row_stride_bytes = total * 32; uint4 in_data[LDGS]; uint4 in_skip[LDGS]; float hdata[LDGS * 4][4]; const int32_t gmem_offset = row * row_stride_bytes + (bidx * THREADS_PER_ROW + col) * BYTES_PER_LDG; #pragma unroll for (int32_t ii = 0; ii < LDGS; ii++) { in_data[ii] = {0, 0, 0, 0}; in_skip[ii] = {0, 0, 0, 0}; if (my_pred) { ldg(input + gmem_offset + ii * ROWS_PER_LDG * row_stride_bytes, in_data[ii]); ldg(skip + gmem_offset + ii * ROWS_PER_LDG * row_stride_bytes, in_skip[ii]); } } uint4* smem_b = reinterpret_cast<uint4*>(&smem_[0]) + tidx; uint4* smem_g = reinterpret_cast<uint4*>(&smem_[PARAM_BYTES]) + tidx; const int8_t* beta_ptr = reinterpret_cast<const int8_t*>(beta) + tidx * BYTES_PER_LDG; const int8_t* gamma_ptr = reinterpret_cast<const int8_t*>(gamma) + tidx * BYTES_PER_LDG; ldg(beta_ptr, *smem_b); ldg(gamma_ptr, *smem_g); half* b = reinterpret_cast<half*>(&smem_[0]); half* g = reinterpret_cast<half*>(&smem_[PARAM_BYTES]); #pragma unroll for (int32_t ii = 0; ii < LDGS; ii++) { res_add(hdata[ii * 4 + 0], in_data[ii].x, in_skip[ii].x, dqScaleIn, dqScaleSkip); res_add(hdata[ii * 4 + 1], in_data[ii].y, in_skip[ii].y, dqScaleIn, dqScaleSkip); res_add(hdata[ii * 4 + 2], in_data[ii].z, 
in_skip[ii].z, dqScaleIn, dqScaleSkip); res_add(hdata[ii * 4 + 3], in_data[ii].w, in_skip[ii].w, dqScaleIn, dqScaleSkip); } half2 stats_local = {0, 0}; #pragma unroll for (int32_t ii = 0; ii < LDGS * 4; ii++) { #pragma unroll for (int32_t jj = 0; jj < 4; jj++) { const float tmp = hdata[ii][jj] * (rld); stats_local = stats_local + __floats2half2_rn(tmp, tmp * hdata[ii][jj]); } } stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 1); __syncwarp(); if (VECS_PER_CTA == 1) { stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 2); __syncwarp(); stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 4); __syncwarp(); } else if (VECS_PER_CTA == 2) { stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 4); __syncwarp(); } stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 8); __syncwarp(); stats_local = stats_local + __shfl_xor_sync(uint32_t(-1), stats_local, 16); __syncwarp(); if (is_warp_lead) { smem_red[pos][warp] = stats_local; } __syncthreads(); if (is_cta_lead) { for (int32_t ii = 1; ii < WARPS; ii++) { stats_local = stats_local + smem_red[pos][ii]; } float mu = __low2float(stats_local); float sos = __high2float(stats_local); float rsigma = rsqrtf(sos - mu * mu); smem_red[pos][0] = __floats2half2_rn(mu, rsigma); } __syncthreads(); // load params into smem: 2x Headsx32x2x2B const float2 statsf = __half22float2(smem_red[pos][0]); #pragma unroll for (int32_t ii = 0; ii < LDGS; ii++) { #pragma unroll for (int32_t jj = 0; jj < 4; jj++) { #pragma unroll for (int32_t kk = 0; kk < 4; kk++) { const int32_t param_idx = (ii * ROWS_PER_LDG + row) * 32 + (jj * 4 + kk) + (tidx & 1) * 16; const float bb = b[param_idx]; const float gg = g[param_idx]; hdata[ii * 4 + jj][kk] = gg * statsf.y * (hdata[ii * 4 + jj][kk] - statsf.x) + bb; } } } #pragma unroll for (int32_t ii = 0; ii < LDGS; ii++) { in_data[ii].x = pack4(hdata[ii * 4 + 0], qScale); in_data[ii].y = pack4(hdata[ii * 4 + 1], qScale); in_data[ii].z = pack4(hdata[ii * 4 + 2], qScale); in_data[ii].w = pack4(hdata[ii * 4 + 3], qScale); } #pragma unroll for (int32_t ii = 0; ii < LDGS; ii++) { if (my_pred) { stg(output + gmem_offset + ii * ROWS_PER_LDG * row_stride_bytes, in_data[ii]); } } // store } int32_t launch_large_hface(cudaStream_t stream, const int32_t ld, const int32_t total, const int8_t* input, const int8_t* skip, const half* beta, const half* gamma, int8_t* output, const float dqScaleIn, const float dqScaleSkip, const float qScale) { if (ld == 1024) { constexpr int32_t WARPS = 4; constexpr int32_t THREADS_PER_ROW = 8; constexpr int32_t HEADS = 16; constexpr int32_t PARAM_BYTES = HEADS * 64 * 2 * sizeof(half); constexpr int32_t VECS_PER_CTA = THREADS_PER_ROW / 2; const int32_t blocks = (total + VECS_PER_CTA - 1) / VECS_PER_CTA; skipln_vec32_hface<WARPS, HEADS, THREADS_PER_ROW><<<blocks, WARPS * 32, PARAM_BYTES, stream>>>( input, skip, output, beta, gamma, dqScaleIn, dqScaleSkip, qScale, total); } else if (ld == 768) { constexpr int32_t WARPS = 3; constexpr int32_t THREADS_PER_ROW = 8; constexpr int32_t HEADS = 12; constexpr int32_t PARAM_BYTES = HEADS * 64 * 2 * sizeof(half); constexpr int32_t VECS_PER_CTA = THREADS_PER_ROW / 2; const int32_t blocks = (total + VECS_PER_CTA - 1) / VECS_PER_CTA; skipln_vec32_hface<WARPS, HEADS, THREADS_PER_ROW><<<blocks, WARPS * 32, PARAM_BYTES, stream>>>( input, skip, output, beta, gamma, dqScaleIn, dqScaleSkip, qScale, total); } else { return STATUS_FAILURE; } return cudaPeekAtLastError(); } // naive kernel that only 
changes the addressing seems to be faster for small problem sizes template <int32_t TPB, int32_t VPT> __global__ void skiplnDQQ_vec3(const int32_t ld, const int8_t* input, const int8_t* skip, int8_t* output, const half* beta, const half* gamma, const float dqScaleIn, const float dqScaleSkip, const float qScale, const int32_t total) { const int32_t hinner = threadIdx.x % 4; const int32_t houter = threadIdx.x / 4; const int32_t tidx = threadIdx.x; const int32_t bidx = blockIdx.x; const int32_t idx = houter * total * 32 + bidx * 32 + hinner * VPT; // 4 * 1024 * 4 * 2 Bytes = 16KB per block int8_t in_local[VPT]; int8_t skip_local[VPT]; half in_local_dq[VPT]; // dequantized input + skip half beta_local[VPT]; half gamma_local[VPT]; // load input tensors copy<sizeof(int8_t) * VPT>(&input[idx], in_local); copy<sizeof(int8_t) * VPT>(&skip[idx], skip_local); // load parameters copy<sizeof(half) * VPT>(&beta[tidx * VPT], beta_local); copy<sizeof(half) * VPT>(&gamma[tidx * VPT], gamma_local); half2 stats_local = __floats2half2_rn(0.f, 0.f); // accumulator const half rld = half(1.f) / half(ld); #pragma unroll for (int32_t it = 0; it < VPT; it++) { // DQ input and skip const float tmp_in = in_local[it]; const float tmp_skip = skip_local[it]; in_local_dq[it] = dqScaleIn * tmp_in + dqScaleSkip * tmp_skip; const half tmp = rld * in_local_dq[it]; const half2 tmp2 = __halves2half2(tmp, tmp * in_local_dq[it]); stats_local = stats_local + tmp2; } using BlockReduce = cub::BlockReduce<half2, TPB>; __shared__ typename BlockReduce::TempStorage temp_storage; __shared__ half mu; // mean __shared__ half rsigma; // 1 / std.dev. const half2 sum2 = BlockReduce(temp_storage).Reduce(stats_local, cub::Sum()); if (tidx == 0) { mu = __low2half(sum2); rsigma = rsqrtf(__high2half(sum2) - mu * mu); } __syncthreads(); static_assert(VPT % 4 == 0, ""); uint32_t out_local[VPT/4]; #pragma unroll for (int it = 0; it < VPT / 4; it++) { const float tmp0 = gamma_local[it*4+0] * (in_local_dq[it*4+0] - mu) * rsigma + beta_local[it*4+0]; const float tmp1 = gamma_local[it*4+1] * (in_local_dq[it*4+1] - mu) * rsigma + beta_local[it*4+1]; const float tmp2 = gamma_local[it*4+2] * (in_local_dq[it*4+2] - mu) * rsigma + beta_local[it*4+2]; const float tmp3 = gamma_local[it*4+3] * (in_local_dq[it*4+3] - mu) * rsigma + beta_local[it*4+3]; out_local[it] = float4_to_char4(tmp0 * qScale, tmp1 * qScale, tmp2 * qScale, tmp3 * qScale); } copy<sizeof(int8_t) * VPT>(out_local, &output[idx]); } int launch_small_hface(cudaStream_t stream, const int32_t ld, const int32_t total, const int8_t* input, const int8_t* skip, const half* beta, const half* gamma, int8_t* output, const float dqScaleIn, const float dqScaleSkip, const float qScale) { const int32_t gridSize = total; // we align reads with the number of parameters, i.e. 8-wide instead of 16 constexpr int32_t VPT = 16 / sizeof(half); // 8 if (ld == 768) { constexpr int32_t TPB = 768 / VPT; skiplnDQQ_vec3<TPB, VPT> <<<gridSize, TPB, 0, stream>>>(ld, input, skip, output, beta, gamma, dqScaleIn, dqScaleSkip, qScale, total); } else if (ld == 1024) { constexpr int32_t TPB = 1024 / VPT; // 128 skiplnDQQ_vec3<TPB, VPT> <<<gridSize, TPB, 0, stream>>>(ld, input, skip, output, beta, gamma, dqScaleIn, dqScaleSkip, qScale, total); } else { std::cout << "SkipLayerNormDQQ - FATAL: unsupported hidden layer size: " << ld << std::endl; return STATUS_FAILURE; } return cudaPeekAtLastError(); } } // namespace bert
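// ---------------------------------------------------------------------------
// Hedged sketch (illustration only): the comment above notes that the naive
// kernel is faster for small problem sizes, so a thin dispatcher over the two
// launchers is the natural way to combine them. Both launchers only handle
// hidden sizes 768 and 1024 and report STATUS_FAILURE otherwise; the crossover
// on `total` (number of token positions) below is an assumed placeholder, not
// a tuned value -- the real plugin picks the variant from its own heuristics.
// ---------------------------------------------------------------------------
namespace bert
{

int32_t launchSkipLayerNormDQQSketch(cudaStream_t stream, const int32_t ld, const int32_t total,
    const int8_t* input, const int8_t* skip, const half* beta, const half* gamma, int8_t* output,
    const float dqScaleIn, const float dqScaleSkip, const float qScale)
{
    if (total <= 4096) // assumed crossover point
    {
        return launch_small_hface(stream, ld, total, input, skip, beta, gamma, output, dqScaleIn, dqScaleSkip, qScale);
    }
    return launch_large_hface(stream, ld, total, input, skip, beta, gamma, output, dqScaleIn, dqScaleSkip, qScale);
}

} // namespace bert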
__global__ void kerMakeFirstTetra ( Tet* tetArr, TetOpp* oppArr, char* tetInfoArr, Tet tet, int tetIdx, int infIdx ) { const Tet tets[] = { { tet._v[0], tet._v[1], tet._v[2], tet._v[3] }, { tet._v[1], tet._v[2], tet._v[3], infIdx }, { tet._v[0], tet._v[3], tet._v[2], infIdx }, { tet._v[0], tet._v[1], tet._v[3], infIdx }, { tet._v[0], tet._v[2], tet._v[1], infIdx } }; const int oppTet[][4] = { { 1, 2, 3, 4 }, { 2, 3, 4, 0 }, { 1, 4, 3, 0 }, { 1, 2, 4, 0 }, { 1, 3, 2, 0 } }; const int oppVi[][4] = { { 3, 3, 3, 3 }, { 0, 0, 0, 0 }, { 0, 2, 1, 1 }, { 1, 2, 1, 2 }, { 2, 2, 1, 3 } }; for ( int i = 0; i < 5; ++i ) { tetArr[ tetIdx + i ] = tets[ i ]; tetInfoArr[ tetIdx + i ] = 1; setTetEmptyState( tetInfoArr[ tetIdx + i ], false ); TetOpp opp = { -1, -1, -1, -1 }; for ( int j = 0; j < 4; ++j ) opp.setOpp( j, tetIdx + oppTet[i][j], oppVi[i][j] ); oppArr[ tetIdx + i ] = opp; } } __global__ void kerSplitTetra ( KerIntArray newVertVec, KerIntArray insVertVec, int* vertArr, int* vertTetArr, int* tetToVert, Tet* tetArr, TetOpp* oppArr, char* tetInfoArr, int* freeArr, int* vertFreeArr, int infIdx ) { // Iterate current tetra for ( int idx = getCurThreadIdx(); idx < newVertVec._num; idx += getThreadNum() ) { const int insIdx = newVertVec._arr[ idx ]; const int tetIdx = makePositive( vertTetArr[ insIdx ] ); const int splitVertex = vertArr[ insIdx ]; const int newIdx = ( splitVertex + 1 ) * MeanVertDegree - 1; const int newTetIdx[4] = { freeArr[ newIdx ], freeArr[ newIdx - 1 ], freeArr[ newIdx - 2 ], freeArr[ newIdx - 3 ] }; // Update vertFree, 4 has been used. vertFreeArr[ splitVertex ] -= 4; // Create new tets const TetOpp oldOpp = loadOpp( oppArr, tetIdx ); const Tet tet = loadTet( tetArr, tetIdx ); // Note: This slot will be overwritten below for ( int vi = 0; vi < 4; ++vi ) { TetOpp newOpp = { -1, -1, -1, -1 }; // Set internal adjacency newOpp.setOppInternal( 0, newTetIdx[ IntSplitFaceOpp[vi][0] ], IntSplitFaceOpp[vi][1] ); newOpp.setOppInternal( 1, newTetIdx[ IntSplitFaceOpp[vi][2] ], IntSplitFaceOpp[vi][3] ); newOpp.setOppInternal( 2, newTetIdx[ IntSplitFaceOpp[vi][4] ], IntSplitFaceOpp[vi][5] ); // Set external adjacency //if ( -1 != oldOpp._t[ vi ] ) { int neiTetIdx = oldOpp.getOppTet( vi ); int neiTetVi = oldOpp.getOppVi( vi ); // Check if neighbour has split const int neiSplitIdx = tetToVert[ neiTetIdx ]; if ( neiSplitIdx == INT_MAX ) // Neighbour is un-split { oppArr[ neiTetIdx ].setOpp( neiTetVi, newTetIdx[ vi ], 3 ); // Point un-split neighbour back to this new tetra } else // Neighbour has split { // Get neighbour's new split tetra that has this face const int neiSplitVert = vertArr[ neiSplitIdx ]; const int neiFreeIdx = ( neiSplitVert + 1 ) * MeanVertDegree - 1; neiTetIdx = freeArr[ neiFreeIdx - neiTetVi ]; neiTetVi = 3; } newOpp.setOpp( 3, neiTetIdx, neiTetVi ); // Point this tetra to neighbour } // Write split tetra and opp const Tet newTet = { tet._v[ TetViAsSeenFrom[vi][0] ], tet._v[ TetViAsSeenFrom[vi][1] ], tet._v[ TetViAsSeenFrom[vi][2] ], splitVertex }; const int toTetIdx = newTetIdx[ vi ]; storeTet( tetArr, toTetIdx, newTet ); storeOpp( oppArr, toTetIdx, newOpp ); setTetAliveState( tetInfoArr[ toTetIdx ], true ); setTetCheckState( tetInfoArr[ toTetIdx ], Changed ); } //*** Donate one tetra const int blkIdx = tetIdx / MeanVertDegree; const int vertIdx = ( blkIdx < insVertVec._num ) ? 
insVertVec._arr[ blkIdx ] : infIdx; const int freeIdx = atomicAdd( &vertFreeArr[ vertIdx ], 1 ); freeArr[ vertIdx * MeanVertDegree + freeIdx ] = tetIdx; setTetAliveState( tetInfoArr[ tetIdx ], false ); } return; } // Note: tetVoteArr should *not* be modified here __global__ void kerMarkRejectedFlips ( KerIntArray actTetVec, TetOpp* oppArr, int* tetVoteArr, char* tetInfoArr, int* voteArr, int* flipToTet, int* counterArr, int voteOffset ) { __shared__ int s_flipNum, s_flipOffset; int actTetNumRounded = actTetVec._num; if ( flipToTet != voteArr ) // Compact flips { if ( threadIdx.x == 0 ) s_flipNum = 0; actTetNumRounded = roundUp( actTetVec._num, blockDim.x ); __syncthreads(); } for ( int idx = getCurThreadIdx(); idx < actTetNumRounded; idx += getThreadNum() ) { int flipVal = -1; const int tetIdx = ( idx < actTetVec._num ) ? actTetVec._arr[ idx ] : -1; if ( tetIdx != -1 ) { flipVal = voteArr[ idx ]; if ( flipVal == -1 ) // No flip from this tet { setTetCheckState( tetInfoArr[ tetIdx ], Checked ); actTetVec._arr[ idx ] = -1; } else { const int voteVal = voteOffset + tetIdx; const int botVoteVal = tetVoteArr[ tetIdx ]; if ( botVoteVal != voteVal ) flipVal = -1; else { const char flipInfo = getVoteFlipInfo( flipVal ); const FlipType fType = getFlipType( flipInfo ); const TetOpp& opp = oppArr[ tetIdx ]; const int botVi = getFlipBotVi( flipInfo ); // Top const int topTetIdx = opp.getOppTet( botVi ); const int topVoteVal = tetVoteArr[ topTetIdx ]; if ( topVoteVal != voteVal ) flipVal = -1; else if ( Flip32 == fType ) { // Corner const int* ordVi = TetViAsSeenFrom[ botVi ]; const int corOrdVi = getFlipBotCorOrdVi( flipInfo ); const int botCorVi = ordVi[ corOrdVi ]; // Side const int sideTetIdx = opp.getOppTet( botCorVi ); const int sideVoteVal = tetVoteArr[ sideTetIdx ]; if ( sideVoteVal != voteVal ) flipVal = -1; } } if ( flipVal == -1 ) voteArr[ idx ] = -1; } } if ( flipToTet != voteArr ) // Compact flips { int flipLocIdx = ( flipVal == -1 ? -1 : atomicAdd( &s_flipNum, 1 ) ); __syncthreads(); if ( s_flipNum > 0 ) { if ( threadIdx.x == 0 ) s_flipOffset = atomicAdd( &counterArr[ CounterFlip ], s_flipNum ); __syncthreads(); if ( flipLocIdx != -1 ) flipToTet[ s_flipOffset + flipLocIdx ] = flipVal; if ( threadIdx.x == 0 ) s_flipNum = 0; __syncthreads(); } } } if ( blockIdx.x == 0 && THREAD_IDX == 0 ) { counterArr[ CounterExact ] = 0; } return; } __global__ void kerPickWinnerPoint ( KerIntArray vertexArr, int* vertexTetArr, int* vertSphereArr, int* tetSphereArr, int* tetVertArr ) { // Iterate uninserted points for ( int idx = getCurThreadIdx(); idx < vertexArr._num; idx += getThreadNum() ) { const int vertSVal = vertSphereArr[ idx ]; const int tetIdx = vertexTetArr[ idx ]; const int winSVal = tetSphereArr[ tetIdx ]; // Check if vertex is winner if ( winSVal == vertSVal ) atomicMin( &tetVertArr[ tetIdx ], idx ); } return; } //////////////// Some helper functions used in flipping and updating opps. 
__forceinline__ __device__ void setTetIdxVi( int &output, int oldVi, int ni, int newVi ) { output -= ( 0xF ) << ( oldVi * 4 ); output += ( ( ni << 2) + newVi ) << ( oldVi * 4 ); } __forceinline__ __device__ int getTetIdx( int input, int oldVi ) { int idxVi = ( input >> ( oldVi * 4 ) ) & 0xf; return ( idxVi >> 2 ) & 0x3; } __forceinline__ __device__ int getTetVi( int input, int oldVi ) { int idxVi = ( input >> ( oldVi * 4 ) ) & 0xf; return idxVi & 0x3; } __global__ void kerFlip ( KerIntArray flipToTet, Tet* tetArr, TetOpp* oppArr, char* tetInfoArr, int2* tetMsgArr, FlipItem* flipArr, int* flip23NewSlot, int* vertFreeArr, int* freeArr, int* actTetArr, KerIntArray insVertVec, int infIdx, int orgFlipNum ) { // Iterate flips for ( int flipIdx = getCurThreadIdx(); flipIdx < flipToTet._num; flipIdx += getThreadNum() ) { const int voteVal = flipToTet._arr[ flipIdx ]; const char flipInfo = getVoteFlipInfo( voteVal ); const FlipType fType = getFlipType( flipInfo ); CudaAssert( FlipNone != fType ); // Bottom tetra const int botTetIdx = getVoteTetIdx( voteVal ); const int botCorOrdVi = getFlipBotCorOrdVi( flipInfo ); const int botTetVi = getFlipBotVi( flipInfo ); Tet botTet = loadTet( tetArr, botTetIdx ); const TetOpp& botOpp = loadOpp( oppArr, botTetIdx ); // Top tetra const int topTetIdx = botOpp.getOppTet( botTetVi ); const int topTetVi = botOpp.getOppVi( botTetVi ); Tet topTet = loadTet( tetArr, topTetIdx ); const int globFlipIdx = orgFlipNum + flipIdx; int encodedFaceVi = 0; int sideTetIdx; const int* corVi = TetViAsSeenFrom[ botTetVi ]; const int* corTopVi = TetViAsSeenFrom[ topTetVi ]; int newIdx[3] = { 0xFFFF, 0xFFFF, 0xFFFF }; if ( Flip23 == fType ) { const int corV[3] = { botTet._v[ corVi[0] ], botTet._v[ corVi[1] ], botTet._v[ corVi[2] ] }; int topVi0 = TetNextViAsSeenFrom[ topTetVi ][ topTet.getIndexOf( corV[ 2 ] ) ]; const int oldFaceVi[3][2] = { { corVi[ 2 ], corTopVi[ topVi0 ] }, // Old bottom tetra { corVi[ 0 ], corTopVi[ ( topVi0 + 2 ) % 3 ] }, // Old top tetra { corVi[ 1 ], corTopVi[ ( topVi0 + 1 ) % 3 ] } // Old side tetra }; // Iterate new tetra for ( int ni = 0; ni < 3; ++ni ) { // Set external face adjacencies setTetIdxVi( newIdx[ 0 ], oldFaceVi[ ni ][ 0 ], ni == 0 ? 3 : ni, 0 ); setTetIdxVi( newIdx[ 1 ], oldFaceVi[ ni ][ 1 ], ni == 1 ? 
3 : ni, 3 ); encodedFaceVi = ( encodedFaceVi << 4 ) | ( oldFaceVi[ ni ][ 0 ] << 2 ) | ( oldFaceVi[ ni ][ 1 ] ); } // 3 new tetra // Create new tetra const int topVert = topTet._v[ topTetVi ]; const int botVert = botTet._v[ botTetVi ]; botTet = makeTet( topVert, corV[0], corV[1], botVert ); topTet = makeTet( topVert, corV[1], corV[2], botVert ); // Output the side tetra const Tet sideTet = makeTet( topVert, corV[2], corV[0], botVert ); sideTetIdx = flip23NewSlot[ flipIdx ]; storeTet( tetArr, sideTetIdx, sideTet ); } else { // Side tetra const int botCorVi = corVi[ botCorOrdVi ]; sideTetIdx = botOpp.getOppTet( botCorVi ); const int sideCorVi0 = botOpp.getOppVi( botCorVi ); // BotVi const int botAVi = corVi[ ( botCorOrdVi + 1 ) % 3 ]; const int botBVi = corVi[ ( botCorOrdVi + 2 ) % 3 ]; const int botA = botTet._v[ botAVi ]; const int botB = botTet._v[ botBVi ]; // Top vi const int botCor = botTet._v[ botCorVi ]; const int topCorVi = topTet.getIndexOf( botCor ); const int topLocVi = TetNextViAsSeenFrom[ topTetVi ][ topCorVi ]; const int topAVi = corTopVi[ ( topLocVi + 2 ) % 3 ]; const int topBVi = corTopVi[ ( topLocVi + 1 ) % 3 ]; // Side vi const int sideCorVi1 = oppArr[ topTetIdx ].getOppVi( topCorVi ); const int sideLocVi = TetNextViAsSeenFrom[ sideCorVi0 ][ sideCorVi1 ]; const int* sideOrdVi = TetViAsSeenFrom[ sideCorVi0 ]; const int sideAVi = sideOrdVi[ ( sideLocVi + 1 ) % 3 ]; const int sideBVi = sideOrdVi[ ( sideLocVi + 2 ) % 3 ]; const int oldFaceVi[3][2] = { { botAVi, botBVi }, // Old bottom tetra { topAVi, topBVi }, // Old top tetra { sideAVi, sideBVi } // Old side tetra }; // Set external face adjacencies for ( int ti = 0; ti < 3; ++ti ) // Iterate old tetra { setTetIdxVi( newIdx[ ti ], oldFaceVi[ti][0], 1 == ti ? 3 : 1, Flip32NewFaceVi[ti][0] ); setTetIdxVi( newIdx[ ti ], oldFaceVi[ti][1], 0 == ti ? 3 : 0, Flip32NewFaceVi[ti][1] ); encodedFaceVi = ( encodedFaceVi << 4 ) | ( oldFaceVi[ ti ][ 0 ] << 2 ) | ( oldFaceVi[ ti ][ 1 ] ); } // Write down the new tetra idx tetMsgArr[ sideTetIdx ] = make_int2( newIdx[ 2 ], globFlipIdx ); // Vertices of old 3 tetra const int botTetV = botTet._v[ botTetVi ]; const int topTetV = topTet._v[ topTetVi ]; botTet = makeTet( botCor, topTetV, botTetV, botA ); topTet = makeTet( botCor, botTetV, topTetV, botB ); //*** Donate one tetra const int insIdx = sideTetIdx / MeanVertDegree; const int vertIdx = ( insIdx < insVertVec._num ) ? insVertVec._arr[ insIdx ] : infIdx; const int freeIdx = atomicAdd( &vertFreeArr[ vertIdx ], 1 ); freeArr[ vertIdx * MeanVertDegree + freeIdx ] = sideTetIdx; } // Write down the new tetra idx tetMsgArr[ botTetIdx ] = make_int2( newIdx[ 0 ], globFlipIdx ); tetMsgArr[ topTetIdx ] = make_int2( newIdx[ 1 ], globFlipIdx ); // Update the bottom and top tetra storeTet( tetArr, botTetIdx, botTet ); storeTet( tetArr, topTetIdx, topTet ); // Store faceVi flip23NewSlot[ flipIdx ] = encodedFaceVi; bool tetEmpty = isTetEmpty( tetInfoArr[ botTetIdx ] ) && isTetEmpty( tetInfoArr[ topTetIdx ] ); if ( fType == Flip32 && tetEmpty ) tetEmpty = isTetEmpty( tetInfoArr[ sideTetIdx ] ); // Record the flip FlipItem flipItem = { botTet._v[0], botTet._v[1], botTet._v[2], botTet._v[3], topTet._v[ fType == Flip23 ? 2 : 3 ], botTetIdx, topTetIdx, ( fType == Flip32 ) ? makeNegative( sideTetIdx ) : sideTetIdx }; if ( tetEmpty ) flipItem._v[ 0 ] = -1; if ( actTetArr != NULL ) { actTetArr[ flipIdx ] = ( Checked == getTetCheckState( tetInfoArr[ topTetIdx ] ) ) ? topTetIdx : -1; actTetArr[ flipToTet._num + flipIdx ] = ( fType == Flip23 ) ? 
sideTetIdx : -1; } char botTetState = 3; // Alive + Changed char topTetState = 3; char sideTetState = 3; setTetEmptyState( botTetState, tetEmpty ); setTetEmptyState( topTetState, tetEmpty ); if ( fType == Flip23 ) setTetEmptyState( sideTetState, tetEmpty ); else setTetAliveState( sideTetState, false ); tetInfoArr[ botTetIdx ] = botTetState; tetInfoArr[ topTetIdx ] = topTetState; tetInfoArr[ sideTetIdx ] = sideTetState; storeFlip( flipArr, globFlipIdx, flipItem ); } return; } __global__ void kerUpdateOpp ( FlipItem* flipVec, TetOpp* oppArr, int2* tetMsgArr, int* encodedFaceViArr, int orgFlipNum, int flipNum ) { // Iterate flips for ( int flipIdx = getCurThreadIdx(); flipIdx < flipNum; flipIdx += getThreadNum() ) { FlipItemTetIdx flipItem = loadFlipTetIdx( flipVec, flipIdx ); FlipType fType = ( flipItem._t[ 2 ] < 0 ) ? Flip32 : Flip23; int encodedFaceVi = encodedFaceViArr[ flipIdx ]; int extOpp[6]; TetOpp opp; opp = loadOpp( oppArr, flipItem._t[ 0 ] ); if ( Flip23 == fType ) { extOpp[ 0 ] = opp.getOppTetVi( ( encodedFaceVi >> 10 ) & 3 ); extOpp[ 2 ] = opp.getOppTetVi( ( encodedFaceVi >> 6 ) & 3 ); extOpp[ 4 ] = opp.getOppTetVi( ( encodedFaceVi >> 2 ) & 3 ); } else { extOpp[ 0 ] = opp.getOppTetVi( ( encodedFaceVi >> 10 ) & 3 ); extOpp[ 1 ] = opp.getOppTetVi( ( encodedFaceVi >> 8 ) & 3 ); } opp = loadOpp( oppArr, flipItem._t[ 1 ] ); if ( Flip23 == fType ) { extOpp[ 1 ] = opp.getOppTetVi( ( encodedFaceVi >> 8 ) & 3 ); extOpp[ 3 ] = opp.getOppTetVi( ( encodedFaceVi >> 4 ) & 3 ); extOpp[ 5 ] = opp.getOppTetVi( ( encodedFaceVi >> 0 ) & 3 ); } else { extOpp[ 2 ] = opp.getOppTetVi( ( encodedFaceVi >> 6 ) & 3 ); extOpp[ 3 ] = opp.getOppTetVi( ( encodedFaceVi >> 4 ) & 3 ); opp = loadOpp( oppArr, makePositive( flipItem._t[ 2 ] ) ); extOpp[ 4 ] = opp.getOppTetVi( ( encodedFaceVi >> 2 ) & 3 ); extOpp[ 5 ] = opp.getOppTetVi( ( encodedFaceVi >> 0 ) & 3 ); } // Ok, update with neighbors for ( int i = 0; i < 6; ++i ) { int newTetIdx, vi; int tetOpp = extOpp[ i ]; // No neighbor //if ( -1 == tetOpp ) continue; int oppIdx = getOppValTet( tetOpp ); int oppVi = getOppValVi( tetOpp ); const int2 msg = tetMsgArr[ oppIdx ]; if ( msg.y < orgFlipNum ) // Neighbor not flipped { // Set my neighbor's opp if ( fType == Flip23 ) { newTetIdx = flipItem._t[ i / 2 ]; vi = ( i & 1 ) ? 
3 : 0; } else { newTetIdx = flipItem._t[ 1 - ( i & 1 ) ]; vi = Flip32NewFaceVi[ i / 2 ][ i & 1 ]; } oppArr[ oppIdx ].setOpp( oppVi, newTetIdx, vi ); } else { const int oppFlipIdx = msg.y - orgFlipNum; // Update my own opp const int newLocOppIdx = getTetIdx( msg.x, oppVi ); if ( newLocOppIdx != 3 ) oppIdx = flipVec[ oppFlipIdx ]._t[ newLocOppIdx ]; oppVi = getTetVi( msg.x, oppVi ); extOpp[ i ] = makeOppVal( oppIdx, oppVi ); } } // Now output if ( Flip23 == fType ) { opp._t[ 0 ] = extOpp[ 0 ]; opp.setOppInternal( 1, flipItem._t[ 1 ], 2 ); opp.setOppInternal( 2, flipItem._t[ 2 ], 1 ); opp._t[ 3 ] = extOpp[ 1 ]; } else { opp._t[ 1 ] = extOpp[ 1 ]; opp._t[ 2 ] = extOpp[ 3 ]; opp._t[ 0 ] = extOpp[ 5 ]; opp.setOppInternal( 3, flipItem._t[ 1 ], 3 ); } storeOpp( oppArr, flipItem._t[ 0 ], opp ); if ( Flip23 == fType ) { opp._t[ 0 ] = extOpp[ 2 ]; opp.setOppInternal( 2, flipItem._t[ 0 ], 1 ); opp.setOppInternal( 1, flipItem._t[ 2 ], 2 ); opp._t[ 3 ] = extOpp[ 3 ]; } else { opp._t[ 2 ] = extOpp[ 0 ]; opp._t[ 1 ] = extOpp[ 2 ]; opp._t[ 0 ] = extOpp[ 4 ]; opp.setOppInternal( 3, flipItem._t[ 0 ], 3 ); } storeOpp( oppArr, flipItem._t[ 1 ], opp ); if ( Flip23 == fType ) { opp._t[ 0 ] = extOpp[ 4 ]; opp.setOppInternal( 1, flipItem._t[ 0 ], 2 ); opp.setOppInternal( 2, flipItem._t[ 1 ], 1 ); opp._t[ 3 ] = extOpp[ 5 ]; storeOpp( oppArr, flipItem._t[ 2 ], opp ); } } return; } __global__ void kerGatherFailedVerts ( KerTetArray tetVec, TetOpp* tetOppArr, int* failVertArr, int* vertTetArr ) { for ( int tetIdx = getCurThreadIdx(); tetIdx < tetVec._num; tetIdx += getThreadNum() ) { const TetOpp tetOpp = loadOpp( tetOppArr, tetIdx ); int failVi = -1; int win = 0; // Get out immediately if > 1 failures for ( int vi = 0; vi < 4; ++vi ) { if ( tetOpp.getOppTet( vi ) < tetIdx ) win |= ( 1 << vi ); if ( !tetOpp.isOppSphereFail( vi ) ) continue; failVi = ( -1 == failVi ) ? vi : 4; } const Tet tet = loadTet( tetVec._arr, tetIdx ); // Write for ( int vi = 0; vi < 4; ++vi ) { int vert = tet._v[ vi ]; if ( -1 != failVi && vi != failVi ) failVertArr[ vert ] = vert; if ( ( win | ( 1 << vi ) ) == 0x0F ) vertTetArr[ vert ] = tetIdx; } } return; } __global__ void kerUpdateFlipTrace ( FlipItem* flipArr, int* tetToFlip, int orgFlipNum, int flipNum ) { for ( int idx = getCurThreadIdx(); idx < flipNum; idx += getThreadNum() ) { const int flipIdx = orgFlipNum + idx; FlipItem flipItem = loadFlip( flipArr, flipIdx ); if ( flipItem._v[ 0 ] == -1 ) // All tets are empty, no need to trace continue; int tetIdx, nextFlip; tetIdx = flipItem._t[ 0 ]; nextFlip = tetToFlip[ tetIdx ]; flipItem._t[ 0 ] = ( nextFlip == -1 ) ? ( tetIdx << 1 ) | 0 : nextFlip; tetToFlip[ tetIdx ] = ( flipIdx << 1 ) | 1; tetIdx = flipItem._t[ 1 ]; nextFlip = tetToFlip[ tetIdx ]; flipItem._t[ 1 ] = ( nextFlip == -1 ) ? ( tetIdx << 1 ) | 0 : nextFlip; tetToFlip[ tetIdx ] = ( flipIdx << 1 ) | 1; tetIdx = flipItem._t[ 2 ]; if ( tetIdx < 0 ) { tetIdx = makePositive( tetIdx ); tetToFlip[ tetIdx ] = ( flipIdx << 1 ) | 1; } else { nextFlip = tetToFlip[ tetIdx ]; flipItem._t[ 2 ] = ( nextFlip == -1 ) ? 
( tetIdx << 1 ) | 0 : nextFlip; } storeFlip( flipArr, flipIdx, flipItem ); } } __global__ void kerMarkTetEmpty ( KerCharArray tetInfoVec ) { for ( int idx = getCurThreadIdx(); idx < tetInfoVec._num; idx += getThreadNum() ) setTetEmptyState( tetInfoVec._arr[ idx ], true ); } __global__ void kerUpdateVertIdx ( KerTetArray tetVec, int* orgPointIdx ) { for ( int idx = getCurThreadIdx(); idx < tetVec._num; idx += getThreadNum() ) { Tet tet = loadTet( tetVec._arr, idx ); for ( int i = 0; i < 4; ++i ) tet._v[ i ] = orgPointIdx[ tet._v[i] ]; storeTet( tetVec._arr, idx, tet ); } } __global__ void kerMakeReverseMap ( KerIntArray insVertVec, int* scatterArr, int* revMapArr, int num ) { for ( int idx = getCurThreadIdx(); idx < insVertVec._num; idx += getThreadNum() ) { const int oldIdx = scatterArr[ insVertVec._arr[ idx ] ]; if ( oldIdx < num ) revMapArr[ oldIdx ] = idx; } } __global__ void kerMarkSpecialTets ( KerCharArray tetInfoVec, TetOpp* oppArr ) { for ( int idx = getCurThreadIdx(); idx < tetInfoVec._num; idx += getThreadNum() ) { if ( !isTetAlive( tetInfoVec._arr[ idx ] ) ) continue; TetOpp opp = loadOpp( oppArr, idx ); bool changed = false; for ( int vi = 0; vi < 4; ++vi ) { //if ( -1 == opp._t[ vi ] ) continue; if ( opp.isOppSpecial( vi ) ) { changed = true; opp.setOppSpecial( vi, false ); } //else // opp.setOppInternal( vi ); // BUG: Non-Delaunay facets are set to be Internal! } if ( changed ) { setTetCheckState( tetInfoVec._arr[ idx ], Changed ); storeOpp( oppArr, idx, opp ); } } } __global__ void kerNegateInsertedVerts ( KerIntArray vertTetVec, int* tetToVert ) { for ( int idx = getCurThreadIdx(); idx < vertTetVec._num; idx += getThreadNum() ) { const int tetIdx = vertTetVec._arr[ idx ]; if ( tetToVert[ tetIdx ] == idx ) vertTetVec._arr[ idx ] = makeNegative( tetIdx ); } } __global__ void kerAllocateFlip23Slot ( KerIntArray flipToTet, Tet* tetArr, int* vertFreeArr, int* freeArr, int* flip23NewSlot, int infIdx, int tetNum ) { // Iterate flips for ( int flipIdx = getCurThreadIdx(); flipIdx < flipToTet._num; flipIdx += getThreadNum() ) { const int voteVal = flipToTet._arr[ flipIdx ]; const int botTetIdx = getVoteTetIdx( voteVal ); const char flipInfo = getVoteFlipInfo( voteVal ); const FlipType fType = getFlipType( flipInfo ); if ( fType != Flip23 ) continue; // Bottom tetra Tet botTet = loadTet( tetArr, botTetIdx ); // Try to put the new tets near one of the vertices // of the botTet. Not perfect since the new tet is not this one, // but at least 3 out of 4 vertices are the same. // Ideally: Also look at the opp vertex. But probably too expensive! int freeIdx = -1; for ( int vi = 0; vi < 4; ++vi ) { int vert = botTet._v[ vi ]; if ( vert >= infIdx ) continue; if ( vertFreeArr[ vert ] > 0 ) { const int locIdx = atomicSub( &vertFreeArr[ vert ], 1 ) - 1; if ( locIdx >= 0 ) { freeIdx = freeArr[ vert * MeanVertDegree + locIdx ]; break; } vertFreeArr[ vert ] = 0; //atomicExch( &vertFreeArr[ vert ], 0 ); } } if ( freeIdx == -1 ) // Still no free slot? 
{ const int locIdx = atomicSub( &vertFreeArr[ infIdx ], 1 ) - 1; if ( locIdx >= 0 ) freeIdx = freeArr[ infIdx * MeanVertDegree + locIdx ]; else // Gotta expand freeIdx = tetNum - locIdx - 1; } flip23NewSlot[ flipIdx ] = freeIdx; } } __global__ void kerUpdateBlockVertFreeList ( KerIntArray insTetVec, int* vertFreeArr, int* freeArr, int* scatterMap, int oldInsNum ) { int freeNum = insTetVec._num * MeanVertDegree; for ( int idx = getCurThreadIdx(); idx < freeNum; idx += getThreadNum() ) { int insIdx = idx / MeanVertDegree; int locIdx = idx % MeanVertDegree; int vert = insTetVec._arr[ insIdx ]; int freeIdx = vert * MeanVertDegree + locIdx; int newIdx; if ( scatterMap[ vert ] >= oldInsNum ) // New vert { newIdx = idx; // Update free size for new vert if ( locIdx == 0 ) vertFreeArr[ vert ] = MeanVertDegree; } else newIdx = idx - locIdx + freeArr[ freeIdx ] % MeanVertDegree; freeArr[ freeIdx ] = newIdx; } } __global__ void kerShiftInfFreeIdx ( int* vertFreeArr, int* freeArr, int infIdx, int start, int shift ) { int freeNum = vertFreeArr[ infIdx ]; int freeBeg = infIdx * MeanVertDegree; for ( int idx = getCurThreadIdx(); idx < freeNum; idx += getThreadNum() ) { const int tetIdx = freeArr[ freeBeg + idx ]; CudaAssert( tetIdx >= start ); freeArr[ freeBeg + idx ] = tetIdx + shift; } } __global__ void kerUpdateBlockOppTetIdx ( TetOpp* oppArr, int* orderArr, int oldInfBlockIdx, int newInfBlockIdx, int oldTetNum ) { for ( int idx = getCurThreadIdx(); idx < oldTetNum; idx += getThreadNum() ) { TetOpp opp = loadOpp( oppArr, idx ); for ( int i = 0; i < 4; ++i ) { int tetIdx = opp.getOppTet( i ); if ( tetIdx < 0 ) continue; if ( tetIdx < oldInfBlockIdx ) { int insIdx = tetIdx / MeanVertDegree; int locIdx = tetIdx % MeanVertDegree; opp.setOppTet( i, orderArr[ insIdx ] * MeanVertDegree + locIdx ); } else opp.setOppTet( i, tetIdx - oldInfBlockIdx + newInfBlockIdx ); } storeOpp( oppArr, idx, opp ); } } __global__ void kerUpdateTetIdx ( KerIntArray idxVec, int* orderArr, int oldInfBlockIdx, int newInfBlockIdx ) { for ( int idx = getCurThreadIdx(); idx < idxVec._num; idx += getThreadNum() ) { int tetIdx = idxVec._arr[ idx ]; int posTetIdx = ( tetIdx < 0 ? makePositive( tetIdx ) : tetIdx ); if ( posTetIdx < oldInfBlockIdx ) { int insIdx = posTetIdx / MeanVertDegree; int locIdx = posTetIdx % MeanVertDegree; posTetIdx = orderArr[ insIdx ] * MeanVertDegree + locIdx; } else posTetIdx = posTetIdx - oldInfBlockIdx + newInfBlockIdx; idxVec._arr[ idx ] = ( tetIdx < 0 ? 
makeNegative( posTetIdx ) : posTetIdx ); } } __global__ void kerShiftOppTetIdx ( TetOpp* oppArr, int tetNum, int start, int shift ) { for ( int idx = getCurThreadIdx(); idx < tetNum; idx += getThreadNum() ) { TetOpp opp = loadOpp( oppArr, idx ); for ( int i = 0; i < 4; ++i ) { if ( opp._t[ i ] < 0 ) continue; const int oppIdx = opp.getOppTet( i ); if ( oppIdx >= start ) opp.setOppTet( i, oppIdx + shift ); } storeOpp( oppArr, idx, opp ); } } __global__ void kerShiftTetIdx ( KerIntArray idxVec, int start, int shift ) { int negStart = makeNegative( start ); for ( int idx = getCurThreadIdx(); idx < idxVec._num; idx += getThreadNum() ) { const int oldIdx = idxVec._arr[ idx ]; if ( oldIdx >= start ) idxVec._arr[ idx ] = oldIdx + shift; if ( oldIdx <= negStart ) idxVec._arr[ idx ] = oldIdx - shift; } } __global__ void kerUpdateVertFreeList ( KerIntArray insTetVec, int* vertFreeArr, int* freeArr, int startFreeIdx ) { int newFreeNum = insTetVec._num * MeanVertDegree; for ( int idx = getCurThreadIdx(); idx < newFreeNum; idx += getThreadNum() ) { int insIdx = idx / MeanVertDegree; int locIdx = idx % MeanVertDegree; int vertIdx = insTetVec._arr[ insIdx ]; freeArr[ vertIdx * MeanVertDegree + locIdx ] = startFreeIdx + idx; // Update free size for new vert if ( idx < insTetVec._num ) vertFreeArr[ insTetVec._arr[ idx ] ] = MeanVertDegree; } } __global__ void kerCollectFreeSlots ( char* tetInfoArr, int* prefixArr, int* freeArr, int newTetNum ) { for ( int idx = getCurThreadIdx(); idx < newTetNum; idx += getThreadNum() ) { if ( isTetAlive( tetInfoArr[ idx ] ) ) continue; int freeIdx = idx - prefixArr[ idx ]; freeArr[ freeIdx ] = idx; } } __global__ void kerMakeCompactMap ( KerCharArray tetInfoVec, int* prefixArr, int* freeArr, int newTetNum ) { for ( int idx = newTetNum + getCurThreadIdx(); idx < tetInfoVec._num; idx += getThreadNum() ) { if ( !isTetAlive( tetInfoVec._arr[ idx ] ) ) continue; int freeIdx = newTetNum - prefixArr[ idx ]; int newTetIdx = freeArr[ freeIdx ]; prefixArr[ idx ] = newTetIdx; } } __global__ void kerCompactTets ( KerCharArray tetInfoVec, int* prefixArr, Tet* tetArr, TetOpp* oppArr, int newTetNum ) { for ( int idx = newTetNum + getCurThreadIdx(); idx < tetInfoVec._num; idx += getThreadNum() ) { if ( !isTetAlive( tetInfoVec._arr[ idx ] ) ) continue; int newTetIdx = prefixArr[ idx ]; Tet tet = loadTet( tetArr, idx ); storeTet( tetArr, newTetIdx, tet ); TetOpp opp = loadOpp( oppArr, idx ); for ( int vi = 0; vi < 4; ++vi ) { if ( opp._t[ vi ] < 0 ) continue; const int oppIdx = opp.getOppTet( vi ); if ( oppIdx >= newTetNum ) { const int oppNewIdx = prefixArr[ oppIdx ]; opp.setOppTet( vi, oppNewIdx ); } else { const int oppVi = opp.getOppVi( vi ); oppArr[ oppIdx ].setOppTet( oppVi, newTetIdx ); } } storeOpp( oppArr, newTetIdx, opp ); } }
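// ---------------------------------------------------------------------------
// Hedged sketch (illustration only, not part of gDel3D): every kernel in this
// file walks its input with the same grid-stride pattern built on
// getCurThreadIdx()/getThreadNum(), so the launch configuration does not have
// to match the element count. The block/grid sizes below are illustrative
// placeholders, and the error check assumes <cstdio> is available; the
// original host code uses its own configuration and error-handling helpers.
// ---------------------------------------------------------------------------
void launchMarkTetEmptySketch( KerCharArray tetInfoVec )
{
    const int ThreadNum = 256;  // threads per block (assumed)
    const int BlockNum  = 512;  // grid size; the grid-stride loop covers any _num

    kerMarkTetEmpty<<< BlockNum, ThreadNum >>>( tetInfoVec );

    const cudaError_t err = cudaGetLastError();
    if ( cudaSuccess != err )
        printf( "kerMarkTetEmpty launch failed: %s\n", cudaGetErrorString( err ) );
}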
#include "io.h" #include "utilities/types.h" #include "utilities/boundaryCondition.h" /** * \brief Converts a string to a number. * * \param str a string * * \return a number (\c real or \c integer) */ template <typename T> T toNumber(std::string str) { T num; std::stringstream ss(str); //turn the string into a stream ss >> num; //convert return num; } // toNumber /** * \namespace io * \brief Contains functions related to I/O tasks. */ namespace io { /** * \brief Splits a string given a delimiter. * * \param s the string to split * \param delim the delimiter * \param elems the vector that contains the different elements of the string * * \return a vector that contains the different elements of the string */ std::vector<std::string> &split(const std::string &s, char delim, std::vector<std::string> &elems) { std::stringstream ss(s); std::string item; while (std::getline(ss, item, delim)) { elems.push_back(item); } return elems; } // split /** * \brief Splits a string given a delimiter. * * \param s the string to split * \param delim the delimiter * * \return a vector that contains the different elements of the string */ std::vector<std::string> split(const std::string &s, char delim) { std::vector<std::string> elems; split(s, delim, elems); return elems; } // split /** * \brief Reads data inputs from the command-line and the simulation files. * * \param argc number of arguments in the command-line * \param argv command-line arguments * \param DB database that contains all the simulation parameters * \param D object of the class \c domain that contains the computational grid */ void readInputs(int argc, char **argv, parameterDB &DB, domain &D) { // get a default database initialiseDefaultDB(DB); // first pass of command line arguments commandLineParse1(argc, argv, DB); // case folder std::string folder = DB["inputs"]["caseFolder"].get<std::string>(); std::string fname; // read the simulation file fname = folder + "/simParams.yaml"; parseSimulationFile(fname, DB); // read the flow file fname = folder + "/flow.yaml"; parseFlowFile(fname, DB); // read the domain file fname = folder + "/domain.yaml"; parseDomainFile(fname, D); // read the body file fname = folder + "/bodies.yaml"; parseBodiesFile(fname, DB); // second pass of command line -- overwrite values in DB commandLineParse2(argc, argv, DB); } // readInputs /** * \brief Initializes the database with default values. 
* * \param DB database that contains all the simulation parameters */ void initialiseDefaultDB(parameterDB &DB) { DB["inputs"] = componentParameter(); DB["flow"] = componentParameter(); DB["simulation"] = componentParameter(); DB["velocitySolve"] = componentParameter(); DB["PoissonSolve"] = componentParameter(); // default input files std::string inputs = "inputs"; DB[inputs]["caseFolder"].set<std::string>("."); DB[inputs]["deviceNumber"].set<int>(0); // flow parameters std::string flow = "flow"; DB[flow]["nu"].set<real>(0.01); DB[flow]["uInitial"].set<real>(1.0); DB[flow]["vInitial"].set<real>(0.0); DB[flow]["numBodies"].set<int>(0); std::vector<body> *bodyVec = new std::vector<body>; DB[flow]["bodies"].set<std::vector<body> *>(bodyVec); // boundary conditions boundaryCondition **bc = new boundaryCondition*[4]; for (int i=0; i<4; i++) bc[i] = new boundaryCondition[2]; DB[flow]["boundaryConditions"].set<boundaryCondition **>(bc); // simulation parameters std::string sim = "simulation"; DB[sim]["dt"].set<real>(0.02); DB[sim]["nt"].set<int>(100); DB[sim]["nsave"].set<int>(100); DB[sim]["startStep"].set<bool>(0); DB[sim]["convTimeScheme"].set<timeScheme>(EULER_EXPLICIT); DB[sim]["diffTimeScheme"].set<timeScheme>(EULER_IMPLICIT); DB[sim]["ibmScheme"].set<ibmScheme>(TAIRA_COLONIUS); DB[sim]["interpolationType"].set<interpolationType>(LINEAR); // velocity solver std::string solver = "velocitySolve"; DB[solver]["solver"].set<std::string>("CG"); DB[solver]["preconditioner"].set<preconditionerType>(DIAGONAL); DB[solver]["rTol"].set<real>(1.0E-05); DB[solver]["aTol"].set<real>(1.0E-50); DB[solver]["maxIterations"].set<int>(10000); // Poisson solver solver = "PoissonSolve"; DB[solver]["solver"].set<std::string>("CG"); DB[solver]["preconditioner"].set<preconditionerType>(DIAGONAL); DB[solver]["rTol"].set<real>(1.0E-05); DB[solver]["aTol"].set<real>(1.0E-50); DB[solver]["maxIterations"].set<int>(20000); } // initialiseDefaultDB /** * \brief Parses the command-line to get the case folder name * and the device number. * * \param argc number of arguments in the command-line * \param argv arguments of the command-line * \param DB database that contains all the simulation parameters */ void commandLineParse1(int argc, char **argv, parameterDB &DB) { for (int i=1; i<argc; i++) { if (strcmp(argv[i],"-directory")==0) { i++; DB["inputs"]["caseFolder"].set<std::string>(std::string(argv[i])); } else if (strcmp(argv[i],"-deviceNumber")==0) { i++; int devNum = toNumber<int>(std::string(argv[i])); DB["inputs"]["deviceNumber"].set<int>(devNum); // sets devNum as the current device for the calling host thread cudaSetDevice(devNum); } } } // commandLineParse1 /** * \brief Overwrites parameters with additional arguments of the command-line. 
* * \param argc number of arguments in the command-line * \param argv arguments of the command-line * \param DB database that contains all the simulation parameters */ void commandLineParse2(int argc, char **argv, parameterDB &DB) { for (int i=1; i<argc; i++) { // kinematic viscosity if ( strcmp(argv[i],"-nu")==0 ) { i++; DB["flow"]["nu"].set<real>(toNumber<real>(std::string(argv[i]))); } // perturbation in the x-velocity if ( strcmp(argv[i],"-uPerturb")==0 ) { i++; DB["flow"]["uPerturb"].set<real>(toNumber<real>(std::string(argv[i]))); } // perturbation in the y-velocity if ( strcmp(argv[i],"-vPerturb")==0 ) { i++; DB["flow"]["vPerturb"].set<real>(toNumber<real>(std::string(argv[i]))); } // scale the CV with respect to the body if ( strcmp(argv[i],"-scaleCV")==0 ) { i++; DB["simulation"]["scaleCV"].set<real>(toNumber<real>(std::string(argv[i]))); } // frequency of saving the data if ( strcmp(argv[i],"-nsave")==0 ) { i++; DB["simulation"]["nsave"].set<int>(toNumber<int>(std::string(argv[i]))); } // total number of time steps if ( strcmp(argv[i],"-nt")==0 ) { i++; DB["simulation"]["nt"].set<int>(toNumber<int>(std::string(argv[i]))); } // size of time increment if ( strcmp(argv[i],"-dt")==0 ) { i++; DB["simulation"]["dt"].set<real>(toNumber<real>(std::string(argv[i]))); } // relative tolerance for the velocity solve if ( strcmp(argv[i],"-velocity-rtol")==0 ) { i++; DB["velocitySolve"]["rTol"].set<real>(toNumber<real>(std::string(argv[i]))); } // absolute tolerance for the velocity solve if ( strcmp(argv[i],"-velocity-atol")==0 ) { i++; DB["velocitySolve"]["aTol"].set<real>(toNumber<real>(std::string(argv[i]))); } // relative tolerance for the Poisson solve if ( strcmp(argv[i],"-poisson-rtol")==0 ) { i++; DB["PoissonSolve"]["rTol"].set<real>(toNumber<real>(std::string(argv[i]))); } // absolute tolerance for the Poisson solve if ( strcmp(argv[i],"-poisson-atol")==0 ) { i++; DB["PoissonSolve"]["aTol"].set<real>(toNumber<real>(std::string(argv[i]))); } // IBM Scheme if ( strcmp(argv[i],"-ibmScheme")==0 ) { i++; if ( strcmp(argv[i],"NavierStokes")==0 ) DB["simulation"]["ibmScheme"].set<ibmScheme>(NAVIER_STOKES); else if ( strcmp(argv[i],"TairaColonius")==0 ) DB["simulation"]["ibmScheme"].set<ibmScheme>(TAIRA_COLONIUS); else if ( strcmp(argv[i],"DirectForcing")==0 ) DB["simulation"]["ibmScheme"].set<ibmScheme>(DIRECT_FORCING); else if ( strcmp(argv[i],"FadlunEtAl")==0 ) DB["simulation"]["ibmScheme"].set<ibmScheme>(FADLUN_ET_AL); else if ( strcmp(argv[i],"Diffusion")==0 ) DB["simulation"]["ibmScheme"].set<ibmScheme>(DIFFUSION); else if ( strcmp(argv[i],"DFModified")==0 ) DB["simulation"]["ibmScheme"].set<ibmScheme>(DF_MODIFIED); else if ( strcmp(argv[i],"FEAModified")==0 ) DB["simulation"]["ibmScheme"].set<ibmScheme>(FEA_MODIFIED); else if ( strcmp(argv[i],"DFImproved")==0 ) DB["simulation"]["ibmScheme"].set<ibmScheme>(DF_IMPROVED); } // interpolation type for Eulerian direct forcing methods if ( strcmp(argv[i],"-interpolationType")==0 ) { i++; if ( strcmp(argv[i],"constant")==0 ) DB["simulation"]["interpolationType"].set<interpolationType>(CONSTANT); else if ( strcmp(argv[i],"linear")==0 ) DB["simulation"]["interpolationType"].set<interpolationType>(LINEAR); else if ( strcmp(argv[i],"quadratic")==0 ) DB["simulation"]["interpolationType"].set<interpolationType>(QUADRATIC); } } } // commandLineParse2 /** * \brief Converts a \c preconditionerType to a \c std::string. 
* * \param s a preconditioner * * \return a string */ std::string stringFromPreconditionerType(preconditionerType s) { if (s == NONE) return "None"; else if (s == DIAGONAL) return "Diagonal"; else if (s == SMOOTHED_AGGREGATION) return "Smoothed Aggregation"; else if (s == AINV) return "Approximate Inverse"; else { printf("Error: Unknown preconditionerType.\n"); exit(-1); } } // stringFromPreconditionerType /** * \brief Converts a \c timeScheme to a \c std::string. * * \param s a time-integration scheme * * \return a string */ std::string stringFromTimeScheme(timeScheme s) { if (s == EULER_EXPLICIT) return "Explicit Euler Method"; else if (s == EULER_IMPLICIT) return "Implicit Euler Method"; else if (s == ADAMS_BASHFORTH_2) return "2nd Order Adams-Bashforth"; else if (s == CRANK_NICOLSON) return "Crank-Nicolson"; else { printf("Error: Unknown timeScheme!\n"); exit(-1); } } // stringFromTimeScheme /** * \brief Prints the parameters of the simulation. * * \param DB database that contains all the simulation parameters * \param D information about the computational grid */ void printSimulationInfo(parameterDB &DB, domain &D) { real dt = DB["simulation"]["dt"].get<real>(), scaleCV = DB["simulation"]["scaleCV"].get<real>(); int nt = DB["simulation"]["nt"].get<int>(), nsave = DB["simulation"]["nsave"].get<int>(), startStep = DB["simulation"]["startStep"].get<int>(); interpolationType interpType = DB["simulation"]["interpolationType"].get<interpolationType>(); ibmScheme ibmSchm = DB["simulation"]["ibmScheme"].get<ibmScheme>(); std::cout << '\n'; std::cout << "\nFlow parameters" << '\n'; std::cout << "---------------" << '\n'; std::cout << "nu = " << DB["flow"]["nu"].get<real>() << '\n'; std::cout << "\nDomain" << '\n'; std::cout << "------" << '\n'; std::cout << D.nx << " x " << D.ny << '\n'; std::cout << "\nSimulation parameters" << '\n'; std::cout << "---------------------" << '\n'; std::cout << "dt = " << dt << '\n'; std::cout << "scaleCV = " << scaleCV << '\n'; std::cout << "startStep = " << startStep << '\n'; std::cout << "nt = " << nt << '\n'; std::cout << "nsave = " << nsave << '\n'; std::cout << "Convection time scheme = " << stringFromTimeScheme(DB["simulation"]["convTimeScheme"].get<timeScheme>()) << '\n'; std::cout << "Diffusion time scheme = " << stringFromTimeScheme(DB["simulation"]["diffTimeScheme"].get<timeScheme>()) << '\n'; if (ibmSchm == FADLUN_ET_AL || ibmSchm == DIRECT_FORCING || ibmSchm == DIFFUSION || ibmSchm == DF_IMPROVED || ibmSchm == DF_MODIFIED || ibmSchm == FEA_MODIFIED) { std::cout << "Interpolation type: "; switch(interpType) { case CONSTANT : std::cout << "Constant\n"; break; case LINEAR : std::cout << "Linear\n"; break; case QUADRATIC: std::cout << "Quadratic\n"; break; default : std::cout << "Unknown\n"; break; } } std::cout << "\nVelocity Solve" << '\n'; std::cout << "--------------" << '\n'; std::cout << "Solver = " << DB["velocitySolve"]["solver"].get<std::string>() << '\n'; std::cout << "Preconditioner = " << stringFromPreconditionerType(DB["velocitySolve"]["preconditioner"].get<preconditionerType>()) << '\n'; std::cout << "Relative tolerance = " << DB["velocitySolve"]["rTol"].get<real>() << '\n'; std::cout << "Absolute tolerance = " << DB["velocitySolve"]["aTol"].get<real>() << '\n'; std::cout << "\nPoisson Solve" << '\n'; std::cout << "-------------" << '\n'; std::cout << "Solver = " << DB["PoissonSolve"]["solver"].get<std::string>() << '\n'; std::cout << "Preconditioner = " << 
stringFromPreconditionerType(DB["PoissonSolve"]["preconditioner"].get<preconditionerType>()) << '\n'; std::cout << "Relative tolerance = " << DB["PoissonSolve"]["rTol"].get<real>() << '\n'; std::cout << "Absolute tolerance = " << DB["PoissonSolve"]["aTol"].get<real>() << '\n'; std::cout << "\nOutput parameters" << '\n'; std::cout << "-----------------" << '\n'; std::cout << "Output folder = " << DB["inputs"]["caseFolder"].get<std::string>() << '\n'; std::cout << "nsave = " << DB["simulation"]["nsave"].get<int>() << '\n'; cudaDeviceProp deviceProp; int gpu = DB["inputs"]["deviceNumber"].get<int>(); cudaGetDeviceProperties(&deviceProp, gpu); std::cout << "\nDevice Properties" << '\n'; std::cout << "-----------------" << '\n'; std::cout << "Name = " << deviceProp.name << '\n'; std::cout << "Number = " << gpu << '\n'; std::string ecc = deviceProp.ECCEnabled ? "yes" : "no"; std::cout << "Compute capability = " << deviceProp.major << "." << deviceProp.minor << '\n'; std::cout << "ECC Enabled = " << ecc << std::endl; } // printSimulationInfo /** * \brief Prints the time spent to execute tasks. * * \param logger object that contains the name and time spent of tasks */ void printTimingInfo(Logger &logger) { logger.printAllTime(); std::cout << std::endl; } // printTimingInfo /** * \brief Writes grid-points coordinates into the file \a grid. * * \param caseFolder the directory of the simulation * \param D information about the computational grid */ void writeGrid(std::string &caseFolder, domain &D) { std::stringstream out; out << caseFolder << "/grid"; std::ofstream file(out.str().c_str(), std::ios::binary); file.write((char*)(&D.nx), sizeof(int)); file.write((char*)(&D.x[0]), (D.nx+1)*sizeof(real)); file.write((char*)(&D.ny), sizeof(int)); file.write((char*)(&D.y[0]), (D.ny+1)*sizeof(real)); file.close(); } // writeGrid /** * \brief Writes numerical data at a given time-step (on the host). * * It creates a directory whose name is the time-step number * and writes the flux, the pressure (and eventually the body forces) * into the files \a q, \a lambda, respectively. * * \param caseFolder directory of the simulation * \param n the time-step number * \param q array that contains the fluxes * \param lambda array that contains the pressures (and eventually the body forces) * \param D information about the computational grid */ template <> void writeData<vecH>(std::string &caseFolder, int n, vecH &q, vecH &lambda, domain &D)//, bodies &B) { std::string path; std::stringstream out; int N; out << caseFolder << '/' << std::setfill('0') << std::setw(7) << n; path = out.str(); mkdir(path.c_str(), S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH); out.str(""); out << path << "/q"; std::ofstream file(out.str().c_str(), std::ios::binary); N = q.size(); file.write((char*)(&N), sizeof(int)); file.write((char*)(&q[0]), N*sizeof(real)); file.close(); out.str(""); out << path << "/lambda"; file.open(out.str().c_str(), std::ios::binary); N = lambda.size(); file.write((char*)(&N), sizeof(int)); file.write((char*)(&lambda[0]), N*sizeof(real)); file.close(); std::cout << "Data saved to folder " << path << std::endl; } // writeData /** * \brief Writes numerical data at a given time-step (on the device). * * It creates a directory whose name is the time-step number * and writes the flux, the pressure (and eventually the body forces) * into the files \a q, \a lambda, respectively. 
* * \param caseFolder directory of the simulation * \param n the time-step number * \param q array that contains the fluxes * \param lambda array that contains the pressures (and eventually the body forces) * \param D information about the computational grid */ template <> void writeData<vecD>(std::string &caseFolder, int n, vecD &q, vecD &lambda, domain &D)//, bodies &B) { vecH qH = q, lambdaH = lambda; writeData(caseFolder, n, qH, lambdaH, D); } // writeData /** * \brief Reads numerical data at a given time-step. * * \param caseFolder directory of the simulation * \param timeStep the time-step number * \param x array that to fill * \param name name of the file containing the variable */ void readData(std::string &caseFolder, int timeStep, real *x, std::string name) { std::stringstream in; std::string inFilePath; int n; in << caseFolder << "/" << std::setfill('0') << std::setw(7) << timeStep << "/" << name; inFilePath = in.str(); std::cout << "Reading fluxes from " << inFilePath << " ... "; std::ifstream inFile(inFilePath.c_str(), std::ifstream::binary); inFile.read((char*)(&n), sizeof(int)); inFile.read((char*)(&x[0]), n*sizeof(real)); inFile.close(); std::cout << "done" << std::endl; } // readData /** * \brief Prints device memory usage. * * \param label the label of the device */ void printDeviceMemoryUsage(std::string label) { size_t _free, _total; cudaMemGetInfo(&_free, &_total); std::cout << label << ": Memory Usage " << std::setprecision(3) << (_total-_free)/(1024.0*1024*1024) \ << " / " << std::setprecision(3) << _total/(1024.0*1024*1024) << " GB" << std::setprecision(6) << '\n' << std::endl; } // printDeviceMemoryUsage } // End of namespace io
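/**
 * \brief Illustrative usage sketch -- not part of the original file. It shows
 *        how the split() and toNumber<T>() helpers defined above compose:
 *        split() tokenises a delimited string and toNumber<T>() converts each
 *        token through a std::stringstream. The option string and variable
 *        names below are made up for the example.
 */
void demoSplitAndConvert()
{
	std::string line = "0.02,100,500";                    // dt, nt, nsave
	std::vector<std::string> tokens = io::split(line, ',');

	real dt    = toNumber<real>(tokens[0]);               // -> 0.02
	int  nt    = toNumber<int>(tokens[1]);                // -> 100
	int  nsave = toNumber<int>(tokens[2]);                // -> 500

	std::cout << "dt = " << dt << ", nt = " << nt << ", nsave = " << nsave << std::endl;
} // demoSplitAndConvert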
#include "bits/mexutils.h" #include "bits/datamex.hpp" #include "bits/nnconv.hpp" #include "bits/nnfullyconnected.hpp" #include "bits/nnsubsample.hpp" #if ENABLE_GPU #include "bits/datacu.hpp" #endif #include <memory> #include <assert.h> #include <math.h> /* option codes */ enum { opt_stride = 0, opt_pad, opt_verbose, opt_no_der_data, opt_no_der_filters, opt_no_der_biases, opt_cudnn, opt_no_cudnn, opt_cudnn_workspace_limit, opt_transpose } ; /* options */ vlmxOption options [] = { {"Stride", 1, opt_stride }, {"Pad", 1, opt_pad }, {"Verbose", 0, opt_verbose }, {"NoDerData", 0, opt_no_der_data }, {"NoDerFilters", 0, opt_no_der_filters }, {"NoderBiases", 0, opt_no_der_biases }, {"Cudnn", 0, opt_cudnn }, {"NoCudnn", 0, opt_no_cudnn }, {"CudnnWorkSpaceLimit", 1, opt_cudnn_workspace_limit }, {0, 0, 0 } } ; /* ---------------------------------------------------------------- */ /* Context */ /* ---------------------------------------------------------------- */ vl::MexContext context ; /* Resetting the context here resolves a crash when MATLAB quits and the ~Context function is implicitly called on unloading the MEX file. */ void atExit() { context.clear() ; } /* ---------------------------------------------------------------- */ /* MEX driver */ /* ---------------------------------------------------------------- */ enum { IN_DATA = 0, IN_FILTERS, IN_BIASES, IN_DEROUTPUT, IN_END } ; enum { OUT_RESULT = 0, OUT_DERFILTERS, OUT_DERBIASES, OUT_END } ; void mexFunction(int nout, mxArray *out[], int nin, mxArray const *in[]) { int strideX = 1 ; int strideY = 1 ; int padLeft = 0 ; int padRight = 0 ; int padTop = 0 ; int padBottom = 0 ; int numFilterGroups = 1 ; bool backMode = false ; bool hasFilters = false ; bool hasBiases = false ; bool fullyConnectedMode = false ; bool computeDerData = true ; bool computeDerFilters = true ; bool computederBiases = true ; int verbosity = 0 ; int opt ; int next = IN_END ; mxArray const *optarg ; /* -------------------------------------------------------------- */ /* Check the arguments */ /* -------------------------------------------------------------- */ mexAtExit(atExit) ; if (nin < 3) { mexErrMsgTxt("There are less than three arguments.") ; } if (nin > 3 && vlmxIsString(in[3],-1)) { next = 3 ; backMode = 0 ; } else { backMode = (nin >= 4) ; } while ((opt = vlmxNextOption (in, nin, options, &next, &optarg)) >= 0) { switch (opt) { case opt_verbose : ++ verbosity ; break ; case opt_stride : if (!vlmxIsPlainMatrix(optarg,-1,-1)) { mexErrMsgTxt("STRIDE is not a plain matrix.") ; } switch (mxGetNumberOfElements(optarg)) { case 1: strideY = (int)mxGetPr(optarg)[0] ; strideX = strideY ; break ; case 2: strideY = (int)mxGetPr(optarg)[0] ; strideX = (int)mxGetPr(optarg)[1] ; break ; default: mexErrMsgTxt("STRIDE has neither one nor two elements.") ; } break ; case opt_pad : if (!vlmxIsPlainMatrix(optarg,-1,-1)) { mexErrMsgTxt("PAD is not a plain matrix.") ; } switch (mxGetNumberOfElements(optarg)) { case 1: padLeft = (int)mxGetPr(optarg)[0] ; padRight = padLeft ; padTop = padLeft ; padBottom = padLeft ; break ; case 4: padTop = (int)mxGetPr(optarg)[0] ; padBottom = (int)mxGetPr(optarg)[1] ; padLeft = (int)mxGetPr(optarg)[2] ; padRight = (int)mxGetPr(optarg)[3] ; break ; default: mexErrMsgTxt("PAD has neither one nor four elements.") ; } break ; case opt_no_der_data : computeDerData = VL_FALSE ; break ; case opt_no_der_filters : computeDerFilters = VL_FALSE ; break ; case opt_no_der_biases : computederBiases = VL_FALSE ; break ; case opt_no_cudnn : #if ENABLE_CUDNN 
context.getCudaHelper().setCudnnEnabled(false) ; #endif break ; case opt_cudnn : #if ENABLE_CUDNN context.getCudaHelper().setCudnnEnabled(true) ; #endif break ; case opt_cudnn_workspace_limit : { #if ENABLE_CUDNN double x ; if (!vlmxIsScalar(optarg) || (x = mxGetScalar(optarg)) < 0) { mexErrMsgTxt("CudnnWorkSpaceLimit is not a non-negative scalar.") ; } context.getCudaHelper().setCudnnConvolutionFwdPreference ((x==mxGetInf() ? CUDNN_CONVOLUTION_FWD_PREFER_FASTEST : CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT), (size_t)x) ; context.getCudaHelper().setCudnnConvolutionBwdFilterPreference ((x==mxGetInf() ? CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST : CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT), (size_t)x) ; context.getCudaHelper().setCudnnConvolutionBwdDataPreference ((x==mxGetInf() ? CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST : CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT), (size_t)x) ; break ; #endif } default: break ; } } vl::MexTensor data(context) ; vl::MexTensor filters(context) ; vl::MexTensor biases(context) ; vl::MexTensor derOutput(context) ; data.init(in[IN_DATA]) ; data.reshape(4) ; filters.init(in[IN_FILTERS]) ; filters.reshape(4) ; biases.init(in[IN_BIASES]) ; if (backMode) { derOutput.init(in[IN_DEROUTPUT]) ; derOutput.reshape(4) ; } hasFilters = !filters.isEmpty() ; hasBiases = !biases.isEmpty() ; /* check for GPU/data class consistency */ if (hasFilters && ! vl::areCompatible(data, filters)) { mexErrMsgTxt("DATA and FILTERS do not have compatible formats.") ; } if (hasBiases && ! vl::areCompatible(data, biases)) { mexErrMsgTxt("DATA and BIASES do not have compatible formats.") ; } if (backMode && ! vl::areCompatible(data, derOutput)) { mexErrMsgTxt("DATA and DEROUTPUT do not have compatible formats.") ; } /* basic argument checks */ if (strideX < 1 || strideY < 1) { mexErrMsgTxt("At least one element of STRIDE is smaller than one.") ; } if (padLeft < 0 || padRight < 0 || padTop < 0 || padBottom < 0) { mexErrMsgTxt("An element of PAD is negative.") ; } /* Get the filter shape */ vl::TensorShape filtersShape(filters) ; int equivalentNumFilters ; if (hasFilters) { if (filtersShape.getHeight() == 0 || filtersShape.getWidth() == 0 || filtersShape.getDepth() == 0) { mexErrMsgTxt("A dimension of FILTERS is void.") ; } if (data.getHeight() + (padTop+padBottom) < filters.getHeight() || data.getWidth() + (padLeft+padRight) < filters.getWidth()) { mexErrMsgTxt("FILTERS are larger than the DATA (including padding).") ; } /* grouped filters */ numFilterGroups = data.getDepth() / filters.getDepth() ; if (numFilterGroups * filters.getDepth() != data.getDepth()) { mexErrMsgTxt("The FILTERS depth does not divide the DATA depth.") ; } if (filters.getSize() % numFilterGroups != 0) { mexErrMsgTxt("The number of filter groups does not divide the number of filters.") ; } equivalentNumFilters = filters.getSize() ; } else { /* empty filters -> pretend the identity filter bank */ filtersShape = vl::TensorShape(1, 1, data.getDepth(), data.getDepth()) ; numFilterGroups = 1 ; equivalentNumFilters = data.getDepth() ; } /* Get the output shape */ vl::TensorShape outputShape((data.getHeight() + (padTop+padBottom) - filtersShape.getHeight())/strideY + 1, (data.getWidth() + (padLeft+padRight) - filtersShape.getWidth())/strideX + 1, equivalentNumFilters, data.getSize()) ; if (backMode && (derOutput != outputShape)) { mexErrMsgTxt("DEROUTPUT dimensions are incompatible with X and FILTERS.") ; } /* Check the biases sizes */ if (hasBiases) { if (biases.getNumElements() != filtersShape.getSize()) 
{ mexErrMsgTxt("The number of elements of BIASES is not the same as the number of filters.") ; } } /* Detect fully connected mode (further optimisations): the output is 1 x 1 pixels, no padding, one filter group, stride of one pixel */ fullyConnectedMode = (outputShape.getHeight() == 1 && outputShape.getWidth() == 1 && strideY == 1 && strideX == 1 && padTop == 0 && padBottom == 0 && padLeft == 0 && padRight == 0 && numFilterGroups == 1) ; /* create output buffers */ vl::Device deviceType = data.getDeviceType() ; vl::Type dataType = data.getDataType() ; vl::MexTensor output(context) ; vl::MexTensor derData(context) ; vl::MexTensor derFilters(context) ; vl::MexTensor derBiases(context) ; if (!backMode) { output.init(deviceType, dataType, outputShape) ; } else { if (computeDerData) { derData.init(deviceType, dataType, data.getShape()) ; } if (computeDerFilters && hasFilters) { derFilters.init(deviceType, dataType, filters.getShape()) ; } if (computederBiases && hasBiases) { derBiases.init(deviceType, dataType, biases.getShape()) ; } } if (verbosity > 0) { mexPrintf("vl_nnconv: %s; %s", backMode?"backward":"forward", (data.getDeviceType()==vl::GPU) ? "GPU" : "CPU") ; if (data.getDeviceType() == vl::GPU) { #if ENABLE_CUDNN mexPrintf("; %s\n", context.getCudaHelper().getCudnnEnabled() ? "cuDNN" : "cuBLAS") ; #else mexPrintf("; cuBLAS\n") ; #endif } else { mexPrintf("; BLAS\n") ; } mexPrintf("vl_nnconv: stride: [%d %d], pad: [%d %d %d %d]\n" "vl_nnconv: num filter groups: %d, has bias: %d, has filters: %d, is fully connected: %d\n", strideY, strideX, padTop, padBottom, padLeft, padRight, numFilterGroups, hasBiases, hasFilters, fullyConnectedMode) ; vl::print("vl_nnconv: data: ", data) ; if (hasFilters) { vl::print("vl_nnconv: filters: ", filters) ; } if (hasBiases) { vl::print("vl_nnconv: biases: ", biases) ; } if (backMode) { vl::print("vl_nnconv: derOutput: ", derOutput) ; vl::print("vl_nnconv: derData: ", derData) ; if (hasFilters) { vl::print("vl_nnconv: derFilters: ", derFilters) ; } if (hasBiases) { vl::print("vl_nnconv: derBiases: ", derBiases) ; } } else { vl::print("vl_nnconv: output: ", output) ; } } /* -------------------------------------------------------------- */ /* Do the work */ /* -------------------------------------------------------------- */ vl::Error error ; /* special case: fully connected (could be done as a regular case, but it is faster this way) */ if (fullyConnectedMode) { if (!backMode) { error = vl::nnfullyconnected_forward(context, output, data, filters, biases) ; } else { error = vl::nnfullyconnected_backward(context, derData, derFilters, derBiases, data, filters, derOutput) ; } goto doneok ; } /* special case: no filters = identity filter bank (subsample + bias) */ if (!hasFilters) { if (!backMode) { error = vl::nnsubsample_forward(context, output, data, biases, strideY, strideX, padTop, padBottom, padLeft, padRight) ; } else { error = vl::nnsubsample_backward(context, derData, derBiases, derOutput, strideY, strideX, padTop, padBottom, padLeft, padRight) ; } goto doneok ; } /* regular case */ if (!backMode) { error = vl::nnconv_forward(context, output, 0, data, 1, filters, biases, strideY, strideX, padTop, padBottom, padLeft, padRight) ; } else { error = vl::nnconv_backward(context, derData, derFilters, derBiases, data, filters, derOutput, strideY, strideX, padTop, padBottom, padLeft, padRight) ; } doneok: if (verbosity > 0) { #if ENABLE_CUDNN if (context.getCudaHelper().getCudnnEnabled()) { mexPrintf("vl_nnconv: cuDNN workspace used: " "fwd %.6g MB" ", bwd 
filter %.6g MB" ", bwd data %.6g MB\n", (double)context.getCudaHelper().getCudnnConvolutionFwdWorkSpaceUsed() / (1024*1024), (double)context.getCudaHelper().getCudnnConvolutionBwdFilterWorkSpaceUsed() / (1024*1024), (double)context.getCudaHelper().getCudnnConvolutionBwdDataWorkSpaceUsed() / (1024*1024)) ; } #endif } /* -------------------------------------------------------------- */ /* Cleanup */ /* -------------------------------------------------------------- */ if (error != vl::vlSuccess) { mexErrMsgTxt(context.getLastErrorMessage().c_str()) ; } if (backMode) { mxClassID classID ; switch (derOutput.getDataType()) { case vl::vlTypeFloat: classID = mxSINGLE_CLASS ; break ; case vl::vlTypeDouble: classID = mxDOUBLE_CLASS ; break ; default: abort() ; } out[OUT_RESULT] = (computeDerData) ? derData.relinquish() : mxCreateNumericMatrix(0,0,classID,mxREAL) ; out[OUT_DERFILTERS] = (computeDerFilters & hasFilters)? derFilters.relinquish() : mxCreateNumericMatrix(0,0,classID,mxREAL) ; out[OUT_DERBIASES] = (computederBiases & hasBiases) ? derBiases.relinquish() : mxCreateNumericMatrix(0,0,classID,mxREAL) ; } else { out[OUT_RESULT] = output.relinquish() ; } }
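/* ----------------------------------------------------------------
   Illustrative sketch -- not part of the original MEX file. The
   output shape assembled above is

       outH = (H + padTop + padBottom - filterH) / strideY + 1
       outW = (W + padLeft + padRight - filterW) / strideX + 1

   (integer division). The helper below merely restates that formula;
   its name and the example values are made up.
   ---------------------------------------------------------------- */
struct ConvOutputSize { int height ; int width ; } ;

static ConvOutputSize convOutputSizeSketch(int dataH, int dataW,
                                           int filterH, int filterW,
                                           int strideY, int strideX,
                                           int padTop, int padBottom,
                                           int padLeft, int padRight)
{
  ConvOutputSize s ;
  s.height = (dataH + padTop + padBottom - filterH) / strideY + 1 ;
  s.width  = (dataW + padLeft + padRight - filterW) / strideX + 1 ;
  // e.g. a 224x224 input with 7x7 filters, stride 2 and pad 3 on every
  // side gives a 112x112 output.
  return s ;
}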
#include <svo/img_align/sparse_img_align_device_utils.cuh> #include <imp/cu_core/cu_texture.cuh> #include <imp/cu_core/cu_utils.hpp> #include <svo/common/logging.h> #include <svo/img_align/sparse_img_align_base.h> namespace svo { GpuCacheHandler::GpuCacheHandler(): patch_area_(0), feature_capacity_(0), reduction_cache_capacity_(0) { } GpuCacheHandler::GpuCacheHandler(const size_t patch_area): patch_area_(patch_area), feature_capacity_(0), reduction_cache_capacity_(0) { } void GpuCacheHandler::setPatchArea(const size_t patch_area) { patch_area_ = patch_area; } void GpuCacheHandler::reserveFeatureCapacity(const size_t capacity) { CHECK_GT(patch_area_,0); // Check if enough or to much memory is allocated. if( (capacity > feature_capacity_) || (feature_capacity_ - capacity > kMaxStorageSurplus)) { SVO_WARN_STREAM("Reallocate GPU memory. Changing capacity from " << feature_capacity_ << " to " << capacity << " features."); uv_cache_.reset(new UvCache(capacity)); xyz_ref_cache_.reset(new XyzRefCache(capacity)); jacobian_proj_cache_.reset(new JacobianProjCache(capacity*kJacProjStride)); jacobian_cache_.reset(new JacobianCache(capacity*kJacStride*patch_area_)); residual_cache_.reset(new ResidualCache(capacity*patch_area_)); visibility_mask_.reset(new VisibilityMask(capacity)); ref_patch_cache_.reset(new RefPatchCache(capacity*patch_area_)); disparity_cache_.reset(new DistparitiyCache(capacity)); feature_capacity_ = capacity; } } void GpuCacheHandler::reserveReductionCacheCapacity(const size_t capacity) { // Check if enough or to much memory is allocated. if( (capacity > reduction_cache_capacity_) || (reduction_cache_capacity_ - capacity > kMaxStorageSurplus)) { SVO_WARN_STREAM("Reallocate memory for reduction step from " << reduction_cache_capacity_ << " to " << capacity << " blocks."); hessian_reduction_cache_.reset(new HessianReductionCache(capacity*kHessianTriagStride)); gradient_reduction_cache_.reset(new GradientReductionCache(capacity*kJacStride)); chi2_reduction_cache_.reset(new GradientReductionCache(capacity)); nr_visible_cache_.reset(new NrVisibleCache(capacity)); hessian_reduction_cache_host_.reset(new HessianReductionCacheHost(capacity*kHessianTriagStride)); gradient_reduction_cache_host_.reset(new GradientReductionCacheHost(capacity*kJacStride)); chi2_reduction_cache_host_.reset(new GradientReductionCacheHost(capacity)); nr_visible_cache_host_.reset(new NrVisibleCacheHost(capacity)); reduction_cache_capacity_ = capacity; } else { // Set region of interest to the correct value to make // copying from device to host possible. 
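// (No reallocation is needed in this branch: the existing buffers are large
//  enough, so only the regions of interest are updated and the copyTo() calls
//  in copyReductionCacheDeviceToHost() transfer exactly `capacity` blocks
//  instead of the full allocation.)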
if(nr_visible_cache_host_->roi().length() != capacity) { SVO_DEBUG_STREAM("Change region of interest of linear memory (before " << nr_visible_cache_host_->roi().length() << ", after " << capacity << " elements)"); hessian_reduction_cache_->setRoi(imp::Roi1u(0,capacity*kHessianTriagStride)); gradient_reduction_cache_->setRoi(imp::Roi1u(0,capacity*kJacStride)); chi2_reduction_cache_->setRoi(imp::Roi1u(0,capacity)); nr_visible_cache_->setRoi(imp::Roi1u(0,capacity)); hessian_reduction_cache_host_->setRoi(imp::Roi1u(0,capacity*kHessianTriagStride)); gradient_reduction_cache_host_->setRoi(imp::Roi1u(0,capacity*kJacStride)); chi2_reduction_cache_host_->setRoi(imp::Roi1u(0,capacity)); nr_visible_cache_host_->setRoi(imp::Roi1u(0,capacity)); } } } inline void GpuCacheHandler::copyReductionCacheDeviceToHost() { hessian_reduction_cache_->copyTo(*hessian_reduction_cache_host_); gradient_reduction_cache_->copyTo(*gradient_reduction_cache_host_); chi2_reduction_cache_->copyTo(*chi2_reduction_cache_host_); nr_visible_cache_->copyTo(*nr_visible_cache_host_); } namespace sparse_img_align_device_utils { __host__ __device__ __forceinline__ void setGx(imp::cu::Matrix<FloatTypeGpu,3,6>& __restrict__ g_x, const Float3TypeGpu& __restrict__ p_in_imu) { g_x(0,0) = 1.0; g_x(0,1) = 0.0; g_x(0,2) = 0.0; g_x(0,3) = 0.0; g_x(0,4) = p_in_imu.z; g_x(0,5) = -p_in_imu.y; g_x(1,0) = 0.0; g_x(1,1) = 1.0; g_x(1,2) = 0.0; g_x(1,3) = -p_in_imu.z; g_x(1,4) = 0.0; g_x(1,5) = p_in_imu.x; g_x(2,0) = 0.0; g_x(2,1) = 0.0; g_x(2,2) = 1.0; g_x(2,3) = p_in_imu.y; g_x(2,4) = -p_in_imu.x; g_x(2,5) = 0.0; } //Todo: This function should be a member function of the CPU camera __host__ __device__ __forceinline__ void setPinholeJacobian(imp::cu::Matrix<FloatTypeGpu,2,3>& __restrict__ jac_cam, const Float3TypeGpu& __restrict__ p_in_cam, const FloatTypeGpu& __restrict__ focal_length) { FloatTypeGpu ratio_p_x_z_cam = p_in_cam.x/p_in_cam.z; FloatTypeGpu ratio_p_y_z_cam = p_in_cam.y/p_in_cam.z; FloatTypeGpu ratio_fl_p_z_cam = focal_length/p_in_cam.z; jac_cam(0,0) = ratio_fl_p_z_cam; jac_cam(0,1) = 0.0; jac_cam(0,2) = -ratio_fl_p_z_cam*ratio_p_x_z_cam; jac_cam(1,0) = 0.0; jac_cam(1,1) = ratio_fl_p_z_cam; jac_cam(1,2) = -ratio_fl_p_z_cam*ratio_p_y_z_cam; } __global__ void k_baseCachesGeneric(const imp::cu::Matrix<FloatTypeGpu,3,4> T_imu_cam, const imp::cu::Matrix<FloatTypeGpu,3,3> R_imu_cam, const FloatTypeGpu focal_length, const Float3TypeGpu* __restrict__ p_in_cam, FloatTypeGpu* __restrict__ jac_proj_cache, const unsigned int nr_features) { const unsigned int i = blockIdx.x*blockDim.x+threadIdx.x; if(i < nr_features) { const Float3TypeGpu p_in_imu = transform(T_imu_cam,p_in_cam[i]); imp::cu::Matrix<FloatTypeGpu,3,6> g_x; setGx(g_x,p_in_imu); imp::cu::Matrix<FloatTypeGpu,2,3> jac_cam; setPinholeJacobian(jac_cam,p_in_cam[i],focal_length); imp::cu::Matrix<FloatTypeGpu,2,6> jac_proj = ((jac_cam*R_imu_cam)*g_x); // wite to buffer int offset = 2*6*i; #pragma unroll for(int row = 0; row < 2;++row) { #pragma unroll for(int col = 0; col < 6; ++col) { // times (-1) because of our definition of the photometric error jac_proj_cache[offset + col] = -1.0f*jac_proj(row,col); } offset +=6; } } } __global__ void k_baseCachesPinhole(const imp::cu::Matrix<FloatTypeGpu,3,4> T_imu_cam, const imp::cu::Matrix<FloatTypeGpu,3,3> R_cam_imu, const FloatTypeGpu focal_length, const Float3TypeGpu* __restrict__ p_in_cam, FloatTypeGpu* __restrict__ jac_proj_cache, const unsigned int nr_features) { const unsigned int i = blockIdx.x*blockDim.x+threadIdx.x; if(i < nr_features) { 
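// The lines below write the 2x6 Jacobian of the projected pixel w.r.t. a small
// pose perturbation expressed in the IMU frame. It is the same product
// jac_cam * R_cam_imu * g_x that k_baseCachesGeneric forms explicitly, expanded
// by hand: r00..r12 make up the translational block, the remaining three entries
// of each row are cross-product terms with p_in_imu (the rotational block), and
// the factor -focal_length/z folds in the sign convention of the photometric error.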
Float3TypeGpu p_in_imu = transform(T_imu_cam,p_in_cam[i]); FloatTypeGpu ratio_p_x_z_cam = p_in_cam[i].x/p_in_cam[i].z; FloatTypeGpu ratio_p_y_z_cam = p_in_cam[i].y/p_in_cam[i].z; // times (-1) because of our definition of the photometric error FloatTypeGpu ratio_fl_p_z_cam = (-1.0)*focal_length/p_in_cam[i].z; FloatTypeGpu r00 = ratio_fl_p_z_cam*(R_cam_imu(0,0) - R_cam_imu(2,0)*ratio_p_x_z_cam); FloatTypeGpu r01 = ratio_fl_p_z_cam*(R_cam_imu(0,1) - R_cam_imu(2,1)*ratio_p_x_z_cam); FloatTypeGpu r02 = ratio_fl_p_z_cam*(R_cam_imu(0,2) - R_cam_imu(2,2)*ratio_p_x_z_cam); FloatTypeGpu r10 = ratio_fl_p_z_cam*(R_cam_imu(1,0) - R_cam_imu(2,0)*ratio_p_y_z_cam); FloatTypeGpu r11 = ratio_fl_p_z_cam*(R_cam_imu(1,1) - R_cam_imu(2,1)*ratio_p_y_z_cam); FloatTypeGpu r12 = ratio_fl_p_z_cam*(R_cam_imu(1,2) - R_cam_imu(2,2)*ratio_p_y_z_cam); const int offset = 2*6*i; jac_proj_cache[offset] = r00; jac_proj_cache[offset + 1] = r01; jac_proj_cache[offset + 2] = r02; jac_proj_cache[offset + 3] = -p_in_imu.z*r01 + p_in_imu.y*r02; jac_proj_cache[offset + 4] = p_in_imu.z*r00 - p_in_imu.x*r02; jac_proj_cache[offset + 5] = -p_in_imu.y*r00 + p_in_imu.x*r01; jac_proj_cache[offset + 6] = r10; jac_proj_cache[offset + 7] = r11; jac_proj_cache[offset + 8] = r12; jac_proj_cache[offset + 9] = -p_in_imu.z*r11 + p_in_imu.y*r12; jac_proj_cache[offset + 10] = p_in_imu.z*r10 - p_in_imu.x*r12; jac_proj_cache[offset + 11] = -p_in_imu.y*r10 + p_in_imu.x*r11; } } void precomputeBaseCaches(std::vector<Float2TypeGpu>& uv_cache, std::vector<Float3TypeGpu>& xyz_ref_cache, const std::vector<size_t>& first_ftr_index, const std::vector<size_t>& nbr_of_ftrs, const std::vector<imp::cu::Matrix<FloatTypeGpu,3,4>::Ptr>& cu_T_imu_cam_bundle, const std::vector<imp::cu::Matrix<FloatTypeGpu,3,4>::Ptr>& cu_T_cam_imu_bundle, const std::vector<imp::cu::PinholeCamera::Ptr>& cu_camera_bundle, const size_t& nbr_fts_to_track, GpuCacheHandler& gpu_cache) { // Prepare the GPU buffers. gpu_cache.reserveFeatureCapacity(nbr_fts_to_track); // Transfer data from CPU to GPU. LinearMemoryFloat2 uv_linear(reinterpret_cast<Float2PixelGpu*>(uv_cache.data()),uv_cache.size(),true); LinearMemoryFloat3 xyz_linear(reinterpret_cast<Float3PixelGpu*>(xyz_ref_cache.data()),xyz_ref_cache.size(),true); gpu_cache.uv().setRoi(uv_linear.roi()); gpu_cache.uv().copyFrom(uv_linear); gpu_cache.xyzRef().setRoi(xyz_linear.roi()); gpu_cache.xyzRef().copyFrom(xyz_linear); // Fill base caches. 
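// One kernel launch per camera of the bundle: the features of all cameras are
// packed into a single flat GPU buffer, so each launch indexes its own slice via
// first_ftr_index and writes kJacProjStride (two rows of six) floats per feature
// into the projection-Jacobian cache.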
for(int i = 0; i< static_cast<int>(cu_camera_bundle.size()); ++i) { imp::cu::Fragmentation<32,1> frag(nbr_of_ftrs.at(i)); k_baseCachesPinhole <<< frag.dimGrid,frag.dimBlock >>>(*cu_T_imu_cam_bundle.at(i), cu_T_cam_imu_bundle.at(i)->block<3,3>(0,0), cu_camera_bundle.at(i)->fx(), &gpu_cache.xyzRef().cuData()[first_ftr_index.at(i)], &gpu_cache.jacProj().cuData()[first_ftr_index.at(i)*GpuCacheHandler::kJacProjStride], nbr_of_ftrs.at(i)); } cudaDeviceSynchronize(); } __global__ void k_jacobianAndRefPatches(imp::cu::Texture2D ref_tex, const Float2TypeGpu* __restrict__ uv, const FloatTypeGpu* __restrict__ jac_proj_cache, const int patch_size, const int level, const unsigned int nrFeatures, FloatTypeGpu* __restrict__ jacobian_cache, FloatTypeGpu* __restrict__ ref_patch_cache) { const unsigned int i = blockIdx.x*blockDim.x+threadIdx.x; if(i < nrFeatures) { const FloatTypeGpu scale = 1.0f/(1<<level); const FloatTypeGpu patch_area = patch_size*patch_size; const FloatTypeGpu upper_left_coord_x = uv[i].x*scale - (patch_size - 1)/2.0f; const FloatTypeGpu upper_left_coord_y = uv[i].y*scale - (patch_size - 1)/2.0f; size_t ref_patch_index_offset = patch_area*i; size_t jacobian_index_offset = patch_area*GpuCacheHandler::kJacStride*i; size_t jac_proj_cache_index_offset = GpuCacheHandler::kJacProjStride*i; #pragma unroll 4 for(int row = 0; row < patch_size; ++row) { #pragma unroll 4 for(int col = 0; col < patch_size; ++col, ++ref_patch_index_offset, jacobian_index_offset += 8) { FloatTypeGpu center_texel; imp::cu::tex2DFetch(center_texel, ref_tex,upper_left_coord_x + col, upper_left_coord_y + row); ref_patch_cache[ref_patch_index_offset] = 255.0f*center_texel; FloatTypeGpu dx_left,dx_right,dy_up,dy_down; imp::cu::tex2DFetch(dx_left, ref_tex,upper_left_coord_x + col - 1, upper_left_coord_y + row); imp::cu::tex2DFetch(dx_right, ref_tex,upper_left_coord_x + col + 1, upper_left_coord_y + row); imp::cu::tex2DFetch(dy_up, ref_tex,upper_left_coord_x + col, upper_left_coord_y + row - 1); imp::cu::tex2DFetch(dy_down, ref_tex,upper_left_coord_x + col, upper_left_coord_y + row + 1); const FloatTypeGpu dx = 0.5f*(dx_right - dx_left)*255.0f; const FloatTypeGpu dy = 0.5f*(dy_down - dy_up)*255.0f; #pragma unroll for(int i = 0; i < 6; ++i) { jacobian_cache[jacobian_index_offset + i] = (dx*(jac_proj_cache[jac_proj_cache_index_offset + i]) + dy*(jac_proj_cache[jac_proj_cache_index_offset + 6 + i]))*scale; } //jacobian_cache[jacobian_index_offset + 6] = -255*center_texel; //jacobian_cache[jacobian_index_offset + 7] = -1; jacobian_cache[jacobian_index_offset + 6] = 0.0; jacobian_cache[jacobian_index_offset + 7] = 0.0; } } } } // TODO: imp::ImagePyramid version (currently not working) //void precomputeJacobiansAndRefPatches( // std::vector<imp::ImagePyramid8uC1::Ptr>& ref_pyramid, // const int level, // const int patch_size, // const bool estimate_alpha, // const bool estimate_beta, // const std::vector<size_t>& first_ftr_index, // const std::vector<size_t>& nbr_of_ftrs, // GpuCacheHandler& gpu_cache) //{ // int patch_area = patch_size*patch_size; // for(int ii = 0; ii < static_cast<int>(pyramid.size());++ii) // { // std::shared_ptr<imp::cu::Texture2D> ref_tex = // std::dynamic_pointer_cast<imp::cu::ImageGpu8uC1>(ref_pyramid.at(ii)->at(level)) // ->genTexture(false,cudaFilterModeLinear,cudaAddressModeBorder,cudaReadModeNormalizedFloat); // dim3 threads(32); // dim3 blocks((nbr_of_ftrs.at(ii) + threads.x-1)/threads.x); // std::cout << " features " << ii << " = " << nbr_of_ftrs.at(ii) << std::endl; // 
k_jacobianAndRefPatches<<<blocks,threads>>>(*ref_tex.get(), // reinterpret_cast<float2*>(&gpu_cache.uv().data()[first_ftr_index.at(ii)]), // reinterpret_cast<float*>(&gpu_cache.jacProj().data()[first_ftr_index.at(ii)*12]), // patch_size , level , nbr_of_ftrs.at(ii), // reinterpret_cast<float*>(&gpu_cache.jacobian().data()[first_ftr_index.at(ii)*8*patch_area]), // reinterpret_cast<float*>(&gpu_cache.refPatch().data()[first_ftr_index.at(ii)*patch_area])); // } // cudaDeviceSynchronize(); //} void precomputeJacobiansAndRefPatches( const std::vector<std::vector<imp::cu::ImageGpu8uC1::Ptr> >& ref_pyramid, const int level, const int patch_size, const bool estimate_alpha, const bool estimate_beta, const std::vector<size_t>& first_ftr_index, const std::vector<size_t>& nbr_of_ftrs, GpuCacheHandler& gpu_cache) { int patch_area = patch_size*patch_size; for(int i = 0; i < static_cast<int>(ref_pyramid.size());++i) { std::shared_ptr<imp::cu::Texture2D> ref_tex = std::dynamic_pointer_cast<imp::cu::ImageGpu8uC1>(ref_pyramid.at(i).at(level)) ->genTexture(false,cudaFilterModeLinear,cudaAddressModeBorder,cudaReadModeNormalizedFloat); imp::cu::Fragmentation<32,1> frag(nbr_of_ftrs.at(i)); k_jacobianAndRefPatches <<< frag.dimGrid, frag.dimBlock >>>(*ref_tex,&gpu_cache.uv().cuData()[first_ftr_index.at(i)], &gpu_cache.jacProj().cuData()[first_ftr_index.at(i)*GpuCacheHandler::kJacProjStride], patch_size , level , nbr_of_ftrs.at(i), &gpu_cache.jacobian().cuData()[first_ftr_index.at(i)*GpuCacheHandler::kJacStride*patch_area], &gpu_cache.refPatch().cuData()[first_ftr_index.at(i)*patch_area]); } cudaDeviceSynchronize(); } __global__ void k_residuals(const imp::cu::Texture2D cur_tex, const int width, const int height, const imp::cu::Matrix<FloatTypeGpu,3,4> T_cur_ref, const imp::cu::PinholeCamera cam, const FloatTypeGpu* __restrict__ ref_patch_cache, const Float3TypeGpu* __restrict__ xyz_ref, FloatTypeGpu alpha_illumination, FloatTypeGpu beta_illumionation, const int patch_size, const int level, const unsigned int nrFeatures, BoolTypeGpu* __restrict__ visibility_cache, FloatTypeGpu* __restrict__ residual_cache) { const unsigned int i = blockIdx.x*blockDim.x+threadIdx.x; if(i < nrFeatures) { FloatTypeGpu scale = 1.0f/(1<<level); const int patch_area = patch_size*patch_size; Float2TypeGpu uv_cur = static_cast<Float2TypeGpu>( cam.world2cam(static_cast<Float3TypeGpu>(transform(T_cur_ref,xyz_ref[i])))); const FloatTypeGpu upper_left_coord_x = uv_cur.x*scale - (patch_size - 1)/2.0f; const FloatTypeGpu upper_left_coord_y = uv_cur.y*scale - (patch_size - 1)/2.0f; // Check if projection is within the image. 
if(upper_left_coord_x < 0.0f || upper_left_coord_y < 0.0f || upper_left_coord_x + patch_size >= width - 1 || upper_left_coord_y + patch_size >= height - 1) { visibility_cache[i] = 0; } else { visibility_cache[i] = 1; int pixel = 0; #pragma unroll 4 for(int row = 0; row < patch_size; ++row) { #pragma unroll 4 for(int col = 0; col < patch_size; ++col,++pixel) { FloatTypeGpu cur_Texel; imp::cu::tex2DFetch(cur_Texel, cur_tex,upper_left_coord_x + col, upper_left_coord_y + row); residual_cache[i*patch_area + pixel] = static_cast<FloatTypeGpu>( 255.0*cur_Texel*(1.0 + alpha_illumination) + beta_illumionation) - ref_patch_cache[i*patch_area + pixel]; } } } } } void computeResidualsOfFrame( const std::vector<std::vector<imp::cu::ImageGpu8uC1::Ptr>>& cur_pyramid, const std::vector<imp::cu::Matrix<FloatTypeGpu,3,4>>& cu_T_cur_ref_bundle, const std::vector<imp::cu::PinholeCamera::Ptr>& cu_camera_bundle, const std::vector<size_t>& first_ftr_index, const std::vector<size_t>& nbr_of_ftrs, const int level, const int patch_size, const bool estimate_alpha, const bool estimate_beta, GpuCacheHandler& gpu_cache) { const size_t patch_area = patch_size*patch_size; for(int i = 0; i < static_cast<int>(cur_pyramid.size());++i) { std::shared_ptr<imp::cu::Texture2D> cur_tex = std::dynamic_pointer_cast<imp::cu::ImageGpu8uC1>( cur_pyramid.at(i).at(level)) ->genTexture(false,cudaFilterModeLinear,cudaAddressModeBorder,cudaReadModeNormalizedFloat); imp::cu::Fragmentation<32,1> frag(nbr_of_ftrs.at(i)); k_residuals <<< frag.dimGrid,frag.dimBlock >>>(*cur_tex, cur_pyramid.at(i).at(level)->width(), cur_pyramid.at(i).at(level)->height(), cu_T_cur_ref_bundle.at(i), *cu_camera_bundle.at(i), &gpu_cache.refPatch().cuData()[first_ftr_index.at(i)*patch_area], &gpu_cache.xyzRef().cuData()[first_ftr_index.at(i)], estimate_alpha, estimate_beta, patch_size, level, nbr_of_ftrs.at(i), &gpu_cache.visibility().cuData()[first_ftr_index.at(i)], &gpu_cache.residual().cuData()[first_ftr_index.at(i)*patch_area]); } cudaDeviceSynchronize(); } inline unsigned int nextPow2(unsigned int x) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } bool isPow2(unsigned int x) { return ((x&(x-1))==0); } template <size_t _n_elements> __host__ __device__ __forceinline__ void setToZero(FloatTypeGpu* mem) { #pragma unroll for(int ind = 0; ind < _n_elements; ++ind) { mem[ind] = 0.0; } } template <size_t _matrix_size> __host__ __device__ __forceinline__ void setVVTUpperTriag(FloatTypeGpu* __restrict__ upper_triag_row_maj, const FloatTypeGpu* __restrict__ vect, const FloatTypeGpu& __restrict__ weight = 1.0) { int index = 0; #pragma unroll for(int row = 0; row < _matrix_size; ++row) { #pragma unroll for(int col = row; col < _matrix_size; ++col,++index) { upper_triag_row_maj[index] = weight*vect[row]*vect[col]; } } } template <size_t _matrix_size> __host__ __device__ __forceinline__ void addVVTUpperTriag(FloatTypeGpu* __restrict__ upper_triag_row_maj, const FloatTypeGpu* __restrict__ vect, const FloatTypeGpu& __restrict__ weight = 1.0) { int index = 0; #pragma unroll for(int row = 0; row < _matrix_size; ++row) { #pragma unroll for(int col = row; col < _matrix_size; ++col,++index) { upper_triag_row_maj[index] += weight*vect[row]*vect[col]; } } } template <size_t _vector_size> __host__ __device__ __forceinline__ void addVector(FloatTypeGpu* __restrict__ sum_vect, const FloatTypeGpu* __restrict__ addend_vect) { #pragma unroll for(int ind = 0; ind < _vector_size; ++ind) { sum_vect[ind] += addend_vect[ind]; } } template <size_t 
_vector_size> __host__ __device__ __forceinline__ void addWeightedVector(FloatTypeGpu* __restrict__ sum_vect, const FloatTypeGpu* __restrict__ addend_vect, const FloatTypeGpu& __restrict__ weight = 1.0) { #pragma unroll for(int ind = 0; ind < _vector_size; ++ind) { sum_vect[ind] += weight*addend_vect[ind]; } } template <size_t _vector_size> __host__ __device__ __forceinline__ void subWeightedVector(FloatTypeGpu* __restrict__ sum_vect, const FloatTypeGpu* __restrict__ addend_vect, const FloatTypeGpu& __restrict__ weight = 1.0) { #pragma unroll for(int ind = 0; ind < _vector_size; ++ind) { sum_vect[ind] -= weight*addend_vect[ind]; } } template <size_t _vector_size> __host__ __device__ __forceinline__ void setWeightedVector(FloatTypeGpu* __restrict__ dest_vect, const FloatTypeGpu* __restrict__ src_vect, const FloatTypeGpu& __restrict__ weight = 1.0) { #pragma unroll for(int ind = 0; ind < _vector_size; ++ind) { dest_vect[ind] = weight*src_vect[ind]; } } template <size_t _vector_size> __host__ __device__ __forceinline__ void copyVector(FloatTypeGpu* __restrict__ dest_vect, const FloatTypeGpu* __restrict__ src_vect) { #pragma unroll for(int ind = 0; ind < _vector_size; ++ind) { dest_vect[ind] = src_vect[ind]; } } // _block_size must be power of 2 template <unsigned int _block_size, bool n_is_pow2> __global__ void k_reduceHessianGradient(const FloatTypeGpu* __restrict__ jacobian_cache, const FloatTypeGpu* __restrict__ residual_cache, const BoolTypeGpu* __restrict__ visibility_cache, FloatTypeGpu* __restrict__ gradient_cache, FloatTypeGpu* __restrict__ hessian_cache, UIntTypeGpu* __restrict__ nr_meas, FloatTypeGpu* __restrict__ chi2, const unsigned int n_elements, const unsigned int patch_area) { constexpr unsigned int kHessianTriagN = SparseImgAlignBase::kHessianTriagN; constexpr unsigned int kJacobianSize = SparseImgAlignBase::kJacobianSize; __shared__ FloatTypeGpu s_hessian_data[_block_size*kHessianTriagN]; __shared__ FloatTypeGpu s_gradient_data[_block_size*kJacobianSize]; __shared__ FloatTypeGpu s_chi2[_block_size]; __shared__ UIntTypeGpu s_chi2_nr_meas[_block_size]; FloatTypeGpu jacobian[kJacobianSize]; FloatTypeGpu gradient[kJacobianSize]; FloatTypeGpu hessian[kHessianTriagN]; FloatTypeGpu chi2_temp; UIntTypeGpu chi2_nr_meas = 0; const unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*_block_size*2 + threadIdx.x; const unsigned int gridSize = _block_size*2*gridDim.x; const unsigned int hessian_index = tid*kHessianTriagN; const unsigned int gradient_index = tid*kJacobianSize; // We reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). More blocks will result // in a larger gridSize and therefore fewer elements per thread. // We reading from global memory and write to shared memory. // Get first element. 
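// Each thread first accumulates into private jacobian/gradient/hessian registers
// (two strided elements per grid-stride iteration); only afterwards are the
// per-thread partial sums written to shared memory and combined by the tree
// reduction further below, which halves the number of active threads at every
// step until thread 0 holds the totals of the block.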
if((!n_is_pow2)&&(i >= n_elements)) { setToZero<kJacobianSize>(gradient); setToZero<kHessianTriagN>(hessian); chi2_temp = 0.0; } else { const unsigned int visib_index = i/patch_area; BoolTypeGpu visible = visibility_cache[visib_index]; if(visible == 1) { FloatTypeGpu residual = residual_cache[i]; // TODO: add weighting function FloatTypeGpu weight = 1.0;// weight_function(residual/weight_scale); copyVector<kJacobianSize>(jacobian,&jacobian_cache[i*kJacobianSize]); setVVTUpperTriag<kJacobianSize>(hessian,jacobian,weight); setWeightedVector<kJacobianSize>(gradient,jacobian, -weight*residual); chi2_temp = residual*residual*weight; ++chi2_nr_meas; } else { setToZero<kJacobianSize>(gradient); setToZero<kHessianTriagN>(hessian); chi2_temp = 0.0; } // Get second element. // Ensure we don't read out of bounds -- this is optimized away for powerOf2 problem size. if (n_is_pow2 || i + _block_size < n_elements) { i += _block_size; const unsigned int visib_index = i/patch_area; BoolTypeGpu visible = visibility_cache[visib_index]; if(visible == 1) { FloatTypeGpu residual = residual_cache[i]; //TODO: add weighting function FloatTypeGpu weight = 1.0;//weight_function(residual/weight_scale); copyVector<kJacobianSize>(jacobian,&jacobian_cache[i*kJacobianSize]); addVVTUpperTriag<kJacobianSize>(hessian,jacobian,weight); subWeightedVector<kJacobianSize>(gradient,jacobian, weight*residual); chi2_temp += residual*residual*weight; ++chi2_nr_meas; } } i += (gridSize - _block_size); } // Add further elements if available. while (i < n_elements) { const unsigned int visib_index = i/patch_area; BoolTypeGpu visible = visibility_cache[visib_index]; if(visible == 1) { FloatTypeGpu residual = residual_cache[i]; //TODO: add weighting function FloatTypeGpu weight = 1.0;// weight_function(residual/weight_scale); copyVector<kJacobianSize>(jacobian,&jacobian_cache[i*kJacobianSize]); addVVTUpperTriag<kJacobianSize>(hessian,jacobian,weight); subWeightedVector<kJacobianSize>(gradient,jacobian, weight*residual); chi2_temp += residual*residual*weight; ++chi2_nr_meas; } // Add second element. // Ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays. if (n_is_pow2 || i + _block_size < n_elements) { i += _block_size; const unsigned int visib_index = i/patch_area; BoolTypeGpu visible = visibility_cache[visib_index]; if(visible == 1) { FloatTypeGpu residual = residual_cache[i]; //TODO: add weighting function FloatTypeGpu weight = 1.0;// visible*weight_function(residual/weight_scale); copyVector<kJacobianSize>(jacobian,&jacobian_cache[i*kJacobianSize]); addVVTUpperTriag<kJacobianSize>(hessian,jacobian,weight); subWeightedVector<kJacobianSize>(gradient,jacobian, weight*residual); chi2_temp += residual*residual*weight; ++chi2_nr_meas; } } i += (gridSize - _block_size); } // Each thread puts its local sum into shared memory. copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient); copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian); s_chi2[tid] = chi2_temp; s_chi2_nr_meas[tid] = chi2_nr_meas; __syncthreads(); // Do reduction in shared mem. if ((_block_size >= 512) && (tid < 256)) { // Add to local variable. addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 256)*kJacobianSize]); addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 256)*kHessianTriagN]); chi2_temp += s_chi2[tid + 256]; chi2_nr_meas += s_chi2_nr_meas[tid + 256]; // Store result to shared memory. 
copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient); copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian); s_chi2[tid] = chi2_temp; s_chi2_nr_meas[tid] = chi2_nr_meas; } __syncthreads(); if ((_block_size >= 256) &&(tid < 128)) { // add to local variable addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 128)*kJacobianSize]); addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 128)*kHessianTriagN]); chi2_temp += s_chi2[tid + 128]; chi2_nr_meas += s_chi2_nr_meas[tid + 128]; // Store result to shared memory. copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient); copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian); s_chi2[tid] = chi2_temp; s_chi2_nr_meas[tid] = chi2_nr_meas; } __syncthreads(); if ((_block_size >= 128) && (tid < 64)) { // add to local variable addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 64)*kJacobianSize]); addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 64)*kHessianTriagN]); chi2_temp += s_chi2[tid + 64]; chi2_nr_meas += s_chi2_nr_meas[tid + 64]; // Store result to shared memory. copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient); copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian); s_chi2[tid] = chi2_temp; s_chi2_nr_meas[tid] = chi2_nr_meas; } __syncthreads(); // TODO: __shfl_down can be used for reduction when only a single warp (32 threads) is left. // #if (__CUDA_ARCH__ >= 300 ) // Do reduction with __shfl_down ... // #else // Fully unroll reduction within a single warp. Theoretically __syncthreads() is not necessary anymore // as all threads are in the same warp. But with __syncthreads() the performance seems to be slightly increased. // The reason for this is not yet clear. if ((_block_size >= 64) && (tid < 32)) { // Add to local variable. addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 32)*kJacobianSize]); addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 32)*kHessianTriagN]); chi2_temp += s_chi2[tid + 32]; chi2_nr_meas += s_chi2_nr_meas[tid + 32]; // Store result to shared memory. copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient); copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian); s_chi2[tid] = chi2_temp; s_chi2_nr_meas[tid] = chi2_nr_meas; } __syncthreads(); if ((_block_size >= 32) && (tid < 16)) { // Add to local variable. addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 16)*kJacobianSize]); addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 16)*kHessianTriagN]); chi2_temp += s_chi2[tid + 16]; chi2_nr_meas += s_chi2_nr_meas[tid + 16]; // Store result to shared memory. copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient); copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian); s_chi2[tid] = chi2_temp; s_chi2_nr_meas[tid] = chi2_nr_meas; } __syncthreads(); if ((_block_size >= 16) && (tid < 8)) { // Add to local variable. addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 8)*kJacobianSize]); addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 8)*kHessianTriagN]); chi2_temp += s_chi2[tid + 8]; chi2_nr_meas += s_chi2_nr_meas[tid + 8]; // Store result to shared memory. copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient); copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian); s_chi2[tid] = chi2_temp; s_chi2_nr_meas[tid] = chi2_nr_meas; } __syncthreads(); if ((_block_size >= 8) && (tid < 4)) { // Add to local variable. 
addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 4)*kJacobianSize]); addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 4)*kHessianTriagN]); chi2_temp += s_chi2[tid + 4]; chi2_nr_meas += s_chi2_nr_meas[tid + 4]; // Store result to shared memory. copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient); copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian); s_chi2[tid] = chi2_temp; s_chi2_nr_meas[tid] = chi2_nr_meas; } __syncthreads(); if ((_block_size >= 4) && (tid < 2)) { // Add to local variable. addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 2)*kJacobianSize]); addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 2)*kHessianTriagN]); chi2_temp += s_chi2[tid + 2]; chi2_nr_meas += s_chi2_nr_meas[tid + 2]; // Store result to shared memory. copyVector<kJacobianSize>(&s_gradient_data[gradient_index],gradient); copyVector<kHessianTriagN>(&s_hessian_data[hessian_index],hessian); s_chi2[tid] = chi2_temp; s_chi2_nr_meas[tid] = chi2_nr_meas; } __syncthreads(); if ((_block_size >= 2) && ( tid < 1)) { // Add to local variable. addVector<kJacobianSize>(gradient,&s_gradient_data[(tid + 1)*kJacobianSize]); addVector<kHessianTriagN>(hessian,&s_hessian_data[(tid + 1)*kHessianTriagN]); chi2_temp += s_chi2[tid + 1]; chi2_nr_meas += s_chi2_nr_meas[tid + 1]; } __syncthreads(); // Write result for this block to global memory. if (tid == 0) { copyVector<kJacobianSize>(&gradient_cache[blockIdx.x*kJacobianSize],gradient); copyVector<kHessianTriagN>(&hessian_cache[blockIdx.x*kHessianTriagN],hessian); chi2[blockIdx.x] = chi2_temp; nr_meas[blockIdx.x] = chi2_nr_meas; } } void reduceHessianGradient(const size_t size, const size_t patch_area, const int threads, const int blocks, const FloatTypeGpu* __restrict__ jacobian_input_device, const BoolTypeGpu* __restrict__ visibility_input_device, const FloatTypeGpu* __restrict__ residual_input_device, FloatTypeGpu* __restrict__ gradient_output, FloatTypeGpu* __restrict__ hessian_output, UIntTypeGpu* __restrict__ nrMeas, FloatTypeGpu* __restrict__ chi2) { dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); if (isPow2(size)) { switch (threads) { case 512: SVO_ERROR_STREAM(" 512 threads exceed the 48kB of available shared memory per block!"); // k_jacobianReduceHessianGradient<512, true><<< dimGrid, dimBlock >>>(jacobian_input_device, // residual_input_device, // visibility_input_device, // gradient_output, // hessian_output, // size, patch_area); break; case 256: k_reduceHessianGradient<256, true><<< dimGrid, dimBlock >>>(jacobian_input_device, residual_input_device, visibility_input_device, gradient_output, hessian_output, nrMeas, chi2, size, patch_area); break; case 128: k_reduceHessianGradient<128, true><<< dimGrid, dimBlock >>>(jacobian_input_device, residual_input_device, visibility_input_device, gradient_output, hessian_output, nrMeas, chi2, size, patch_area); break; case 64: k_reduceHessianGradient<64, true><<< dimGrid, dimBlock >>>(jacobian_input_device, residual_input_device, visibility_input_device, gradient_output, hessian_output, nrMeas, chi2, size, patch_area); break; case 32: k_reduceHessianGradient<32, true><<< dimGrid, dimBlock >>>(jacobian_input_device, residual_input_device, visibility_input_device, gradient_output, hessian_output, nrMeas, chi2, size, patch_area); break; case 16: k_reduceHessianGradient<16, true><<< dimGrid, dimBlock >>>(jacobian_input_device, residual_input_device, visibility_input_device, gradient_output, hessian_output, nrMeas, chi2, size, patch_area); break; case 8: 
k_reduceHessianGradient<8, true><<< dimGrid, dimBlock >>>(jacobian_input_device, residual_input_device, visibility_input_device, gradient_output, hessian_output, nrMeas, chi2, size, patch_area); break; case 4: k_reduceHessianGradient<4, true><<< dimGrid, dimBlock >>>(jacobian_input_device, residual_input_device, visibility_input_device, gradient_output, hessian_output, nrMeas, chi2, size, patch_area); break; case 2: k_reduceHessianGradient<2, true><<< dimGrid, dimBlock >>>(jacobian_input_device, residual_input_device, visibility_input_device, gradient_output, hessian_output, nrMeas, chi2, size, patch_area); break; case 1: k_reduceHessianGradient<1, true><<< dimGrid, dimBlock >>>(jacobian_input_device, residual_input_device, visibility_input_device, gradient_output, hessian_output, nrMeas, chi2, size, patch_area); break; default: SVO_ERROR_STREAM("The block size must be a power of 2 for the reduction step! Block size is " << threads << "."); break; } } else { switch (threads) { case 512: SVO_ERROR_STREAM(" 512 threads exceed the 48kB of available shared memory per block!"); // k_reduceHessianGradient<256, false><<< dimGrid, dimBlock >>>(jacobian_input_device, // residual_input_device, // visibility_input_device, // gradient_output, // hessian_output, // nrMeas, // chi2, // size,patch_area); break; case 256: k_reduceHessianGradient<256, false><<< dimGrid, dimBlock >>>(jacobian_input_device, residual_input_device, visibility_input_device, gradient_output, hessian_output, nrMeas, chi2, size, patch_area); break; case 128: k_reduceHessianGradient<128, false><<< dimGrid, dimBlock >>>(jacobian_input_device, residual_input_device, visibility_input_device, gradient_output, hessian_output, nrMeas, chi2, size, patch_area); break; case 64: k_reduceHessianGradient<64, false><<< dimGrid, dimBlock >>>(jacobian_input_device, residual_input_device, visibility_input_device, gradient_output, hessian_output, nrMeas, chi2, size, patch_area); break; case 32: k_reduceHessianGradient<32, false><<< dimGrid, dimBlock >>>(jacobian_input_device, residual_input_device, visibility_input_device, gradient_output, hessian_output, nrMeas, chi2, size, patch_area); break; case 16: k_reduceHessianGradient<16, false><<< dimGrid, dimBlock >>>(jacobian_input_device, residual_input_device, visibility_input_device, gradient_output, hessian_output, nrMeas, chi2, size, patch_area); break; case 8: k_reduceHessianGradient<8, false><<< dimGrid, dimBlock >>>(jacobian_input_device, residual_input_device, visibility_input_device, gradient_output, hessian_output, nrMeas, chi2, size, patch_area); break; case 4: k_reduceHessianGradient<4, false><<< dimGrid, dimBlock >>>(jacobian_input_device, residual_input_device, visibility_input_device, gradient_output, hessian_output, nrMeas, chi2, size, patch_area); break; case 2: k_reduceHessianGradient<2, false><<< dimGrid, dimBlock >>>(jacobian_input_device, residual_input_device, visibility_input_device, gradient_output, hessian_output, nrMeas, chi2, size, patch_area); break; case 1: k_reduceHessianGradient<1, false><<< dimGrid, dimBlock >>>(jacobian_input_device, residual_input_device, visibility_input_device, gradient_output, hessian_output, nrMeas, chi2, size, patch_area); break; default: SVO_ERROR_STREAM("The block size must be a power of 2 for the reduction step! 
Block size is " << threads << "."); break; } } } void getNumBlocksAndThreads(const size_t nr_elements, const int max_grid_size_device, const int max_block_size_device, const int max_blocks , const int max_threads, const int elements_per_thread, int &blocks, int &threads) { threads = (nr_elements < static_cast<size_t>(max_threads)*2) ? nextPow2((nr_elements + 1)/ 2) : max_threads; blocks = (nr_elements + (threads * elements_per_thread - 1)) / (threads * elements_per_thread); if ((float)threads*blocks > (float)max_grid_size_device * max_block_size_device) { throw std::runtime_error("Desired number of threads is too large."); } if(blocks > max_blocks) { blocks = max_blocks; } //TODO: comment this block if max_block is for sure < max_grid_size_device if (blocks > max_grid_size_device) { std::cout << "Desired number of blocks is bigger then the maximum grid size of the target device." << std::endl; blocks /= 2; threads *= 2; } } void computeNumBlocksAndThreadsReduction(const size_t nr_features, const size_t patch_area, const GPUProperties& gpu_props, int &num_blocks, int &num_threads) { const int max_threads = 256; const int max_blocks = 64; const size_t nr_elements = nr_features*patch_area; // To reduce data of size N, log(N) elements should be reduced per thread for best performance. // (c.f. cuda reduction example) const int nr_elements_per_thread = std::max( static_cast<int>(std::floor(log2 (static_cast<double>(nr_elements)))),2); getNumBlocksAndThreads(nr_elements, gpu_props.maxGridSizeX(), gpu_props.maxThreadsPerBlock(), max_blocks, max_threads, nr_elements_per_thread, num_blocks, num_threads); } void reduceHessianGradientCPU(const int num_blocks, const LinearMemoryFloat& __restrict__ gradient_input_host, const LinearMemoryFloat& __restrict__ hessian_input_host, const LinearMemoryUInt& __restrict__ nMeas_input_host, const LinearMemoryFloat& __restrict__ chi2_input_host, FloatTypeGpu gradient_out[], FloatTypeGpu hessian_out[], FloatTypeGpu& chi2) { memset(hessian_out,0,SparseImgAlignBase::kHessianTriagN*sizeof(FloatTypeGpu)); memset(gradient_out,0,SparseImgAlignBase::kJacobianSize*sizeof(FloatTypeGpu)); chi2 = 0; unsigned int n_meas = 0; #pragma unroll 5 for(unsigned int block = 0; block< static_cast<unsigned int>(num_blocks); ++block) { #pragma unroll for(unsigned int i = 0; i < SparseImgAlignBase::kHessianTriagN; ++i) { hessian_out[i] += hessian_input_host[block*SparseImgAlignBase::kHessianTriagN + i]; } #pragma unroll for(unsigned int i = 0; i < SparseImgAlignBase::kJacobianSize; ++i) { gradient_out[i] += gradient_input_host[block*SparseImgAlignBase::kJacobianSize + i]; } n_meas += nMeas_input_host[block]; chi2 += chi2_input_host[block]; } chi2 = chi2/n_meas; } FloatTypeGpu computeHessianAndGradient(SparseImgAlignBase::HessianMatrix* H, SparseImgAlignBase::GradientVector* g, const size_t nr_elements, const size_t patch_area, GpuCacheHandler& gpu_cache, const int num_blocks, const int num_threads) { FloatTypeGpu hessian_triag[SparseImgAlignBase::kHessianTriagN]; FloatTypeGpu gradient[SparseImgAlignBase::kJacobianSize]; reduceHessianGradient(nr_elements, patch_area, num_threads, num_blocks, gpu_cache.jacobian().cuData(), gpu_cache.visibility().cuData(), gpu_cache.residual().cuData(), gpu_cache.gradientDevice().cuData(), gpu_cache.hessianDevice().cuData(), gpu_cache.nrVisibleDevice().cuData(), gpu_cache.chi2Device().cuData()); cudaDeviceSynchronize(); // Sum the results of each block on CPU. 
FloatTypeGpu chi2; gpu_cache.copyReductionCacheDeviceToHost(); reduceHessianGradientCPU(num_blocks, gpu_cache.gradientHost(), gpu_cache.hessianHost(), gpu_cache.nrVisibleHost(), gpu_cache.chi2Host(), gradient, hessian_triag,chi2); // Copy result to H and g. #pragma unroll for(unsigned int row = 0, index = 0; row < SparseImgAlignBase::kJacobianSize; ++row) { #pragma unroll for(unsigned int col = row; col < SparseImgAlignBase::kJacobianSize; ++col,++index) { (*H)(row,col) = (*H)(col,row) = hessian_triag[index]; } } #pragma unroll for(unsigned int index = 0; index < SparseImgAlignBase::kJacobianSize; ++index) { (*g)(index,0) = gradient[index]; } return chi2; } __global__ void k_disparities(const imp::cu::Matrix<FloatTypeGpu,3,4> T_cur_ref, const imp::cu::PinholeCamera cam, const Float3TypeGpu* __restrict__ xyz_ref, const BoolTypeGpu* __restrict__ visibility_cache, const Float2TypeGpu* __restrict__ uv, const unsigned int nrFeatures, const FloatTypeGpu not_visible_value, FloatTypeGpu* __restrict__ disparity) { const unsigned int i = blockIdx.x*blockDim.x+threadIdx.x; if(i < nrFeatures) { // Check if projection is within the image. if(visibility_cache[i] == 1) { Float2TypeGpu uv_cur = static_cast<Float2TypeGpu>( cam.world2cam(static_cast<Float3TypeGpu>(transform(T_cur_ref,xyz_ref[i])))); Float2TypeGpu disparity_vec = make_float2(uv[i].x - uv_cur.x, uv[i].y - uv_cur.y); disparity[i] = sqrt(disparity_vec.x*disparity_vec.x + disparity_vec.y*disparity_vec.y); } else { disparity[i] = not_visible_value; } } } template<class T> size_t copyArrayNonNegative(T* __restrict__ dest, T* __restrict__ src,size_t number_elements) { size_t number_elements_copied = 0; for(size_t i = 0; i < number_elements; ++i) { if(src[i] > 0) { dest[number_elements_copied++] = src[i]; } } return number_elements_copied; } FloatTypeGpu computeDisparity( const std::vector<std::vector<imp::cu::ImageGpu8uC1::Ptr>>& cur_pyramid, const std::vector<imp::cu::Matrix<FloatTypeGpu,3,4>>& cu_T_cur_ref_bundle, const std::vector<imp::cu::PinholeCamera::Ptr>& cu_camera_bundle, const std::vector<size_t>& first_ftr_index, const std::vector<size_t>& nbr_of_ftrs, const size_t total_number_of_features, GpuCacheHandler& gpu_cache) { for(int i = 0; i < static_cast<int>(cur_pyramid.size());++i) { imp::cu::Fragmentation<32,1> frag(nbr_of_ftrs.at(i)); k_disparities <<< frag.dimGrid,frag.dimBlock >>>(cu_T_cur_ref_bundle.at(i), *cu_camera_bundle.at(i), &gpu_cache.xyzRef().cuData()[first_ftr_index.at(i)], &gpu_cache.visibility().cuData()[first_ftr_index.at(i)], &gpu_cache.uv().cuData()[first_ftr_index.at(i)], nbr_of_ftrs.at(i), FLT_MAX, &gpu_cache.disparity().cuData()[first_ftr_index.at(i)]); } cudaDeviceSynchronize(); // Transfer disparities from GPU to CPU FloatTypeGpu* disparity_pointer; disparity_pointer = (FloatTypeGpu*) malloc(total_number_of_features*sizeof(FloatTypeGpu)); cudaMemcpy(disparity_pointer,gpu_cache.disparity().cuData(),total_number_of_features*sizeof(FloatTypeGpu),cudaMemcpyDeviceToHost); /// If the "not_visible_value" is set to FLT_MAX, the median value is allways overestimating /// the true median value. 
We could also remove the values that are not reprojected by setting /// "not_visible_value" in k_disparities to -1 and than run the following operations // { // FloatTypeGpu* disparity_pointer_no_neg; // disparity_pointer_no_neg = (FloatTypeGpu*) malloc(total_number_of_features*sizeof(FloatTypeGpu)); // size_t successfull_reprojections = copyArrayNonNegative<FloatTypeGpu>(disparity_pointer_no_neg,disparity_pointer,total_number_of_features); // FloatTypeGpu* disparity_end = &disparity_pointer_no_neg[successfull_reprojections - 1]; // FloatTypeGpu* middle_ptr = &disparity_pointer_no_neg[successfull_reprojections/2]; // std::nth_element(disparity_pointer_no_neg, middle_ptr, disparity_end); // std::cout << "Median with removal = " << *middle_ptr << std::endl; // std::cout << "Total " << total_number_of_features << std::endl; // std::cout << "successfull " << successfull_reprojections << std::endl; // free(disparity_pointer_no_neg); // } FloatTypeGpu* disparity_end = &disparity_pointer[total_number_of_features - 1]; FloatTypeGpu* middle_ptr = &disparity_pointer[total_number_of_features/2]; std::nth_element(disparity_pointer, middle_ptr, disparity_end); free(disparity_pointer); return *middle_ptr; } } // namespace sparse_img_align_device_utils } // namespace svo
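// ---------------------------------------------------------------------------
// Hedged sketch (not part of the original sparse_img_align_device_utils file):
// the TODO above the final reduction stages notes that __shfl_down could
// replace the shared-memory steps once only a single warp is left. The helpers
// below illustrate that idea for one float per thread (the real kernel would
// also have to shuffle the kJacobianSize and kHessianTriagN vectors). All
// names here are hypothetical; requires CUDA 9+ for __shfl_down_sync.
__device__ inline float warpReduceSumSketch(float val)
{
  // Each step halves the number of contributing lanes; after five steps
  // lane 0 of the warp holds the sum over all 32 lanes.
  for (int offset = 16; offset > 0; offset >>= 1)
    val += __shfl_down_sync(0xffffffffu, val, offset);
  return val;
}

__global__ void k_warpReduceChi2Sketch(const float* __restrict__ in,
                                       float* __restrict__ out, int n)
{
  // Grid-stride accumulation into a per-thread partial sum.
  float sum = 0.0f;
  for (int i = blockIdx.x*blockDim.x + threadIdx.x; i < n; i += blockDim.x*gridDim.x)
    sum += in[i];

  // Warp-level reduction, then one atomic per warp instead of per thread.
  sum = warpReduceSumSketch(sum);
  if ((threadIdx.x & 31) == 0)
    atomicAdd(out, sum);
}
// ---------------------------------------------------------------------------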
#include "CUFLU.h" #if ( MODEL == HYDRO && (FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU) ) // external functions #ifdef __CUDACC__ #if ( RSOLVER == EXACT ) # include "CUFLU_Shared_RiemannSolver_Exact.cu" #elif ( RSOLVER == ROE ) # include "CUFLU_Shared_RiemannSolver_Roe.cu" #elif ( RSOLVER == HLLE ) # include "CUFLU_Shared_RiemannSolver_HLLE.cu" #elif ( RSOLVER == HLLC ) # include "CUFLU_Shared_RiemannSolver_HLLC.cu" #elif ( RSOLVER == HLLD ) # include "CUFLU_Shared_RiemannSolver_HLLD.cu" #endif #else // #ifdef __CUDACC__ #if ( RSOLVER == EXACT ) void Hydro_RiemannSolver_Exact( const int XYZ, real Flux_Out[], const real L_In[], const real R_In[], const real MinDens, const real MinPres, const EoS_DE2P_t EoS_DensEint2Pres, const EoS_DP2C_t EoS_DensPres2CSqr, const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[], const real* const EoS_Table[EOS_NTABLE_MAX] ); #elif ( RSOLVER == ROE ) void Hydro_RiemannSolver_Roe( const int XYZ, real Flux_Out[], const real L_In[], const real R_In[], const real MinDens, const real MinPres, const EoS_DE2P_t EoS_DensEint2Pres, const EoS_DP2C_t EoS_DensPres2CSqr, const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[], const real* const EoS_Table[EOS_NTABLE_MAX] ); #elif ( RSOLVER == HLLE ) void Hydro_RiemannSolver_HLLE( const int XYZ, real Flux_Out[], const real L_In[], const real R_In[], const real MinDens, const real MinPres, const EoS_DE2P_t EoS_DensEint2Pres, const EoS_DP2C_t EoS_DensPres2CSqr, const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[], const real* const EoS_Table[EOS_NTABLE_MAX] ); #elif ( RSOLVER == HLLC ) void Hydro_RiemannSolver_HLLC( const int XYZ, real Flux_Out[], const real L_In[], const real R_In[], const real MinDens, const real MinPres, const EoS_DE2P_t EoS_DensEint2Pres, const EoS_DP2C_t EoS_DensPres2CSqr, const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[], const real* const EoS_Table[EOS_NTABLE_MAX] ); #elif ( RSOLVER == HLLD ) void Hydro_RiemannSolver_HLLD( const int XYZ, real Flux_Out[], const real L_In[], const real R_In[], const real MinDens, const real MinPres, const EoS_DE2P_t EoS_DensEint2Pres, const EoS_DP2C_t EoS_DensPres2CSqr, const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[], const real* const EoS_Table[EOS_NTABLE_MAX] ); #endif #endif // #ifdef __CUDACC__ ... else ... //------------------------------------------------------------------------------------------------------- // Function : Hydro_ComputeFlux // Description : Compute the face-centered fluxes by Riemann solver // // Note : 1. Currently support the exact, HLLC, HLLE, HLLD, and Roe solvers // 2. g_FC_Var[] has the size of N_FC_VAR^3 // --> (N_FC_VAR-1-2*NSkip_N)*(N_FC_VAR-2*NSkip_T)^2 fluxes will be computed // --> See below for the definitions of NSkip_N and NSkip_T // 3. g_FC_Flux[] has the size of N_FC_FLUX^3 // --> But (i,j,k) flux will be stored in the "(k*NFlux+j)*NFlux+i" element in g_FC_Flux[] // --> We have assumed that NFlux <= N_FC_FLUX // --> (i,j,k) in g_FC_Flux_x[] is defined on the +x surface of the cell (i+NSkip_N, j+NSkip_T, k+NSkip_T) in g_FC_Var[] // (i,j,k) in g_FC_Flux_y[] is defined on the +y surface of the cell (i+NSkip_T, j+NSkip_N, k+NSkip_T) in g_FC_Var[] // (i,j,k) in g_FC_Flux_z[] is defined on the +z surface of the cell (i+NSkip_T, j+NSkip_T, k+NSkip_N) in g_FC_Var[] // 4. This function is shared by MHM, MHM_RP, and CTU schemes // 5. 
For the performance consideration, this function will also be responsible for storing the // inter-patch fluxes // --> Option "DumpIntFlux" // 6. For the unsplitting scheme in gravity (i.e., UNSPLIT_GRAVITY), this function also corrects the half-step // velocity by gravity when CorrHalfVel==true // // Parameter : g_FC_Var : Array storing the input face-centered conserved variables // g_FC_Flux : Array to store the output face-centered fluxes // NFlux : Stride for accessing g_FC_Flux[] // NSkip_N : Number of cells to be skipped in the normal directions // --> "(N_FC_VAR-1-2*NSkip_N)" fluxes will be computed along the normal direction // NSkip_T : Number of cells to be skipped in the transverse directions // --> "(N_FC_VAR-2*NSkip_T)^2" fluxes will be computed along the transverse direction // CorrHalfVel : true --> correct the half-step velocity by gravity (for UNSPLIT_GRAVITY only) // g_Pot_USG : Array storing the input potential for CorrHalfVel (for UNSPLIT_GRAVITY only) // g_Corner : Array storing the corner coordinates of each patch group (for UNSPLIT_GRAVITY only) // dt : Time interval to advance the full-step solution (for UNSPLIT_GRAVITY only) // dh : Cell size (for UNSPLIT_GRAVITY only) // Time : Current physical time (for UNSPLIT_GRAVITY only) // UsePot : Add self-gravity and/or external potential (for UNSPLIT_GRAVITY only) // ExtAcc : Add external acceleration (for UNSPLIT_GRAVITY only) // ExtAcc_Func : Function pointer to the external acceleration routine (for UNSPLIT_GRAVITY only) // ExtAcc_AuxArray : Auxiliary array for external acceleration (for UNSPLIT_GRAVITY only) // MinDens/Pres : Density and pressure floors // DumpIntFlux : true --> store the inter-patch fluxes in g_IntFlux[] // g_IntFlux : Array for DumpIntFlux // EoS : EoS object //------------------------------------------------------------------------------------------------------- GPU_DEVICE void Hydro_ComputeFlux( const real g_FC_Var [][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ], real g_FC_Flux[][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ], const int NFlux, const int NSkip_N, const int NSkip_T, const bool CorrHalfVel, const real g_Pot_USG[], const double g_Corner[], const real dt, const real dh, const double Time, const bool UsePot, const OptExtAcc_t ExtAcc, const ExtAcc_t ExtAcc_Func, const double ExtAcc_AuxArray[], const real MinDens, const real MinPres, const bool DumpIntFlux, real g_IntFlux[][NCOMP_TOTAL][ SQR(PS2) ], const EoS_t *EoS ) { // check # ifdef GAMER_DEBUG # ifdef UNSPLIT_GRAVITY if ( CorrHalfVel ) { if ( UsePot && g_Pot_USG == NULL ) printf( "ERROR : g_Pot_USG == NULL !!\n" ); if ( ExtAcc && g_Corner == NULL ) printf( "ERROR : g_Corner == NULL !!\n" ); } # else if ( CorrHalfVel ) printf( "ERROR : CorrHalfVel is NOT supported when UNSPLIT_GRAVITY is off !!\n" ); # endif if ( NFlux > N_FC_FLUX ) printf( "ERROR : NFlux (%d) > N_FC_FLUX (%d) !!\n", NFlux, N_FC_FLUX ); # endif // #ifdef GAMER_DEBUG const int didx_fc[3] = { 1, N_FC_VAR, N_FC_VAR*N_FC_VAR }; real ConVar_L[NCOMP_TOTAL_PLUS_MAG], ConVar_R[NCOMP_TOTAL_PLUS_MAG], Flux_1Face[NCOMP_TOTAL_PLUS_MAG]; # ifdef UNSPLIT_GRAVITY const real GraConst = -(real)0.5*dt/dh; const int didx_usg[3] = { 1, USG_NXT_F, SQR(USG_NXT_F) }; const int fc_ghost = ( N_FC_VAR - PS2 )/2; // number of ghost zones on each side for g_FC_Var[] const int idx_fc2usg = USG_GHOST_SIZE_F - fc_ghost; // index difference between g_FC_Var[] and g_Pot_USG[] const double dh_half = 0.5*(double)dh; // always use double precision to calculate the cell position const real dt_half = (real)0.5*dt; 
double CrShift[3]; // CrShift[]: central coordinates of the 0th cell in g_FC_Var[] if ( CorrHalfVel && ExtAcc ) for (int d=0; d<3; d++) CrShift[d] = g_Corner[d] - double(dh*fc_ghost); // check # ifdef GAMER_DEBUG if ( CorrHalfVel ) { if ( idx_fc2usg + NSkip_N < 0 ) printf( "ERROR : idx_fc2usg (%d) + NSkip_N (%d) < 0 (USG_GHOST_SIZE_F %d, N_FC_VAR %d) !!\n", idx_fc2usg, NSkip_N, USG_GHOST_SIZE_F, N_FC_VAR ); // one additional cell is required to calculate the derivative along the transverse direction if ( idx_fc2usg + NSkip_T < 1 ) printf( "ERROR : idx_fc2usg (%d) + NSkip_T (%d) < 1 (USG_GHOST_SIZE_F %d, N_FC_VAR %d) !!\n", idx_fc2usg, NSkip_T, USG_GHOST_SIZE_F, N_FC_VAR ); } # endif # endif // #ifdef UNSPLIT_GRAVITY // loop over different spatial directions for (int d=0; d<3; d++) { const int faceL = 2*d; const int faceR = faceL+1; # ifdef UNSPLIT_GRAVITY int d1, d2, d3; if ( CorrHalfVel ) { d1 = d; d2 = (d+1)%3; d3 = (d+2)%3; } # endif int idx_fc_s[3], idx_flux_e[3]; switch ( d ) { case 0 : idx_fc_s [0] = NSkip_N; idx_fc_s [1] = NSkip_T; idx_fc_s [2] = NSkip_T; idx_flux_e[0] = N_FC_VAR-1-2*NSkip_N; idx_flux_e[1] = N_FC_VAR-2*NSkip_T; idx_flux_e[2] = N_FC_VAR-2*NSkip_T; break; case 1 : idx_fc_s [0] = NSkip_T; idx_fc_s [1] = NSkip_N; idx_fc_s [2] = NSkip_T; idx_flux_e[0] = N_FC_VAR-2*NSkip_T; idx_flux_e[1] = N_FC_VAR-1-2*NSkip_N; idx_flux_e[2] = N_FC_VAR-2*NSkip_T; break; case 2 : idx_fc_s [0] = NSkip_T; idx_fc_s [1] = NSkip_T; idx_fc_s [2] = NSkip_N; idx_flux_e[0] = N_FC_VAR-2*NSkip_T; idx_flux_e[1] = N_FC_VAR-2*NSkip_T; idx_flux_e[2] = N_FC_VAR-1-2*NSkip_N; break; } const int size_ij = idx_flux_e[0]*idx_flux_e[1]; CGPU_LOOP( idx, idx_flux_e[0]*idx_flux_e[1]*idx_flux_e[2] ) { const int i_flux = idx % idx_flux_e[0]; const int j_flux = idx % size_ij / idx_flux_e[0]; const int k_flux = idx / size_ij; const int idx_flux = IDX321( i_flux, j_flux, k_flux, NFlux, NFlux ); const int i_fc = i_flux + idx_fc_s[0]; const int j_fc = j_flux + idx_fc_s[1]; const int k_fc = k_flux + idx_fc_s[2]; const int idx_fc = IDX321( i_fc, j_fc, k_fc, N_FC_VAR, N_FC_VAR ); for (int v=0; v<NCOMP_TOTAL_PLUS_MAG; v++) { ConVar_L[v] = g_FC_Var[faceR][v][ idx_fc ]; ConVar_R[v] = g_FC_Var[faceL][v][ idx_fc+didx_fc[d] ]; } // 1. correct the half-step velocity by gravity # ifdef UNSPLIT_GRAVITY if ( CorrHalfVel ) { real Acc[3] = { (real)0.0, (real)0.0, (real)0.0 }; real Enki_L, Enki_R; // external acceleration if ( ExtAcc ) { double xyz[3]; // face-centered coordinates xyz[0] = CrShift[0] + (double)(i_fc*dh); xyz[1] = CrShift[1] + (double)(j_fc*dh); xyz[2] = CrShift[2] + (double)(k_fc*dh); xyz[d] += dh_half; ExtAcc_Func( Acc, xyz[0], xyz[1], xyz[2], Time, ExtAcc_AuxArray ); for (int t=0; t<3; t++) Acc[t] *= dt_half; } // self-gravity and external potential if ( UsePot ) { const int idx_usg = IDX321( i_fc+idx_fc2usg, j_fc+idx_fc2usg, k_fc+idx_fc2usg, USG_NXT_F, USG_NXT_F ); Acc[d1] += GraConst*( g_Pot_USG[ idx_usg+didx_usg[d1] ] - g_Pot_USG[ idx_usg ] ); Acc[d2] += (real)0.25*GraConst*( g_Pot_USG[ idx_usg+didx_usg[d2] ] + g_Pot_USG[ idx_usg+didx_usg[d2]+didx_usg[d1] ] -g_Pot_USG[ idx_usg-didx_usg[d2] ] - g_Pot_USG[ idx_usg-didx_usg[d2]+didx_usg[d1] ] ); Acc[d3] += (real)0.25*GraConst*( g_Pot_USG[ idx_usg+didx_usg[d3] ] + g_Pot_USG[ idx_usg+didx_usg[d3]+didx_usg[d1] ] -g_Pot_USG[ idx_usg-didx_usg[d3] ] - g_Pot_USG[ idx_usg-didx_usg[d3]+didx_usg[d1] ] ); } // store the "non"-kinetic energy (i.e. 
total energy - kinetic energy) Enki_L = ConVar_L[4] - (real)0.5*( SQR(ConVar_L[1]) + SQR(ConVar_L[2]) + SQR(ConVar_L[3]) )/ConVar_L[0]; Enki_R = ConVar_R[4] - (real)0.5*( SQR(ConVar_R[1]) + SQR(ConVar_R[2]) + SQR(ConVar_R[3]) )/ConVar_R[0]; // advance velocity by gravity for (int t=0; t<3; t++) { ConVar_L[t+1] += ConVar_L[0]*Acc[t]; ConVar_R[t+1] += ConVar_R[0]*Acc[t]; } // update total energy density with the non-kinetic energy fixed ConVar_L[4] = Enki_L + (real)0.5*( SQR(ConVar_L[1]) + SQR(ConVar_L[2]) + SQR(ConVar_L[3]) )/ConVar_L[0]; ConVar_R[4] = Enki_R + (real)0.5*( SQR(ConVar_R[1]) + SQR(ConVar_R[2]) + SQR(ConVar_R[3]) )/ConVar_R[0]; } // if ( CorrHalfVel ) # endif // #ifdef UNSPLIT_GRAVITY // 2. invoke Riemann solver # if ( RSOLVER == EXACT && !defined MHD ) Hydro_RiemannSolver_Exact( d, Flux_1Face, ConVar_L, ConVar_R, MinDens, MinPres, EoS->DensEint2Pres_FuncPtr, EoS->DensPres2CSqr_FuncPtr, EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table ); # elif ( RSOLVER == ROE ) Hydro_RiemannSolver_Roe ( d, Flux_1Face, ConVar_L, ConVar_R, MinDens, MinPres, EoS->DensEint2Pres_FuncPtr, EoS->DensPres2CSqr_FuncPtr, EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table ); # elif ( RSOLVER == HLLE ) Hydro_RiemannSolver_HLLE ( d, Flux_1Face, ConVar_L, ConVar_R, MinDens, MinPres, EoS->DensEint2Pres_FuncPtr, EoS->DensPres2CSqr_FuncPtr, EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table ); # elif ( RSOLVER == HLLC && !defined MHD ) Hydro_RiemannSolver_HLLC ( d, Flux_1Face, ConVar_L, ConVar_R, MinDens, MinPres, EoS->DensEint2Pres_FuncPtr, EoS->DensPres2CSqr_FuncPtr, EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table ); # elif ( RSOLVER == HLLD && defined MHD ) Hydro_RiemannSolver_HLLD ( d, Flux_1Face, ConVar_L, ConVar_R, MinDens, MinPres, EoS->DensEint2Pres_FuncPtr, EoS->DensPres2CSqr_FuncPtr, EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table ); # else # error : ERROR : unsupported Riemann solver (EXACT/ROE/HLLE/HLLC/HLLD) !! # endif // 3. store the fluxes of all cells in g_FC_Flux[] // --> including the magnetic components since they are required for CT for (int v=0; v<NCOMP_TOTAL_PLUS_MAG; v++) g_FC_Flux[d][v][idx_flux] = Flux_1Face[v]; // 4. 
store the inter-patch fluxes in g_IntFlux[] // --> no need to store the magnetic components since this array is only for the flux fix-up operation if ( DumpIntFlux ) { int int_face, int_idx; // we have assumed N_FC_VAR=PS2+2 for pure hydro // --> for MHD, one additional flux is evaluated along each transverse direction for computing the CT electric field // --> must exclude it when storing the inter-patch fluxes if ( d == 0 && ( i_flux == 0 || i_flux == PS1 || i_flux == PS2 ) ) { # ifdef MHD if ( j_flux > 0 && j_flux < PS2+1 && k_flux > 0 && k_flux < PS2+1 ) # endif { int_face = i_flux/PS1; # ifdef MHD int_idx = (k_flux-1)*PS2 + j_flux-1; # else int_idx = (k_flux )*PS2 + j_flux; # endif for (int v=0; v<NCOMP_TOTAL; v++) g_IntFlux[int_face][v][int_idx] = Flux_1Face[v]; } } else if ( d == 1 && ( j_flux == 0 || j_flux == PS1 || j_flux == PS2 ) ) { # ifdef MHD if ( i_flux > 0 && i_flux < PS2+1 && k_flux > 0 && k_flux < PS2+1 ) # endif { int_face = j_flux/PS1 + 3; # ifdef MHD int_idx = (k_flux-1)*PS2 + i_flux-1; # else int_idx = (k_flux )*PS2 + i_flux; # endif for (int v=0; v<NCOMP_TOTAL; v++) g_IntFlux[int_face][v][int_idx] = Flux_1Face[v]; } } else if ( d == 2 && ( k_flux == 0 || k_flux == PS1 || k_flux == PS2 ) ) { # ifdef MHD if ( i_flux > 0 && i_flux < PS2+1 && j_flux > 0 && j_flux < PS2+1 ) # endif { int_face = k_flux/PS1 + 6; # ifdef MHD int_idx = (j_flux-1)*PS2 + i_flux-1; # else int_idx = (j_flux )*PS2 + i_flux; # endif for (int v=0; v<NCOMP_TOTAL; v++) g_IntFlux[int_face][v][int_idx] = Flux_1Face[v]; } } } // if ( DumpIntFlux ) } // i,j,k } // for (int d=0; d<3; d++) # ifdef __CUDACC__ __syncthreads(); # endif } // FUNCTION : Hydro_ComputeFlux #endif // #if ( MODEL == HYDRO && (FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU) ) #endif // #ifndef __CUFLU_COMPUTEFLUX__
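// ---------------------------------------------------------------------------
// Hedged sketch (not part of GAMER): Hydro_ComputeFlux recovers (i,j,k) from
// the flattened CGPU_LOOP index with
//   i = idx % nx,  j = idx % (nx*ny) / nx,  k = idx / (nx*ny),
// which is the inverse of the IDX321-style linearization idx = (k*ny + j)*nx + i.
// This small host-side check, with hypothetical names, verifies the round trip.
#include <cassert>

static void check_flat_index_roundtrip(int nx, int ny, int nz)
{
   const int size_ij = nx*ny;
   for (int idx = 0; idx < nx*ny*nz; idx++)
   {
      const int i = idx % nx;
      const int j = idx % size_ij / nx;
      const int k = idx / size_ij;
      assert( (k*ny + j)*nx + i == idx );   // decomposition round-trips exactly
   }
}
// ---------------------------------------------------------------------------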
#define BLOCK_WIDTH 16 namespace dip { __constant__ float T[4][4]; __device__ bool interpolate(int volume_size, float volume_dimension, float voxel_dimension, float min_weight, const Voxel *volume, Vertex center, float vx, float vy, float vz, float &tsdf) { // Convert Vertex Position to Grid Position float gx, gy, gz; gx = (vx + (volume_dimension / 2.0f) - center.x) / voxel_dimension; gy = (vy + (volume_dimension / 2.0f) - center.y) / voxel_dimension; gz = (vz + (volume_dimension / 2.0f) - center.z) / voxel_dimension; float gx0, gy0, gz0; float gx1, gy1, gz1; gx0 = floor(gx); gx1 = gx0 + 1.0f; gy0 = floor(gy); gy1 = gy0 + 1.0f; gz0 = floor(gz); gz1 = gz0 + 1.0f; int i; float f000, f001, f010, f011, f100, f101, f110, f111; float w000, w001, w010, w011, w100, w101, w110, w111; i = (int)gx0 + (int)gy0 * volume_size + (int)gz0 * volume_size * volume_size; if (i > 0 && i < volume_size * volume_size * volume_size) { f000 = UNCOMPRESS_VALUE(volume[i]); w000 = UNCOMPRESS_WEIGHT(volume[i]); } else { f000 = 0.0f; w000 = 0.0f; } i = (int)gx0 + (int)gy0 * volume_size + (int)gz1 * volume_size * volume_size; if (i > 0 && i < volume_size * volume_size * volume_size) { f001 = UNCOMPRESS_VALUE(volume[i]); w001 = UNCOMPRESS_WEIGHT(volume[i]); } else { f001 = 0.0f; w001 = 0.0f; } i = (int)gx0 + (int)gy1 * volume_size + (int)gz0 * volume_size * volume_size; if (i > 0 && i < volume_size * volume_size * volume_size) { f010 = UNCOMPRESS_VALUE(volume[i]); w010 = UNCOMPRESS_WEIGHT(volume[i]); } else { f010 = 0.0f; w010 = 0.0f; } i = (int)gx0 + (int)gy1 * volume_size + (int)gz1 * volume_size * volume_size; if (i > 0 && i < volume_size * volume_size * volume_size) { f011 = UNCOMPRESS_VALUE(volume[i]); w011 = UNCOMPRESS_WEIGHT(volume[i]); } else { f011 = 0.0f; w011 = 0.0f; } i = (int)gx1 + (int)gy0 * volume_size + (int)gz0 * volume_size * volume_size; if (i > 0 && i < volume_size * volume_size * volume_size) { f100 = UNCOMPRESS_VALUE(volume[i]); w100 = UNCOMPRESS_WEIGHT(volume[i]); } else { f100 = 0.0f; w100 = 0.0f; } i = (int)gx1 + (int)gy0 * volume_size + (int)gz1 * volume_size * volume_size; if (i > 0 && i < volume_size * volume_size * volume_size) { f101 = UNCOMPRESS_VALUE(volume[i]); w101 = UNCOMPRESS_WEIGHT(volume[i]); } else { f101 = 0.0f; w101 = 0.0f; } i = (int)gx1 + (int)gy1 * volume_size + (int)gz0 * volume_size * volume_size; if (i > 0 && i < volume_size * volume_size * volume_size) { f110 = UNCOMPRESS_VALUE(volume[i]); w110 = UNCOMPRESS_WEIGHT(volume[i]); } else { f110 = 0.0f; w110 = 0.0f; } i = (int)gx1 + (int)gy1 * volume_size + (int)gz1 * volume_size * volume_size; if (i > 0 && i < volume_size * volume_size * volume_size) { f111 = UNCOMPRESS_VALUE(volume[i]); w111 = UNCOMPRESS_WEIGHT(volume[i]); } else { f111 = 0.0f; w111 = 0.0f; } if((w000 <= min_weight) || (w001 <= min_weight) || (w010 <= min_weight) || (w011 <= min_weight) || (w100 <= min_weight) || (w101 <= min_weight) || (w110 <= min_weight) || (w111 <= min_weight)) { return false; } float u, v, w; u = (gx - gx0); v = (gy - gy0); w = (gz - gz0); tsdf = (1 - u) * (1 - v) * (1 - w) * f000 + (1 - u) * (1 - v) * ( w) * f001 + (1 - u) * ( v) * (1 - w) * f010 + (1 - u) * ( v) * ( w) * f011 + ( u) * (1 - v) * (1 - w) * f100 + ( u) * (1 - v) * ( w) * f101 + ( u) * ( v) * (1 - w) * f110 + ( u) * ( v) * ( w) * f111; return true; } __device__ bool surface_normal(int volume_size, float volume_dimension, float voxel_dimension, float min_weight, const Voxel *volume, Vertex center, float vx, float vy, float vz, float &nx, float &ny, float &nz) { // Convert 
Vertex Position to Grid Position float gx, gy, gz; gx = (vx + (volume_dimension / 2.0f) - center.x) / voxel_dimension; gy = (vy + (volume_dimension / 2.0f) - center.y) / voxel_dimension; gz = (vz + (volume_dimension / 2.0f) - center.z) / voxel_dimension; float gx0, gy0, gz0; float gx1, gy1, gz1; gx0 = floor(gx); gx1 = gx0 + 1.0f; gy0 = floor(gy); gy1 = gy0 + 1.0f; gz0 = floor(gz); gz1 = gz0 + 1.0f; int i; float f000, f001, f010, f011, f100, f101, f110, f111; float w000, w001, w010, w011, w100, w101, w110, w111; i = (int)gx0 + (int)gy0 * volume_size + (int)gz0 * volume_size * volume_size; if (i > 0 && i < volume_size * volume_size * volume_size) { f000 = UNCOMPRESS_VALUE(volume[i]); w000 = UNCOMPRESS_WEIGHT(volume[i]); } else { f000 = 0.0f; w000 = 0.0f; } i = (int)gx0 + (int)gy0 * volume_size + (int)gz1 * volume_size * volume_size; if (i > 0 && i < volume_size * volume_size * volume_size) { f001 = UNCOMPRESS_VALUE(volume[i]); w001 = UNCOMPRESS_WEIGHT(volume[i]); } else { f001 = 0.0f; w001 = 0.0f; } i = (int)gx0 + (int)gy1 * volume_size + (int)gz0 * volume_size * volume_size; if (i > 0 && i < volume_size * volume_size * volume_size) { f010 = UNCOMPRESS_VALUE(volume[i]); w010 = UNCOMPRESS_WEIGHT(volume[i]); } else { f010 = 0.0f; w010 = 0.0f; } i = (int)gx0 + (int)gy1 * volume_size + (int)gz1 * volume_size * volume_size; if (i > 0 && i < volume_size * volume_size * volume_size) { f011 = UNCOMPRESS_VALUE(volume[i]); w011 = UNCOMPRESS_WEIGHT(volume[i]); } else { f011 = 0.0f; w011 = 0.0f; } i = (int)gx1 + (int)gy0 * volume_size + (int)gz0 * volume_size * volume_size; if (i > 0 && i < volume_size * volume_size * volume_size) { f100 = UNCOMPRESS_VALUE(volume[i]); w100 = UNCOMPRESS_WEIGHT(volume[i]); } else { f100 = 0.0f; w100 = 0.0f; } i = (int)gx1 + (int)gy0 * volume_size + (int)gz1 * volume_size * volume_size; if (i > 0 && i < volume_size * volume_size * volume_size) { f101 = UNCOMPRESS_VALUE(volume[i]); w101 = UNCOMPRESS_WEIGHT(volume[i]); } else { f101 = 0.0f; w101 = 0.0f; } i = (int)gx1 + (int)gy1 * volume_size + (int)gz0 * volume_size * volume_size; if (i > 0 && i < volume_size * volume_size * volume_size) { f110 = UNCOMPRESS_VALUE(volume[i]); w110 = UNCOMPRESS_WEIGHT(volume[i]); } else { f110 = 0.0f; w110 = 0.0f; } i = (int)gx1 + (int)gy1 * volume_size + (int)gz1 * volume_size * volume_size; if (i > 0 && i < volume_size * volume_size * volume_size) { f111 = UNCOMPRESS_VALUE(volume[i]); w111 = UNCOMPRESS_WEIGHT(volume[i]); } else { f111 = 0.0f; w111 = 0.0f; } if((w000 <= min_weight) || (w001 <= min_weight) || (w010 <= min_weight) || (w011 <= min_weight) || (w100 <= min_weight) || (w101 <= min_weight) || (w110 <= min_weight) || (w111 <= min_weight)) { return false; } float u, v, w; u = (gx - gx0); v = (gy - gy0); w = (gz - gz0); nx = (1 - v) * (1 - w) * f000 + (1 - v) * ( w) * f001 + ( v) * (1 - w) * f010 + ( v) * ( w) * f011 + -(1 - v) * (1 - w) * f100 + -(1 - v) * ( w) * f101 + -( v) * (1 - w) * f110 + -( v) * ( w) * f111; ny = (1 - u) * (1 - w) * f000 + (1 - u) * ( w) * f001 + -(1 - u) * (1 - w) * f010 + -(1 - u) * ( w) * f011 + ( u) * (1 - w) * f100 + ( u) * ( w) * f101 + -( u) * (1 - w) * f110 + -( u) * ( w) * f111; nz = (1 - u) * (1 - v) * f000 + -(1 - u) * (1 - v) * f001 + (1 - u) * ( v) * f010 + -(1 - u) * ( v) * f011 + ( u) * (1 - v) * f100 + -( u) * (1 - v) * f101 + ( u) * ( v) * f110 + -( u) * ( v) * f111; return true; } __global__ void RayCaster(float max_distance, float max_truncation, int volume_size, float volume_dimension, float voxel_dimension, float min_weight, int width, int 
height, float fx, float fy, float cx, float cy, Vertex center, const Voxel *volume, Vertices model_vertices, Normals model_normals, Color *normal_map) { // Get Block and Thread Id int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; // Calculate Row & Column int x = tx + bx * BLOCK_WIDTH; int y = ty + by * BLOCK_WIDTH; int i = x + y * width; if ((x < width) && (y < height)) { // Initialize Normal Map Color normal_color; normal_color.r = normal_color.g = normal_color.b = 0; // Initialize Model Vertex and Normal Vertex model_vertex; Vector model_normal; model_vertex.x = model_vertex.y = model_vertex.z = 0.0f; model_normal.x = model_normal.y = model_normal.z = 0.0f; // Ray Starting Position Vertex ray_start; ray_start.x = T[0][3]; ray_start.y = T[1][3]; ray_start.z = T[2][3]; // Ray Direction Vector ray_direction, ray_temp; ray_temp.x = (x - cx) / fx; ray_temp.y = (y - cy) / fy; ray_temp.z = 1.0f; ray_direction.x = T[0][0] * ray_temp.x + T[0][1] * ray_temp.y + T[0][2] * ray_temp.z; ray_direction.y = T[1][0] * ray_temp.x + T[1][1] * ray_temp.y + T[1][2] * ray_temp.z; ray_direction.z = T[2][0] * ray_temp.x + T[2][1] * ray_temp.y + T[2][2] * ray_temp.z; // Normalize Ray Direction float ray_inorm = rsqrt(ray_direction.x * ray_direction.x + ray_direction.y * ray_direction.y + ray_direction.z * ray_direction.z); ray_direction.x *= ray_inorm; ray_direction.y *= ray_inorm; ray_direction.z *= ray_inorm; // Ray Casting float time = 0.0f, time_last = 0.0f; float tsdf, tsdf_last = 0.0f; while (time < max_distance) { // Determine Vertex Position Vertex vertex_position; vertex_position.x = ray_start.x + ray_direction.x * time; vertex_position.y = ray_start.y + ray_direction.y * time; vertex_position.z = ray_start.z + ray_direction.z * time; // Determine Grid Position Vertex grid_position; grid_position.x = (vertex_position.x + (volume_dimension / 2.0f) - center.x) / voxel_dimension; grid_position.y = (vertex_position.y + (volume_dimension / 2.0f) - center.y) / voxel_dimension; grid_position.z = (vertex_position.z + (volume_dimension / 2.0f) - center.z) / voxel_dimension; if ((grid_position.x) < 1 || (grid_position.x >= volume_size - 1)) { time_last = time; time += max_truncation; continue; } if ((grid_position.y) < 1 || (grid_position.y >= volume_size - 1)) { time_last = time; time += max_truncation; continue; } if ((grid_position.z) < 1 || (grid_position.z >= volume_size - 1)) { time_last = time; time += max_truncation; continue; } if (!interpolate(volume_size, volume_dimension, voxel_dimension, min_weight, volume, center, vertex_position.x, vertex_position.y, vertex_position.z, tsdf)) { time_last = time; time += max_truncation; continue; } if (tsdf_last < 0.0f && tsdf >= 0.0f) break; if (tsdf_last > 0.0f && tsdf <= 0.0f) { // Determine Time float t = time_last -(((time - time_last) * tsdf_last) / (tsdf - tsdf_last)); // Determine Position model_vertex.x = ray_start.x + ray_direction.x * t; model_vertex.y = ray_start.y + ray_direction.y * t; model_vertex.z = ray_start.z + ray_direction.z * t; // Determine Normal if (surface_normal(volume_size, volume_dimension, voxel_dimension, min_weight, volume, center, model_vertex.x, model_vertex.y, model_vertex.z, model_normal.x, model_normal.y, model_normal.z)) { // Normalize float inorm = rsqrt(model_normal.x * model_normal.x + model_normal.y * model_normal.y + model_normal.z * model_normal.z); if (isfinite(inorm)) { model_normal.x *= inorm; model_normal.y *= inorm; model_normal.z *= inorm; normal_color.r = ((model_normal.x + 1.0f) 
/ 2.0f) * 255.0f; normal_color.g = ((model_normal.y + 1.0f) / 2.0f) * 255.0f; normal_color.b = ((model_normal.z + 1.0f) / 2.0f) * 255.0f; } } break; } tsdf_last = tsdf; time_last = time; if (ABS(tsdf) < 1.0f) time += voxel_dimension; else time += max_truncation; } model_vertices.x[i] = model_vertex.x; model_vertices.y[i] = model_vertex.y; model_vertices.z[i] = model_vertex.z; model_normals.x[i] = model_normal.x; model_normals.y[i] = model_normal.y; model_normals.z[i] = model_normal.z; if (normal_map != NULL) normal_map[i] = normal_color; } } void RayCastingKernel(float max_distance, float max_truncation, int volume_size, float volume_dimension, float voxel_dimension, float min_weight, int width, int height, float fx, float fy, float cx, float cy, Vertex center, float *transformation, const Voxel *volume, Vertices model_vertices, Normals model_normals, Color *normal_map) { // Copy Transforms to Constant Memory CUDA_ERROR_CHECK(cudaMemcpyToSymbol(T, transformation, sizeof(float) * 16)); // Launch Ray Casting Kernel int grid_width = (width + (BLOCK_WIDTH - 1)) / BLOCK_WIDTH; int grid_height = (height + (BLOCK_WIDTH - 1)) / BLOCK_WIDTH; dim3 grid_dim(grid_width, grid_height, 1); dim3 block_dim(BLOCK_WIDTH, BLOCK_WIDTH, 1); RayCaster<<<grid_dim, block_dim>>>(max_distance, max_truncation, volume_size, volume_dimension, voxel_dimension, min_weight, width, height, fx, fy, cx, cy, center, volume, model_vertices, model_normals, normal_map); CUDA_ERROR_CHECK(cudaDeviceSynchronize()); } } // namespace dip
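// ---------------------------------------------------------------------------
// Hedged sketch (not part of the dip namespace above): RayCaster locates the
// surface by linearly interpolating the ray parameter between the last sample
// in front of the surface (tsdf_last > 0) and the first sample behind it
// (tsdf <= 0). This helper, with a hypothetical name, isolates that formula.
__host__ __device__ inline float zero_crossing_time(float t_prev, float t_curr,
                                                    float tsdf_prev, float tsdf_curr)
{
  // Root of the line through (t_prev, tsdf_prev) and (t_curr, tsdf_curr).
  // The caller must guarantee the two TSDF samples have opposite signs.
  return t_prev - ((t_curr - t_prev) * tsdf_prev) / (tsdf_curr - tsdf_prev);
}
// Example: tsdf_prev = 0.5 at t = 1.0 and tsdf_curr = -0.5 at t = 2.0 give
// zero_crossing_time(1.0f, 2.0f, 0.5f, -0.5f) == 1.5f.
// ---------------------------------------------------------------------------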
#if !defined(CUDA_VERSION) #define __device__ __attribute__((device)) #define __global__ __attribute__((global)) #define __shared__ __attribute__((shared)) #define __constant__ __attribute__((constant)) typedef unsigned long long uint64_t; #endif // We have to keep all builtins that depend on particular target feature in the // same function, because the codegen will stop after the very first function // that encounters an error, so -verify will not be able to find errors in // subsequent functions. // CHECK-LABEL: nvvm_wmma __device__ void nvvm_wmma(int *src, int *dst, float *fsrc, float *fdst, int ldm) { // CHECK: call {{.*}} @llvm.nvvm.wmma.load.a.sync.row.m16n16k16.stride.f16 // expected-error@+1 {{'__hmma_m16n16k16_ld_a' needs target feature ptx60}} __hmma_m16n16k16_ld_a(dst, src, ldm, 0); // CHECK: call {{.*}} @llvm.nvvm.wmma.load.a.sync.col.m16n16k16.stride.f16 // expected-error@+1 {{'__hmma_m16n16k16_ld_a' needs target feature ptx60}} __hmma_m16n16k16_ld_a(dst, src+1, ldm, 1); // CHECK: call {{.*}} @llvm.nvvm.wmma.load.b.sync.row.m16n16k16.stride.f16 // expected-error@+1 {{'__hmma_m16n16k16_ld_b' needs target feature ptx60}} __hmma_m16n16k16_ld_b(dst, src, ldm, 0); // CHECK: call {{.*}} @llvm.nvvm.wmma.load.b.sync.col.m16n16k16.stride.f16 // expected-error@+1 {{'__hmma_m16n16k16_ld_b' needs target feature ptx60}} __hmma_m16n16k16_ld_b(dst, src+2, ldm, 1); // CHECK: call {{.*}} @llvm.nvvm.wmma.load.c.sync.row.m16n16k16.stride.f16 // expected-error@+1 {{'__hmma_m16n16k16_ld_c_f16' needs target feature ptx60}} __hmma_m16n16k16_ld_c_f16(dst, src, ldm, 0); // CHECK: call {{.*}} @llvm.nvvm.wmma.load.c.sync.col.m16n16k16.stride.f16 // expected-error@+1 {{'__hmma_m16n16k16_ld_c_f16' needs target feature ptx60}} __hmma_m16n16k16_ld_c_f16(dst, src, ldm, 1); // CHECK: call {{.*}} @llvm.nvvm.wmma.load.c.sync.row.m16n16k16.stride.f32 // expected-error@+1 {{'__hmma_m16n16k16_ld_c_f32' needs target feature ptx60}} __hmma_m16n16k16_ld_c_f32(fdst, fsrc, ldm, 0); // CHECK: call {{.*}} @llvm.nvvm.wmma.load.c.sync.col.m16n16k16.stride.f32 // expected-error@+1 {{'__hmma_m16n16k16_ld_c_f32' needs target feature ptx60}} __hmma_m16n16k16_ld_c_f32(fdst, fsrc, ldm, 1); // CHECK: call {{.*}} @llvm.nvvm.wmma.store.d.sync.row.m16n16k16.stride.f16 // expected-error@+1 {{'__hmma_m16n16k16_st_c_f16' needs target feature ptx60}} __hmma_m16n16k16_st_c_f16(dst, src, ldm, 0); // CHECK: call {{.*}} @llvm.nvvm.wmma.store.d.sync.col.m16n16k16.stride.f16 // expected-error@+1 {{'__hmma_m16n16k16_st_c_f16' needs target feature ptx60}} __hmma_m16n16k16_st_c_f16(dst, src, ldm, 1); // CHECK: call {{.*}} @llvm.nvvm.wmma.store.d.sync.row.m16n16k16.stride.f32 // expected-error@+1 {{'__hmma_m16n16k16_st_c_f32' needs target feature ptx60}} __hmma_m16n16k16_st_c_f32(fdst, fsrc, ldm, 0); // CHECK: call {{.*}} @llvm.nvvm.wmma.store.d.sync.col.m16n16k16.stride.f32 // expected-error@+1 {{'__hmma_m16n16k16_st_c_f32' needs target feature ptx60}} __hmma_m16n16k16_st_c_f32(fdst, fsrc, ldm, 1); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.row.m16n16k16.f16.f16 // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature ptx60}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 0, 0); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.row.m16n16k16.f16.f16.satfinite // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature ptx60}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 0, 1); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.col.m16n16k16.f16.f16 // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f16' 
needs target feature ptx60}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 1, 0); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.col.m16n16k16.f16.f16.satfinite // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature ptx60}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 1, 1); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.row.m16n16k16.f16.f16 // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature ptx60}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 2, 0); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.row.m16n16k16.f16.f16.satfinite // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature ptx60}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 2, 1); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.col.m16n16k16.f16.f16 // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature ptx60}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 3, 0); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.col.m16n16k16.f16.f16.satfinite // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f16' needs target feature ptx60}} __hmma_m16n16k16_mma_f16f16(dst, src, src, src, 3, 1); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.row.m16n16k16.f16.f32 // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature ptx60}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 0, 0); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.row.m16n16k16.f16.f32.satfinite // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature ptx60}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 0, 1); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.col.m16n16k16.f16.f32 // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature ptx60}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 1, 0); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.col.m16n16k16.f16.f32.satfinite // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature ptx60}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 1, 1); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.row.m16n16k16.f16.f32 // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature ptx60}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 2, 0); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.row.m16n16k16.f16.f32.satfinite // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature ptx60}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 2, 1); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.col.m16n16k16.f16.f32 // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature ptx60}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 3, 0); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.col.m16n16k16.f16.f32.satfinite // expected-error@+1 {{'__hmma_m16n16k16_mma_f16f32' needs target feature ptx60}} __hmma_m16n16k16_mma_f16f32(dst, src, src, fsrc, 3, 1); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.row.m16n16k16.f32.f16 // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature ptx60}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 0, 0); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.row.m16n16k16.f32.f16.satfinite // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature ptx60}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 0, 1); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.col.m16n16k16.f32.f16 // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature ptx60}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 1, 0); // CHECK: call 
{{.*}} @llvm.nvvm.wmma.mma.sync.row.col.m16n16k16.f32.f16.satfinite // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature ptx60}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 1, 1); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.row.m16n16k16.f32.f16 // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature ptx60}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 2, 0); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.row.m16n16k16.f32.f16.satfinite // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature ptx60}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 2, 1); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.col.m16n16k16.f32.f16 // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature ptx60}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 3, 0); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.col.m16n16k16.f32.f16.satfinite // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f16' needs target feature ptx60}} __hmma_m16n16k16_mma_f32f16(fdst, src, src, src, 3, 1); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.row.m16n16k16.f32.f32 // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature ptx60}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 0, 0); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.row.m16n16k16.f32.f32.satfinite // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature ptx60}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 0, 1); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.col.m16n16k16.f32.f32 // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature ptx60}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 1, 0); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.row.col.m16n16k16.f32.f32.satfinite // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature ptx60}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 1, 1); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.row.m16n16k16.f32.f32 // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature ptx60}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 2, 0); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.row.m16n16k16.f32.f32.satfinite // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature ptx60}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 2, 1); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.col.m16n16k16.f32.f32 // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature ptx60}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 3, 0); // CHECK: call {{.*}} @llvm.nvvm.wmma.mma.sync.col.col.m16n16k16.f32.f32.satfinite // expected-error@+1 {{'__hmma_m16n16k16_mma_f32f32' needs target feature ptx60}} __hmma_m16n16k16_mma_f32f32(fdst, src, src, fsrc, 3, 1); }
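// ---------------------------------------------------------------------------
// Hedged sketch (not part of the test above): the __hmma_m16n16k16_* builtins
// exercised by this test are the primitives underneath the public CUDA <mma.h>
// fragment API. A minimal 16x16x16 tile multiply-accumulate written against
// that API looks roughly like this; it assumes sm_70+ hardware and the ptx60
// feature the diagnostics refer to, and is only illustrative.
#include <cuda_fp16.h>
#include <mma.h>
using namespace nvcuda;

__global__ void wmma_tile_gemm(const half* a, const half* b, float* c,
                               int lda, int ldb, int ldc)
{
  wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::row_major> a_frag;
  wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::col_major> b_frag;
  wmma::fragment<wmma::accumulator, 16, 16, 16, float> c_frag;

  wmma::fill_fragment(c_frag, 0.0f);                   // zero the accumulator
  wmma::load_matrix_sync(a_frag, a, lda);              // cf. __hmma_m16n16k16_ld_a
  wmma::load_matrix_sync(b_frag, b, ldb);              // cf. __hmma_m16n16k16_ld_b
  wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);      // cf. __hmma_m16n16k16_mma_f32f32
  wmma::store_matrix_sync(c, c_frag, ldc, wmma::mem_row_major);  // cf. __hmma_..._st_c_f32
}
// ---------------------------------------------------------------------------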
#include <rmm/cuda_stream_view.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/copy.h> #include <thrust/sequence.h> #include <thrust/transform.h> #include <cub/device/device_segmented_radix_sort.cuh> namespace cudf { namespace lists { namespace detail { struct SegmentedSortColumn { template <typename KeyT, typename ValueT, typename OffsetIteratorT> void SortPairsAscending(KeyT const* keys_in, KeyT* keys_out, ValueT const* values_in, ValueT* values_out, int num_items, int num_segments, OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, rmm::cuda_stream_view stream) { rmm::device_buffer d_temp_storage; size_t temp_storage_bytes = 0; cub::DeviceSegmentedRadixSort::SortPairs(d_temp_storage.data(), temp_storage_bytes, keys_in, keys_out, values_in, values_out, num_items, num_segments, begin_offsets, end_offsets, 0, sizeof(KeyT) * 8, stream.value()); d_temp_storage = rmm::device_buffer{temp_storage_bytes, stream}; cub::DeviceSegmentedRadixSort::SortPairs(d_temp_storage.data(), temp_storage_bytes, keys_in, keys_out, values_in, values_out, num_items, num_segments, begin_offsets, end_offsets, 0, sizeof(KeyT) * 8, stream.value()); } template <typename KeyT, typename ValueT, typename OffsetIteratorT> void SortPairsDescending(KeyT const* keys_in, KeyT* keys_out, ValueT const* values_in, ValueT* values_out, int num_items, int num_segments, OffsetIteratorT begin_offsets, OffsetIteratorT end_offsets, rmm::cuda_stream_view stream) { rmm::device_buffer d_temp_storage; size_t temp_storage_bytes = 0; cub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage.data(), temp_storage_bytes, keys_in, keys_out, values_in, values_out, num_items, num_segments, begin_offsets, end_offsets, 0, sizeof(KeyT) * 8, stream.value()); d_temp_storage = rmm::device_buffer{temp_storage_bytes, stream}; cub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage.data(), temp_storage_bytes, keys_in, keys_out, values_in, values_out, num_items, num_segments, begin_offsets, end_offsets, 0, sizeof(KeyT) * 8, stream.value()); } template <typename T> std::enable_if_t<not is_numeric<T>(), std::unique_ptr<column>> operator()( column_view const& child, column_view const& segment_offsets, order column_order, null_order null_precedence, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto child_table = segmented_sort_by_key(table_view{{child}}, table_view{{child}}, segment_offsets, {column_order}, {null_precedence}, stream, mr); return std::move(child_table->release().front()); } template <typename T> std::enable_if_t<is_numeric<T>(), std::unique_ptr<column>> operator()( column_view const& child, column_view const& offsets, order column_order, null_order null_precedence, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // the average list size at which to prefer radixsort: constexpr cudf::size_type MIN_AVG_LIST_SIZE_FOR_RADIXSORT{100}; if ((child.size() / offsets.size()) < MIN_AVG_LIST_SIZE_FOR_RADIXSORT) { auto child_table = segmented_sort_by_key(table_view{{child}}, table_view{{child}}, offsets, {column_order}, {null_precedence}, stream, mr); return std::move(child_table->release().front()); } auto output = cudf::detail::allocate_like(child, child.size(), mask_allocation_policy::NEVER, stream, mr); mutable_column_view mutable_output_view = output->mutable_view(); auto keys = [&]() { if (child.nullable()) { rmm::device_uvector<T> keys(child.size(), stream); auto const null_replace_T = null_precedence == 
null_order::AFTER ? std::numeric_limits<T>::max() : std::numeric_limits<T>::min(); auto device_child = column_device_view::create(child, stream); auto keys_in = cudf::detail::make_null_replacement_iterator<T>(*device_child, null_replace_T); thrust::copy_n(rmm::exec_policy(stream), keys_in, child.size(), keys.begin()); return keys; } return rmm::device_uvector<T>{0, stream}; }(); std::unique_ptr<column> sorted_indices = cudf::make_numeric_column( data_type(type_to_id<size_type>()), child.size(), mask_state::UNALLOCATED, stream, mr); mutable_column_view mutable_indices_view = sorted_indices->mutable_view(); thrust::sequence(rmm::exec_policy(stream), mutable_indices_view.begin<size_type>(), mutable_indices_view.end<size_type>(), 0); if (column_order == order::ASCENDING) SortPairsAscending(child.nullable() ? keys.data() : child.begin<T>(), mutable_output_view.begin<T>(), mutable_indices_view.begin<size_type>(), mutable_indices_view.begin<size_type>(), child.size(), offsets.size() - 1, offsets.begin<size_type>(), offsets.begin<size_type>() + 1, stream); else SortPairsDescending(child.nullable() ? keys.data() : child.begin<T>(), mutable_output_view.begin<T>(), mutable_indices_view.begin<size_type>(), mutable_indices_view.begin<size_type>(), child.size(), offsets.size() - 1, offsets.begin<size_type>(), offsets.begin<size_type>() + 1, stream); std::vector<std::unique_ptr<column>> output_cols; output_cols.push_back(std::move(output)); // rearrange the null_mask. cudf::detail::gather_bitmask(cudf::table_view{{child}}, mutable_indices_view.begin<size_type>(), output_cols, cudf::detail::gather_bitmask_op::DONT_CHECK, stream, mr); return std::move(output_cols.front()); } }; std::unique_ptr<column> sort_lists(lists_column_view const& input, order column_order, null_order null_precedence, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (input.is_empty()) return empty_like(input.parent()); auto output_offset = make_numeric_column( input.offsets().type(), input.size() + 1, mask_state::UNALLOCATED, stream, mr); thrust::transform(rmm::exec_policy(stream), input.offsets_begin(), input.offsets_end(), output_offset->mutable_view().begin<size_type>(), [first = input.offsets_begin()] __device__(auto offset_index) { return offset_index - *first; }); // for numeric columns, calls Faster segmented radix sort path // for non-numeric columns, calls segmented_sort_by_key. 
auto output_child = type_dispatcher(input.child().type(), SegmentedSortColumn{}, input.get_sliced_child(stream), output_offset->view(), column_order, null_precedence, stream, mr); auto null_mask = cudf::detail::copy_bitmask(input.parent(), stream, mr); // Assemble list column & return return make_lists_column(input.size(), std::move(output_offset), std::move(output_child), input.null_count(), std::move(null_mask), stream, mr); } std::unique_ptr<column> stable_sort_lists(lists_column_view const& input, order column_order, null_order null_precedence, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (input.is_empty()) { return empty_like(input.parent()); } auto output_offset = make_numeric_column( input.offsets().type(), input.size() + 1, mask_state::UNALLOCATED, stream, mr); thrust::transform(rmm::exec_policy(stream), input.offsets_begin(), input.offsets_end(), output_offset->mutable_view().template begin<size_type>(), [first = input.offsets_begin()] __device__(auto offset_index) { return offset_index - *first; }); auto const child = input.get_sliced_child(stream); auto const sorted_child_table = stable_segmented_sort_by_key(table_view{{child}}, table_view{{child}}, output_offset->view(), {column_order}, {null_precedence}, stream, mr); return make_lists_column(input.size(), std::move(output_offset), std::move(sorted_child_table->release().front()), input.null_count(), cudf::detail::copy_bitmask(input.parent(), stream, mr), stream, mr); } } // namespace detail std::unique_ptr<column> sort_lists(lists_column_view const& input, order column_order, null_order null_precedence, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::sort_lists(input, column_order, null_precedence, rmm::cuda_stream_default, mr); } std::unique_ptr<column> stable_sort_lists(lists_column_view const& input, order column_order, null_order null_precedence, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::stable_sort_lists( input, column_order, null_precedence, rmm::cuda_stream_default, mr); } } // namespace lists } // namespace cudf
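// ---------------------------------------------------------------------------
// Hedged sketch (not part of cudf): the SegmentedSortColumn helpers above use
// CUB's two-phase calling convention -- a first call with a null temp-storage
// pointer only reports the required scratch size, and a second identical call
// performs the sort. A minimal standalone version with raw device pointers
// (hypothetical names, error checking omitted):
#include <cub/device/device_segmented_radix_sort.cuh>

void segmented_sort_sketch(const int* d_keys_in, int* d_keys_out,
                           const int* d_vals_in, int* d_vals_out,
                           int num_items, int num_segments,
                           const int* d_offsets,        // num_segments + 1 entries
                           cudaStream_t stream)
{
  void*  d_temp      = nullptr;
  size_t temp_bytes  = 0;
  // Pass 1: d_temp == nullptr, so CUB only computes temp_bytes.
  cub::DeviceSegmentedRadixSort::SortPairs(d_temp, temp_bytes,
                                           d_keys_in, d_keys_out,
                                           d_vals_in, d_vals_out,
                                           num_items, num_segments,
                                           d_offsets, d_offsets + 1,
                                           0, sizeof(int) * 8, stream);
  cudaMalloc(&d_temp, temp_bytes);
  // Pass 2: the actual segmented radix sort.
  cub::DeviceSegmentedRadixSort::SortPairs(d_temp, temp_bytes,
                                           d_keys_in, d_keys_out,
                                           d_vals_in, d_vals_out,
                                           num_items, num_segments,
                                           d_offsets, d_offsets + 1,
                                           0, sizeof(int) * 8, stream);
  cudaFree(d_temp);
}
// ---------------------------------------------------------------------------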
* Copyright (c) 2019 by Contributors * \file pseudo2DTranspose_op-inl.cuh * \brief pseudo 2D transpose * \author Dawid Tracz */ #ifndef MXNET_OPERATOR_TENSOR_PSEUDO2DTRANSPOSE_OP_INL_CUH_ #define MXNET_OPERATOR_TENSOR_PSEUDO2DTRANSPOSE_OP_INL_CUH_ #include <mxnet/tuple.h> #include <mxnet/tensor_blob.h> #include <mshadow/base.h> #include <algorithm> #include <utility> #include "../../common/cuda/utils.h" namespace mxnet { namespace op { namespace cuda { /*! * \brief The `transpose_pseudo2D` based on chosen vectorized types. It transposes an array of * shape (k, m, n) to (k, n, m) * \param out Pointer to output memory. * \param inp Pointer to input memory. * \param m First of tensor dimensions. * \param n Second of tensor dimensions. * \param nIterY The number of iterations in the y-dim of the thread to cover all rows. (1-->m) * \param nIterZ The number of iterations in the z-dim of the thread to cover all rows. (1-->k) * \tparam DType Data type * \tparam CType The type to load the data. * \tparam is_addto Whether to perform out += transpose(data) or out = transpose(data) */ template <typename DType, typename CType, bool is_addto> __global__ void transpose_pseudo2D(DType* out, DType* inp, const index_t m, const index_t n, const index_t nIterY, const index_t nIterZ) { // Calculate the TypeSizeRatio const index_t TSR = sizeof(CType) / sizeof(DType) > 0 ? sizeof(CType) / sizeof(DType) : 1; const index_t chunked_n = n/TSR; const index_t chunked_m = m/TSR; extern __shared__ char buf[]; DType* d_shm = reinterpret_cast<DType*>(buf); CType* c_shm = reinterpret_cast<CType*>(buf); CType* cInp = reinterpret_cast<CType*>(inp); CType* cOut = reinterpret_cast<CType*>(out); for (index_t iterZ = 0; iterZ < nIterZ; iterZ++) { const index_t blockIdx_z = gridDim.z*iterZ + blockIdx.z; for (index_t iterY = 0; iterY < nIterY; iterY++) { const index_t blockIdx_y = gridDim.y*iterY + blockIdx.y; index_t offset = blockIdx_z*m*chunked_n + blockIdx_y*blockDim.y*TSR*chunked_n + (index_t)blockIdx.x*blockDim.x; if ((blockIdx.x*blockDim.x + threadIdx.x)*TSR < n && (blockIdx_y*blockDim.y + threadIdx.y)*TSR < m) { // read from global memory to shared #pragma unroll for (index_t i = 0; i < TSR; i++) { index_t shmIdx = (TSR*threadIdx.y + i)*blockDim.x + threadIdx.x; c_shm[shmIdx] = cInp[offset + (TSR*threadIdx.y + i)*chunked_n + threadIdx.x]; } __syncthreads(); // read from shared to local registers CType tmp[TSR]; #pragma unroll for (index_t i = 0; i < TSR; i++) { DType* tmp_dptr = reinterpret_cast<DType*>(&tmp[i]); #pragma unroll for (int j = 0; j < TSR; j++) { index_t shmIdx = (TSR*threadIdx.y + j)*blockDim.x*TSR + TSR*threadIdx.x + i; tmp_dptr[j] = d_shm[shmIdx]; } } __syncthreads(); // write back to global output offset = blockIdx_z*m*chunked_n + blockIdx.x*blockDim.x*TSR*chunked_m + blockIdx_y*blockDim.y; #pragma unroll for (index_t i = 0; i < TSR; i++) { if (is_addto) { DType* tmp_dptr = reinterpret_cast<DType*>(&tmp[i]); #pragma unroll for (int j = 0; j < TSR; j++) { out[TSR * (offset + (TSR*threadIdx.x + i)*chunked_m + threadIdx.y) + j] += tmp_dptr[j]; } } else { cOut[offset + (TSR*threadIdx.x + i)*chunked_m + threadIdx.y] = tmp[i]; } } } } } } } // namespace cuda /*! * \brief Calls proper version of kernel `transpose_pseudo2D` * basing on chosen type sizes. * \param cTypeSize Size of type that should be use to copy. * \param grid Grid dimensions for the kernel. * \param block Block dimensions for the kernel. * \param stream Strem to run kernel. * \param out Pointer to output memory. 
* \param inp Pointer to input memory. * \param m First of tensor dimensions. * \param n Second of tensor dimensions. * \tparam DType Data type * \tparam is_addto Whether to trigger add the transpose result to the output tensor. */ template <typename DType, bool is_addto> inline void call_transpose_pseudo2D(index_t cTypeSize, dim3 grid, dim3 block, cudaStream_t stream, DType* d_outPtr, DType* d_inpPtr, const index_t m, const index_t n, const index_t nIterY, const index_t nIterZ) { const int nshared = 1024 * cTypeSize / sizeof(DType) * cTypeSize; switch (cTypeSize) { case (1): cuda::transpose_pseudo2D<DType, uint8_t, is_addto><<<grid, block, nshared, stream>>> (d_outPtr, d_inpPtr, m, n, nIterY, nIterZ); break; case (2): cuda::transpose_pseudo2D<DType, uint16_t, is_addto><<<grid, block, nshared, stream>>> (d_outPtr, d_inpPtr, m, n, nIterY, nIterZ); break; case (4): cuda::transpose_pseudo2D<DType, uint32_t, is_addto><<<grid, block, nshared, stream>>> (d_outPtr, d_inpPtr, m, n, nIterY, nIterZ); break; case (8): cuda::transpose_pseudo2D<DType, uint64_t, is_addto><<<grid, block, nshared, stream>>> (d_outPtr, d_inpPtr, m, n, nIterY, nIterZ); break; default: LOG(FATAL) << "Unsupported type combination. " << "Copy type size = " << cTypeSize; } auto cuErr = cudaGetLastError(); CHECK_EQ(cuErr, cudaSuccess) << "TransposePseudo2D kernel failure: " << cudaGetErrorString(cuErr) << ". " << "block: (" << block.x << "," << block.y << "," << block.z << ")" << " grid: (" << grid.x << "," << grid.y << "," << grid.z << ")"; } /*! * \brief Checks if function `transpose_pseudo2D` can be used * to perform transpose operation with given params. * \param params Parameters (axes) of the transpose. */ inline bool isPseudo2DTranspose(const TShape& params) { index_t n_swpDims = 1; int i=0; while (i < params.ndim() && i == params[i]) i++; // leading dimensions while (i+1 < params.ndim()) { if(params[i]+1 != params[i+1]) n_swpDims++; i++; } return n_swpDims == 2; } struct pseudo2DSizes { index_t leadDimS; index_t M; index_t N; }; /*! * \brief Calculates total size of last two dimension batches * (according to description of transpose_pseudo2D function). * \param shape Shape of tensor to transpose. * \param params Parameters (axes) of the transpose. 
*/ inline pseudo2DSizes getPackedTransposeDimensions(const TShape& shape, const TShape& params) { auto ndim = params.ndim(); pseudo2DSizes sizes; sizes.leadDimS = 1; int i=0; while (i < ndim && i == params[i]) { sizes.leadDimS *= shape[i]; i++; } sizes.N = shape[params[i++]]; while (i < ndim && params[i]-1 == params[i-1]) { sizes.N *= shape[params[i]]; i++; } sizes.M = shape[params[i++]]; while (i < ndim && params[i]-1 == params[i-1]) { sizes.M *= shape[params[i]]; i++; } CHECK_EQ(i, ndim) << "Too many dimensions to transpose"; return sizes; } inline int32_t getBestCopyTypeSize(index_t dTypeSize, index_t sizeM, index_t sizeN) { index_t cTypeSize = std::max((index_t)8, dTypeSize); while (cTypeSize > dTypeSize) { auto tsr = cTypeSize/dTypeSize; if (sizeM % tsr != 0 || sizeN % tsr != 0) cTypeSize /= 2; else break; } // if the cTypeSize is 8x dTypeSize then kernel would require 64kB shared memory if(cTypeSize == 8 && dTypeSize == 1) cTypeSize = 4; return cTypeSize; } inline std::pair<dim3, dim3> calculateKernelParams(pseudo2DSizes sizes, const index_t TSR) { index_t nThreadsPerBlock = 32*32/4; // value chosen empirically index_t thdsY = 1; index_t thdsX = 1; while(sizes.N/TSR > thdsX && thdsX < 32) { thdsX *= 2; } thdsY = nThreadsPerBlock/thdsX; thdsY = std::min(sizes.M/TSR, thdsY); index_t blocksY = (sizes.M/TSR-1)/thdsY + 1; index_t blocksX = (sizes.N/TSR-1)/thdsX + 1; dim3 grid(blocksX, blocksY, sizes.leadDimS); dim3 block(thdsX, thdsY); return {grid, block}; } /*! * \brief Transpose given tensor according to params. * Supports only transposes that satisfy: * Exists n and m such that: * params = (0, ..., n-1, n+m, ..., params.size, n, ..., n+m-1) * Example: (0, 2, 3, 1) or (0, 3, 1, 2), but not (0, 2, 1, 3). * \param outBlob Tensor blob to store result. * \param inpBlob Tensor blob with input data. * \param params Parameters (axes) of the transpose. * \param is_addto Whether to add the transpose result to the outBlob * \param s Pointer to GPU stream. */ template <typename DType, bool is_addto> void transpose_pseudo2D(const TBlob& outBlob, const TBlob& inpBlob, const TShape& params, mshadow::Stream<gpu>* s) { const TShape& shape = inpBlob.shape_; CHECK_EQ(shape.ndim(), params.ndim()); auto sizes = getPackedTransposeDimensions(shape, params); index_t cTypeSize = getBestCopyTypeSize(sizeof(DType), sizes.M, sizes.N); // Type Size Ratio const index_t TSR = cTypeSize/sizeof(DType); CHECK_EQ(cTypeSize, sizeof(DType)*TSR); auto pair = calculateKernelParams(sizes, TSR); dim3 grid = pair.first; dim3 block = pair.second; index_t nIterY = 1; if (grid.y > std::numeric_limits<uint16_t>::max()) { nIterY = (grid.y - 1)/(std::numeric_limits<uint16_t>::max() - 1) + 1; grid.y = (grid.y - 1)/nIterY + 1; } index_t nIterZ = 1; if (grid.z > std::numeric_limits<uint16_t>::max()) { nIterZ = (grid.z - 1)/(std::numeric_limits<uint16_t>::max() - 1) + 1; grid.z = (grid.z - 1)/nIterZ + 1; } cudaStream_t stream = mshadow::Stream<gpu>::GetStream(s); call_transpose_pseudo2D<DType, is_addto> (cTypeSize, grid, block, stream, outBlob.dptr<DType>(), inpBlob.dptr<DType>(), sizes.M, sizes.N, nIterY, nIterZ); } } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_TENSOR_PSEUDO2DTRANSPOSE_OP_INL_CUH_
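// Worked illustration (not part of the original header): how an axes permutation that
// passes isPseudo2DTranspose() is packed into (leadDimS, M, N).  This standalone helper
// mirrors the logic of getPackedTransposeDimensions() on plain std::vector so it can be
// read in isolation; the shape/axes values are a hypothetical example.
#include <cstdint>
#include <vector>
#include <cassert>

inline void packed_transpose_example() {
  std::vector<int64_t> shape{2, 3, 4, 5};
  std::vector<int64_t> axes {0, 2, 3, 1};   // output = input.transpose(0, 2, 3, 1)
  size_t i = 0;
  int64_t lead = 1, N = 1, M = 1;
  while (i < axes.size() && static_cast<int64_t>(i) == axes[i]) lead *= shape[i++];  // untouched leading axes
  N = shape[axes[i++]];
  while (i < axes.size() && axes[i] - 1 == axes[i - 1]) N *= shape[axes[i++]];       // first contiguous axis group
  M = shape[axes[i++]];
  while (i < axes.size() && axes[i] - 1 == axes[i - 1]) M *= shape[axes[i++]];       // second contiguous axis group
  // The op reduces to a batch of 2 independent transposes of a 3x20 matrix into a 20x3 matrix.
  assert(lead == 2 && M == 3 && N == 20);
}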
* $Created by: XIAO Tong (email: xiaotong@mail.neu.edu.cn) 2018-04-24 */ #include "CopyBlocksInGrid.h" #include "CopyBlocksInGrid.cuh" #include "../../XDevice.h" namespace nts { // namespace nts(NiuTrans.Tensor) #ifdef USE_CUDA /* copy data by index (device code) here we keep all the data of a grid within the shared memory, and then move it the indexed positions to the target place >> source - pointer to the source data array >> blockSize - size of a data block >> blockNum - number of the blocks (in a grid) >> gridNum - number of the grids. Note that a grid may have a number of blocks >> target - pointer to the target data array >> index - source block id for each target block */ template<class T> __global__ void KernelCopyBlocksInGrid(T * source, int blockSize, int blockNum, int gridNum, T * target, int * index) { __shared__ T data[SHARED_MEMORY_SIZE / sizeof(T) - 4 * MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ int indexData[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ int indexOffset[MAX_CUDA_THREAD_NUM_PER_BLOCK]; /* item index */ int i = blockDim.x * blockIdx.x + threadIdx.x; /* grid index */ int j = blockDim.y * blockIdx.y + threadIdx.y; if (j >= gridNum || i >= blockDim.x) return; if (i < blockNum) { indexData[i] = index[j * blockNum + i]; indexOffset[i] = i * blockDim.x; } __syncthreads(); int gridSize = blockSize * blockNum; T * s = source + j * gridSize; T * t = target + j * gridSize; for (int k = i, k2 = i; k < blockSize; k += blockDim.x) { /* load data into shared memroy */ for (int offset = 0, offset2 = 0; offset < gridSize; offset += blockSize, offset2 += blockDim.x) { data[offset2 + k2] = s[offset + k]; } __syncthreads(); /* distribute data to the target grid */ for (int p = 0, offset = 0; p < blockNum; p++, offset += blockSize) { int blockIndex = indexData[p]; if (blockIndex >= 0 && blockIndex < blockNum) { t[offset + k] = data[indexOffset[blockIndex] + k2]; } } __syncthreads(); } } /* copy data by index (device code) here we keep all the data of a grid within the shared memory, and then move it the indexed positions to the target place >> source - pointer to the source data array >> blockSize - size of a data block >> blockNum - number of the blocks (in a grid) >> gridNum - number of the grids. Note that a grid may have a number of blocks >> target - pointer to the target data array >> index - source block id for each target block */ template<class T, int goodBlockNum, int stepScale> __global__ void KernelCopyBlocksInGridFast(T * source, int blockSize, int blockNum, int gridNum, T * target, int * index) { __shared__ T data[SHARED_MEMORY_SIZE / sizeof(T) - 2 * MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ int indexData[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ int indexOffset[MAX_CUDA_THREAD_NUM_PER_BLOCK]; /* item index */ int i = blockDim.x * blockIdx.x + threadIdx.x; /* grid index */ int j = blockDim.y * blockIdx.y + threadIdx.y; if (j >= gridNum || i >= blockDim.x) return; int step = stepScale == 1 ? 
blockDim.x : blockDim.x * stepScale; if (i < blockNum) { indexData[i] = index[j * blockNum + i]; indexOffset[i] = i * step; } __syncthreads(); int gridSize = blockSize * blockNum; T * s = source + j * gridSize; T * t = target + j * gridSize; for (int k = i, k2 = i; k < blockSize; k += step) { int bidx; int offset = k; int offset2 = k2; int stepInStep = 0; /* load data into shared memroy */ for (int i = 0; i < stepScale && offset < blockSize; i++) { if (goodBlockNum >= 1) { data[offset2] = s[offset]; offset += blockSize; offset2 += step; } if (goodBlockNum >= 2) { data[offset2] = s[offset]; offset += blockSize; offset2 += step; } if (goodBlockNum >= 3) { data[offset2] = s[offset]; offset += blockSize; offset2 += step; } if (goodBlockNum >= 4) { data[offset2] = s[offset]; offset += blockSize; offset2 += step; } if (goodBlockNum >= 5) { data[offset2] = s[offset]; offset += blockSize; offset2 += step; } if (goodBlockNum >= 6) { data[offset2] = s[offset]; offset += blockSize; offset2 += step; } if (goodBlockNum >= 7) { data[offset2] = s[offset]; offset += blockSize; offset2 += step; } if (goodBlockNum >= 8) { data[offset2] = s[offset]; offset += blockSize; offset2 += step; } if (goodBlockNum >= 9) { data[offset2] = s[offset]; offset += blockSize; offset2 += step; } if (goodBlockNum >= 10) { data[offset2] = s[offset]; offset += blockSize; offset2 += step; } if (goodBlockNum >= 11) { data[offset2] = s[offset]; offset += blockSize; offset2 += step; } if (goodBlockNum >= 12) { data[offset2] = s[offset]; offset += blockSize; offset2 += step; } if (goodBlockNum >= 13) { for (; offset < gridSize; offset += blockSize, offset2 += step) { data[offset2] = s[offset]; } } if (stepScale > 1) { stepInStep += blockDim.x; offset = k + stepInStep; offset2 = k2 + stepInStep; } } __syncthreads(); offset = k; offset2 = k2; stepInStep = 0; /* distribute data to the target grid */ for (int i = 0; i < stepScale && offset < blockSize; i++) { if (goodBlockNum >= 1) { bidx = indexData[0]; if (bidx >= 0 && bidx < blockNum) { t[offset] = data[indexOffset[bidx] + offset2]; } offset += blockSize; } if (goodBlockNum >= 2) { bidx = indexData[1]; if (bidx >= 0 && bidx < blockNum) { t[offset] = data[indexOffset[bidx] + offset2]; } offset += blockSize; } if (goodBlockNum >= 3) { bidx = indexData[2]; if (bidx >= 0 && bidx < blockNum) { t[offset] = data[indexOffset[bidx] + offset2]; } offset += blockSize; } if (goodBlockNum >= 4) { bidx = indexData[3]; if (bidx >= 0 && bidx < blockNum) { t[offset] = data[indexOffset[bidx] + offset2]; } offset += blockSize; } if (goodBlockNum >= 5) { bidx = indexData[4]; if (bidx >= 0 && bidx < blockNum) { t[offset] = data[indexOffset[bidx] + offset2]; } offset += blockSize; } if (goodBlockNum >= 6) { bidx = indexData[5]; if (bidx >= 0 && bidx < blockNum) { t[offset] = data[indexOffset[bidx] + offset2]; } offset += blockSize; } if (goodBlockNum >= 7) { bidx = indexData[6]; if (bidx >= 0 && bidx < blockNum) { t[offset] = data[indexOffset[bidx] + offset2]; } offset += blockSize; } if (goodBlockNum >= 8) { bidx = indexData[7]; if (bidx >= 0 && bidx < blockNum) { t[offset] = data[indexOffset[bidx] + offset2]; } offset += blockSize; } if (goodBlockNum >= 9) { bidx = indexData[8]; if (bidx >= 0 && bidx < blockNum) { t[offset] = data[indexOffset[bidx] + offset2]; } offset += blockSize; } if (goodBlockNum >= 10) { bidx = indexData[9]; if (bidx >= 0 && bidx < blockNum) { t[offset] = data[indexOffset[bidx] + offset2]; } offset += blockSize; } if (goodBlockNum >= 11) { bidx = indexData[10]; if (bidx >= 0 && 
bidx < blockNum) { t[offset] = data[indexOffset[bidx] + offset2]; } offset += blockSize; } if (goodBlockNum >= 12) { bidx = indexData[11]; if (bidx >= 0 && bidx < blockNum) { t[offset] = data[indexOffset[bidx] + offset2]; } offset += blockSize; } if (goodBlockNum >= 13) { for (int p = 12; p < blockNum; p++, offset += blockSize) { bidx = indexData[p]; if (bidx >= 0 && bidx < blockNum) { t[offset] = data[indexOffset[bidx] + offset2]; } } } if (stepScale > 1) { stepInStep += blockDim.x; offset = k + stepInStep; offset2 = k2 + stepInStep; } } __syncthreads(); } } /* copy data by index (host code) >> source - pointer to the source data array >> blockSize - size of a data block >> blockNum - number of the blocks (in a grid) >> gridNum - number of the grids. Note that a grid may have a number of blocks >> target - pointer to the target data array >> index - source block id for each target block (on the device) >> itemSize - size of each data item >> myMem - the memory pool */ void _CudaCopyBlocksInGrid(void * source, int blockSize, int blockNum, int gridNum, void * target, int * index, int itemSize, XMem * myMem) { CheckNTErrors((myMem != NULL && myMem->devID >= 0), "This code must be run on GPUs!"); CheckNTErrors((itemSize == sizeof(int)), "TODO!"); int cudaGrids[3]; int cudaBlocks[3]; int threadNum = MIN(MAX(blockSize, blockNum), MAX_CUDA_THREAD_NUM_PER_BLOCK); int devIDBackup; ProtectCudaDev(myMem->devID, devIDBackup); GDevs.GetCudaThread2D(myMem->devID, threadNum, gridNum * blockNum, INT_MAX, cudaGrids, cudaBlocks); cudaBlocks[1] = 1; cudaGrids[0] = 1; cudaGrids[1] = gridNum; CheckNTErrors(((SHARED_MEMORY_SIZE / itemSize - 2 * MAX_CUDA_THREAD_NUM_PER_BLOCK) > cudaBlocks[0] * blockNum), "No enough shared memory!"); if (blockNum == 4) { if ((SHARED_MEMORY_SIZE / itemSize - 2 * MAX_CUDA_THREAD_NUM_PER_BLOCK) >= 2 * cudaBlocks[0] * blockNum) KernelCopyBlocksInGridFast<int, 4, 2> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > ((int*)source, blockSize, blockNum, gridNum, (int*)target, index); else KernelCopyBlocksInGridFast<int, 4, 1> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > ((int*)source, blockSize, blockNum, gridNum, (int*)target, index); } else if (blockNum == 6) { if ((SHARED_MEMORY_SIZE / itemSize - 2 * MAX_CUDA_THREAD_NUM_PER_BLOCK) >= 2 * cudaBlocks[0] * blockNum) KernelCopyBlocksInGridFast<int, 6, 2> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > ((int*)source, blockSize, blockNum, gridNum, (int*)target, index); else KernelCopyBlocksInGridFast<int, 6, 1> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > ((int*)source, blockSize, blockNum, gridNum, (int*)target, index); } else if (blockNum == 8) { if ((SHARED_MEMORY_SIZE / itemSize - 2 * MAX_CUDA_THREAD_NUM_PER_BLOCK) >= 2 * cudaBlocks[0] * blockNum) KernelCopyBlocksInGridFast<int, 8, 2> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > ((int*)source, blockSize, blockNum, gridNum, (int*)target, index); else KernelCopyBlocksInGridFast<int, 8, 1> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > ((int*)source, blockSize, blockNum, gridNum, (int*)target, index); } else if (blockNum == 12) { if ((SHARED_MEMORY_SIZE / itemSize - 2 * MAX_CUDA_THREAD_NUM_PER_BLOCK) >= 2 * cudaBlocks[0] * blockNum) KernelCopyBlocksInGridFast<int, 12, 2> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > ((int*)source, blockSize, blockNum, gridNum, 
(int*)target, index); else KernelCopyBlocksInGridFast<int, 12, 1> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > ((int*)source, blockSize, blockNum, gridNum, (int*)target, index); } else { KernelCopyBlocksInGrid<int> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > ((int*)source, blockSize, blockNum, gridNum, (int*)target, index); } BacktoCudaDev(myMem->devID, devIDBackup); } #endif // USE_CUDA } // namespace nts(NiuTrans.Tensor)
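/*
Host-side reference sketch (not from the original file) of what _CudaCopyBlocksInGrid
computes: within every grid, target block p becomes a copy of source block
index[grid * blockNum + p]; a negative or out-of-range id leaves that target block
untouched.  The element type is int, matching the itemSize == sizeof(int) check above.
Useful as a correctness oracle when testing the kernels in this file.
*/
#include <cstring>

void CopyBlocksInGridReference(const int * source, int blockSize, int blockNum, int gridNum,
                               int * target, const int * index)
{
    for (int g = 0; g < gridNum; g++) {
        const int * s = source + g * blockSize * blockNum;
        int * t = target + g * blockSize * blockNum;
        for (int p = 0; p < blockNum; p++) {
            int bidx = index[g * blockNum + p];
            if (bidx >= 0 && bidx < blockNum)
                memcpy(t + p * blockSize, s + bidx * blockSize, blockSize * sizeof(int));
        }
    }
}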
namespace { template <typename scalar_t> __global__ void wb_cuda_scale_calc_kernel( const scalar_t* __restrict__ input, scalar_t* __restrict__ scale_output, scalar_t* __restrict__ dev_tmp, int* __restrict__ dev_last_block_counter, const int64_t total_elements_count) { const uint16_t tidx = threadIdx.x; const uint32_t bidx = blockIdx.x; const uint64_t gtidx = bidx * CUDA_MAX_NUM_THREADS_PER_BLOCK + tidx; const uint64_t grid_size = CUDA_MAX_NUM_THREADS_PER_BLOCK * gridDim.x; scalar_t sum = 0; for (int i = gtidx; i < total_elements_count; i += grid_size) { sum += abs(*(input + i)); } sum /= total_elements_count; __shared__ scalar_t sh_mem[CUDA_MAX_NUM_THREADS_PER_BLOCK]; reduce_with_shared_memory<scalar_t>(sh_mem, sum, tidx, bidx, dev_tmp, dev_last_block_counter, scale_output, gridDim.x); } template <typename scalar_t> __global__ void wb_cuda_binarize_kernel( scalar_t* __restrict__ output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ scale, const int64_t scale_count, const int64_t elements_per_scale, const int64_t size) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { int64_t scale_idx = static_cast<int64_t>(idx / elements_per_scale) % scale_count; scalar_t scale_element = *(scale + scale_idx); *(output + idx) = (*(input + idx) > 0) ? scale_element : -scale_element; } } template <typename scalar_t> __global__ void ab_cuda_forward_kernel( scalar_t* __restrict__ output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ scale, const scalar_t* __restrict__ thresholds, const int64_t threshold_count, const int64_t contiguous_elements_per_threshold, const int64_t size) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { int64_t threshold_idx = static_cast<int64_t>(idx / contiguous_elements_per_threshold) % threshold_count; scalar_t threshold_element = (*(thresholds + threshold_idx)) * (*scale); *(output + idx) = (*(input + idx) > threshold_element) ? (*scale) : static_cast<scalar_t>(0.0); } } template <typename scalar_t> __global__ void ab_cuda_grad_input_kernel( scalar_t* __restrict__ grad_input, const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ scale, const int64_t size) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { const scalar_t input_element = *(input + idx); *(grad_input + idx) = (input_element > 0 && input_element < *scale) ? *(grad_output + idx) : static_cast<scalar_t>(0.0); } } template <typename scalar_t> __global__ void ab_cuda_grad_scale_kernel( scalar_t* __restrict__ grad_scale, const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ scale, scalar_t* __restrict__ dev_tmp, int* __restrict__ dev_last_block_counter, const int64_t total_elements_count) { const uint16_t tidx = threadIdx.x; const uint32_t bidx = blockIdx.x; const uint32_t gtidx = bidx * CUDA_MAX_NUM_THREADS_PER_BLOCK + tidx; const uint64_t grid_size = CUDA_MAX_NUM_THREADS_PER_BLOCK * gridDim.x; scalar_t sum = 0; for (int i = gtidx; i < total_elements_count; i += grid_size) { scalar_t err_element = (*(output + i) - *(input + i)) / *scale; scalar_t grad_element = *(grad_output + i); sum += (*(input + i) < *scale) ? 
err_element * grad_element : grad_element; } __shared__ scalar_t sh_mem[CUDA_MAX_NUM_THREADS_PER_BLOCK]; reduce_with_shared_memory<scalar_t>(sh_mem, sum, tidx, bidx, dev_tmp, dev_last_block_counter, grad_scale, gridDim.x); } template <typename scalar_t> __global__ void ab_cuda_grad_thresholds_kernel( scalar_t* __restrict__ grad_thresholds, const scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ input, const scalar_t* __restrict__ scale, scalar_t* __restrict__ dev_tmp, int* __restrict__ dev_last_block_counter, int64_t total_elements_per_threshold, int64_t contiguous_elements_per_threshold, int64_t threshold_count, int64_t channel_offset) { const uint16_t tidx = threadIdx.x; const uint32_t bidx = blockIdx.x; const uint32_t gtidx = bidx * CUDA_MAX_NUM_THREADS_PER_BLOCK + tidx; const uint64_t grid_size = CUDA_MAX_NUM_THREADS_PER_BLOCK * gridDim.x; scalar_t sum = 0; for (int i = gtidx; i < total_elements_per_threshold; i += grid_size) { // i is the global thread index - need to calculate the input array index // that belongs to a specific scale index from i. Will do this by treating i // as the index in a non-existing array where input values belonging to a single // scale have a contiguous block layout, but will recalculate actual index into the // input/output array based on the fact that the values belonging to a single scale // in reality have interleaved block layout, with a spacing between the blocks // equal to channel_offset int actual_idx = (i / contiguous_elements_per_threshold) * channel_offset + (i % contiguous_elements_per_threshold); scalar_t input_element = *(input + actual_idx); if (input_element < *scale && input_element > 0) { sum += -*(grad_output + actual_idx); } } __shared__ scalar_t sh_mem[CUDA_MAX_NUM_THREADS_PER_BLOCK]; reduce_with_shared_memory<scalar_t>(sh_mem, sum, tidx, bidx, dev_tmp, dev_last_block_counter, grad_thresholds, gridDim.x); } } at::Tensor wb_cuda_forward( at::Tensor input, bool per_channel) { at::DeviceGuard guard(input.device()); const auto quantized_elements_count = input.numel(); int64_t elements_per_scale = 0; int64_t scale_count = per_channel ? 
input.size(0) : 1; int64_t input_elements_count = input.numel(); auto scale = at::zeros({scale_count}, input.options()); elements_per_scale = input_elements_count / input.size(0); auto grid_size = std::min(GET_BLOCKS(elements_per_scale), CUDA_BLOCKS_PER_GRID_FOR_UNIFORM_ELTWISE); auto dev_tmp = at::empty({grid_size}, input.options()); auto dev_last_block_counter = at::zeros({1}, at::device(input.options().device()).dtype(at::kInt)); auto output = at::empty_like(input); for (int ch_idx = 0; ch_idx < scale_count; ch_idx++) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "wb_cuda_forward_scale", ([&] { wb_cuda_scale_calc_kernel<scalar_t><<<grid_size, CUDA_MAX_NUM_THREADS_PER_BLOCK, 0, at::cuda::getCurrentCUDAStream()>>>( input.data<scalar_t>() + ch_idx * elements_per_scale, scale.data<scalar_t>() + ch_idx, dev_tmp.data<scalar_t>(), dev_last_block_counter.data<int>(), elements_per_scale); })); dev_tmp.fill_(0.0); dev_last_block_counter.fill_(0); } AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "wb_cuda_forward_binarize", ([&] { wb_cuda_binarize_kernel<scalar_t><<<GET_BLOCKS(input_elements_count), CUDA_MAX_NUM_THREADS_PER_BLOCK, 0, at::cuda::getCurrentCUDAStream()>>>( output.data<scalar_t>(), input.data<scalar_t>(), scale.data<scalar_t>(), scale_count, elements_per_scale, input_elements_count ); })); return output; } at::Tensor ab_cuda_forward( at::Tensor input, at::Tensor scale, at::Tensor thresholds) { at::DeviceGuard guard(input.device()); const auto quantized_elements_count = input.numel(); int64_t input_elements_count = input.numel(); int64_t threshold_count = thresholds.numel(); TORCH_CHECK(input.size(1) == threshold_count, "Threshold count is not equal to activations channel count"); int64_t contiguous_elements_per_threshold = input_elements_count / input.size(0) / input.size(1); auto output = at::empty_like(input); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ab_cuda_forward", ([&] { ab_cuda_forward_kernel<scalar_t><<<GET_BLOCKS(input_elements_count), CUDA_MAX_NUM_THREADS_PER_BLOCK, 0, at::cuda::getCurrentCUDAStream()>>>( output.data<scalar_t>(), input.data<scalar_t>(), scale.data<scalar_t>(), thresholds.data<scalar_t>(), threshold_count, contiguous_elements_per_threshold, input_elements_count ); })); return output; } std::vector<at::Tensor> ab_cuda_backward( at::Tensor grad_output, at::Tensor input, at::Tensor scale, at::Tensor output) { at::DeviceGuard guard(input.device()); int64_t input_elements_count = input.numel(); int64_t threshold_count = input.size(1); int64_t channel_offset = input.numel() / input.size(0); std::vector<int64_t> threshold_shape(input.dim()); for (int64_t dim_idx = 0; dim_idx < input.dim(); dim_idx++) { if (dim_idx != 1) { threshold_shape[dim_idx] = 1; } else { threshold_shape[dim_idx] = input.size(dim_idx); } } auto grad_input = at::empty_like(input); auto grad_scale = at::empty_like(scale); auto grad_thresholds = at::empty(threshold_shape, input.options()); int64_t total_elements_per_threshold = input.numel() / threshold_count; int64_t contiguous_elements_per_threshold = input_elements_count / input.size(0) / input.size(1); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ab_cuda_backward", ([&] { ab_cuda_grad_input_kernel<scalar_t><<<GET_BLOCKS(input_elements_count), CUDA_MAX_NUM_THREADS_PER_BLOCK, 0, at::cuda::getCurrentCUDAStream()>>>( grad_input.data<scalar_t>(), grad_output.data<scalar_t>(), input.data<scalar_t>(), scale.data<scalar_t>(), input_elements_count ); })); auto grid_size = std::min(GET_BLOCKS(input_elements_count), 
CUDA_BLOCKS_PER_GRID_FOR_UNIFORM_ELTWISE); auto dev_tmp = at::empty({grid_size}, grad_output.options()); auto dev_last_block_counter = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt)); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ab_cuda_backward", ([&] { ab_cuda_grad_scale_kernel<scalar_t><<<grid_size, CUDA_MAX_NUM_THREADS_PER_BLOCK, 0, at::cuda::getCurrentCUDAStream()>>>( grad_scale.data<scalar_t>(), grad_output.data<scalar_t>(), output.data<scalar_t>(), input.data<scalar_t>(), scale.data<scalar_t>(), dev_tmp.data<scalar_t>(), dev_last_block_counter.data<int>(), input_elements_count); })); grid_size = std::min(GET_BLOCKS(total_elements_per_threshold), CUDA_BLOCKS_PER_GRID_FOR_UNIFORM_ELTWISE); dev_tmp = at::empty({grid_size}, grad_output.options()); dev_last_block_counter = at::zeros({1}, at::device(grad_output.options().device()).dtype(at::kInt)); // Same concept as for per activation channel quantization for (int64_t ch_idx = 0; ch_idx < threshold_count; ch_idx++) { auto init_element_offset = contiguous_elements_per_threshold * ch_idx; AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "ab_cuda_backward", ([&] { ab_cuda_grad_thresholds_kernel<scalar_t><<<grid_size, CUDA_MAX_NUM_THREADS_PER_BLOCK, 0, at::cuda::getCurrentCUDAStream()>>>( grad_thresholds.data<scalar_t>() + ch_idx, grad_output.data<scalar_t>() + init_element_offset, input.data<scalar_t>() + init_element_offset, scale.data<scalar_t>(), dev_tmp.data<scalar_t>(), dev_last_block_counter.data<int>(), total_elements_per_threshold, contiguous_elements_per_threshold, threshold_count, channel_offset); })); dev_tmp.fill_(0.0); dev_last_block_counter.fill_(0); } return {grad_input, grad_scale, grad_thresholds}; }
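// Index-mapping illustration (not part of the original sources): how
// ab_cuda_grad_thresholds_kernel visits all elements of one activation channel.  For a
// hypothetical NCHW input of shape (2, 3, 2, 2) the per-threshold constants are
//   contiguous_elements_per_threshold = H*W   = 4
//   channel_offset                    = C*H*W = 12
//   total_elements_per_threshold      = N*H*W = 8
// and, with the per-channel base offset applied on the host side, thread index i maps to
// the global flat indices checked below.
#include <cstdint>
#include <cassert>

inline void per_channel_index_example() {
  const int64_t contiguous = 4, channel_offset = 12, total_per_threshold = 8;
  const int64_t ch_idx = 1;                                    // second channel
  const int64_t base = contiguous * ch_idx;                    // init_element_offset on the host
  const int64_t expected[8] = {4, 5, 6, 7, 16, 17, 18, 19};    // channel-1 elements of an NCHW(2,3,2,2) tensor
  for (int64_t i = 0; i < total_per_threshold; ++i) {
    int64_t actual = base + (i / contiguous) * channel_offset + (i % contiguous);
    assert(actual == expected[i]);
  }
}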
#include <chrono> #include <cmath> #include <cstdio> #include <cstdlib> #include <cstring> #include <iostream> #include <hip/hip_runtime.h> #include "snap.h" #include "utils.cu" #if REFDATA_TWOJ == 14 #include "refdata_2J14_W.h" #elif REFDATA_TWOJ == 8 #include "refdata_2J8_W.h" #elif REFDATA_TWOJ == 4 #include "refdata_2J4_W.h" #else #include "refdata_2J2_W.h" #endif int nsteps = 1; // num of force evaluations __global__ void reset_ulisttot(COMPLEX *ulisttot, const int ulisttot_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < ulisttot_size) ulisttot[i] = {0.0, 0.0}; } __global__ void set_ulisttot(COMPLEX *ulisttot, const int* idxu_block, const int num_atoms, const int twojmax, const double wself) { int natom = blockIdx.x * blockDim.x + threadIdx.x; if (natom < num_atoms) for (int j = 0; j <= twojmax; j++) { int jju = idxu_block[j]; for (int ma = 0; ma <= j; ma++) { ulisttot[INDEX_2D(natom, jju)] = { wself, 0.0 }; jju += j + 2; } } } __global__ void update_ulisttot( const double* rij, const double* rcutij, const double* wj, const int* ulist_parity, const int* idxu_block, const double* rootpqarray, COMPLEX *ulist, COMPLEX *ulisttot, const int num_atoms, const int num_nbor, const int switch_flag, const int twojmax, const int jdimpq) { int natom = blockIdx.x * blockDim.x + threadIdx.x; int nbor = blockIdx.y * blockDim.y + threadIdx.y; if (natom < num_atoms && nbor < num_nbor) { double x = rij[ULIST_INDEX(natom, nbor, 0)]; double y = rij[ULIST_INDEX(natom, nbor, 1)]; double z = rij[ULIST_INDEX(natom, nbor, 2)]; double rsq = x * x + y * y + z * z; double r = sqrt(rsq); double theta0 = (r - rmin0) * rfac0 * MY_PI / (rcutij[INDEX_2D(natom, nbor)] - rmin0); double z0 = r / tan(theta0); double rootpq; int jju, jjup; // compute Cayley-Klein parameters for unit quaternion double r0inv = 1.0 / sqrt(r * r + z0 * z0); double a_r = r0inv * z0; double a_i = -r0inv * z; double b_r = r0inv * y; double b_i = -r0inv * x; double sfac; sfac = compute_sfac(r, rcutij[INDEX_2D(natom, nbor)], switch_flag); sfac *= wj[INDEX_2D(natom, nbor)]; // Recursion relations // VMK Section 4.8.2 // u[j,ma,mb] = Sqrt((j-ma)/(j-mb)) a* u[j-1,ma,mb] // -Sqrt((ma)/(j-mb)) b* u[j-1,ma-1,mb] // u[j,ma,mb] = Sqrt((j-ma)/(mb)) b u[j-1,ma,mb-1] // Sqrt((ma)/(mb)) a u[j-1,ma-1,mb-1] // initialize first entry // initialize top row of each layer to zero ulist[ULIST_INDEX(natom, nbor, 0)].re = 1.0; ulist[ULIST_INDEX(natom, nbor, 0)].im = 0.0; // skip over right half of each uarray jju = 1; for (int j = 1; j <= twojmax; j++) { int deljju = j + 1; for (int mb = 0; 2 * mb <= j; mb++) { ulist[ULIST_INDEX(natom, nbor, jju)].re = 0.0; ulist[ULIST_INDEX(natom, nbor, jju)].im = 0.0; jju += deljju; } int ncolhalf = deljju / 2; jju += deljju * ncolhalf; } jju = 1; jjup = 0; for (int j = 1; j <= twojmax; j++) { int deljju = j + 1; int deljjup = j; int mb_max = (j + 1) / 2; int ma_max = j; int m_max = ma_max * mb_max; // fill in left side of matrix layer from previous layer for (int m_iter = 0; m_iter < m_max; ++m_iter) { int mb = m_iter / ma_max; int ma = m_iter % ma_max; double up_r = ulist[ULIST_INDEX(natom, nbor, jjup)].re; double up_i = ulist[ULIST_INDEX(natom, nbor, jjup)].im; rootpq = rootpqarray[ROOTPQ_INDEX(j - ma, j - mb)]; ulist[ULIST_INDEX(natom, nbor, jju)].re += rootpq * (a_r * up_r + a_i * up_i); ulist[ULIST_INDEX(natom, nbor, jju)].im += rootpq * (a_r * up_i - a_i * up_r); rootpq = rootpqarray[ROOTPQ_INDEX(ma + 1, j - mb)]; ulist[ULIST_INDEX(natom, nbor, jju+1)].re = -rootpq * (b_r * up_r + b_i * up_i); 
ulist[ULIST_INDEX(natom, nbor, jju+1)].im = -rootpq * (b_r * up_i - b_i * up_r); // assign middle column i.e. mb+1 if (2 * (mb + 1) == j) { rootpq = rootpqarray[ROOTPQ_INDEX(j - ma, mb + 1)]; ulist[ULIST_INDEX(natom, nbor, jju+deljju)].re += rootpq * (b_r * up_r - b_i * up_i); ulist[ULIST_INDEX(natom, nbor, jju+deljju)].im += rootpq * (b_r * up_i + b_i * up_r); rootpq = rootpqarray[ROOTPQ_INDEX(ma + 1, mb + 1)]; ulist[ULIST_INDEX(natom, nbor, jju+deljju+1)].re = rootpq * (a_r * up_r - a_i * up_i); ulist[ULIST_INDEX(natom, nbor, jju+deljju+1)].im = rootpq * (a_r * up_i + a_i * up_r); } jju++; jjup++; if (ma == ma_max - 1) jju++; } // copy left side to right side with inversion symmetry VMK 4.4(2) // u[ma-j][mb-j] = (-1)^(ma-mb)*Conj([u[ma][mb]) // dependence on idxu_block could be removed // renamed counters b/c can not modify jju, jjup int jjui = idxu_block[j]; int jjuip = jjui + (j + 1) * (j + 1) - 1; for (int mb = 0; 2 * mb < j; mb++) { for (int ma = 0; ma <= j; ma++) { ulist[ULIST_INDEX(natom, nbor, jjuip)].re = ulist_parity[jjui] * ulist[ULIST_INDEX(natom, nbor, jjui)].re; ulist[ULIST_INDEX(natom, nbor, jjuip)].im = ulist_parity[jjui] * -ulist[ULIST_INDEX(natom, nbor, jjui)].im; jjui++; jjuip--; } } // skip middle and right half cols // b/c no longer using idxu_block if (j % 2 == 0) jju += deljju; int ncolhalf = deljju / 2; jju += deljju * ncolhalf; int ncolhalfp = deljjup / 2; jjup += deljjup * ncolhalfp; } sfac = compute_sfac(r, rcutij[INDEX_2D(natom, nbor)], switch_flag); sfac *= wj[INDEX_2D(natom, nbor)]; for (int j = 0; j <= twojmax; j++) { int jju = idxu_block[j]; for (int mb = 0; mb <= j; mb++) for (int ma = 0; ma <= j; ma++) { atomicAdd(&(ulisttot[INDEX_2D(natom, jju)].re), sfac * ulist[ULIST_INDEX(natom, nbor, jju)].re); atomicAdd(&(ulisttot[INDEX_2D(natom, jju)].im), sfac * ulist[ULIST_INDEX(natom, nbor, jju)].im); jju++; } } } } __global__ void reset_ylist(COMPLEX *ylist, const int ylist_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < ylist_size) ylist[i] = {0.0, 0.0}; } __global__ void compute_yi ( const int* idxz, const double* idxzbeta, const double* cglist, const int* idxcg_block, const int* idxu_block, const int* idxdu_block, const COMPLEX* ulisttot, COMPLEX* ylist, const int num_atoms, const int idxz_max, const int jdim) { int natom = blockIdx.x * blockDim.x + threadIdx.x; int jjz = blockIdx.y * blockDim.y + threadIdx.y; if (jjz < idxz_max && natom < num_atoms) { const int j1 = idxz[IDXZ_INDEX(jjz, 0)]; const int j2 = idxz[IDXZ_INDEX(jjz, 1)]; const int j = idxz[IDXZ_INDEX(jjz, 2)]; const int ma1min = idxz[IDXZ_INDEX(jjz, 3)]; const int ma2max = idxz[IDXZ_INDEX(jjz, 4)]; const int na = idxz[IDXZ_INDEX(jjz, 5)]; const int mb1min = idxz[IDXZ_INDEX(jjz, 6)]; const int mb2max = idxz[IDXZ_INDEX(jjz, 7)]; const int nb = idxz[IDXZ_INDEX(jjz, 8)]; const double betaj = idxzbeta[jjz]; const double* cgblock = cglist + idxcg_block[j1 + jdim*j2 + jdim*jdim*j]; int mb = (2 * (mb1min + mb2max) - j1 - j2 + j) / 2; int ma = (2 * (ma1min + ma2max) - j1 - j2 + j) / 2; const int jjdu = idxdu_block[j] + (j + 1) * mb + ma; int jju1 = idxu_block[j1] + (j1 + 1) * mb1min; int jju2 = idxu_block[j2] + (j2 + 1) * mb2max; int icgb = mb1min * (j2 + 1) + mb2max; double ztmp_r = 0.0; double ztmp_i = 0.0; // loop over columns of u1 and corresponding // columns of u2 satisfying Clebsch-Gordan constraint // 2*mb-j = 2*mb1-j1 + 2*mb2-j2 for (int ib = 0; ib < nb; ib++) { double suma1_r = 0.0; double suma1_i = 0.0; int ma1 = ma1min; int ma2 = ma2max; int icga = ma1min * (j2 + 1) + ma2max; // 
loop over elements of row u1[mb1] and corresponding elements // of row u2[mb2] satisfying Clebsch-Gordan constraint // 2*ma-j = 2*ma1-j1 + 2*ma2-j2 for (int ia = 0; ia < na; ia++) { suma1_r += cgblock[icga] * (ulisttot[INDEX_2D(natom, jju1 + ma1)].re * ulisttot[INDEX_2D(natom, jju2 + ma2)].re - ulisttot[INDEX_2D(natom, jju1 + ma1)].im * ulisttot[INDEX_2D(natom, jju2 + ma2)].im); suma1_i += cgblock[icga] * (ulisttot[INDEX_2D(natom, jju1 + ma1)].re * ulisttot[INDEX_2D(natom, jju2 + ma2)].im + ulisttot[INDEX_2D(natom, jju1 + ma1)].im * ulisttot[INDEX_2D(natom, jju2 + ma2)].re); ma1++; ma2--; icga += j2; } // end loop over ia ztmp_r += cgblock[icgb] * suma1_r; ztmp_i += cgblock[icgb] * suma1_i; jju1 += j1 + 1; jju2 -= j2 + 1; icgb += j2; } // end loop over ib // apply z(j1,j2,j,ma,mb) to unique element of y(j) atomicAdd(&(ylist[INDEX_2D(natom, jjdu)].re), betaj * ztmp_r); atomicAdd(&(ylist[INDEX_2D(natom, jjdu)].im), betaj * ztmp_i); } // end jjz and natom loop } __global__ void compute_duidrj ( const double *wj, const double *rij, const double *rcutij, const double* rootpqarray, const COMPLEX* ulist, COMPLEX* dulist, const int num_atoms, const int num_nbor, const int twojmax, const int idxdu_max, const int jdimpq, const int switch_flag) { int natom = blockIdx.x * blockDim.x + threadIdx.x; int nbor = blockIdx.y * blockDim.y + threadIdx.y; if (natom < num_atoms && nbor < num_nbor) { double wj_in = wj[INDEX_2D(natom, nbor)]; double rcut = rcutij[INDEX_2D(natom, nbor)]; double x = rij[ULIST_INDEX(natom, nbor, 0)]; double y = rij[ULIST_INDEX(natom, nbor, 1)]; double z = rij[ULIST_INDEX(natom, nbor, 2)]; double rsq = x * x + y * y + z * z; double r = sqrt(rsq); double rscale0 = rfac0 * MY_PI / (rcut - rmin0); double theta0 = (r - rmin0) * rscale0; double cs = cos(theta0); double sn = sin(theta0); double z0 = r * cs / sn; double dz0dr = z0 / r - (r * rscale0) * (rsq + z0 * z0) / rsq; compute_duarray(natom, nbor, num_atoms, num_nbor, twojmax, idxdu_max, jdimpq, switch_flag, x, y, z, z0, r, dz0dr, wj_in, rcut, rootpqarray, ulist, dulist); } } __global__ void compute_deidrj( const int* idxdu_block, const COMPLEX* dulist, const COMPLEX* ylist, double* dedr, const int num_atoms, const int num_nbor, const int twojmax, const int idxdu_max) { int natom = blockIdx.x * blockDim.x + threadIdx.x; int nbor = blockIdx.y * blockDim.y + threadIdx.y; if (natom < num_atoms && nbor < num_nbor) { for (int k = 0; k < 3; k++) dedr[ULIST_INDEX(natom, nbor, k)] = 0.0; for (int j = 0; j <= twojmax; j++) { int jjdu = idxdu_block[j]; for (int mb = 0; 2 * mb < j; mb++) for (int ma = 0; ma <= j; ma++) { double jjjmambyarray_r = ylist[INDEX_2D(natom, jjdu)].re; double jjjmambyarray_i = ylist[INDEX_2D(natom, jjdu)].im; for (int k = 0; k < 3; k++) dedr[ULIST_INDEX(natom, nbor, k)] += dulist[DULIST_INDEX(natom, nbor, jjdu, k)].re * jjjmambyarray_r + dulist[DULIST_INDEX(natom, nbor, jjdu, k)].im * jjjmambyarray_i; jjdu++; } // end loop over ma mb // For j even, handle middle column if (j % 2 == 0) { int mb = j / 2; for (int ma = 0; ma < mb; ma++) { double jjjmambyarray_r = ylist[INDEX_2D(natom, jjdu)].re; double jjjmambyarray_i = ylist[INDEX_2D(natom, jjdu)].im; for (int k = 0; k < 3; k++) dedr[ULIST_INDEX(natom, nbor, k)] += dulist[DULIST_INDEX(natom, nbor, jjdu, k)].re * jjjmambyarray_r + dulist[DULIST_INDEX(natom, nbor, jjdu, k)].im * jjjmambyarray_i; jjdu++; } double jjjmambyarray_r = ylist[INDEX_2D(natom, jjdu)].re; double jjjmambyarray_i = ylist[INDEX_2D(natom, jjdu)].im; for (int k = 0; k < 3; k++) dedr[ULIST_INDEX(natom, 
nbor, k)] += (dulist[DULIST_INDEX(natom, nbor, jjdu, k)].re * jjjmambyarray_r + dulist[DULIST_INDEX(natom, nbor, jjdu, k)].im * jjjmambyarray_i) * 0.5; jjdu++; } // end if jeven } // end loop over j for (int k = 0; k < 3; k++) dedr[ULIST_INDEX(natom, nbor, k)] *= 2.0; } } int main(int argc, char* argv[]) { options(argc, argv); const int switch_flag = 1; // SNAP parameter // record timings of individual routines double elapsed_ui = 0.0, elapsed_yi = 0.0, elapsed_duidrj = 0.0, elapsed_deidrj = 0.0; const int ninside = refdata.ninside; const int ncoeff = refdata.ncoeff; const int nlocal = refdata.nlocal; const int nghost = refdata.nghost; const int ntotal = nlocal + nghost; const int twojmax = refdata.twojmax; const double rcutfac = refdata.rcutfac; const double wself = 1.0; const int num_atoms = nlocal; const int num_nbor = ninside; //coeffi = memory->grow(coeffi, ncoeff + 1, "coeffi"); double* coeffi = (double*) malloc (sizeof(double) * (ncoeff+1)); for (int icoeff = 0; icoeff < ncoeff + 1; icoeff++) coeffi[icoeff] = refdata.coeff[icoeff]; double* beta = coeffi + 1; // build index list const int jdim = twojmax + 1; // index list for cglist int *idxcg_block = (int*) malloc(sizeof(int) * jdim * jdim * jdim); int idxcg_count = 0; for (int j1 = 0; j1 <= twojmax; j1++) for (int j2 = 0; j2 <= j1; j2++) for (int j = abs(j1 - j2); j <= MIN(twojmax, j1 + j2); j += 2) { idxcg_block[j1 + j2 *jdim + jdim*jdim*j] = idxcg_count; for (int m1 = 0; m1 <= j1; m1++) for (int m2 = 0; m2 <= j2; m2++) idxcg_count++; } const int idxcg_max = idxcg_count; // index list for uarray // need to include both halves // **** only place rightside is used is in compute_yi() *** int* idxu_block = (int*) malloc (sizeof(int) * jdim); int idxu_count = 0; for (int j = 0; j <= twojmax; j++) { idxu_block[j] = idxu_count; for (int mb = 0; mb <= j; mb++) for (int ma = 0; ma <= j; ma++) idxu_count++; } const int idxu_max = idxu_count; // parity list for uarray inversion symmetry // parity +1: u[ma-j][mb-j] = +Conj([u[ma][mb]) // parity -1: u[ma-j][mb-j] = -Conj([u[ma][mb]) // ulist_parity.resize(idxu_max); int* ulist_parity = (int*) malloc (sizeof(int) * idxu_max); idxu_count = 0; for (int j = 0; j <= twojmax; j++) { int mbpar = 1; for (int mb = 0; mb <= j; mb++) { int mapar = mbpar; for (int ma = 0; ma <= j; ma++) { ulist_parity[idxu_count] = mapar; mapar = -mapar; idxu_count++; } mbpar = -mbpar; } } // index list for duarray, yarray // only include left half // NOTE: idxdu indicates lefthalf only // idxu indicates both halves int* idxdu_block = (int*) malloc (sizeof(int) * jdim); int idxdu_count = 0; for (int j = 0; j <= twojmax; j++) { idxdu_block[j] = idxdu_count; for (int mb = 0; 2 * mb <= j; mb++) for (int ma = 0; ma <= j; ma++) idxdu_count++; } const int idxdu_max = idxdu_count; // index list for beta and B int idxb_count = 0; for (int j1 = 0; j1 <= twojmax; j1++) for (int j2 = 0; j2 <= j1; j2++) for (int j = abs(j1 - j2); j <= MIN(twojmax, j1 + j2); j += 2) if (j >= j1) idxb_count++; const int idxb_max = idxb_count; SNA_BINDICES* idxb = (SNA_BINDICES*) malloc (sizeof(SNA_BINDICES) * idxb_max); idxb_count = 0; for (int j1 = 0; j1 <= twojmax; j1++) for (int j2 = 0; j2 <= j1; j2++) for (int j = abs(j1 - j2); j <= MIN(twojmax, j1 + j2); j += 2) if (j >= j1) { idxb[idxb_count].j1 = j1; idxb[idxb_count].j2 = j2; idxb[idxb_count].j = j; idxb_count++; } // reverse index list for beta and b int* idxb_block = (int*) malloc (sizeof(int) * jdim * jdim * jdim); idxb_count = 0; for (int j1 = 0; j1 <= twojmax; j1++) for (int j2 = 0; j2 <= 
j1; j2++) for (int j = abs(j1 - j2); j <= MIN(twojmax, j1 + j2); j += 2) { if (j < j1) continue; idxb_block[j1*jdim*jdim+j2*jdim+j] = idxb_count; idxb_count++; } // index list for zlist int idxz_count = 0; for (int j1 = 0; j1 <= twojmax; j1++) for (int j2 = 0; j2 <= j1; j2++) for (int j = abs(j1 - j2); j <= MIN(twojmax, j1 + j2); j += 2) for (int mb = 0; 2 * mb <= j; mb++) for (int ma = 0; ma <= j; ma++) idxz_count++; const int idxz_max = idxz_count; //idxz.resize(idxz_max, 9); int* idxz = (int*) malloc (sizeof(int) * idxz_max * 9); //idxzbeta.resize(idxz_max); double* idxzbeta = (double*) malloc (sizeof(double) * idxz_max); //memory->create(idxz_block, jdim, jdim, jdim, "sna:idxz_block"); int* idxz_block = (int*) malloc (sizeof(int) * jdim * jdim * jdim); idxz_count = 0; for (int j1 = 0; j1 <= twojmax; j1++) for (int j2 = 0; j2 <= j1; j2++) for (int j = abs(j1 - j2); j <= MIN(twojmax, j1 + j2); j += 2) { idxz_block[j1*jdim*jdim+j2*jdim+j] = idxz_count; // find right beta[jjb] entries // multiply and divide by j+1 factors // account for multiplicity of 1, 2, or 3 // this should not be computed here double betaj; if (j >= j1) { const int jjb = idxb_block[j1*jdim*jdim+j2*jdim+j]; if (j1 == j) { if (j2 == j) { betaj = 3 * beta[jjb]; } else { betaj = 2 * beta[jjb]; } } else { betaj = beta[jjb]; } } else if (j >= j2) { const int jjb = idxb_block[j*jdim*jdim+j2*jdim+j1]; if (j2 == j) { betaj = 2 * beta[jjb] * (j1 + 1) / (j + 1.0); } else { betaj = beta[jjb] * (j1 + 1) / (j + 1.0); } } else { const int jjb = idxb_block[j2*jdim*jdim+j*jdim+j1]; betaj = beta[jjb] * (j1 + 1) / (j + 1.0); } for (int mb = 0; 2 * mb <= j; mb++) for (int ma = 0; ma <= j; ma++) { idxz[IDXZ_INDEX(idxz_count, 0)] = j1; idxz[IDXZ_INDEX(idxz_count, 1)] = j2; idxz[IDXZ_INDEX(idxz_count, 2)] = j; int ma1min = MAX(0, (2 * ma - j - j2 + j1) / 2); idxz[IDXZ_INDEX(idxz_count, 3)] = ma1min; idxz[IDXZ_INDEX(idxz_count, 4)] = (2 * ma - j - (2 * ma1min - j1) + j2) / 2; idxz[IDXZ_INDEX(idxz_count, 5)] = MIN(j1, (2 * ma - j + j2 + j1) / 2) - ma1min + 1; int mb1min = MAX(0, (2 * mb - j - j2 + j1) / 2); idxz[IDXZ_INDEX(idxz_count, 6)] = mb1min; idxz[IDXZ_INDEX(idxz_count, 7)] = (2 * mb - j - (2 * mb1min - j1) + j2) / 2; idxz[IDXZ_INDEX(idxz_count, 8)] = MIN(j1, (2 * mb - j + j2 + j1) / 2) - mb1min + 1; idxzbeta[idxz_count] = betaj; idxz_count++; } } // omit beta0 from beta vector if (compute_ncoeff(twojmax) != ncoeff) { printf("ERROR: ncoeff from SNA does not match reference data\n"); exit(1); } //snaptr->grow_rij(ninside); double *rij = (double*) malloc(sizeof(double) * (num_atoms * num_nbor * 3)); double *inside = (double*) malloc(sizeof(double) * (num_atoms * num_nbor)); double *wj = (double*) malloc(sizeof(double) * (num_atoms * num_nbor)); double *rcutij = (double*) malloc(sizeof(double) * (num_atoms * num_nbor)); const int jdimpq = twojmax + 2; double* rootpqarray = (double*) malloc(sizeof(double) * jdimpq * jdimpq); double* cglist = (double*) malloc (sizeof(double) * idxcg_max); double* dedr = (double*) malloc (sizeof(double) * num_atoms * num_nbor * 3); COMPLEX* ulist = (COMPLEX*) malloc (sizeof(COMPLEX) * num_atoms * num_nbor * idxu_max); COMPLEX* ylist = (COMPLEX*) malloc (sizeof(COMPLEX) * num_atoms * idxdu_max); COMPLEX* ulisttot = (COMPLEX*) malloc (sizeof(COMPLEX) * num_atoms * idxu_max); COMPLEX* dulist = (COMPLEX*) malloc (sizeof(COMPLEX) * num_atoms * num_nbor * 3 * idxdu_max); // init rootpqarray for (int p = 1; p <= twojmax; p++) for (int q = 1; q <= twojmax; q++) rootpqarray[ROOTPQ_INDEX(p, q)] = 
sqrt(static_cast<double>(p) / q); // init_clebsch_gordan() double sum, dcg, sfaccg; int m, aa2, bb2, cc2; int ifac; idxcg_count = 0; for (int j1 = 0; j1 <= twojmax; j1++) for (int j2 = 0; j2 <= j1; j2++) for (int j = abs(j1 - j2); j <= MIN(twojmax, j1 + j2); j += 2) { for (int m1 = 0; m1 <= j1; m1++) { aa2 = 2 * m1 - j1; for (int m2 = 0; m2 <= j2; m2++) { // -c <= cc <= c bb2 = 2 * m2 - j2; m = (aa2 + bb2 + j) / 2; if (m < 0 || m > j) { cglist[idxcg_count] = 0.0; idxcg_count++; continue; } sum = 0.0; for (int z = MAX(0, MAX(-(j - j2 + aa2) / 2, -(j - j1 - bb2) / 2)); z <= MIN((j1 + j2 - j) / 2, MIN((j1 - aa2) / 2, (j2 + bb2) / 2)); z++) { ifac = z % 2 ? -1 : 1; sum += ifac / (factorial(z) * factorial((j1 + j2 - j) / 2 - z) * factorial((j1 - aa2) / 2 - z) * factorial((j2 + bb2) / 2 - z) * factorial((j - j2 + aa2) / 2 + z) * factorial((j - j1 - bb2) / 2 + z)); } cc2 = 2 * m - j; dcg = deltacg(j1, j2, j); sfaccg = sqrt( factorial((j1 + aa2) / 2) * factorial((j1 - aa2) / 2) * factorial((j2 + bb2) / 2) * factorial((j2 - bb2) / 2) * factorial((j + cc2) / 2) * factorial((j - cc2) / 2) * (j + 1)); cglist[idxcg_count] = sum * dcg * sfaccg; idxcg_count++; } } } double* f = (double*) malloc (sizeof(double) * ntotal * 3); // initialize error tally double sumsqferr = 0.0; int* d_idxu_block; hipMalloc((void**)&d_idxu_block, sizeof(int)*jdim); hipMemcpy(d_idxu_block, idxu_block, sizeof(int)*jdim, hipMemcpyHostToDevice); int* d_ulist_parity; hipMalloc((void**)&d_ulist_parity, sizeof(int)*idxu_max); hipMemcpy(d_ulist_parity, ulist_parity, sizeof(int)*idxu_max, hipMemcpyHostToDevice); double* d_rootpqarray; hipMalloc((void**)&d_rootpqarray, sizeof(double)*jdimpq*jdimpq); hipMemcpy(d_rootpqarray, rootpqarray, sizeof(double)*jdimpq*jdimpq, hipMemcpyHostToDevice); int* d_idxz; hipMalloc((void**)&d_idxz, sizeof(int)*idxz_max*9); hipMemcpy(d_idxz, idxz, sizeof(int)*idxz_max*9, hipMemcpyHostToDevice); double* d_idxzbeta; hipMalloc((void**)&d_idxzbeta, sizeof(double)*idxz_max); hipMemcpy(d_idxzbeta, idxzbeta, sizeof(double)*idxz_max, hipMemcpyHostToDevice); int* d_idxcg_block; hipMalloc((void**)&d_idxcg_block, sizeof(int)*jdim*jdim*jdim); hipMemcpy(d_idxcg_block, idxcg_block, sizeof(int)*jdim*jdim*jdim, hipMemcpyHostToDevice); int* d_idxdu_block; hipMalloc((void**)&d_idxdu_block, sizeof(int)*jdim); hipMemcpy(d_idxdu_block, idxdu_block, sizeof(int)*jdim, hipMemcpyHostToDevice); double* d_cglist; hipMalloc((void**)&d_cglist, sizeof(double)*idxcg_max); hipMemcpy(d_cglist, cglist, sizeof(double)*idxcg_max, hipMemcpyHostToDevice); COMPLEX* d_dulist; hipMalloc((void**)&d_dulist, sizeof(COMPLEX)*num_atoms*num_nbor*3*idxdu_max); hipMemcpy(d_dulist, dulist, sizeof(COMPLEX)*num_atoms*num_nbor*3*idxdu_max, hipMemcpyHostToDevice); COMPLEX* d_ulist; hipMalloc((void**)&d_ulist, sizeof(COMPLEX)*num_atoms*num_nbor*idxu_max); hipMemcpy(d_ulist, ulist, sizeof(COMPLEX)*num_atoms*num_nbor*idxu_max, hipMemcpyHostToDevice); double* d_dedr; hipMalloc((void**)&d_dedr, sizeof(double)*num_atoms*num_nbor*3); hipMemcpy(d_dedr, dedr, sizeof(double)*num_atoms*num_nbor*3, hipMemcpyHostToDevice); COMPLEX* d_ulisttot; hipMalloc((void**)&d_ulisttot, sizeof(COMPLEX)*num_atoms*idxu_max); COMPLEX* d_ylist; hipMalloc((void**)&d_ylist, sizeof(COMPLEX)*num_atoms*idxdu_max); double *d_rij; hipMalloc((void**)&d_rij, sizeof(double)*num_atoms*num_nbor*3); double *d_rcutij; hipMalloc((void**)&d_rcutij, sizeof(double)*num_atoms*num_nbor); double *d_wj; hipMalloc((void**)&d_wj, sizeof(double)*num_atoms*num_nbor); // loop over steps auto begin = myclock::now(); 
for (int istep = 0; istep < nsteps; istep++) { time_point<system_clock> start, end; duration<double> elapsed; for (int j = 0; j < ntotal * 3; j++) { f[j] = 0.0; } int jt = 0, jjt = 0; for (int natom = 0; natom < num_atoms; natom++) { for (int nbor = 0; nbor < num_nbor; nbor++) { // n1 = num_atoms, n2 = num_nbor, n3 = 3 rij[ULIST_INDEX(natom, nbor, 0)] = refdata.rij[jt++]; rij[ULIST_INDEX(natom, nbor, 1)] = refdata.rij[jt++]; rij[ULIST_INDEX(natom, nbor, 2)] = refdata.rij[jt++]; inside[INDEX_2D(natom, nbor)] = refdata.jlist[jjt++]; wj[INDEX_2D(natom, nbor)] = 1.0; rcutij[INDEX_2D(natom, nbor)] = rcutfac; } } hipMemcpy(d_rij, rij, sizeof(double)*num_atoms*num_nbor*3, hipMemcpyHostToDevice); hipMemcpy(d_rcutij, rcutij, sizeof(double)*num_atoms*num_nbor, hipMemcpyHostToDevice); hipMemcpy(d_wj, wj, sizeof(double)*num_atoms*num_nbor, hipMemcpyHostToDevice); // compute_ui start = system_clock::now(); // compute_ui(); // utot(j,ma,mb) = 0 for all j,ma,ma // utot(j,ma,ma) = 1 for all j,ma // for j in neighbors of i: // compute r0 = (x,y,z,z0) // utot(j,ma,mb) += u(r0;j,ma,mb) for all j,ma,mb dim3 grid_k1 ((num_atoms*idxu_max+255)/256); dim3 block_k1 (256); hipLaunchKernelGGL(reset_ulisttot, dim3(grid_k1), dim3(block_k1), 0, 0, d_ulisttot, num_atoms*idxu_max); dim3 grid_k2 ((num_atoms+255)/256); dim3 block_k2 (256); hipLaunchKernelGGL(set_ulisttot, dim3(grid_k2), dim3(block_k2), 0, 0, d_ulisttot, d_idxu_block, num_atoms, twojmax, wself); dim3 grid_k3 ((num_atoms+15)/16, (num_nbor+15)/16); dim3 block_k3 (16, 16); hipLaunchKernelGGL(update_ulisttot, dim3(grid_k3), dim3(block_k3), 0, 0, d_rij, d_rcutij, d_wj, d_ulist_parity, d_idxu_block, d_rootpqarray, d_ulist, d_ulisttot, num_atoms, num_nbor, switch_flag, twojmax, jdimpq); end = system_clock::now(); elapsed = end - start; elapsed_ui += elapsed.count(); start = system_clock::now(); // Initialize ylist elements to zeros dim3 grid_k4 ((num_atoms*idxdu_max+255)/256); dim3 block_k4 (256); hipLaunchKernelGGL(reset_ylist, dim3(grid_k4), dim3(block_k4), 0, 0, d_ylist, num_atoms*idxdu_max); dim3 grid_k5 ((num_atoms+15)/16, (idxz_max+15)/16); dim3 block_k5 (16, 16); hipLaunchKernelGGL(compute_yi, dim3(grid_k5), dim3(block_k5), 0, 0, d_idxz, d_idxzbeta, d_cglist, d_idxcg_block, d_idxu_block, d_idxdu_block, d_ulisttot, d_ylist, num_atoms, idxz_max, jdim); end = system_clock::now(); elapsed = end - start; elapsed_yi += elapsed.count(); // compute_duidrj start = system_clock::now(); dim3 grid_k6 ((num_atoms+15)/16, (num_nbor+15)/16); dim3 block_k6 (16, 16); hipLaunchKernelGGL(compute_duidrj, dim3(grid_k6), dim3(block_k6), 0, 0, d_wj, d_rij, d_rcutij, d_rootpqarray, d_ulist, d_dulist, num_atoms, num_nbor, twojmax, idxdu_max, jdimpq, switch_flag); end = system_clock::now(); elapsed = end - start; elapsed_duidrj += elapsed.count(); start = system_clock::now(); // compute_deidrj(); dim3 grid_k7 ((num_atoms+15)/16, (num_nbor+15)/16); dim3 block_k7 (16, 16); hipLaunchKernelGGL(compute_deidrj, dim3(grid_k7), dim3(block_k7), 0, 0, d_idxdu_block, d_dulist, d_ylist, d_dedr, num_atoms, num_nbor, twojmax, idxdu_max); hipMemcpy(dedr, d_dedr, sizeof(double)*num_atoms*num_nbor*3, hipMemcpyDeviceToHost); end = system_clock::now(); elapsed = end - start; elapsed_deidrj += elapsed.count(); // Compute forces and error //compute_forces(snaptr); for (int natom = 0; natom < num_atoms; natom++) { for (int nbor = 0; nbor < num_nbor; nbor++) { int j = inside[INDEX_2D(natom, nbor)]; f[F_INDEX(natom, 0)] += dedr[ULIST_INDEX(natom, nbor, 0)]; f[F_INDEX(natom, 1)] += dedr[ULIST_INDEX(natom, 
nbor, 1)]; f[F_INDEX(natom, 2)] += dedr[ULIST_INDEX(natom, nbor, 2)]; f[F_INDEX(j, 0)] -= dedr[ULIST_INDEX(natom, nbor, 0)]; f[F_INDEX(j, 1)] -= dedr[ULIST_INDEX(natom, nbor, 1)]; f[F_INDEX(j, 2)] -= dedr[ULIST_INDEX(natom, nbor, 2)]; } // loop over neighbor forces } // loop over atoms // compute_error(snaptr); jt = 0; for (int j = 0; j < ntotal; j++) { double ferrx = f[F_INDEX(j, 0)] - refdata.fj[jt++]; double ferry = f[F_INDEX(j, 1)] - refdata.fj[jt++]; double ferrz = f[F_INDEX(j, 2)] - refdata.fj[jt++]; sumsqferr += ferrx * ferrx + ferry * ferry + ferrz * ferrz; } } auto stop = myclock::now(); myduration elapsed = stop - begin; printf("-----------------------\n"); printf("Summary of TestSNAP run\n"); printf("-----------------------\n"); printf("natoms = %d \n", nlocal); printf("nghostatoms = %d \n", nghost); printf("nsteps = %d \n", nsteps); printf("nneighs = %d \n", ninside); printf("twojmax = %d \n", twojmax); printf("duration = %g [sec]\n", elapsed.count()); printf("step time = %g [sec/step]\n", elapsed.count() / nsteps); printf("grind time = %g [msec/atom-step]\n", 1000.0 * elapsed.count() / (nlocal * nsteps)); printf("RMS |Fj| deviation %g [eV/A]\n", sqrt(sumsqferr / (ntotal * nsteps))); printf("\n Individual routine timings\n"); printf("compute_ui = %f\n", elapsed_ui); printf("compute_yi = %f\n", elapsed_yi); printf("compute_duidrj = %f\n", elapsed_duidrj); printf("compute_deidrj = %f\n", elapsed_deidrj); hipFree(d_idxu_block); hipFree(d_ulist_parity); hipFree(d_rootpqarray); hipFree(d_idxz); hipFree(d_idxzbeta); hipFree(d_idxcg_block); hipFree(d_idxdu_block); hipFree(d_cglist); hipFree(d_dulist); hipFree(d_ulist); hipFree(d_dedr); hipFree(d_ulisttot); hipFree(d_ylist); hipFree(d_rij); hipFree(d_rcutij); hipFree(d_wj); free(coeffi); free(idxcg_block); free(idxu_block); free(ulist_parity); free(idxdu_block); free(idxb); free(idxb_block); free(idxz); free(idxzbeta); free(idxz_block); free(rij); free(inside); free(wj); free(rcutij); free(rootpqarray); free(cglist); free(dedr); free(ulist); free(ylist); free(ulisttot); free(dulist); free(f); return 0; }
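// Sizing sketch (not part of the original benchmark): the index-list sizes built in main()
// depend only on twojmax, so the device-buffer footprints can be derived up front.  The
// helper below mirrors the idxu/idxdu construction loops above; e.g. twojmax = 2 gives
// idxu_max = 1 + 4 + 9 = 14 and idxdu_max = 1 + 2 + 6 = 9.
#include <cassert>

inline void index_list_sizes(int twojmax, int &idxu_max, int &idxdu_max) {
  idxu_max = 0;
  idxdu_max = 0;
  for (int j = 0; j <= twojmax; j++) {
    idxu_max  += (j + 1) * (j + 1);      // both halves of uarray
    idxdu_max += (j + 1) * (j / 2 + 1);  // left half only (2*mb <= j)
  }
}

inline void index_list_sizes_example() {
  int idxu_max, idxdu_max;
  index_list_sizes(2, idxu_max, idxdu_max);
  assert(idxu_max == 14 && idxdu_max == 9);
}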