#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>
#include <stdgpu/contract.h>
#include <stdgpu/iterator.h>
#include <stdgpu/memory.h>
#include <stdgpu/utility.h>
namespace stdgpu
{
template <typename T, typename Allocator>
vector<T, Allocator>
vector<T, Allocator>::createDeviceObject(const index_t& capacity,
const Allocator& allocator)
{
STDGPU_EXPECTS(capacity > 0);
vector<T, Allocator> result(mutex_array<mutex_default_type, mutex_array_allocator_type>::createDeviceObject(capacity, mutex_array_allocator_type(allocator)),
bitset<bitset_default_type, bitset_allocator_type>::createDeviceObject(capacity, bitset_allocator_type(allocator)),
atomic<int, atomic_allocator_type>::createDeviceObject(atomic_allocator_type(allocator)),
allocator);
result._data = detail::createUninitializedDeviceArray<T, allocator_type>(result._allocator, capacity);
return result;
}
template <typename T, typename Allocator>
void
vector<T, Allocator>::destroyDeviceObject(vector<T, Allocator>& device_object)
{
if (!detail::is_allocator_destroy_optimizable<value_type, allocator_type>())
{
device_object.clear();
}
detail::destroyUninitializedDeviceArray<T, allocator_type>(device_object._allocator, device_object._data);
mutex_array<mutex_default_type, mutex_array_allocator_type>::destroyDeviceObject(device_object._locks);
bitset<bitset_default_type, bitset_allocator_type>::destroyDeviceObject(device_object._occupied);
atomic<int, atomic_allocator_type>::destroyDeviceObject(device_object._size);
}
template <typename T, typename Allocator>
inline
vector<T, Allocator>::vector(const mutex_array<mutex_default_type, mutex_array_allocator_type>& locks,
const bitset<bitset_default_type, bitset_allocator_type>& occupied,
const atomic<int, atomic_allocator_type>& size,
const Allocator& allocator)
: _locks(locks),
_occupied(occupied),
_size(size),
_allocator(allocator)
{
}
template <typename T, typename Allocator>
inline STDGPU_HOST_DEVICE typename vector<T, Allocator>::allocator_type
vector<T, Allocator>::get_allocator() const
{
return _allocator;
}
template <typename T, typename Allocator>
inline STDGPU_DEVICE_ONLY typename vector<T, Allocator>::reference
vector<T, Allocator>::at(const vector<T, Allocator>::index_type n)
{
return const_cast<reference>(static_cast<const vector<T, Allocator>*>(this)->at(n));
}
template <typename T, typename Allocator>
inline STDGPU_DEVICE_ONLY typename vector<T, Allocator>::const_reference
vector<T, Allocator>::at(const vector<T, Allocator>::index_type n) const
{
STDGPU_EXPECTS(0 <= n);
STDGPU_EXPECTS(n < size());
STDGPU_EXPECTS(occupied(n));
return operator[](n);
}
template <typename T, typename Allocator>
inline STDGPU_DEVICE_ONLY typename vector<T, Allocator>::reference
vector<T, Allocator>::operator[](const vector<T, Allocator>::index_type n)
{
return const_cast<reference>(static_cast<const vector<T, Allocator>*>(this)->operator[](n));
}
template <typename T, typename Allocator>
inline STDGPU_DEVICE_ONLY typename vector<T, Allocator>::const_reference
vector<T, Allocator>::operator[](const vector<T, Allocator>::index_type n) const
{
return _data[n];
}
template <typename T, typename Allocator>
inline STDGPU_DEVICE_ONLY typename vector<T, Allocator>::reference
vector<T, Allocator>::front()
{
return const_cast<reference>(static_cast<const vector<T, Allocator>*>(this)->front());
}
template <typename T, typename Allocator>
inline STDGPU_DEVICE_ONLY typename vector<T, Allocator>::const_reference
vector<T, Allocator>::front() const
{
return operator[](0);
}
template <typename T, typename Allocator>
inline STDGPU_DEVICE_ONLY typename vector<T, Allocator>::reference
vector<T, Allocator>::back()
{
return const_cast<reference>(static_cast<const vector<T, Allocator>*>(this)->back());
}
template <typename T, typename Allocator>
inline STDGPU_DEVICE_ONLY typename vector<T, Allocator>::const_reference
vector<T, Allocator>::back() const
{
return operator[](size() - 1);
}
template <typename T, typename Allocator>
template <class... Args>
inline STDGPU_DEVICE_ONLY bool
vector<T, Allocator>::emplace_back(Args&&... args)
{
return push_back(T(forward<Args>(args)...));
}
template <typename T, typename Allocator>
inline STDGPU_DEVICE_ONLY bool
vector<T, Allocator>::push_back(const T& element)
{
bool pushed = false;
// Preemptive check
if (full())
{
printf("stdgpu::vector::push_back : Object full\n");
return pushed;
}
index_t push_position = _size++;
// Check position
if (0 <= push_position && push_position < capacity())
{
while (!pushed)
{
if (_locks[push_position].try_lock())
{
// START --- critical section --- START
if (!occupied(push_position))
{
allocator_traits<allocator_type>::construct(_allocator, &(_data[push_position]), element);
bool was_occupied = _occupied.set(push_position);
pushed = true;
if (was_occupied)
{
printf("stdgpu::vector::push_back : Expected entry to be not occupied but actually was\n");
}
}
// END --- critical section --- END
_locks[push_position].unlock();
}
}
}
else
{
printf("stdgpu::vector::push_back : Index out of bounds: %" STDGPU_PRIINDEX " not in [0, %" STDGPU_PRIINDEX "]\n", push_position, capacity() - 1);
}
return pushed;
}
template <typename T, typename Allocator>
inline STDGPU_DEVICE_ONLY thrust::pair<T, bool>
vector<T, Allocator>::pop_back()
{
// Value returned if no element can be popped; the element of type T is then unspecified
thrust::pair<T, bool> popped = thrust::make_pair(_data[0], false);
// Preemptive check
if (empty())
{
printf("stdgpu::vector::pop_back : Object empty\n");
return popped;
}
index_t pop_position = --_size;
// Check position
if (0 <= pop_position && pop_position < capacity())
{
while (!popped.second)
{
if (_locks[pop_position].try_lock())
{
// START --- critical section --- START
if (occupied(pop_position))
{
bool was_occupied = _occupied.reset(pop_position);
allocator_traits<allocator_type>::construct(_allocator, &popped, _data[pop_position], true);
allocator_traits<allocator_type>::destroy(_allocator, &(_data[pop_position]));
if (!was_occupied)
{
printf("stdgpu::vector::pop_back : Expected entry to be occupied but actually was not\n");
}
}
// END --- critical section --- END
_locks[pop_position].unlock();
}
}
}
else
{
printf("stdgpu::vector::pop_back : Index out of bounds: %" STDGPU_PRIINDEX " not in [0, %" STDGPU_PRIINDEX "]\n", pop_position, capacity() - 1);
}
return popped;
}
namespace detail
{
template <typename T, typename Allocator, bool update_occupancy>
class vector_insert
{
public:
explicit vector_insert(const vector<T, Allocator>& v)
: _v(v)
{
}
template <typename Value>
STDGPU_DEVICE_ONLY void
operator()(const thrust::tuple<index_t, Value>& value)
{
allocator_traits<typename vector<T, Allocator>::allocator_type>::construct(_v._allocator, &(_v._data[thrust::get<0>(value)]), thrust::get<1>(value));
if (update_occupancy)
{
_v._occupied.set(thrust::get<0>(value));
}
}
private:
vector<T, Allocator> _v;
};
template <typename T, typename Allocator, bool update_occupancy>
class vector_erase
{
public:
explicit vector_erase(const vector<T, Allocator>& v)
: _v(v)
{
}
STDGPU_DEVICE_ONLY void
operator()(const index_t n)
{
allocator_traits<typename vector<T, Allocator>::allocator_type>::destroy(_v._allocator, &(_v._data[n]));
if (update_occupancy)
{
_v._occupied.reset(n);
}
}
private:
vector<T, Allocator> _v;
};
template <typename T, typename Allocator>
class vector_clear_fill
{
public:
explicit vector_clear_fill(const vector<T, Allocator>& v)
: _v(v)
{
}
template <typename ValueIterator, STDGPU_DETAIL_OVERLOAD_IF(detail::is_iterator<ValueIterator>::value)>
void
operator()(ValueIterator begin,
ValueIterator end)
{
thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(thrust::counting_iterator<index_t>(0), begin)),
thrust::make_zip_iterator(thrust::make_tuple(thrust::counting_iterator<index_t>(_v.capacity()), end)),
detail::vector_insert<T, Allocator, false>(_v));
_v._occupied.set();
_v._size.store(_v.capacity());
}
private:
vector<T, Allocator> _v;
};
} // namespace detail
template <typename T, typename Allocator>
template <typename ValueIterator, STDGPU_DETAIL_OVERLOAD_DEFINITION_IF(detail::is_iterator<ValueIterator>::value)>
inline void
vector<T, Allocator>::insert(device_ptr<const T> position,
ValueIterator begin,
ValueIterator end)
{
if (position != device_end())
{
printf("stdgpu::vector::insert : Position not equal to device_end()\n");
return;
}
index_t new_size = size() + static_cast<index_t>(thrust::distance(begin, end));
if (new_size > capacity())
{
printf("stdgpu::vector::insert : Unable to insert all values: New size %" STDGPU_PRIINDEX " would exceed capacity %" STDGPU_PRIINDEX "\n", new_size, capacity());
return;
}
thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(thrust::counting_iterator<index_t>(size()), begin)),
thrust::make_zip_iterator(thrust::make_tuple(thrust::counting_iterator<index_t>(new_size), end)),
detail::vector_insert<T, Allocator, true>(*this));
_size.store(new_size);
}
template <typename T, typename Allocator>
inline void
vector<T, Allocator>::erase(device_ptr<const T> begin,
device_ptr<const T> end)
{
if (end != device_end())
{
printf("stdgpu::vector::erase : End iterator not equal to device_end()\n");
return;
}
index_t new_size = size() - static_cast<index_t>(thrust::distance(begin, end));
if (new_size < 0)
{
printf("stdgpu::vector::erase : Unable to erase all values: New size %" STDGPU_PRIINDEX " would be invalid\n", new_size);
return;
}
thrust::for_each(thrust::counting_iterator<index_t>(new_size),
thrust::counting_iterator<index_t>(size()),
detail::vector_erase<T, Allocator, true>(*this));
_size.store(new_size);
}
template <typename T, typename Allocator>
inline STDGPU_HOST_DEVICE bool
vector<T, Allocator>::empty() const
{
return (size() == 0);
}
template <typename T, typename Allocator>
inline STDGPU_HOST_DEVICE bool
vector<T, Allocator>::full() const
{
return (size() == max_size());
}
template <typename T, typename Allocator>
inline STDGPU_HOST_DEVICE index_t
vector<T, Allocator>::size() const
{
index_t current_size = _size.load();
// Check boundary cases where the push/pop caused the pointers to be overful/underful
if (current_size < 0)
{
printf("stdgpu::vector::size : Size out of bounds: %" STDGPU_PRIINDEX " not in [0, %" STDGPU_PRIINDEX "]. Clamping to 0\n", current_size, capacity());
return 0;
}
if (current_size > capacity())
{
printf("stdgpu::vector::size : Size out of bounds: %" STDGPU_PRIINDEX " not in [0, %" STDGPU_PRIINDEX "]. Clamping to %" STDGPU_PRIINDEX "\n", current_size, capacity(), capacity());
return capacity();
}
STDGPU_ENSURES(current_size <= capacity());
return current_size;
}
template <typename T, typename Allocator>
inline STDGPU_HOST_DEVICE index_t
vector<T, Allocator>::max_size() const
{
return capacity();
}
template <typename T, typename Allocator>
inline STDGPU_HOST_DEVICE index_t
vector<T, Allocator>::capacity() const
{
return _occupied.size();
}
template <typename T, typename Allocator>
inline void
vector<T, Allocator>::shrink_to_fit()
{
// Reject request for performance reasons
}
template <typename T, typename Allocator>
inline const T*
vector<T, Allocator>::data() const
{
return _data;
}
template <typename T, typename Allocator>
inline T*
vector<T, Allocator>::data()
{
return _data;
}
template <typename T, typename Allocator>
inline void
vector<T, Allocator>::clear()
{
if (empty())
{
return;
}
if (!detail::is_allocator_destroy_optimizable<value_type, allocator_type>())
{
const index_t current_size = size();
stdgpu::detail::unoptimized_destroy(stdgpu::device_begin(_data), stdgpu::device_begin(_data) + current_size);
}
_occupied.reset();
_size.store(0);
STDGPU_ENSURES(empty());
STDGPU_ENSURES(valid());
}
template <typename T, typename Allocator>
inline bool
vector<T, Allocator>::valid() const
{
// Special case : Zero capacity is valid
if (capacity() == 0)
{
return true;
}
return (size_valid()
&& occupied_count_valid()
&& _locks.valid());
}
template <typename T, typename Allocator>
device_ptr<T>
vector<T, Allocator>::device_begin()
{
return stdgpu::device_begin(_data);
}
template <typename T, typename Allocator>
device_ptr<T>
vector<T, Allocator>::device_end()
{
return device_begin() + size();
}
template <typename T, typename Allocator>
device_ptr<const T>
vector<T, Allocator>::device_begin() const
{
return stdgpu::device_begin(_data);
}
template <typename T, typename Allocator>
device_ptr<const T>
vector<T, Allocator>::device_end() const
{
return device_begin() + size();
}
template <typename T, typename Allocator>
device_ptr<const T>
vector<T, Allocator>::device_cbegin() const
{
return stdgpu::device_cbegin(_data);
}
template <typename T, typename Allocator>
device_ptr<const T>
vector<T, Allocator>::device_cend() const
{
return device_cbegin() + size();
}
template <typename T, typename Allocator>
stdgpu::device_range<T>
vector<T, Allocator>::device_range()
{
return stdgpu::device_range<T>(_data, size());
}
template <typename T, typename Allocator>
stdgpu::device_range<const T>
vector<T, Allocator>::device_range() const
{
return stdgpu::device_range<const T>(_data, size());
}
template <typename T, typename Allocator>
inline STDGPU_DEVICE_ONLY bool
vector<T, Allocator>::occupied(const index_t n) const
{
STDGPU_EXPECTS(0 <= n);
STDGPU_EXPECTS(n < capacity());
return _occupied[n];
}
template <typename T, typename Allocator>
bool
vector<T, Allocator>::occupied_count_valid() const
{
index_t size_count = size();
index_t size_sum = _occupied.count();
return (size_count == size_sum);
}
template <typename T, typename Allocator>
bool
vector<T, Allocator>::size_valid() const
{
int current_size = _size.load();
return (0 <= current_size && current_size <= static_cast<int>(capacity()));
}
} // namespace stdgpu
#endif // STDGPU_VECTOR_DETAIL_H
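// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the file above): a minimal host
// program that drives the push_back() path defined above from device code via
// thrust::for_each. It assumes the public headers <stdgpu/vector.cuh> and
// <thrust/execution_policy.h> and that createDeviceObject() defaults its
// allocator argument; the functor name fill_vector is made up.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <stdgpu/vector.cuh>

struct fill_vector
{
    stdgpu::vector<int> vec; // lightweight handle, copied by value like the detail functors above

    STDGPU_DEVICE_ONLY void
    operator()(const stdgpu::index_t i)
    {
        vec.push_back(static_cast<int>(i)); // returns false if the vector is already full
    }
};

int main()
{
    const stdgpu::index_t n = 1000;
    stdgpu::vector<int> vec = stdgpu::vector<int>::createDeviceObject(n);

    thrust::for_each(thrust::device,
                     thrust::counting_iterator<stdgpu::index_t>(0),
                     thrust::counting_iterator<stdgpu::index_t>(n),
                     fill_vector{vec});

    printf("size after fill: %ld of capacity %ld\n",
           static_cast<long>(vec.size()), static_cast<long>(vec.capacity()));

    stdgpu::vector<int>::destroyDeviceObject(vec);
    return 0;
}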
// =============================== the_stack ===============================
const int HIGHEST = 3;
const int ITER = 100;
const int WORKLOAD = 1;
int sizepernode;
// global var
float preScore = -99999999999.f;
float score = 0.f;
float maxScore[HIGHEST] = {-999999999.f};
bool orders[NODE_N][NODE_N];
bool preOrders[NODE_N][NODE_N];
bool preGraph[NODE_N][NODE_N];
bool bestGraph[HIGHEST][NODE_N][NODE_N];
bool graph[NODE_N][NODE_N];
float *localscore, *scores;
float *LG;
int *parents;
void initial(); // initialize orders and data
int genOrders(); // randomly swap two nodes in the order
int ConCore(); // decide whether to accept or discard the new order
// get every possible set of parents for a node
void incr(int *bit, int n); // binary counter: increment by one each call
void incrS(int *bit, int n); // base-STATE_N counter: increment by one each call
// get every possible combination of states for a parent set
bool getState(int parN, int *state, int time);
float logGamma(int N); // log-gamma function
float findBestGraph(float* D_localscore, int* D_resP, float* D_Score, bool *D_parent);
void genScore();
void sortGraph();
void swap(int a, int b);
void Pre_logGamma();
int findindex(int *arr, int size);
int C(int n, int a);
FILE *fpout;
int main(int argc, char** argv) {
  if (argc < 2) {
    printf("Usage: %s <output file>\n", argv[0]);
    return -1;
  }
  // save output in a file
  fpout = fopen(argv[1], "w");
  if (fpout == NULL) {
    printf("Failed to open output file %s\n", argv[1]);
    return -1;
  }
int i, j, c = 0, tmp, a, b;
float tmpd;
clock_t start, finish, total = 0, pre1, pre2;
printf("NODE_N=%d\nInitialization...\n", NODE_N);
srand(2);
initial(); // update sizepernode
scores = (float*) malloc ((sizepernode / (256 * WORKLOAD) + 1) * sizeof(float));
parents = (int*) malloc ((sizepernode / (256 * WORKLOAD) + 1) * 4 * sizeof(int));
Pre_logGamma();
int *D_data;
float *D_LG;
float *D_localscore;
float *D_Score;
bool *D_parent;
int *D_resP;
cudaMalloc((void **)&D_data, NODE_N * DATA_N * sizeof(int));
cudaMalloc((void **)&D_localscore, NODE_N * sizepernode * sizeof(float));
cudaMalloc((void **)&D_LG, (DATA_N + 2) * sizeof(float));
cudaMalloc((void **)&D_Score, (sizepernode / (256 * WORKLOAD) + 1) * sizeof(float));
cudaMalloc((void **)&D_parent, NODE_N * sizeof(bool));
cudaMalloc((void **)&D_resP, (sizepernode / (256 * WORKLOAD) + 1) * 4 * sizeof(int));
pre1 = clock();
dim3 grid(sizepernode / 256 + 1, 1, 1);
dim3 threads(256, 1, 1);
cudaMemset(D_localscore, 0, NODE_N * sizepernode * sizeof(float)); // byte-wise zero fill, which is also 0.0f
cudaMemcpy(D_data, data, NODE_N * DATA_N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(D_LG, LG, (DATA_N + 2) * sizeof(float), cudaMemcpyHostToDevice);
genScoreKernel<<<grid, threads>>>(sizepernode, D_localscore, D_data, D_LG);
cudaMemcpy(localscore, D_localscore, NODE_N * sizepernode * sizeof(float), cudaMemcpyDeviceToHost);
printf("Begin to generate orders.\n");
pre2 = clock();
i = 0;
while (i != ITER) {
start = clock();
i++;
score = 0;
for (a = 0; a < NODE_N; a++) {
for (j = 0; j < NODE_N; j++) {
orders[a][j] = preOrders[a][j];
}
}
tmp = rand() % 6;
for (j = 0; j < tmp; j++)
genOrders();
score = findBestGraph(D_localscore, D_resP, D_Score, D_parent);
finish = clock();
total += finish - start;
ConCore();
// store the top HIGHEST highest orders
if (c < HIGHEST) {
tmp = 1;
for (j = 0; j < c; j++) {
if (maxScore[j] == preScore) {
tmp = 0;
}
}
if (tmp != 0) {
maxScore[c] = preScore;
for (a = 0; a < NODE_N; a++) {
for (b = 0; b < NODE_N; b++) {
bestGraph[c][a][b] = preGraph[a][b];
}
}
c++;
}
} else if (c == HIGHEST) {
sortGraph();
c++;
} else {
tmp = 1;
for (j = 0; j < HIGHEST; j++) {
if (maxScore[j] == preScore) {
tmp = 0;
break;
}
}
if (tmp != 0 && preScore > maxScore[HIGHEST - 1]) {
maxScore[HIGHEST - 1] = preScore;
for (a = 0; a < NODE_N; a++) {
for (b = 0; b < NODE_N; b++) {
bestGraph[HIGHEST - 1][a][b] = preGraph[a][b];
}
}
b = HIGHEST - 1;
for (a = HIGHEST - 2; a >= 0; a--) {
if (maxScore[b] > maxScore[a]) {
swap(a, b);
tmpd = maxScore[a];
maxScore[a] = maxScore[b];
maxScore[b] = tmpd;
b = a;
}
}
}
}
} // endwhile
free(localscore);
free(scores);
free(parents);
free(LG);
cudaFree(D_LG);
cudaFree(D_data);
cudaFree(D_localscore);
cudaFree(D_parent);
cudaFree(D_Score);
cudaFree(D_resP);
  for (j = 0; j < HIGHEST; j++) {
    fprintf(fpout, "score:%f\n", maxScore[j]);
    fprintf(fpout, "Best Graph:\n");
    for (int a = 0; a < NODE_N; a++) {
      for (int b = 0; b < NODE_N; b++)
        fprintf(fpout, "%d ", bestGraph[j][a][b]);
      fprintf(fpout, "\n");
    }
    fprintf(fpout, "--------------------------------------------------------------------\n");
  }
fprintf(fpout, "Duration per iteration is %f seconds.\n",
((float)total / ITER) / CLOCKS_PER_SEC);
fprintf(fpout, "Total duration is %f seconds.\n",
(float)(pre2 - pre1 + total) / CLOCKS_PER_SEC);
fprintf(fpout, "Preprocessing duration is %f seconds.\n",
(float)(pre2 - pre1) / CLOCKS_PER_SEC);
printf("Duration per iteration is %f seconds.\n",
((float)total / ITER) / CLOCKS_PER_SEC);
printf("Total duration is %f seconds.\n",
(float)(pre2 - pre1 + total) / CLOCKS_PER_SEC);
printf("Preprocessing duration is %f seconds.\n",
(float)(pre2 - pre1) / CLOCKS_PER_SEC);
  fclose(fpout);
  return 0;
}
float findBestGraph(float* D_localscore, int* D_resP, float* D_Score, bool *D_parent) {
float bestls = -99999999.f;
int bestparent[5];
int bestpN, total;
int node, index;
int pre[NODE_N] = {0};
int parent[NODE_N] = {0};
int posN = 0, i, j, parN, tmp, k, l;
float ls = -99999999999.f, score = 0;
int blocknum;
for (i = 0; i < NODE_N; i++)
for (j = 0; j < NODE_N; j++)
graph[i][j] = 0;
for (node = 0; node < NODE_N; node++) {
bestls = -99999999.f;
posN = 0;
for (i = 0; i < NODE_N; i++) {
if (orders[node][i] == 1) {
pre[posN++] = i;
}
}
if (posN >= 0) { // always true: the GPU path below is taken and the CPU fallback in the else branch is unreachable
total = C(posN, 4) + C(posN, 3) + C(posN, 2) + posN + 1;
blocknum = total / (256 * WORKLOAD) + 1;
cudaMemset(D_resP, 0, blocknum * 4 * sizeof(int));
// note: cudaMemset fills bytes, so this does not store the float value -999999.f into D_Score
cudaMemset(D_Score, -999999.f, blocknum * sizeof(float));
cudaMemcpy(D_parent, orders[node], NODE_N * sizeof(bool), cudaMemcpyHostToDevice);
computeKernel<<<blocknum, 256, 256 * sizeof(float)>>>(
WORKLOAD, sizepernode, D_localscore, D_parent, node, total, D_Score,
D_resP);
cudaMemcpy(parents, D_resP, blocknum * 4 * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(scores, D_Score, blocknum * sizeof(float), cudaMemcpyDeviceToHost);
for (i = 0; i < blocknum; i++) {
if (scores[i] > bestls) {
bestls = scores[i];
parN = 0;
for (tmp = 0; tmp < 4; tmp++) {
if (parents[i * 4 + tmp] < 0)
break;
bestparent[tmp] = parents[i * 4 + tmp];
parN++;
}
bestpN = parN;
}
}
} else {
if (posN >= 4) {
for (i = 0; i < posN; i++) {
for (j = i + 1; j < posN; j++) {
for (k = j + 1; k < posN; k++) {
for (l = k + 1; l < posN; l++) {
parN = 4;
if (pre[i] > node)
parent[1] = pre[i];
else
parent[1] = pre[i] + 1;
if (pre[j] > node)
parent[2] = pre[j];
else
parent[2] = pre[j] + 1;
if (pre[k] > node)
parent[3] = pre[k];
else
parent[3] = pre[k] + 1;
if (pre[l] > node)
parent[4] = pre[l];
else
parent[4] = pre[l] + 1;
index = findindex(parent, parN);
index += sizepernode * node;
ls = localscore[index];
if (ls > bestls) {
bestls = ls;
bestpN = parN;
for (tmp = 0; tmp < parN; tmp++)
bestparent[tmp] = parent[tmp + 1];
}
}
}
}
}
}
if (posN >= 3) {
for (i = 0; i < posN; i++) {
for (j = i + 1; j < posN; j++) {
for (k = j + 1; k < posN; k++) {
parN = 3;
if (pre[i] > node)
parent[1] = pre[i];
else
parent[1] = pre[i] + 1;
if (pre[j] > node)
parent[2] = pre[j];
else
parent[2] = pre[j] + 1;
if (pre[k] > node)
parent[3] = pre[k];
else
parent[3] = pre[k] + 1;
index = findindex(parent, parN);
index += sizepernode * node;
ls = localscore[index];
if (ls > bestls) {
bestls = ls;
bestpN = parN;
for (tmp = 0; tmp < parN; tmp++)
bestparent[tmp] = parent[tmp + 1];
}
}
}
}
}
if (posN >= 2) {
for (i = 0; i < posN; i++) {
for (j = i + 1; j < posN; j++) {
parN = 2;
if (pre[i] > node)
parent[1] = pre[i];
else
parent[1] = pre[i] + 1;
if (pre[j] > node)
parent[2] = pre[j];
else
parent[2] = pre[j] + 1;
index = findindex(parent, parN);
index += sizepernode * node;
ls = localscore[index];
if (ls > bestls) {
bestls = ls;
bestpN = parN;
for (tmp = 0; tmp < parN; tmp++)
bestparent[tmp] = parent[tmp + 1];
}
}
}
}
if (posN >= 1) {
for (i = 0; i < posN; i++) {
parN = 1;
if (pre[i] > node)
parent[1] = pre[i];
else
parent[1] = pre[i] + 1;
index = findindex(parent, parN);
index += sizepernode * node;
ls = localscore[index];
if (ls > bestls) {
bestls = ls;
bestpN = parN;
for (tmp = 0; tmp < parN; tmp++)
bestparent[tmp] = parent[tmp + 1];
}
}
}
parN = 0;
index = sizepernode * node;
ls = localscore[index];
if (ls > bestls) {
bestls = ls;
bestpN = 0;
}
}
if (bestls > -99999999.f) {
for (i = 0; i < bestpN; i++) {
if (bestparent[i] < node)
graph[node][bestparent[i] - 1] = 1;
else
graph[node][bestparent[i]] = 1;
}
score += bestls;
}
}
return score;
}
void sortGraph() {
float max = -99999999999999.f;
int maxi, i, j;
float tmp;
for (j = 0; j < HIGHEST - 1; j++) {
max = maxScore[j];
maxi = j;
for (i = j + 1; i < HIGHEST; i++) {
if (maxScore[i] > max) {
max = maxScore[i];
maxi = i;
}
}
swap(j, maxi);
tmp = maxScore[j];
maxScore[j] = max;
maxScore[maxi] = tmp;
}
}
void swap(int a, int b) {
int i, j;
bool tmp;
for (i = 0; i < NODE_N; i++) {
for (j = 0; j < NODE_N; j++) {
tmp = bestGraph[a][i][j];
bestGraph[a][i][j] = bestGraph[b][i][j];
bestGraph[b][i][j] = tmp;
}
}
}
void initial() {
int i, j, tmp, a, b, r;
bool tmpd;
tmp = 1;
for (i = 1; i <= 4; i++) {
tmp += C(NODE_N - 1, i);
}
sizepernode = tmp;
tmp *= NODE_N;
localscore = (float*) malloc(tmp * sizeof(float));
for (i = 0; i < tmp; i++)
localscore[i] = 0;
for (i = 0; i < NODE_N; i++) {
for (j = 0; j < NODE_N; j++)
orders[i][j] = 0;
}
for (i = 0; i < NODE_N; i++) {
for (j = 0; j < i; j++)
orders[i][j] = 1;
}
r = rand() % 10000;
for (i = 0; i < r; i++) {
a = rand() % NODE_N;
b = rand() % NODE_N;
for (j = 0; j < NODE_N; j++) {
tmpd = orders[j][a];
orders[j][a] = orders[j][b];
orders[j][b] = tmpd;
}
for (j = 0; j < NODE_N; j++) {
tmpd = orders[a][j];
orders[a][j] = orders[b][j];
orders[b][j] = tmpd;
}
}
for (i = 0; i < NODE_N; i++) {
for (j = 0; j < NODE_N; j++) {
preOrders[i][j] = orders[i][j];
}
}
}
// generate a random order
int genOrders() {
int a, b, j;
bool tmp;
a = rand() % NODE_N;
b = rand() % NODE_N;
for (j = 0; j < NODE_N; j++) {
tmp = orders[a][j];
orders[a][j] = orders[b][j];
orders[b][j] = tmp;
}
for (j = 0; j < NODE_N; j++) {
tmp = orders[j][a];
orders[j][a] = orders[j][b];
orders[j][b] = tmp;
}
return 1;
}
// decide whether to keep or discard the new order
int ConCore() {
int i, j;
float tmp;
tmp = log((rand() % 100000) / 100000.0);
if (tmp < (score - preScore)) {
for (i = 0; i < NODE_N; i++) {
for (j = 0; j < NODE_N; j++) {
preOrders[i][j] = orders[i][j];
preGraph[i][j] = graph[i][j];
}
}
preScore = score;
return 1;
}
return 0;
}
void genScore() {
}
void Pre_logGamma() {
  LG = (float*) malloc((DATA_N + 2) * sizeof(float));
  LG[1] = log(1.0);
  for (int i = 2; i <= DATA_N + 1; i++) {
    LG[i] = LG[i - 1] + log((float)i);
  }
}
void incr(int *bit, int n) {
bit[n]++;
if (bit[n] >= 2) {
bit[n] = 0;
incr(bit, n + 1);
}
return;
}
void incrS(int *bit, int n) {
  bit[n]++;
  if (bit[n] >= STATE_N) {
    bit[n] = 0;
    incrS(bit, n + 1); // propagate the carry in base STATE_N
  }
  return;
}
bool getState(int parN, int *state, int time) {
int j = 1;
j = pow(STATE_N, (float)parN) - 1;
if (time > j)
return false;
if (time >= 1)
incrS(state, 0);
return true;
}
int findindex(int *arr, int size) { // reminder: arr[0] must be 0, size == array size - 1,
                                    // and indices start from 0
int i, j, index = 0;
for (i = 1; i < size; i++) {
index += C(NODE_N - 1, i);
}
for (i = 1; i <= size - 1; i++) {
for (j = arr[i - 1] + 1; j <= arr[i] - 1; j++) {
index += C(NODE_N - 1 - j, size - i);
}
}
index += arr[size] - arr[size - 1];
return index;
}
int C(int n, int a) {
int i, res = 1, atmp = a;
for (i = 0; i < atmp; i++) {
res *= n;
n--;
}
for (i = 0; i < atmp; i++) {
res /= a;
a--;
}
return res;
}
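/*
 * Standalone sketch (separate from the program above): it reproduces the
 * parent-set count that initial() stores in sizepernode, i.e. the empty set
 * plus all parent sets of size 1..4 drawn from the other NODE_N - 1 nodes.
 * The binomial helper below is a numerically safer variant of C() above, and
 * the node count N is a made-up example value.
 */
#include <stdio.h>

static long long binom(int n, int a) {
  long long res = 1;
  for (int i = 1; i <= a; i++)
    res = res * (n - i + 1) / i; // exact at every step: res equals C(n, i)
  return res;
}

int main(void) {
  const int N = 10;       // hypothetical NODE_N
  long long per_node = 1; // the empty parent set
  for (int i = 1; i <= 4; i++)
    per_node += binom(N - 1, i);
  printf("parent sets per node (sizepernode) = %lld\n", per_node);
  printf("total local scores (sizepernode * NODE_N) = %lld\n", per_node * N);
  return 0;
}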
// =============================== the_stack ===============================
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/slice.hpp>
#include <nbla/cuda/math.hpp>
#include <nbla/cuda/utils/nd_index.cuh>
#include <nbla/cuda/utils/nd_index.hpp>
#include <nbla/variable.hpp>
namespace nbla {
// 1d slice
template <typename T>
__global__ void kernel_slice_1d_forward(const int size, const T *x, T *y,
const int start, const int step) {
NBLA_CUDA_KERNEL_LOOP(idx, size) { y[idx] = x[start + idx * step]; }
}
template <typename T, bool accum>
__global__ void kernel_slice_1d_backward(const int size, const T *dy, T *dx,
const int start, const int step) {
NBLA_CUDA_KERNEL_LOOP(idx, size) {
dx[start + idx * step] = accum ? dx[start + idx * step] + dy[idx] : dy[idx];
}
}
template <typename T>
void slice_1d_forward(const T *x_data, T *y_data, Size_t ndim, Size_t ysize,
const Shape_t &xshape, const Shape_t &yshape,
const Shape_t &xstrides, const Shape_t &ystrides,
const vector<int> &start, const vector<int> &step) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_slice_1d_forward, ysize, x_data, y_data,
start[0], step[0]);
}
template <typename T, bool accum>
void slice_1d_backward(const T *y_grad, T *x_grad, Size_t ndim, Size_t ysize,
const Shape_t &xshape, const Shape_t &yshape,
const Shape_t &xstrides, const Shape_t &ystrides,
const vector<int> &start, const vector<int> &step) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_slice_1d_backward<T, accum>), ysize,
y_grad, x_grad, start[0], step[0]);
}
// 2d slice
template <typename T>
__global__ void kernel_slice_2d_forward(const int size, const T *x, T *y,
const int xstrides, const int ystrides,
const int2 start, const int2 step) {
NBLA_CUDA_KERNEL_LOOP(yidx, size) {
auto nd_yidx = device_flat_to_2d(yidx, ystrides);
auto nd_xidx =
make_int2(start.x + nd_yidx.x * step.x, start.y + nd_yidx.y * step.y);
auto xidx = device_2d_to_flat(nd_xidx, xstrides);
y[yidx] = x[xidx];
}
}
template <typename T, bool accum>
__global__ void kernel_slice_2d_backward(const int size, const T *dy, T *dx,
const int xstrides, const int ystrides,
const int2 start, const int2 step) {
NBLA_CUDA_KERNEL_LOOP(yidx, size) {
auto nd_yidx = device_flat_to_2d(yidx, ystrides);
auto nd_xidx =
make_int2(start.x + nd_yidx.x * step.x, start.y + nd_yidx.y * step.y);
auto xidx = device_2d_to_flat(nd_xidx, xstrides);
dx[xidx] = accum ? dy[yidx] + dx[xidx] : dy[yidx];
}
}
template <typename T>
void slice_2d_forward(const T *x_data, T *y_data, Size_t ndim, Size_t ysize,
const Shape_t &xshape, const Shape_t &yshape,
const Shape_t &xstrides, const Shape_t &ystrides,
const vector<int> &start, const vector<int> &step) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_slice_2d_forward, ysize, x_data, y_data,
xstrides[0], ystrides[0], to_int2(start),
to_int2(step));
}
template <typename T, bool accum>
void slice_2d_backward(const T *y_grad, T *x_grad, Size_t ndim, Size_t ysize,
const Shape_t &xshape, const Shape_t &yshape,
const Shape_t &xstrides, const Shape_t &ystrides,
const vector<int> &start, const vector<int> &step) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_slice_2d_backward<T, accum>), ysize,
y_grad, x_grad, xstrides[0], ystrides[0],
to_int2(start), to_int2(step));
}
// 3d slice
template <typename T>
__global__ void kernel_slice_3d_forward(const int size, const T *x, T *y,
const int2 xstrides,
const int2 ystrides, const int3 start,
const int3 step) {
NBLA_CUDA_KERNEL_LOOP(yidx, size) {
auto nd_yidx = device_flat_to_3d(yidx, ystrides);
auto nd_xidx =
make_int3(start.x + nd_yidx.x * step.x, start.y + nd_yidx.y * step.y,
start.z + nd_yidx.z * step.z);
auto xidx = device_3d_to_flat(nd_xidx, xstrides);
y[yidx] = x[xidx];
}
}
template <typename T, bool accum>
__global__ void kernel_slice_3d_backward(const int size, const T *dy, T *dx,
const int2 xstrides,
const int2 ystrides, const int3 start,
const int3 step) {
NBLA_CUDA_KERNEL_LOOP(yidx, size) {
auto nd_yidx = device_flat_to_3d(yidx, ystrides);
auto nd_xidx =
make_int3(start.x + nd_yidx.x * step.x, start.y + nd_yidx.y * step.y,
start.z + nd_yidx.z * step.z);
auto xidx = device_3d_to_flat(nd_xidx, xstrides);
dx[xidx] = accum ? dy[yidx] + dx[xidx] : dy[yidx];
}
}
template <typename T>
void slice_3d_forward(const T *x_data, T *y_data, Size_t ndim, Size_t ysize,
const Shape_t &xshape, const Shape_t &yshape,
const Shape_t &xstrides, const Shape_t &ystrides,
const vector<int> &start, const vector<int> &step) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_slice_3d_forward, ysize, x_data, y_data,
to_int2(xstrides), to_int2(ystrides),
to_int3(start), to_int3(step));
}
template <typename T, bool accum>
void slice_3d_backward(const T *y_grad, T *x_grad, Size_t ndim, Size_t ysize,
const Shape_t &xshape, const Shape_t &yshape,
const Shape_t &xstrides, const Shape_t &ystrides,
const vector<int> &start, const vector<int> &step) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
(kernel_slice_3d_backward<T, accum>), ysize, y_grad, x_grad,
to_int2(xstrides), to_int2(ystrides), to_int3(start), to_int3(step));
}
// 4d slice
template <typename T>
__global__ void kernel_slice_4d_forward(const int size, const T *x, T *y,
const int3 xstrides,
const int3 ystrides, const int4 start,
const int4 step) {
NBLA_CUDA_KERNEL_LOOP(yidx, size) {
auto nd_yidx = device_flat_to_4d(yidx, ystrides);
auto nd_xidx =
make_int4(start.x + nd_yidx.x * step.x, start.y + nd_yidx.y * step.y,
start.z + nd_yidx.z * step.z, start.w + nd_yidx.w * step.w);
auto xidx = device_4d_to_flat(nd_xidx, xstrides);
y[yidx] = x[xidx];
}
}
template <typename T, bool accum>
__global__ void kernel_slice_4d_backward(const int size, const T *dy, T *dx,
const int3 xstrides,
const int3 ystrides, const int4 start,
const int4 step) {
NBLA_CUDA_KERNEL_LOOP(yidx, size) {
auto nd_yidx = device_flat_to_4d(yidx, ystrides);
auto nd_xidx =
make_int4(start.x + nd_yidx.x * step.x, start.y + nd_yidx.y * step.y,
start.z + nd_yidx.z * step.z, start.w + nd_yidx.w * step.w);
auto xidx = device_4d_to_flat(nd_xidx, xstrides);
dx[xidx] = accum ? dy[yidx] + dx[xidx] : dy[yidx];
}
}
template <typename T>
void slice_4d_forward(const T *x_data, T *y_data, Size_t ndim, Size_t ysize,
const Shape_t &xshape, const Shape_t &yshape,
const Shape_t &xstrides, const Shape_t &ystrides,
const vector<int> &start, const vector<int> &step) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_slice_4d_forward, ysize, x_data, y_data,
to_int3(xstrides), to_int3(ystrides),
to_int4(start), to_int4(step));
}
template <typename T, bool accum>
void slice_4d_backward(const T *y_grad, T *x_grad, Size_t ndim, Size_t ysize,
const Shape_t &xshape, const Shape_t &yshape,
const Shape_t &xstrides, const Shape_t &ystrides,
const vector<int> &start, const vector<int> &step) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
(kernel_slice_4d_backward<T, accum>), ysize, y_grad, x_grad,
to_int3(xstrides), to_int3(ystrides), to_int4(start), to_int4(step));
}
// nd slice with template
template <typename T, int NDIM>
__global__ void kernel_slice_nd_forward(const int size, const T *x, T *y,
const NdIndex<NDIM> xstrides,
const NdIndex<NDIM> ystrides,
const NdIndex<NDIM> start,
const NdIndex<NDIM> step) {
NBLA_CUDA_KERNEL_LOOP(yidx, size) {
auto nd_yidx = device_flat_to_nd(yidx, ystrides);
NdIndex<NDIM> nd_xidx;
for (int i = 0; i < NDIM; i++) {
nd_xidx.nd_idx[i] = start.nd_idx[i] + nd_yidx.nd_idx[i] * step.nd_idx[i];
}
auto xidx = device_nd_to_flat(nd_xidx, xstrides);
y[yidx] = x[xidx];
}
}
template <typename T, int NDIM>
void slice_nd_forward(const T *x_data, T *y_data, Size_t ndim, Size_t ysize,
const Shape_t &xshape, const Shape_t &yshape,
const Shape_t &xstrides, const Shape_t &ystrides,
const vector<int> &start, const vector<int> &step) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
(kernel_slice_nd_forward<T, NDIM>), ysize, x_data, y_data,
to_nd_index<NDIM>(xstrides), to_nd_index<NDIM>(ystrides),
to_nd_index<NDIM>(start), to_nd_index<NDIM>(step));
}
template <typename T, int NDIM, bool accum>
__global__ void kernel_slice_nd_backward(const int size, const T *dy, T *dx,
const NdIndex<NDIM> xstrides,
const NdIndex<NDIM> ystrides,
const NdIndex<NDIM> start,
const NdIndex<NDIM> step) {
NBLA_CUDA_KERNEL_LOOP(yidx, size) {
auto nd_yidx = device_flat_to_nd(yidx, ystrides);
NdIndex<NDIM> nd_xidx;
for (int i = 0; i < NDIM; i++) {
nd_xidx.nd_idx[i] = start.nd_idx[i] + nd_yidx.nd_idx[i] * step.nd_idx[i];
}
auto xidx = device_nd_to_flat(nd_xidx, xstrides);
dx[xidx] = accum ? dy[yidx] + dx[xidx] : dy[yidx];
}
}
template <typename T, int NDIM, bool accum>
void slice_nd_backward(const T *y_grad, T *x_grad, Size_t ndim, Size_t ysize,
const Shape_t &xshape, const Shape_t &yshape,
const Shape_t &xstrides, const Shape_t &ystrides,
const vector<int> &start, const vector<int> &step) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
(kernel_slice_nd_backward<T, NDIM, accum>), ysize, y_grad, x_grad,
to_nd_index<NDIM>(xstrides), to_nd_index<NDIM>(ystrides),
to_nd_index<NDIM>(start), to_nd_index<NDIM>(step));
}
// nd slice in general
template <typename T>
void slice_nd_forward_loop(const T *x_data, T *y_data, Size_t ndim,
Size_t ysize, const Shape_t &xshape,
const Shape_t &yshape, const Shape_t &xstrides,
const Shape_t &ystrides, const vector<int> &start,
const vector<int> &step) {
  // View (D_1, ..., D_N) as (O, I) with O = D_1 * ... * D_{N-1} and I = D_N:
  // for each outer index o, map its n-d y index to the matching x offset and
  // run the 1-d slice kernel over the innermost axis.
auto inner_size = yshape[ndim - 1];
auto outer_size = ysize / inner_size;
Shape_t outer_shape(yshape.begin(), yshape.end() - 1);
auto outer_strides = ndi::strides(outer_shape);
auto start_n = start[ndim - 1];
auto step_n = step[ndim - 1];
for (Size_t o = 0; o < outer_size; o++) {
auto nd_yidx = ndi::flat2nd(o, outer_strides);
Shape_t nd_xidx(ndim);
for (Size_t d = 0; d < ndim - 1; d++) {
auto iy = nd_yidx[d];
auto ix = start[d] + iy * step[d];
nd_xidx[d] = ix;
}
nd_xidx[ndim - 1] = 0;
auto x_idx = ndi::nd2flat(nd_xidx, xstrides);
auto y_idx = o * inner_size;
auto x_o = x_data + x_idx;
auto y_o = y_data + y_idx;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_slice_1d_forward, inner_size, x_o,
y_o, start_n, step_n);
}
}
template <typename T, bool accum>
void slice_nd_backward_loop(const T *y_grad, T *x_grad, Size_t ndim,
Size_t ysize, const Shape_t &xshape,
const Shape_t &yshape, const Shape_t &xstrides,
const Shape_t &ystrides, const vector<int> &start,
const vector<int> &step) {
  // View (D_1, ..., D_N) as (O, I) with O = D_1 * ... * D_{N-1} and I = D_N:
  // for each outer index o, map its n-d y index to the matching x offset and
  // run the 1-d slice kernel over the innermost axis.
auto inner_size = yshape[ndim - 1];
auto outer_size = ysize / inner_size;
Shape_t outer_shape(yshape.begin(), yshape.end() - 1);
auto outer_strides = ndi::strides(outer_shape);
auto start_n = start[ndim - 1];
auto step_n = step[ndim - 1];
for (Size_t o = 0; o < outer_size; o++) {
auto nd_yidx = ndi::flat2nd(o, outer_strides);
Shape_t nd_xidx(ndim);
for (Size_t d = 0; d < ndim - 1; d++) {
auto iy = nd_yidx[d];
auto ix = start[d] + iy * step[d];
nd_xidx[d] = ix;
}
nd_xidx[ndim - 1] = 0;
auto x_idx = ndi::nd2flat(nd_xidx, xstrides);
auto y_idx = o * inner_size;
auto dx_o = x_grad + x_idx;
auto dy_o = y_grad + y_idx;
auto kernel = kernel_slice_1d_backward<T, accum>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, inner_size, dy_o, dx_o, start_n,
step_n);
}
}
template <typename T>
void SliceCuda<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
Slice<T>::setup_impl(inputs, outputs);
if (outputs[0]->size() == 0)
return;
}
template <typename T>
void SliceCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
if (outputs[0]->size() == 0)
return;
cuda_set_device(std::stoi(this->ctx_.device_id));
auto x = inputs[0];
auto y = outputs[0];
auto start = this->start_[0];
auto step = this->step_[0];
auto xshape = x->shape();
auto yshape = y->shape();
auto xstrides = x->strides();
auto ystrides = y->strides();
auto ndim = x->ndim();
auto ysize = y->size();
auto x_data = x->get_data_pointer<Tcu>(this->ctx_);
auto y_data = y->cast_data_and_get_pointer<Tcu>(this->ctx_);
if (ndim == 1) {
slice_1d_forward(x_data, y_data, ndim, ysize, xshape, yshape, xstrides,
ystrides, start, step);
} else if (ndim == 2) {
slice_2d_forward(x_data, y_data, ndim, ysize, xshape, yshape, xstrides,
ystrides, start, step);
} else if (ndim == 3) {
slice_3d_forward(x_data, y_data, ndim, ysize, xshape, yshape, xstrides,
ystrides, start, step);
} else if (ndim == 4) {
slice_4d_forward(x_data, y_data, ndim, ysize, xshape, yshape, xstrides,
ystrides, start, step);
} else if (ndim == 5) {
slice_nd_forward<Tcu, 5>(x_data, y_data, ndim, ysize, xshape, yshape,
xstrides, ystrides, start, step);
} else if (ndim == 6) {
slice_nd_forward<Tcu, 6>(x_data, y_data, ndim, ysize, xshape, yshape,
xstrides, ystrides, start, step);
} else if (ndim == 7) {
slice_nd_forward<Tcu, 7>(x_data, y_data, ndim, ysize, xshape, yshape,
xstrides, ystrides, start, step);
} else {
slice_nd_forward_loop(x_data, y_data, ndim, ysize, xshape, yshape, xstrides,
ystrides, start, step);
}
}
template <typename T>
void SliceCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
if (!propagate_down[0])
return;
if (outputs[0]->size() == 0)
return;
cuda_set_device(std::stoi(this->ctx_.device_id));
auto x = inputs[0];
auto y = outputs[0];
auto start = this->start_[0];
auto step = this->step_[0];
auto xshape = x->shape();
auto yshape = y->shape();
auto xstrides = x->strides();
auto ystrides = y->strides();
auto ndim = x->ndim();
auto ysize = y->size();
auto x_grad = x->cast_grad_and_get_pointer<Tcu>(this->ctx_);
auto y_grad = y->get_grad_pointer<Tcu>(this->ctx_);
if (ndim == 1) {
auto slice =
accum[0] ? slice_1d_backward<Tcu, true> : slice_1d_backward<Tcu, false>;
slice(y_grad, x_grad, ndim, ysize, xshape, yshape, xstrides, ystrides,
start, step);
} else if (ndim == 2) {
auto slice =
accum[0] ? slice_2d_backward<Tcu, true> : slice_2d_backward<Tcu, false>;
slice(y_grad, x_grad, ndim, ysize, xshape, yshape, xstrides, ystrides,
start, step);
} else if (ndim == 3) {
auto slice =
accum[0] ? slice_3d_backward<Tcu, true> : slice_3d_backward<Tcu, false>;
slice(y_grad, x_grad, ndim, ysize, xshape, yshape, xstrides, ystrides,
start, step);
} else if (ndim == 4) {
auto slice =
accum[0] ? slice_4d_backward<Tcu, true> : slice_4d_backward<Tcu, false>;
slice(y_grad, x_grad, ndim, ysize, xshape, yshape, xstrides, ystrides,
start, step);
} else if (ndim == 5) {
auto slice = accum[0] ? slice_nd_backward<Tcu, 5, true>
: slice_nd_backward<Tcu, 5, false>;
slice(y_grad, x_grad, ndim, ysize, xshape, yshape, xstrides, ystrides,
start, step);
} else if (ndim == 6) {
auto slice = accum[0] ? slice_nd_backward<Tcu, 6, true>
: slice_nd_backward<Tcu, 6, false>;
slice(y_grad, x_grad, ndim, ysize, xshape, yshape, xstrides, ystrides,
start, step);
} else if (ndim == 7) {
auto slice = accum[0] ? slice_nd_backward<Tcu, 7, true>
: slice_nd_backward<Tcu, 7, false>;
slice(y_grad, x_grad, ndim, ysize, xshape, yshape, xstrides, ystrides,
start, step);
} else {
auto slice = accum[0] ? slice_nd_backward_loop<Tcu, true>
: slice_nd_backward_loop<Tcu, false>;
slice(y_grad, x_grad, ndim, ysize, xshape, yshape, xstrides, ystrides,
start, step);
}
}
} // namespace nbla
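// ---------------------------------------------------------------------------
// A minimal host-only sketch (separate from the file above) of the index
// arithmetic that kernel_slice_2d_forward implements: each flat output index
// is decomposed with the output stride, scaled by step, shifted by start and
// re-flattened with the input stride. The shapes and slice parameters below
// are made-up example values.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <vector>

int main() {
  const int xH = 4, xW = 5;             // input shape (row-major)
  const int startY = 1, startX = 0;     // slice x[1::2, 0::2]
  const int stepY = 2, stepX = 2;
  const int yH = 2, yW = 3;             // resulting output shape

  std::vector<int> x(xH * xW), y(yH * yW);
  for (int i = 0; i < xH * xW; ++i)
    x[i] = i;                           // fill with flat indices

  const int xstride = xW, ystride = yW; // innermost strides
  for (int yidx = 0; yidx < yH * yW; ++yidx) {
    const int iy = yidx / ystride;      // flat -> 2-d (output side)
    const int ix = yidx % ystride;
    const int sy = startY + iy * stepY; // apply start/step per axis
    const int sx = startX + ix * stepX;
    y[yidx] = x[sy * xstride + sx];     // 2-d -> flat (input side)
  }

  for (int i = 0; i < yH; ++i) {
    for (int j = 0; j < yW; ++j)
      printf("%2d ", y[i * yW + j]);
    printf("\n");
  }
  return 0;
}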
// =============================== the_stack ===============================
#include <amg.h>
#include <types.h>
#include <cutil.h>
#include <iostream>
#include <iomanip>
#include <amg_level.h>
#include <cycles/cgcycle.h>
#include <allocator.h>
#include <my_timer.h>
template<class Matrix, class Vector>
AMG<Matrix, Vector>::AMG(bool verbose, int convergeType, int cycleType,
int solverType, double tolerance, int cycleIters, int maxIters,
int maxLevels, int topSize, double smootherWeight,
int preInnerIters, int postInnerIters, int postRelaxes,
int dsType, int randMisParameters, int partitionMaxSize, double proOmega,
int aggregatorType, int blockSize, TriMesh* triMesh, TetMesh* tetMesh) :
fine(0), verbose_(verbose),
convergeType_(convergeType == 0 ? ABSOLUTE_CONVERGENCE : RELATIVE_CONVERGENCE),
solverType_(solverType == 0 ? AMG_SOLVER : PCG_SOLVER),
tolerance_(tolerance), cycleIters_(cycleIters),
maxIters_(maxIters), maxLevels_(maxLevels), topSize_(topSize),
smootherWeight_(smootherWeight), preInnerIters_(preInnerIters),
postInnerIters_(postInnerIters), postRelaxes_(postRelaxes),
dsType_(dsType), randMisParameters_(randMisParameters), partitionMaxSize_(partitionMaxSize),
proOmega_(proOmega), aggregatorType_(aggregatorType), blockSize_(blockSize),
triMesh_(triMesh), tetMesh_(tetMesh) {
switch (cycleType) {
case 0:
this->cycleType_ = V_CYCLE;
break;
case 1:
this->cycleType_ = W_CYCLE;
break;
case 2:
this->cycleType_ = F_CYCLE;
break;
case 3:
this->cycleType_ = K_CYCLE;
break;
}
}
template<class Matrix, class Vector>
AMG<Matrix, Vector>::~AMG() { }
/**********************************************************
 * Returns true if the solver has converged
 *********************************************************/
template<class Matrix, class Vector>
bool AMG<Matrix, Vector>::converged(const Vector &r, ValueType &nrm)
{
// nrm = get_norm(r, norm);
nrm = cusp::blas::nrm2(r);
if (this->convergeType_ == ABSOLUTE_CONVERGENCE)
{
return nrm <= this->tolerance_;
} else //if (convergence==RELATIVE)
{
if (initial_nrm == -1)
{
initial_nrm = nrm;
return false;
}
//if the norm has been reduced by the tolerance then return true
if (nrm / initial_nrm <= this->tolerance_)
return true;
else
return false;
}
}
/**********************************************************
* Creates the AMG hierarchy
*********************************************************/
template <class Matrix, class Vector>
void AMG<Matrix, Vector>::setup(const Matrix_d &Acsr_d) {
num_levels = 1;
//allocate the fine level
AMG_Level<Matrix, Vector>* level = AMG_Level<Matrix, Vector>::allocate(this);
//set the fine level pointer
fine = level;
level->A_d = Acsr_d;
// Ahyb_d_CG = level->A_d;
level->level_id = 0;
level->nn = Acsr_d.num_rows;
level->m_meshPtr = this->triMesh_;
level->m_tetmeshPtr = this->tetMesh_;
if (this->verbose_) std::cout << "Entering AMG setup loop." << std::endl;
while (true) {
int N = level->A_d.num_rows;
if (this->verbose_)
std::cout << "Rows: " << N << " of max: " << this->topSize_ << std::endl;
if (N < this->topSize_ || num_levels >= this->maxLevels_) {
coarsestlevel = num_levels - 1;
Matrix_h Atmp = level->A_d;
cusp::array2d<ValueType, cusp::host_memory> coarse_dense(Atmp);
LU = cusp::detail::lu_solver<ValueType, cusp::host_memory >(coarse_dense);
if (this->verbose_) std::cout << "Finished with lu_solver." << std::endl;
break;
}
level->next = AMG_Level<Matrix, Vector>::allocate(this);
if (this->verbose_) std::cout << "Finished with AMG_Level_allocate." << std::endl;
level->createNextLevel(this->verbose_);
if (this->verbose_) std::cout << "Finished with createNextLevel call." << std::endl;
if (level->level_id == 0) {
Ahyb_d_CG = level->A_d;
}
if (this->verbose_) std::cout << "Copied A_d." << std::endl;
level->setup(); //allocate smoother !! must be after createNextLevel since A_d is used
if (this->verbose_) std::cout << "level->setup." << std::endl;
level->next->level_id = num_levels;
level->next->nn = level->nnout;
level->next->m_xadj_d = level->m_xadjout_d;
level->next->m_adjncy_d = level->m_adjncyout_d;
int nextN = level->next->A_d.num_rows;
if (this->verbose_) std::cout << "level->next finished" << std::endl;
//resize vectors
level->xc_d = Vector_d(nextN, -1);
level->bc_d = Vector_d(nextN, -1);
if (this->verbose_) std::cout << "resize vectors finished" << std::endl;
//advance to the next level
level = level->next;
//increment the level counter
num_levels++;
if (this->verbose_)
std::cout << "Looping with num_levels=" << num_levels << std::endl;
}
}
/***************************************************
* Launches a single iteration of the outer solver
***************************************************/
template <class Matrix, class Vector>
void AMG<Matrix, Vector>::solve_iteration(const Vector_d_CG &b, Vector_d_CG &x)
{
Vector_d b_d(b);
Vector_d x_d(x);
switch (this->solverType_)
{
case AMG_SOLVER:
//perform a single cycle on the amg hierarchy
fine->cycle(this->cycleType_, b_d, x_d, this->verbose_);
x = Vector_d_CG(x_d);
break;
case PCG_SOLVER:
//create a single CG cycle (this will run CG immediately)
CG_Flex_Cycle<Matrix_h_CG, Vector_h_CG >(this->cycleType_, this->cycleIters_,
fine, Ahyb_d_CG, b, x, this->tolerance_, this->maxIters_, this->verbose_); //DHL
break;
}
}
/**********************************************************
* Solves the AMG system
*********************************************************/
template <class Matrix, class Vector>
void AMG<Matrix, Vector>::solve(const Vector_d_CG &b_d, Vector_d_CG &x_d)
{
if (this->verbose_)
printf("AMG Solve:\n");
iterations = 0;
initial_nrm = -1;
if (this->verbose_) {
std::cout << std::setw(15) << "iter" << std::setw(15) << "time(s)" << std::setw(15)
<< "residual" << std::setw(15) << "rate" << std::setw(15) << std::endl;
std::cout << " ----------------------------------------------------\n";
}
solve_start = CLOCK();
bool done = false;
do
{
//launch a single solve iteration
solve_iteration(b_d, x_d);
done = true; //converged(r_d, nrm);
} while (++iterations < this->maxIters_ && !done);
if (this->verbose_)
std::cout << " ----------------------------------------------------\n";
solve_stop = CLOCK();
Allocator<Vector>::clear(); // DHL
}
template <class Matrix, class Vector>
void AMG<Matrix, Vector>::printGridStatistics()
{
int total_rows = 0;
int total_nnz = 0;
AMG_Level<Matrix, Vector> *level = fine; // DHL
std::cout << "AMG Grid:\n";
std::cout << std::setw(15) << "LVL" << std::setw(10) << "ROWS" <<
std::setw(18) << "NNZ" << std::setw(10) << "SPRSTY" << std::endl;
std::cout << " ---------------------------------------------\n";
level = fine;
while (level != NULL)
{
total_rows += level->A_d.num_rows;
total_nnz += level->A_d.num_entries;
std::cout << std::setw(15) << level->level_id << std::setw(10) <<
level->A_d.num_rows << std::setw(18) << level->A_d.num_entries <<
std::setw(10) << std::setprecision(3) <<
level->A_d.num_entries / (double)(level->A_d.num_rows * level->A_d.num_cols)
<< std::setprecision(6) << std::endl;
level = level->next;
}
// DHL
std::cout << " ---------------------------------------------\n";
std::cout << " Grid Complexity: " << total_rows / (double)fine->A_d.num_rows << std::endl;
std::cout << " Operator Complexity: " << total_nnz / (double)fine->A_d.num_entries << std::endl;
}
// print a line of length l, starting at character s
void printLine(const int l, const int s)
{
std::cout << std::setw(s) << " ";
for (int i = 0; i < l; i++)
{
std::cout << "-";
}
std::cout << std::endl;
}
template <class Matrix, class Vector>
void AMG<Matrix, Vector>::printProfile()
{
#ifdef PROFILE
// print headers from first AMG level
std::vector<const char *> headers = fine[0].Profile.getHeaders();
std::vector<double> levelTimes;
std::vector<std::vector<double> > globalTimes;
cout << "\n" << setw(7) << "Level";
typedef std::vector<const char *>::iterator headerIter;
for(headerIter it = headers.begin(); it != headers.end(); ++it)
{
cout << setw(max((int)strlen(*it) + 1, 18)) << *it;
// centerString(*it,16);
}
cout << setw(12) << "Total" << endl;
// now print the sub titles
cout << setw(7) << " ";
for(headerIter it = headers.begin(); it != headers.end(); ++it)
{
cout << setw(6) << "t" << setw(6) << "l%" << setw(6) << "g%";
}
cout << setw(6) << "l" << setw(6) << "g%" << endl;
// print a line across
printLine(108, 2);
AMG_Level<Matrix, Vector> *level = fine;
while(level != NULL)
{
levelTimes = level->Profile.getTimes();
globalTimes.push_back(levelTimes);
// cout << setw(4) << level->level_id;
//level->Profile.writeTimes();
//cout << endl;
level = level->next;
}
// now we have all of the times for all levels, work on them
// get the global total time
double tTotal = 0.0;
double *levelTotals = new double[globalTimes.size()];
for(int i = 0; i < globalTimes.size(); i++)
{
levelTotals[i] = 0.0;
}
// get both total (global) time and level totals
for(int i = 0; i < globalTimes.size(); i++)
{
for(int j = 0; j < globalTimes[i].size(); j++)
{
tTotal += globalTimes[i][j];
levelTotals[i] += globalTimes[i][j];
}
}
// only ever print out 2 decimal places
cout.precision(2);
// loop over each level & print stats
level = fine;
while(level != NULL)
{
int level_id = level->level_id;
cout << setw(7) << level_id;
for(int i = 0; i < globalTimes[level_id].size(); i++)
{
double t = globalTimes[level_id][i];
double levelPercent = t / levelTotals[level_id] * 100;
double globalPercent = t / tTotal * 100;
cout << scientific << setw(6) << fixed << t << setw(6) << levelPercent << setw(6) << globalPercent;
}
// totals here
cout << setw(6) << fixed << levelTotals[level_id] << setw(6) << levelTotals[level_id] / tTotal * 100;
cout << endl;
// next level
level = level->next;
}
delete[] levelTotals;
// print final line across
printLine(108, 2);
#endif
}
template <class Matrix, class Vector>
void AMG<Matrix, Vector>::printCoarsePoints()
{
#ifdef DEBUG
typedef std::vector<int> iVec;
typedef std::vector<int>::iterator iVecIter;
ofstream coarsePoints("coarse_points.dat");
iVec originalRows;
AMG_Level<Matrix, Vector> *level = fine;
while(level != NULL)
{
originalRows = level->getOriginalRows();
level = level->next;
if(level == NULL)
{
break;
}
coarsePoints << level->level_id << " " << level->getNumRows() << endl;
for(iVecIter it = originalRows.begin(); it != originalRows.end(); ++it)
{
coarsePoints << *it << endl;
}
}
coarsePoints.close();
#endif
}
template <class Matrix, class Vector>
void AMG<Matrix, Vector>::printConnections()
{
#ifdef DEBUG
ofstream connFile("connections.dat");
AMG_Level<Matrix, Vector> *level = fine;
Matrix ATemp;
while(level != NULL)
{
connFile << level->level_id << " " << level->getNumRows() << endl;
ATemp = level->getA();
for(int i = 0; i < ATemp.num_rows; i++)
{
// get the row offset & num rows
int offset = ATemp.row_offsets[i];
int numEntries = ATemp.row_offsets[i + 1] - offset;
// # of connections is numEntries - 1 (ignoring diagonal)
// this->numConnections.push_back(numEntries-1);
connFile << numEntries - 1 << " ";
// loop over non-zeros and add non-diagonal terms
for(int j = offset; j < offset + numEntries; j++)
{
int columnIndex = ATemp.column_indices[j];
if(i != columnIndex)
{
// this->connections.push_back(columnIndex);
connFile << columnIndex << " ";
}
}
connFile << endl;
}
level = level->next;
}
#endif
}
/****************************************
* Explict instantiations
***************************************/
template class AMG < Matrix_h, Vector_h > ;
//template class AMG<Matrix_h_CG, Vector_h_CG>;
//template class AMG<Matrix_d,Vector_d>;
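// ---------------------------------------------------------------------------
// A standalone sketch (separate from the file above) of the convergence test
// that AMG::converged() applies: in absolute mode the residual norm is
// compared against the tolerance directly, in relative mode against the first
// residual norm recorded. All names and sample values below are made up for
// illustration.
// ---------------------------------------------------------------------------
#include <cstdio>

enum ConvergenceMode { ABSOLUTE_MODE, RELATIVE_MODE };

struct ConvergenceCheck {
  ConvergenceMode mode;
  double tolerance;
  double initial_nrm; // -1 marks "no residual recorded yet", as in AMG

  bool converged(double nrm) {
    if (mode == ABSOLUTE_MODE)
      return nrm <= tolerance;
    if (initial_nrm < 0.0) { // first call only records the reference norm
      initial_nrm = nrm;
      return false;
    }
    return nrm / initial_nrm <= tolerance;
  }
};

int main() {
  ConvergenceCheck check{RELATIVE_MODE, 1e-6, -1.0};
  const double norms[] = {1.0e2, 3.0e-1, 2.0e-4, 5.0e-5}; // made-up residuals
  for (double nrm : norms)
    printf("nrm = %g -> converged = %d\n", nrm, (int)check.converged(nrm));
  return 0;
}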
// =============================== the_stack ===============================
/**
* \test Tests conversion between vectors with different numeric type
**/
//
// *** System
//
#include <iostream>
#include <iomanip>
#include <vector>
//
// *** ViennaCL
//
//#define VIENNACL_DEBUG_ALL
#include "viennacl/vector.hpp"
#include "viennacl/vector_proxy.hpp"
template<typename NumericT, typename VectorT>
int check(std::vector<NumericT> const & std_dest, std::size_t start_dest, std::size_t inc_dest, std::size_t size_dest,
VectorT const & vcl_dest)
{
std::vector<NumericT> tempvec(vcl_dest.size());
viennacl::copy(vcl_dest, tempvec);
for (std::size_t i=0; i < size_dest; ++i)
{
if ( std_dest[start_dest + i * inc_dest] < tempvec[i]
|| std_dest[start_dest + i * inc_dest] > tempvec[i])
{
std::cerr << "Failure at index " << i << ": STL value " << std_dest[start_dest + i * inc_dest] << ", ViennaCL value " << tempvec[i] << std::endl;
return EXIT_FAILURE;
}
}
return EXIT_SUCCESS;
}
//
// -------------------------------------------------------------
//
template<typename STLVectorT1, typename STLVectorT2, typename ViennaCLVectorT1, typename ViennaCLVectorT2 >
int test(STLVectorT1 & std_src, std::size_t start_src, std::size_t inc_src, std::size_t size_src,
STLVectorT2 & std_dest, std::size_t start_dest, std::size_t inc_dest, std::size_t size_dest,
ViennaCLVectorT1 const & vcl_src, ViennaCLVectorT2 & vcl_dest)
{
assert(size_src == size_dest && bool("Size mismatch for STL vectors"));
assert(vcl_src.size() == vcl_dest.size() && bool("Size mismatch for ViennaCL vectors"));
assert(size_src == vcl_src.size() && bool("Size mismatch for STL and ViennaCL vectors"));
typedef typename STLVectorT2::value_type DestNumericT;
for (std::size_t i=0; i<size_src; ++i)
std_dest[start_dest + i * inc_dest] = static_cast<DestNumericT>(std_src[start_src + i * inc_src]);
vcl_dest = vcl_src; // here is the conversion taking place
if (check(std_dest, start_dest, inc_dest, size_dest, vcl_dest) != EXIT_SUCCESS)
return EXIT_FAILURE;
viennacl::vector<DestNumericT> x(vcl_src);
if (check(std_dest, start_dest, inc_dest, size_dest, x) != EXIT_SUCCESS)
return EXIT_FAILURE;
// --------------------------------------------------------------------------
return EXIT_SUCCESS;
}
inline std::string type_string(unsigned int) { return "unsigned int"; }
inline std::string type_string(int) { return "int"; }
inline std::string type_string(unsigned long) { return "unsigned long"; }
inline std::string type_string(long) { return "long"; }
inline std::string type_string(float) { return "float"; }
inline std::string type_string(double) { return "double"; }
template<typename FromNumericT, typename ToNumericT>
int test()
{
int retval = EXIT_SUCCESS;
std::cout << std::endl;
std::cout << "-----------------------------------------------" << std::endl;
std::cout << std::endl;
std::cout << "Conversion test from " << type_string(FromNumericT()) << " to " << type_string(ToNumericT()) << std::endl;
std::cout << std::endl;
std::size_t full_size = 12345;
std::size_t small_size = full_size / 4;
//
// Set up STL objects
//
std::vector<FromNumericT> std_src(full_size);
std::vector<ToNumericT> std_dest(std_src.size());
for (std::size_t i=0; i<std_src.size(); ++i)
std_src[i] = FromNumericT(1.0) + FromNumericT(i);
//
// Set up ViennaCL objects
//
viennacl::vector<FromNumericT> vcl_src(std_src.size());
viennacl::vector<ToNumericT> vcl_dest(std_src.size());
viennacl::copy(std_src, vcl_src);
viennacl::vector<FromNumericT> vcl_src_small(small_size);
viennacl::copy(std_src.begin(), std_src.begin() + typename std::vector<FromNumericT>::difference_type(small_size), vcl_src_small.begin());
viennacl::vector<ToNumericT> vcl_dest_small(small_size);
std::size_t r1_start = 1 + vcl_src.size() / 4;
std::size_t r1_stop = 1 + 2 * vcl_src.size() / 4;
viennacl::range vcl_r1(r1_start, r1_stop);
std::size_t r2_start = 2 * vcl_src.size() / 4;
std::size_t r2_stop = 3 * vcl_src.size() / 4;
viennacl::range vcl_r2(r2_start, r2_stop);
viennacl::vector_range< viennacl::vector<FromNumericT> > vcl_range_src(vcl_src, vcl_r1);
viennacl::vector_range< viennacl::vector<ToNumericT> > vcl_range_dest(vcl_dest, vcl_r2);
std::size_t s1_start = 1 + vcl_src.size() / 5;
std::size_t s1_inc = 3;
std::size_t s1_size = vcl_src.size() / 4;
viennacl::slice vcl_s1(s1_start, s1_inc, s1_size);
std::size_t s2_start = 2 * vcl_dest.size() / 5;
std::size_t s2_inc = 2;
std::size_t s2_size = vcl_dest.size() / 4;
viennacl::slice vcl_s2(s2_start, s2_inc, s2_size);
viennacl::vector_slice< viennacl::vector<FromNumericT> > vcl_slice_src(vcl_src, vcl_s1);
viennacl::vector_slice< viennacl::vector<ToNumericT> > vcl_slice_dest(vcl_dest, vcl_s2);
//
// Now start running tests for vectors, ranges and slices:
//
std::cout << " ** vcl_src = vector, vcl_dest = vector **" << std::endl;
retval = test(std_src, 0, 1, std_src.size(),
std_dest, 0, 1, std_dest.size(),
vcl_src, vcl_dest);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << " ** vcl_src = vector, vcl_dest = range **" << std::endl;
retval = test(std_src, 0, 1, small_size,
std_dest, r2_start, 1, r2_stop - r2_start,
vcl_src_small, vcl_range_dest);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << " ** vcl_src = vector, vcl_dest = slice **" << std::endl;
retval = test(std_src, 0, 1, small_size,
std_dest, s2_start, s2_inc, s2_size,
vcl_src_small, vcl_slice_dest);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
///////
std::cout << " ** vcl_src = range, vcl_dest = vector **" << std::endl;
retval = test(std_src, r1_start, 1, r1_stop - r1_start,
std_dest, 0, 1, small_size,
vcl_range_src, vcl_dest_small);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << " ** vcl_src = range, vcl_dest = range **" << std::endl;
retval = test(std_src, r1_start, 1, r1_stop - r1_start,
std_dest, r2_start, 1, r2_stop - r2_start,
vcl_range_src, vcl_range_dest);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << " ** vcl_src = range, vcl_dest = slice **" << std::endl;
retval = test(std_src, r1_start, 1, r1_stop - r1_start,
std_dest, s2_start, s2_inc, s2_size,
vcl_range_src, vcl_slice_dest);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
///////
std::cout << " ** vcl_src = slice, vcl_dest = vector **" << std::endl;
retval = test(std_src, s1_start, s1_inc, s1_size,
std_dest, 0, 1, small_size,
vcl_slice_src, vcl_dest_small);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << " ** vcl_src = slice, vcl_dest = range **" << std::endl;
retval = test(std_src, s1_start, s1_inc, s1_size,
std_dest, r2_start, 1, r2_stop - r2_start,
vcl_slice_src, vcl_range_dest);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
std::cout << " ** vcl_src = slice, vcl_dest = slice **" << std::endl;
retval = test(std_src, s1_start, s1_inc, s1_size,
std_dest, s2_start, s2_inc, s2_size,
vcl_slice_src, vcl_slice_dest);
if (retval != EXIT_SUCCESS)
return EXIT_FAILURE;
return EXIT_SUCCESS;
}
//
// -------------------------------------------------------------
//
int main()
{
std::cout << std::endl;
std::cout << "----------------------------------------------" << std::endl;
std::cout << "----------------------------------------------" << std::endl;
std::cout << "## Test :: Type conversion test for vectors " << std::endl;
std::cout << "----------------------------------------------" << std::endl;
std::cout << "----------------------------------------------" << std::endl;
std::cout << std::endl;
int retval = EXIT_SUCCESS;
//
// from int
//
retval = test<int, int>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<int, unsigned int>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<int, long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<int, unsigned long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<int, float>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<int, unsigned long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
#ifdef VIENNACL_WITH_OPENCL
if ( viennacl::ocl::current_device().double_support() )
#endif
{
retval = test<int, double>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
}
//
// from unsigned int
//
retval = test<unsigned int, int>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<unsigned int, unsigned int>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<unsigned int, long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<unsigned int, unsigned long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<unsigned int, float>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<unsigned int, unsigned long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
#ifdef VIENNACL_WITH_OPENCL
if ( viennacl::ocl::current_device().double_support() )
#endif
{
retval = test<unsigned int, double>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
}
//
// from long
//
retval = test<long, int>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<long, unsigned int>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<long, long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<long, unsigned long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<long, float>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<long, unsigned long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
#ifdef VIENNACL_WITH_OPENCL
if ( viennacl::ocl::current_device().double_support() )
#endif
{
retval = test<long, double>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
}
//
// from unsigned long
//
retval = test<unsigned long, int>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<unsigned long, unsigned int>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<unsigned long, long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<unsigned long, unsigned long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<unsigned long, float>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<unsigned long, unsigned long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
#ifdef VIENNACL_WITH_OPENCL
if ( viennacl::ocl::current_device().double_support() )
#endif
{
retval = test<unsigned long, double>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
}
//
// from float
//
retval = test<float, int>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<float, unsigned int>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<float, long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<float, unsigned long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<float, float>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<float, unsigned long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
#ifdef VIENNACL_WITH_OPENCL
if ( viennacl::ocl::current_device().double_support() )
#endif
{
retval = test<float, double>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
}
//
// from double
//
#ifdef VIENNACL_WITH_OPENCL
if ( viennacl::ocl::current_device().double_support() )
#endif
{
retval = test<double, int>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<double, unsigned int>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<double, long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<double, unsigned long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<double, float>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<double, unsigned long>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
retval = test<double, double>();
if ( retval == EXIT_SUCCESS ) std::cout << "# Test passed" << std::endl;
else return retval;
}
std::cout << std::endl;
std::cout << "------- Test completed --------" << std::endl;
std::cout << std::endl;
return retval;
}
#include <cub/cub.h>
#include <mgpuhost.cuh>
#include <moderngpu.cuh>
#include <nvbio/strings/string_set.h>
#include <nvbio/basic/thrust_view.h>
#include <nvbio/basic/cuda/sort.h>
#include <nvbio/basic/cuda/timer.h>
#include <nvbio/basic/cuda/ldg.h>
#include <nvbio/basic/cuda/primitives.h>
#include <nvbio/io/output/output_types.h>
#include "bam_io.h"
#include "bam_sort.h"
//#ifdef _OPENMP
#include <omp.h>
//#endif
using namespace nvbio;
int test_sorted(const H_KVP_batch& result);
/** --------- Sorting Modules -------- **/
// generate sort keys
void sortkey_gen(bamsort_context* context)
{
thrust::for_each(context->active_read_ids.begin(),
context->active_read_ids.end(),
generate_sort_keys(*context));
}
// local sort of key-val pairs on the device
void sort(bamsort_context* context)
{
thrust::sort_by_key(context->sort_keys.begin(), context->sort_keys.end(), context->active_read_ids.begin());
}
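// Note (added): thrust::sort_by_key sorts sort_keys in ascending order and applies the same
// permutation to active_read_ids, so after this call active_read_ids[i] holds the id of the
// alignment whose key is the i-th smallest in the current batch.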
// used by out-of-core merge for searching a sorted array
// to find the position until which all the elements are less than the pivot
// TODO: ensure thrust uses binary search
uint32 find_split_idx(bamsort_context* context, const uint64 len, const uint64 pivot)
{
return thrust::distance(context->patch_searched.begin(),
thrust::partition_point(context->patch_searched.begin(), context->patch_searched.begin() + len, is_less(pivot)));
}
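// Note (added for clarity): because the searched range is sorted, is_less(pivot) partitions it,
// and thrust::partition_point behaves like a lower_bound here: e.g. for keys {2, 5, 7, 9} and
// pivot 7 the returned distance is 2, the number of elements strictly less than the pivot
// (assuming is_less(p)(x) tests x < p, as the comment above describes).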
// out-of-core merge of two sorted batches
// for single GPU: sequentially merge the pivot elements on the CPU to determine the partitions
// since only one partition at a time can be merged on the device -- this is to avoid
// extracting and sorting the pivot elements separately;
// TODO: optimize if pivots have the same value
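// Worked example (added, not from the original): with PIVOT_SAMPLING_INTERVAL = 4,
// b1.keys = {1,3,5,8, 9,...} and b2.keys = {2,4,6,7, 10,...}, the first pivots are 8 (from b1)
// and 7 (from b2). 7 is the smaller pivot, so the first device merge takes the whole first b2
// block {2,4,6,7} plus the prefix of b1 below 7, i.e. {1,3,5}; every key still unprocessed in
// either batch is >= 7, so the merged partitions can simply be appended to the output in
// pivot order.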
void merge_batches_1GPU(bamsort_context* context, const H_KVP_batch* b1, const H_KVP_batch* b2, H_KVP_batch* out,
float& merge_time, float& data_time, float& search_time)
{
cuda::Timer timer;
// partition info
uint64 b1_npivots = b1->keys.size() / PIVOT_SAMPLING_INTERVAL;
uint64 b2_npivots = b2->keys.size() / PIVOT_SAMPLING_INTERVAL;
uint64 b1_pivot = 1, b2_pivot = 1;
uint64 p1L = 0, p1H = 0; // batch partition limits [L, H)
uint64 p2L = 0, p2H = 0;
uint64 p1_size = 0, p2_size = 0;
uint64 out_idx = 0;
// mgpu context
int current_device;
cudaGetDevice(&current_device);
mgpu::ContextPtr mgpu_ctxt = mgpu::CreateCudaDevice(current_device);
while(1) {
// check if we're in the last partition
if(b1_pivot > b1_npivots || b2_pivot > b2_npivots) {
break; // still need to merge the batch remainders
}
// find the next partition
p1L = p1H;
p2L = p2H;
timer.start();
if(b1->keys[b1_pivot*PIVOT_SAMPLING_INTERVAL-1] <= b2->keys[b2_pivot*PIVOT_SAMPLING_INTERVAL-1]) {
p1H = b1_pivot*PIVOT_SAMPLING_INTERVAL;
// only need to search this patch since the pivots are sorted
NVBIO_CUDA_ASSERT(context->patch_searched.size() <= PIVOT_SAMPLING_INTERVAL);
thrust::copy(b2->keys.begin() + p2L, b2->keys.begin() + b2_pivot*PIVOT_SAMPLING_INTERVAL, context->patch_searched.begin());
p2H = p2L + find_split_idx(context, b2_pivot*PIVOT_SAMPLING_INTERVAL - p2L, b1->keys[b1_pivot*PIVOT_SAMPLING_INTERVAL-1]);
b1_pivot++; // advance the pivot pointer
} else {
p2H = b2_pivot*PIVOT_SAMPLING_INTERVAL;
NVBIO_CUDA_ASSERT(context->patch_searched.size() <= PIVOT_SAMPLING_INTERVAL);
thrust::copy(b1->keys.begin() + p1L, b1->keys.begin() + b1_pivot*PIVOT_SAMPLING_INTERVAL, context->patch_searched.begin());
p1H = p1L + find_split_idx(context, b1_pivot*PIVOT_SAMPLING_INTERVAL - p1L, b2->keys[b2_pivot*PIVOT_SAMPLING_INTERVAL-1]);
b2_pivot++;
}
timer.stop();
search_time += timer.seconds();
p1_size = p1H - p1L;
p2_size = p2H - p2L;
//printf("Partition sizes: %llu %llu \n", p1_size, p2_size);
// if one of the batch partitions is empty, copy the other partition through directly and continue
if(p1_size == 0) {
thrust::copy(b2->keys.begin() + p2L, b2->keys.begin() + p2H, out->keys.begin()+out_idx);
thrust::copy(b2->ids.begin() + p2L, b2->ids.begin() + p2H, out->ids.begin()+out_idx);
out_idx += p2_size;
continue;
} else if(p2_size == 0) {
thrust::copy(b1->keys.begin() + p1L, b1->keys.begin() + p1H, out->keys.begin()+out_idx);
thrust::copy(b1->ids.begin() + p1L, b1->ids.begin() + p1H, out->ids.begin()+out_idx);
out_idx += p1_size;
continue;
} // TODO: if the sizes are less than a given threshold, merge on the CPU
// transfer the partitions to the device
NVBIO_CUDA_ASSERT(context->p1.keys.size() <= 2*PIVOT_SAMPLING_INTERVAL);
NVBIO_CUDA_ASSERT(context->p2.keys.size() <= 2*PIVOT_SAMPLING_INTERVAL);
NVBIO_CUDA_ASSERT(context->r.keys.size() <= 2*PIVOT_SAMPLING_INTERVAL);
timer.start();
thrust::copy(b1->keys.begin() + p1L, b1->keys.begin() + p1H, context->p1.keys.begin());
thrust::copy(b2->keys.begin() + p2L, b2->keys.begin() + p2H, context->p2.keys.begin());
thrust::copy(b1->ids.begin() + p1L, b1->ids.begin() + p1H, context->p1.ids.begin());
thrust::copy(b2->ids.begin() + p2L, b2->ids.begin() + p2H, context->p2.ids.begin());
timer.stop();
data_time += timer.seconds();
// merge
timer.start();
mgpu::MergePairs(context->p1.keys.begin(), context->p1.ids.begin(), p1_size,
context->p2.keys.begin(), context->p2.ids.begin(), p2_size,
context->r.keys.begin(), context->r.ids.begin(), *mgpu_ctxt);
timer.stop();
merge_time += timer.seconds();
// transfer the results to the host
timer.start();
thrust::copy(context->r.keys.begin(), context->r.keys.begin() + p1_size + p2_size, out->keys.begin()+out_idx);
thrust::copy(context->r.ids.begin(), context->r.ids.begin() + p1_size + p2_size, out->ids.begin()+out_idx);
timer.stop();
data_time += timer.seconds();
out_idx += p1_size + p2_size;
}
// merge the final pieces
p1_size = b1->keys.size() - p1H;
p2_size = b2->keys.size() - p2H;
//printf("Final partition sizes: %llu %llu \n", p1_size, p2_size);
// if one of the batch remainders is empty, we are done
if(p1_size == 0) {
thrust::copy(b2->keys.begin() + p2H, b2->keys.end(), out->keys.begin()+out_idx);
thrust::copy(b2->ids.begin() + p2H, b2->ids.end(), out->ids.begin()+out_idx);
return;
} else if(p2_size == 0) {
thrust::copy(b1->keys.begin() + p1H, b1->keys.end(), out->keys.begin()+out_idx);
thrust::copy(b1->ids.begin() + p1H, b1->ids.end(), out->ids.begin()+out_idx);
return;
}
NVBIO_CUDA_ASSERT(context->p1.keys.size() <= 2*PIVOT_SAMPLING_INTERVAL);
NVBIO_CUDA_ASSERT(context->p2.keys.size() <= 2*PIVOT_SAMPLING_INTERVAL);
NVBIO_CUDA_ASSERT(context->r.keys.size() <= 2*PIVOT_SAMPLING_INTERVAL);
timer.start();
thrust::copy(b1->keys.begin() + p1H, b1->keys.end(), context->p1.keys.begin());
thrust::copy(b2->keys.begin() + p2H, b2->keys.end(), context->p2.keys.begin());
thrust::copy(b1->ids.begin() + p1H, b1->ids.end(), context->p1.ids.begin());
thrust::copy(b2->ids.begin() + p2H, b2->ids.end(), context->p2.ids.begin());
timer.stop();
data_time += timer.seconds();
timer.start();
mgpu::MergePairs(context->p1.keys.begin(), context->p1.ids.begin(), p1_size,
context->p2.keys.begin(), context->p2.ids.begin(), p2_size,
context->r.keys.begin(), context->r.ids.begin(), *mgpu_ctxt);
timer.stop();
merge_time += timer.seconds();
timer.start();
thrust::copy(context->r.keys.begin(), context->r.keys.begin() + p1_size + p2_size, out->keys.begin()+out_idx);
thrust::copy(context->r.ids.begin(), context->r.ids.begin() + p1_size + p2_size, out->ids.begin()+out_idx);
timer.stop();
data_time += timer.seconds();
}
// out-of-core merge of two sorted batches
// two GPUs
void merge_batches_2GPU(H_KVP_batch* b1, H_KVP_batch* b2, H_KVP_batch* out)
{
// partition info
uint64 b1_npivots = (b1->keys.size()-1) / PIVOT_SAMPLING_INTERVAL;
uint64 b2_npivots = (b2->keys.size()-1) / PIVOT_SAMPLING_INTERVAL;
// 1. sort the pivots
H_VectorU64 pivots(b1_npivots + b2_npivots);
for(uint64 i = 0; i < b1_npivots; i++) {
pivots[i] = b1->keys[(i+1)*PIVOT_SAMPLING_INTERVAL-1];
}
for(uint64 i = 0; i < b2_npivots; i++) {
pivots[b1_npivots + i] = b2->keys[(i+1)*PIVOT_SAMPLING_INTERVAL-1];
}
thrust::sort(pivots.begin(), pivots.end());
printf("Merge: found and sorted pivots. Num pivots %llu \n", (uint64) pivots.size());
std::vector<H_KVP_batch*> batches(2);
std::vector<H_VectorU64> pivot_idx(2);
batches[0] = b1;
batches[1] = b2;
// 2. search each batch for the partition delimiters
omp_set_num_threads(2);
#pragma omp parallel
{
int tid = omp_get_thread_num();
cudaSetDevice(tid);
H_KVP_batch* b = batches[tid];
D_VectorU64 d_bkeys(D_BATCH_SIZE);
pivot_idx[tid].resize(pivots.size());
uint64 num_processed = 0;
uint64 pid = 0;
while(num_processed < b->keys.size() && pid < pivots.size()) {
uint64 batch_size = D_BATCH_SIZE;
if(b->keys.size() - num_processed < D_BATCH_SIZE) {
batch_size = b->keys.size() - num_processed;
}
thrust::copy(b->keys.begin() + num_processed, b->keys.begin() + num_processed + batch_size, d_bkeys.begin());
// find as many pivots as possible in the loaded partition
while(1) {
if(pid >= pivots.size() || pivots[pid] > b->keys[num_processed + batch_size - 1]) {
break; // load the next batch
}
// pivot is in the loaded section
uint64 offset = thrust::distance(d_bkeys.begin(),
thrust::partition_point(d_bkeys.begin(), d_bkeys.begin() + batch_size, is_less(pivots[pid])));
pivot_idx[tid][pid] = num_processed + offset;
pid++;
}
num_processed += batch_size;
}
if(pid < pivots.size()) {
// if pid == 0, all elements in this batch are smaller than the elements in the second batch
for(uint64 i = pid; i < pivots.size(); i++) {
pivot_idx[tid][i] = b->keys.size();
}
}
printf("Thread %d processed %llu elements \n", tid, num_processed);
}
// 3. find partition offsets into output
// TODO: optimize out empty partitions (when pivots are equal)
uint64 num_partitions = pivots.size() + 1;
H_VectorU64 p_offsets(num_partitions);
p_offsets[0] = 0;
for(uint64 i = 1; i < num_partitions; i++) {
p_offsets[i] = pivot_idx[0][i-1] + pivot_idx[1][i-1];
}
printf("Total number of partitions: %llu \n", num_partitions);
std::vector<mgpu::ContextPtr> mgpu_ctxt(2);
mgpu_ctxt[0] = mgpu::CreateCudaDevice(0);
mgpu_ctxt[1] = mgpu::CreateCudaDevice(1);
// 4. merge the partitions
omp_set_num_threads(2);
#pragma omp parallel
{
int tid = omp_get_thread_num();
cudaSetDevice(tid);
bamsort_context context;
context.allocate_partition();
uint64 p1L, p1H; // batch partition limits [L, H)
uint64 p2L, p2H;
uint64 p1_size, p2_size;
uint64 part_id = tid;
while(part_id < num_partitions) {
uint64 out_idx = p_offsets[part_id];
if(part_id == 0) {
p1L = 0;
p2L = 0;
} else {
p1L = pivot_idx[0][part_id-1];
p2L = pivot_idx[1][part_id-1];
}
if(part_id == num_partitions - 1) {
p1H = b1->keys.size();
p2H = b2->keys.size();
} else {
p1H = pivot_idx[0][part_id];
p2H = pivot_idx[1][part_id];
}
p1_size = p1H - p1L;
p2_size = p2H - p2L;
printf("Thread %d. Partition sizes: %llu %llu\n", tid, p1_size, p2_size);
// if one of the batch partitions is empty, copy the other partition through directly and continue
if(p1_size == 0) {
thrust::copy(b2->keys.begin() + p2L, b2->keys.begin() + p2H, out->keys.begin()+out_idx);
thrust::copy(b2->ids.begin() + p2L, b2->ids.begin() + p2H, out->ids.begin()+out_idx);
part_id += 2;
continue;
} else if(p2_size == 0) {
thrust::copy(b1->keys.begin() + p1L, b1->keys.begin() + p1H, out->keys.begin()+out_idx);
thrust::copy(b1->ids.begin() + p1L, b1->ids.begin() + p1H, out->ids.begin()+out_idx);
part_id += 2;
continue;
}
// transfer the partitions to the device
thrust::copy(b1->keys.begin() + p1L, b1->keys.begin() + p1H, context.p1.keys.begin());
thrust::copy(b2->keys.begin() + p2L, b2->keys.begin() + p2H, context.p2.keys.begin());
thrust::copy(b1->ids.begin() + p1L, b1->ids.begin() + p1H, context.p1.ids.begin());
thrust::copy(b2->ids.begin() + p2L, b2->ids.begin() + p2H, context.p2.ids.begin());
// merge
mgpu::MergePairs(context.p1.keys.begin(), context.p1.ids.begin(), p1_size,
context.p2.keys.begin(), context.p2.ids.begin(), p2_size,
context.r.keys.begin(), context.r.ids.begin(), *mgpu_ctxt[tid]);
// transfer the results to the host
thrust::copy(context.r.keys.begin(), context.r.keys.begin() + p1_size + p2_size, out->keys.begin()+out_idx);
thrust::copy(context.r.ids.begin(), context.r.ids.begin() + p1_size + p2_size, out->ids.begin()+out_idx);
part_id += 2;
}
}
}
/** ------ Sorting Pipelines ---------- **/
// full load -> sort -> store (no IO-compute overlapping)
// single GPU
void bamsort_pipeline_basic(const char* in_fname, const char* out_fname)
{
cuda::Timer timer, timer_alloc, timer_all;
float sort_time = 0, keygen_time = 0, data_time = 0, merge_time = 0, merge_data_time = 0, merge_search_time = 0;
//timer_all.start();
// 1. load BAM
timer.start();
HTSBAMReader bam_reader(in_fname);
BAM_alignment_batch_SoA h_batch(BAM_POSITIONS | BAM_REFIDS | BAM_FLAGS);
bam_reader.read_aln_batch(h_batch, H_BATCH_SIZE);
timer.stop();
printf("BAM load time: %.4fs\n", timer.seconds());
printf("Total number of alignments: %llu \n", h_batch.num_alns);
timer_all.start();
// 2. split and sort
int num_batches = 0;
uint64 num_aln_loaded = 0;
std::list<H_KVP_batch*> sorted_kvp_batches; // container for the sorted batches
bamsort_context device_context; // device data for sorting
while(num_aln_loaded < h_batch.num_alns) {
// transfer the next batch to the device
timer.start();
device_context.load_batch(h_batch, num_aln_loaded, D_BATCH_SIZE);
timer.stop();
data_time += timer.seconds();
// generate the sort keys
timer.start();
sortkey_gen(&device_context);
timer.stop();
keygen_time += timer.seconds();
// sort
timer.start();
sort(&device_context);
timer.stop();
sort_time += timer.seconds();
// save sorted batches on the host
timer.start();
H_KVP_batch* sorted_batch = new H_KVP_batch();
sorted_batch->keys.resize(device_context.sort_keys.size());
sorted_batch->ids.resize(device_context.sort_keys.size());
thrust::copy(device_context.sort_keys.begin(), device_context.sort_keys.end(), sorted_batch->keys.begin());
thrust::copy(device_context.active_read_ids.begin(), device_context.active_read_ids.end(), sorted_batch->ids.begin());
sorted_kvp_batches.push_back(sorted_batch);
timer.stop();
data_time += timer.seconds();
num_batches += 1;
num_aln_loaded += sorted_batch->ids.size();
printf("Processed %d batches and %llu reads \n", num_batches, num_aln_loaded);
}
printf("Local keygen-only time : %.4fs\n", keygen_time);
printf("Local batch sorting-only time : %.4fs\n", sort_time);
printf("Local device data allocation and transfer time : %.4fs\n", data_time);
// free device data
device_context.free_local_sort_batch();
// 3. merge
H_KVP_batch* final_result = sorted_kvp_batches.front();
if(sorted_kvp_batches.size() != 1) {
device_context.allocate_partition();
}
timer.start();
float alloc_time = 0;
while(sorted_kvp_batches.size() > 1) {
timer_alloc.start();
H_KVP_batch* out = new H_KVP_batch();
H_KVP_batch* b1 = sorted_kvp_batches.front();
sorted_kvp_batches.pop_front();
H_KVP_batch* b2 = sorted_kvp_batches.front();
sorted_kvp_batches.pop_front();
// allocate space for the merged batches
out->keys.resize(b1->keys.size() + b2->keys.size());
out->ids.resize(out->keys.size());
timer_alloc.stop();
alloc_time += timer_alloc.seconds();
// merge
merge_batches_1GPU(&device_context, b1, b2, out, merge_time, merge_data_time, merge_search_time);
sorted_kvp_batches.push_back(out);
// free batch memory
b1->free();
b2->free();
delete b1;
delete b2;
if(sorted_kvp_batches.size() == 1) { // merged down to one sequence
final_result = out;
break;
}
}
timer.stop();
printf("Device merge-only time : %.4fs\n", merge_time);
printf("Device merge data time : %.4fs\n", merge_data_time);
printf("Device merge search time : %.4fs\n", merge_search_time);
printf("Merge out alloc time : %.4fs\n", alloc_time);
printf("Total merge time : %.4fs\n", timer.seconds());
timer_all.stop();
test_sorted(*final_result);
timer.start();
//BAM_alignment_batch_SoA out_batch(h_batch.field_mask);
//h_batch.shuffle(out_batch, final_result->ids);
timer.stop();
printf("Shuffle time : %.4fs\n", timer.seconds());
// 4. write BAM output
timer.start();
HTSBAMWriter bam_writer(out_fname);
//bam_writer.write_hdr(bam_reader.header);
//bam_writer.write_aln_batch(h_batch, final_result->ids, bam_reader.header);
timer.stop();
printf("BAM write time : %.4fs\n", timer.seconds());
//timer_all.stop();
printf("Total BAMSORT time : %.4fs\n", timer_all.seconds());
}
// full load -> sort -> store (no overlapping)
// multi-GPU
void bamsort_pipeline_multigpu(const char* in_fname, const char* out_fname)
{
cuda::Timer timer, timer_all;
// 1. load BAM
timer.start();
HTSBAMReader bam_reader(in_fname);
BAM_alignment_batch_SoA h_batch(BAM_POSITIONS | BAM_REFIDS | BAM_FLAGS);
bam_reader.read_aln_batch(h_batch, H_BATCH_SIZE);
timer.stop();
printf("BAM load time: %.4fs\n", timer.seconds());
printf("Total number of alignments: %llu \n", h_batch.num_alns);
int num_dev = 0;
cudaGetDeviceCount(&num_dev);
printf("Total number of CPUs: %d\n", omp_get_num_procs());
printf("Total number of GPUs: %d\n", num_dev);
for (int i = 0; i < num_dev; i++) {
cudaDeviceProp dprop;
cudaGetDeviceProperties(&dprop, i);
printf(" %d: %s\n", i, dprop.name);
}
// 2. split and sort
//TODO: figure out # devices that is appropriate to use based on load size (when num_dev > 2)
if(h_batch.num_alns <= D_MIN_BATCH_SIZE) {
printf("Running on a single device (load size is <= minimum single device size) \n");
num_dev = 1;
}
uint64 thread_batch_size = h_batch.num_alns/num_dev;
std::vector<H_KVP_batch*> thread_sorted_batches(num_dev);
timer_all.start();
omp_set_num_threads(num_dev);
#pragma omp parallel
{
int tid = omp_get_thread_num();
cudaSetDevice(tid);
int devId;
cudaGetDevice(&devId);
printf("Thread %d device %d\n", tid, devId);
cuda::Timer ttimer;
uint64 toffset = tid * thread_batch_size;
uint64 tsize = thread_batch_size;
if(tid == num_dev-1) {
tsize = h_batch.num_alns - toffset; // remainder
}
printf("Thread %d offset %llu size %llu\n", tid, toffset, tsize);
std::list<H_KVP_batch*> sorted_kvp_batches; // host container for the sorted batches
bamsort_context device_context;
uint64 num_aln_loaded = 0;
ttimer.start();
while(num_aln_loaded < tsize) {
// transfer the next batch to the device
uint64 batch_size = D_BATCH_SIZE;
if(tsize - num_aln_loaded < D_BATCH_SIZE) {
batch_size = tsize - num_aln_loaded;
}
device_context.load_batch(h_batch, toffset + num_aln_loaded, batch_size);
sortkey_gen(&device_context);
sort(&device_context);
// save sorted batches on the host
H_KVP_batch* sorted_batch = new H_KVP_batch();
sorted_batch->keys.resize(device_context.sort_keys.size());
sorted_batch->ids.resize(device_context.sort_keys.size());
thrust::copy(device_context.sort_keys.begin(), device_context.sort_keys.end(), sorted_batch->keys.begin());
thrust::copy(device_context.active_read_ids.begin(), device_context.active_read_ids.end(), sorted_batch->ids.begin());
sorted_kvp_batches.push_back(sorted_batch);
num_aln_loaded += sorted_batch->ids.size();
printf("Thread %d processed %llu records \n", tid, (uint64) sorted_batch->ids.size());
}
ttimer.stop();
printf("Thread %d done with local sorting. Time %.4fs \n", tid, ttimer.seconds());
device_context.free_local_sort_batch();
// merge down to a single batch on each device
if(sorted_kvp_batches.size() == 1) {
thread_sorted_batches[tid] = sorted_kvp_batches.front();
} else {
device_context.allocate_partition();
}
ttimer.start();
while(sorted_kvp_batches.size() > 1) {
H_KVP_batch* out = new H_KVP_batch();
H_KVP_batch* b1 = sorted_kvp_batches.front();
sorted_kvp_batches.pop_front();
H_KVP_batch* b2 = sorted_kvp_batches.front();
sorted_kvp_batches.pop_front();
// allocate space for the merged batches
out->keys.resize(b1->keys.size() + b2->keys.size());
out->ids.resize(out->keys.size());
float t1 = 0, t2 = 0, t3 = 0;
merge_batches_1GPU(&device_context, b1, b2, out, t1, t2, t3);
sorted_kvp_batches.push_back(out);
b1->free();
b2->free();
delete b1;
delete b2;
if(sorted_kvp_batches.size() == 1) { // merged down to one sequence
thread_sorted_batches[tid] = out;
break;
}
}
ttimer.stop();
printf("Thread %d done with merging. Time %.4fs \n", tid, ttimer.seconds());
}
H_KVP_batch* final_result = new H_KVP_batch();
if(num_dev == 2) { //TODO: generalize to any number of devices
final_result->keys.resize(thread_sorted_batches[0]->keys.size() + thread_sorted_batches[1]->keys.size());
final_result->ids.resize(final_result->keys.size());
merge_batches_2GPU(thread_sorted_batches[0], thread_sorted_batches[1], final_result);
}
timer_all.stop();
printf("Total sort time : %.4fs\n", timer_all.seconds());
test_sorted(*final_result);
// 4. write BAM output
timer.start();
HTSBAMWriter bam_writer(out_fname);
//bam_writer.write_header(bam_reader.header);
//bam_writer.write_aln_batch(h_batch, final_result.ids, bam_reader.header);
timer.stop();
printf("BAM write time : %.4fs\n", timer.seconds());
}
// permute the sorted BAM file
void generate_unsorted_bam(const char* in_fname, const char* out_fname) {
try {
// load
BAMReader bam_reader(in_fname);
BAM_header hdr = bam_reader.header;
BAM_alignment_batch_raw sorted_batch;
bam_reader.read_aln_batch_raw(sorted_batch, H_BATCH_SIZE);
uint64 num_loaded = sorted_batch.offsets.size()-1;
printf("Total number of loaded alignments: %llu \n", num_loaded);
// generate permutation
std::vector<uint64> perm(num_loaded);
for (uint64 i=0; i<num_loaded; ++i) perm[i] = i; // assign in place; push_back on a pre-sized vector would double its length
std::srand(0);
std::random_shuffle(perm.begin(), perm.end());
H_VectorU64 ids (num_loaded);
for (uint64 i=0; i<num_loaded; ++i) ids[i] = perm[i];
// write
BAMWriter bam_writer(out_fname);
bam_writer.write_header(hdr);
bam_writer.write_aln_batch_raw(sorted_batch, ids);
} catch (nvbio::runtime_error& e) {
printf("%s\n", e.what());
exit(1);
}
}
// concatenate BAM file contents multiple times
void duplicate_unsorted_bam(const char* in_fname, const char* out_fname, int num_repeats) {
try {
BAMReader bam_reader(in_fname);
BAM_header hdr = bam_reader.header;
BAM_alignment_batch_raw sorted_batch;
bam_reader.read_aln_batch_raw(sorted_batch, H_BATCH_SIZE);
uint64 num_loaded = sorted_batch.offsets.size()-1;
printf("Total number of loaded alignments: %llu \n", num_loaded);
H_VectorU64 ids (num_loaded);
thrust::sequence(ids.begin(), ids.end());
BAMWriter bam_writer(out_fname);
bam_writer.write_header(hdr);
for(int i = 0; i < num_repeats; i++) {
bam_writer.write_aln_batch_raw(sorted_batch, ids);
}
} catch (nvbio::runtime_error& e) {
printf("%s\n", e.what());
exit(1);
}
}
//TODO: overlap load with device compute
int main(int argc, char **argv)
{
if(argc < 3) {
printf("Usage: ./bamsort <bam_file> <out_file> \n");
exit(1);
}
try {
//generate_unsorted_bam(argv[1], argv[2]);
//duplicate_unsorted_bam(argv[1], argv[2], 1000);
bamsort_pipeline_basic(argv[1], argv[2]);
//bamsort_pipeline_multigpu(argv[1], argv[2]);
} catch (nvbio::runtime_error& e) {
printf("%s\n", e.what());
exit(1);
}
return 0;
}
int test_sorted(const H_KVP_batch& result)
{
// check that all the keys are in ascending order
uint64 k = 0;
for(uint64 i = 0; i < result.keys.size(); i++) {
if(k > result.keys[i]) {
printf("Failed test; out of order: %llu %llu %llu %llu %llu \n", (uint64) k, (uint64) result.keys[i], (uint64) result.keys[i-1], i, i-1);
return 0;
}
k = result.keys[i];
}
printf("Passed test! \n");
return 1;
}
extern "C" {
#include <ccv.h>
#include <ccv_internal.h>
#include <nnc/ccv_nnc.h>
#include <nnc/ccv_nnc_easy.h>
#include <nnc/ccv_nnc_internal.h>
}
#include <nnc/gpu/ccv_nnc_compat.h>
#ifdef HAVE_CUDA
template<typename NUM>
__global__ void _ccv_nnc_roi_align_forw_nchw(const int nchw, const int ch, const int w, const int h, const int a_n, const int adim0, const int adim1, const int adim2, const NUM* const ap, const int b_n, const int bdim0, const NUM* const bp, const int pool_w, const int pool_h, const int cdim0, const int cdim1, const int cdim2, NUM* const cp)
{
const int pool_chw = ch * pool_h * pool_w;
const int pool_hw = pool_h * pool_w;
CUDA_1D_KERNEL_LOOP(i, nchw) {
const int n = i / pool_chw;
const int cxy = i % pool_chw;
const int k = cxy / pool_hw;
const int xy = cxy % pool_hw;
const int y = xy / pool_w;
const int x = xy % pool_w;
const float roi_x = bp[(n % b_n) * bdim0] * w; // These are assumed to be real-valued coordinates in the range 0 to w - 1 (the ROI comes in normalized and is scaled by w / h here).
const float roi_y = bp[(n % b_n) * bdim0 + 1] * h;
const float roi_w = bp[(n % b_n) * bdim0 + 2] * w;
const float roi_h = bp[(n % b_n) * bdim0 + 3] * h;
const int bin_h = (int)ceilf(roi_h / pool_h); // Number of sample bins per pooled output point. Due to the ceiling we sample at a slightly higher resolution, using bilinear interpolation.
const int bin_w = (int)ceilf(roi_w / pool_w);
const int bin_pool_h = bin_h * pool_h; // The size of the sampled region in integer terms, before averaging.
const int bin_pool_w = bin_w * pool_w;
const float scale_y = roi_h / bin_pool_h; // The scale factor that maps bin indices back to the original coordinates.
const float scale_x = roi_w / bin_pool_w;
const int py = y * bin_h;
const int px = x * bin_w;
float v = 0;
int count = 0;
const float* const apz = ap + (n % a_n) * adim0 + k * adim1;
for (int by = 0; by < bin_h; by++)
{
const float ay = roi_y + (by + py + 0.5) * scale_y - 0.5;
const int iy = (int)floorf(ay);
if (iy + 1 < 0 || iy > h - 1)
continue;
const float ry = ay - iy;
const int iy0 = ccv_clamp(iy, 0, h - 1);
const int iy1 = ccv_clamp(iy + 1, 0, h - 1);
for (int bx = 0; bx < bin_w; bx++)
{
const float ax = roi_x + (bx + px + 0.5) * scale_x - 0.5;
const int ix = (int)floorf(ax);
if (ix + 1 < 0 || ix > w - 1)
continue;
const float rx = ax - ix;
const int ix0 = ccv_clamp(ix, 0, w - 1);
const int ix1 = ccv_clamp(ix + 1, 0, w - 1);
const float c00 = (1 - ry) * (1 - rx);
const float c01 = (1 - ry) * rx;
const float c10 = ry * (1 - rx);
const float c11 = ry * rx;
const float ap00 = apz[iy0 * adim2 + ix0];
const float ap01 = apz[iy0 * adim2 + ix1];
const float ap10 = apz[iy1 * adim2 + ix0];
const float ap11 = apz[iy1 * adim2 + ix1];
v += ap00 * c00 + ap01 * c01 + ap10 * c10 + ap11 * c11;
++count;
}
}
cp[n * cdim0 + k * cdim1 + y * cdim2 + x] = count > 0 ? v / count : 0;
}
}
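// Worked example for the bilinear sampling above (added, not from the original): a sample at
// ay = 2.3, ax = 5.7 gives iy = 2, ix = 5, ry = 0.3, rx = 0.7, so it reads a[2][5], a[2][6],
// a[3][5], a[3][6] with weights 0.21, 0.49, 0.09, 0.21 (summing to 1); each pooled output is
// the average of all such in-bounds samples inside its bin window.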
template<typename NUM>
__global__ void _ccv_nnc_roi_align_forw_nhwc(const int nchw, const int ch, const int w, const int h, const int a_n, const int adim0, const int adim1, const int adim2, const NUM* const ap, const int b_n, const int bdim0, const NUM* const bp, const int pool_w, const int pool_h, const int cdim0, const int cdim1, const int cdim2, NUM* const cp)
{
const int pool_chw = ch * pool_h * pool_w;
const int pool_hw = pool_h * pool_w;
CUDA_1D_KERNEL_LOOP(i, nchw) {
const int n = i / pool_chw;
const int cxy = i % pool_chw;
const int k = cxy / pool_hw;
const int xy = cxy % pool_hw;
const int y = xy / pool_w;
const int x = xy % pool_w;
const float roi_x = bp[(n % b_n) * bdim0] * w; // These are assumed to be real-valued coordinates in the range 0 to w - 1 (the ROI comes in normalized and is scaled by w / h here).
const float roi_y = bp[(n % b_n) * bdim0 + 1] * h;
const float roi_w = bp[(n % b_n) * bdim0 + 2] * w;
const float roi_h = bp[(n % b_n) * bdim0 + 3] * h;
const int bin_h = (int)ceilf(roi_h / pool_h); // Number of sample bins per pooled output point. Due to the ceiling we sample at a slightly higher resolution, using bilinear interpolation.
const int bin_w = (int)ceilf(roi_w / pool_w);
const int bin_pool_h = bin_h * pool_h; // The size of the sampled region in integer terms, before averaging.
const int bin_pool_w = bin_w * pool_w;
const float scale_y = roi_h / bin_pool_h; // The scale factor that maps bin indices back to the original coordinates.
const float scale_x = roi_w / bin_pool_w;
const int py = y * bin_h;
const int px = x * bin_w;
float v = 0;
int count = 0;
const float* const apz = ap + (n % a_n) * adim0 + k;
for (int by = 0; by < bin_h; by++)
{
const float ay = roi_y + (by + py + 0.5) * scale_y - 0.5;
const int iy = (int)floorf(ay);
if (iy + 1 < 0 || iy > h - 1)
continue;
const float ry = ay - iy;
const int iy0 = ccv_clamp(iy, 0, h - 1);
const int iy1 = ccv_clamp(iy + 1, 0, h - 1);
for (int bx = 0; bx < bin_w; bx++)
{
const float ax = roi_x + (bx + px + 0.5) * scale_x - 0.5;
const int ix = (int)floorf(ax);
if (ix + 1 < 0 || ix > w - 1)
continue;
const float rx = ax - ix;
const int ix0 = ccv_clamp(ix, 0, w - 1);
const int ix1 = ccv_clamp(ix + 1, 0, w - 1);
const float c00 = (1 - ry) * (1 - rx);
const float c01 = (1 - ry) * rx;
const float c10 = ry * (1 - rx);
const float c11 = ry * rx;
const float ap00 = apz[iy0 * adim1 + ix0 * adim2];
const float ap01 = apz[iy0 * adim1 + ix1 * adim2];
const float ap10 = apz[iy1 * adim1 + ix0 * adim2];
const float ap11 = apz[iy1 * adim1 + ix1 * adim2];
v += ap00 * c00 + ap01 * c01 + ap10 * c10 + ap11 * c11;
++count;
}
}
cp[n * cdim0 + y * cdim1 + x * cdim2 + k] = count > 0 ? v / count : 0;
}
}
static int _ccv_nnc_roi_align_forw(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
{
assert(input_size == 2);
const ccv_nnc_tensor_view_t* a = (ccv_nnc_tensor_view_t*)inputs[0];
assert(output_size == 1);
const ccv_nnc_tensor_view_t* b = (ccv_nnc_tensor_view_t*)inputs[1];
ccv_nnc_tensor_view_t* c = (ccv_nnc_tensor_view_t*)outputs[0];
const int a_nd = ccv_nnc_tensor_nd(a->info.dim);
assert(a_nd == CCV_NNC_MAX_DIM + 1 || a_nd == CCV_NNC_MAX_DIM + 2);
const int* adim = (a_nd == CCV_NNC_MAX_DIM + 1) ? a->info.dim : a->info.dim + 1;
const int c_nd = ccv_nnc_tensor_nd(c->info.dim);
assert(c_nd == CCV_NNC_MAX_DIM + 1 || c_nd == CCV_NNC_MAX_DIM + 2);
const int* cdim = (c_nd == CCV_NNC_MAX_DIM + 1) ? c->info.dim : c->info.dim + 1;
const int* ainc = CCV_IS_TENSOR_VIEW(a) ? ((a_nd == CCV_NNC_MAX_DIM + 1) ? a->inc : a->inc + 1) : adim;
const int* cinc = CCV_IS_TENSOR_VIEW(c) ? ((c_nd == CCV_NNC_MAX_DIM + 1) ? c->inc : c->inc + 1) : cdim;
const int a_n = ccv_nnc_tensor_get_n(a->info);
const int b_nd = ccv_nnc_tensor_nd(b->info.dim);
assert(b_nd == 1 || b_nd == 2);
const int b_n = b_nd == 1 ? 1 : b->info.dim[0];
const int c_n = ccv_nnc_tensor_get_n(c->info);
assert(c_n == ccv_max(a_n, b_n));
const int aninc = a_nd == CCV_NNC_MAX_DIM + 1 ? 0 : ainc[0] * ainc[1] * ainc[2];
const int* binc = CCV_IS_TENSOR_VIEW(b) ? b->inc : b->info.dim;
const int bninc = b_nd == 1 ? 0 : binc[1];
const int cninc = c_nd == CCV_NNC_MAX_DIM + 1 ? 0 : cinc[0] * cinc[1] * cinc[2];
assert(a->info.format == c->info.format);
cudaStream_t stream = ccv_nnc_stream_context_get_stream(stream_context);
if (a->info.format == CCV_TENSOR_FORMAT_NCHW)
{
const int h = adim[1];
const int w = adim[2];
const int pool_h = cdim[1];
const int pool_w = cdim[2];
assert(cdim[0] == adim[0]);
const int ch = cdim[0];
const int nchw = c_n * pool_h * pool_w * ch;
_ccv_nnc_roi_align_forw_nchw<<<CUDA_GET_BLOCKS(nchw), CUDA_NUM_THREADS, 0, stream>>>(nchw, ch, w, h, a_n, aninc, ainc[1] * ainc[2], ainc[2], a->data.f32, b_n, bninc, b->data.f32, pool_w, pool_h, cninc, cinc[1] * cinc[2], cinc[2], c->data.f32);
} else {
assert(a->info.format == CCV_TENSOR_FORMAT_NHWC);
const int h = adim[0];
const int w = adim[1];
const int pool_h = cdim[0];
const int pool_w = cdim[1];
assert(cdim[2] == adim[2]);
const int ch = cdim[2];
const int nchw = c_n * pool_h * pool_w * ch;
_ccv_nnc_roi_align_forw_nhwc<<<CUDA_GET_BLOCKS(nchw), CUDA_NUM_THREADS, 0, stream>>>(nchw, ch, w, h, a_n, aninc, ainc[1] * ainc[2], ainc[2], a->data.f32, b_n, bninc, b->data.f32, pool_w, pool_h, cninc, cinc[1] * cinc[2], cinc[2], c->data.f32);
}
return CCV_NNC_EXEC_SUCCESS;
}
template<typename NUM>
__global__ void _ccv_nnc_zero_back(const size_t tensor_count, NUM* const a)
{
CUDA_1D_KERNEL_LOOP(i, tensor_count) {
a[i] = 0;
}
}
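// Note (added): the backward kernels below scatter gradients with atomicAdd, so the output
// tensor has to be cleared first; since an IEEE-754 float 0 is all-zero bits, this kernel is
// functionally equivalent to a cudaMemsetAsync of the same buffer on the same stream.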
template<typename NUM>
__global__ void _ccv_nnc_roi_align_back_nchw(const int nchw, const int ch, const int w, const int h, const int o_n, const int odim0, const int odim1, const int odim2, NUM* const op, const int b_n, const int bdim0, const NUM* const bp, const int pool_w, const int pool_h, const int gdim0, const int gdim1, const int gdim2, const NUM* const gp)
{
const int pool_chw = ch * pool_h * pool_w;
const int pool_hw = pool_h * pool_w;
CUDA_1D_KERNEL_LOOP(i, nchw) {
const int n = i / pool_chw;
const int cxy = i % pool_chw;
const int k = cxy / pool_hw;
const int xy = cxy % pool_hw;
const int y = xy / pool_w;
const int x = xy % pool_w;
const float roi_x = bp[(n % b_n) * bdim0] * w; // These are assumed to be real-valued coordinates in the range 0 to w - 1 (the ROI comes in normalized and is scaled by w / h here).
const float roi_y = bp[(n % b_n) * bdim0 + 1] * h;
const float roi_w = bp[(n % b_n) * bdim0 + 2] * w;
const float roi_h = bp[(n % b_n) * bdim0 + 3] * h;
const int bin_h = (int)ceilf(roi_h / pool_h); // Number of sample bins per pooled output point. Due to the ceiling we sample at a slightly higher resolution, using bilinear interpolation.
const int bin_w = (int)ceilf(roi_w / pool_w);
const int bin_pool_h = bin_h * pool_h; // The size of the sampled region in integer terms, before averaging.
const int bin_pool_w = bin_w * pool_w;
const float scale_y = roi_h / bin_pool_h; // The scale factor that maps bin indices back to the original coordinates.
const float scale_x = roi_w / bin_pool_w;
const int py = y * bin_h;
const int px = x * bin_w;
float* const opz = op + (n % o_n) * odim0 + k * odim1;
// Need to be careful about floating-point accuracy. For both the min and the max bound we start from the edge cases:
// if iy is already at h, exceeding it even slightly tips us out of bounds, hence the floor on the upper bound.
// If iy is already at -1, dropping below it even slightly tips us out of bounds, hence the ceil on the lower bound.
// Note that the upper limit uses h rather than h - 1: with h - 1 it would be fine to tip over, because floor(ay)
// trims the excess; only when we are right at h is tipping over unacceptable.
const int bin_h_at_y = ccv_min(bin_h - 1, (int)floorf((h + 0.5 - roi_y) / scale_y - 0.5 - py)) - ccv_max(0, (int)ceilf((-0.5 - roi_y) / scale_y - 0.5 - py)) + 1;
const int bin_w_at_x = ccv_min(bin_w - 1, (int)floorf((w + 0.5 - roi_x) / scale_x - 0.5 - px)) - ccv_max(0, (int)ceilf((-0.5 - roi_x) / scale_x - 0.5 - px)) + 1;
const int count = ccv_max(0, bin_h_at_y) * ccv_max(0, bin_w_at_x);
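// Derivation note (added): a by sample survives the bounds check below exactly when
// ay = roi_y + (by + py + 0.5) * scale_y - 0.5 falls in [-1, h), i.e. (up to the boundary
// handling described above) ceil((-0.5 - roi_y) / scale_y - 0.5 - py) <= by <=
// floor((h + 0.5 - roi_y) / scale_y - 0.5 - py), intersected with [0, bin_h - 1]; the same
// holds for bx with w. count is therefore the number of samples the loops below would
// accumulate, computed in closed form so the incoming gradient can be split evenly without a pre-pass.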
const float v = count > 0 ? gp[n * gdim0 + k * gdim1 + y * gdim2 + x] / count : 0;
for (int by = 0; by < bin_h; by++)
{
const float ay = roi_y + (by + py + 0.5) * scale_y - 0.5;
const int iy = (int)floorf(ay);
if (iy + 1 < 0 || iy > h - 1)
continue;
const float ry = ay - iy;
const int iy0 = ccv_clamp(iy, 0, h - 1);
const int iy1 = ccv_clamp(iy + 1, 0, h - 1);
for (int bx = 0; bx < bin_w; bx++)
{
const float ax = roi_x + (bx + px + 0.5) * scale_x - 0.5;
const int ix = (int)floorf(ax);
if (ix + 1 < 0 || ix > w - 1)
continue;
const float rx = ax - ix;
const int ix0 = ccv_clamp(ix, 0, w - 1);
const int ix1 = ccv_clamp(ix + 1, 0, w - 1);
const float c00 = (1 - ry) * (1 - rx);
const float c01 = (1 - ry) * rx;
const float c10 = ry * (1 - rx);
const float c11 = ry * rx;
atomicAdd(&opz[iy0 * odim2 + ix0], (NUM)(v * c00));
atomicAdd(&opz[iy0 * odim2 + ix1], (NUM)(v * c01));
atomicAdd(&opz[iy1 * odim2 + ix0], (NUM)(v * c10));
atomicAdd(&opz[iy1 * odim2 + ix1], (NUM)(v * c11));
}
}
}
}
template<typename NUM>
__global__ void _ccv_nnc_roi_align_back_nhwc(const int nchw, const int ch, const int w, const int h, const int o_n, const int odim0, const int odim1, const int odim2, NUM* const op, const int b_n, const int bdim0, const NUM* const bp, const int pool_w, const int pool_h, const int gdim0, const int gdim1, const int gdim2, const NUM* const gp)
{
const int pool_chw = ch * pool_h * pool_w;
const int pool_hw = pool_h * pool_w;
CUDA_1D_KERNEL_LOOP(i, nchw) {
const int n = i / pool_chw;
const int cxy = i % pool_chw;
const int k = cxy / pool_hw;
const int xy = cxy % pool_hw;
const int y = xy / pool_w;
const int x = xy % pool_w;
const float roi_x = bp[(n % b_n) * bdim0] * w; // These are assumed to be real-valued coordinates in the range 0 to w - 1 (the ROI comes in normalized and is scaled by w / h here).
const float roi_y = bp[(n % b_n) * bdim0 + 1] * h;
const float roi_w = bp[(n % b_n) * bdim0 + 2] * w;
const float roi_h = bp[(n % b_n) * bdim0 + 3] * h;
const int bin_h = (int)ceilf(roi_h / pool_h); // Number of sample bins per pooled output point. Due to the ceiling we sample at a slightly higher resolution, using bilinear interpolation.
const int bin_w = (int)ceilf(roi_w / pool_w);
const int bin_pool_h = bin_h * pool_h; // The size of the sampled region in integer terms, before averaging.
const int bin_pool_w = bin_w * pool_w;
const float scale_y = roi_h / bin_pool_h; // The scale factor that maps bin indices back to the original coordinates.
const float scale_x = roi_w / bin_pool_w;
const int py = y * bin_h;
const int px = x * bin_w;
float* const opz = op + (n % o_n) * odim0 + k;
const int bin_h_at_y = ccv_min(bin_h - 1, (int)floorf((h + 0.5 - roi_y) / scale_y - 0.5 - py)) - ccv_max(0, (int)ceilf((-0.5 - roi_y) / scale_y - 0.5 - py)) + 1;
const int bin_w_at_x = ccv_min(bin_w - 1, (int)floorf((w + 0.5 - roi_x) / scale_x - 0.5 - px)) - ccv_max(0, (int)ceilf((-0.5 - roi_x) / scale_x - 0.5 - px)) + 1;
const int count = ccv_max(0, bin_h_at_y) * ccv_max(0, bin_w_at_x);
const float v = count > 0 ? gp[n * gdim0 + y * gdim1 + x * gdim2 + k] / count : 0;
for (int by = 0; by < bin_h; by++)
{
const float ay = roi_y + (by + py + 0.5) * scale_y - 0.5;
const int iy = (int)floorf(ay);
if (iy + 1 < 0 || iy > h - 1)
continue;
const float ry = ay - iy;
const int iy0 = ccv_clamp(iy, 0, h - 1);
const int iy1 = ccv_clamp(iy + 1, 0, h - 1);
for (int bx = 0; bx < bin_w; bx++)
{
const float ax = roi_x + (bx + px + 0.5) * scale_x - 0.5;
const int ix = (int)floorf(ax);
if (ix + 1 < 0 || ix > w - 1)
continue;
const float rx = ax - ix;
const int ix0 = ccv_clamp(ix, 0, w - 1);
const int ix1 = ccv_clamp(ix + 1, 0, w - 1);
const float c00 = (1 - ry) * (1 - rx);
const float c01 = (1 - ry) * rx;
const float c10 = ry * (1 - rx);
const float c11 = ry * rx;
atomicAdd(&opz[iy0 * odim1 + ix0 * odim2], (NUM)(v * c00));
atomicAdd(&opz[iy0 * odim1 + ix1 * odim2], (NUM)(v * c01));
atomicAdd(&opz[iy1 * odim1 + ix0 * odim2], (NUM)(v * c10));
atomicAdd(&opz[iy1 * odim1 + ix1 * odim2], (NUM)(v * c11));
}
}
}
}
static int _ccv_nnc_roi_align_back(const ccv_nnc_cmd_t cmd, const ccv_nnc_hint_t hint, const int flags, ccv_nnc_tensor_t* const* const inputs, const int input_size, ccv_nnc_tensor_t* const* const outputs, const int output_size, ccv_nnc_stream_context_t* const stream_context)
{
assert(input_size >= 3);
const ccv_nnc_tensor_view_t* g = (ccv_nnc_tensor_view_t*)inputs[0];
assert(output_size == 1);
ccv_nnc_tensor_view_t* o = (ccv_nnc_tensor_view_t*)outputs[0];
const int g_nd = ccv_nnc_tensor_nd(g->info.dim);
assert(g_nd == CCV_NNC_MAX_DIM + 1 || g_nd == CCV_NNC_MAX_DIM + 2);
const int* gdim = (g_nd == CCV_NNC_MAX_DIM + 1) ? g->info.dim : g->info.dim + 1;
const int o_nd = ccv_nnc_tensor_nd(o->info.dim);
assert(o_nd == CCV_NNC_MAX_DIM + 1 || o_nd == CCV_NNC_MAX_DIM + 2);
const int* odim = (o_nd == CCV_NNC_MAX_DIM + 1) ? o->info.dim : o->info.dim + 1;
const int* ginc = CCV_IS_TENSOR_VIEW(g) ? ((g_nd == CCV_NNC_MAX_DIM + 1) ? g->inc : g->inc + 1) : gdim;
const int* oinc = CCV_IS_TENSOR_VIEW(o) ? ((o_nd == CCV_NNC_MAX_DIM + 1) ? o->inc : o->inc + 1) : odim;
const ccv_nnc_tensor_view_t* b = (ccv_nnc_tensor_view_t*)inputs[2];
const int o_n = ccv_nnc_tensor_get_n(o->info);
const int b_nd = ccv_nnc_tensor_nd(b->info.dim);
assert(b_nd == 1 || b_nd == 2);
const int b_n = b_nd == 1 ? 1 : b->info.dim[0];
const int g_n = ccv_nnc_tensor_get_n(g->info);
assert(g_n == ccv_max(o_n, b_n));
const int oninc = o_nd == CCV_NNC_MAX_DIM + 1 ? 0 : oinc[0] * oinc[1] * oinc[2];
const int* binc = CCV_IS_TENSOR_VIEW(b) ? b->inc : b->info.dim;
const int bninc = b_nd == 1 ? 0 : binc[1];
const int gninc = g_nd == CCV_NNC_MAX_DIM + 1 ? 0 : ginc[0] * ginc[1] * ginc[2];
assert(g->info.format == o->info.format);
cudaStream_t stream = ccv_nnc_stream_context_get_stream(stream_context);
const size_t o_tensor_count = ccv_nnc_tensor_count(o->info);
_ccv_nnc_zero_back<<<CUDA_GET_BLOCKS(o_tensor_count), CUDA_NUM_THREADS, 0, stream>>>(o_tensor_count, o->data.f32);
if (o->info.format == CCV_TENSOR_FORMAT_NCHW)
{
const int h = odim[1];
const int w = odim[2];
const int pool_h = gdim[1];
const int pool_w = gdim[2];
assert(gdim[0] == odim[0]);
const int ch = gdim[0];
const int nchw = g_n * pool_h * pool_w * ch;
_ccv_nnc_roi_align_back_nchw<<<CUDA_GET_BLOCKS(nchw), CUDA_NUM_THREADS, 0, stream>>>(nchw, ch, w, h, o_n, oninc, oinc[1] * oinc[2], oinc[2], o->data.f32, b_n, bninc, b->data.f32, pool_w, pool_h, gninc, ginc[1] * ginc[2], ginc[2], g->data.f32);
} else {
assert(o->info.format == CCV_TENSOR_FORMAT_NHWC);
const int h = odim[0];
const int w = odim[1];
const int pool_h = gdim[0];
const int pool_w = gdim[1];
assert(gdim[2] == odim[2]);
const int ch = gdim[2];
const int nchw = g_n * pool_h * pool_w * ch;
_ccv_nnc_roi_align_back_nhwc<<<CUDA_GET_BLOCKS(nchw), CUDA_NUM_THREADS, 0, stream>>>(nchw, ch, w, h, o_n, oninc, oinc[1] * oinc[2], oinc[2], o->data.f32, b_n, bninc, b->data.f32, pool_w, pool_h, gninc, ginc[1] * ginc[2], ginc[2], g->data.f32);
}
return CCV_NNC_EXEC_SUCCESS;
}
#endif
REGISTER_COMMAND_BACKEND(CCV_NNC_ROI_ALIGN_FORWARD, CCV_NNC_BACKEND_GPU_REF)(ccv_nnc_cmd_backend_registry_t* const registry)
{
#ifdef HAVE_CUDA
registry->tensor_formats = CCV_TENSOR_FORMAT_NCHW | CCV_TENSOR_FORMAT_NHWC;
registry->tensor_datatypes = CCV_32F;
registry->tensor_memory = CCV_TENSOR_GPU_MEMORY;
registry->algorithms = 1;
registry->exec = _ccv_nnc_roi_align_forw;
#endif
}
REGISTER_COMMAND_BACKEND(CCV_NNC_ROI_ALIGN_BACKWARD, CCV_NNC_BACKEND_GPU_REF)(ccv_nnc_cmd_backend_registry_t* const registry)
{
#ifdef HAVE_CUDA
registry->tensor_formats = CCV_TENSOR_FORMAT_NCHW | CCV_TENSOR_FORMAT_NHWC;
registry->tensor_datatypes = CCV_32F; // Currently only supports CCV_32F because atomicAdd supports __half only from sm_70 onwards. I will revisit this by either getting rid of atomicAdd or deprecating support for Jetson Nano / TX2.
registry->tensor_memory = CCV_TENSOR_GPU_MEMORY;
registry->algorithms = 1;
registry->exec = _ccv_nnc_roi_align_back;
#endif
}
#include <collectives/ib_comm.hpp>
#include <iostream>
#include <sstream>
#include <utils.cuh>
#include <utils.hpp>
namespace HugeCTR {
static void* proxy_thread_func(void* cfg) {
auto ibv_config = (struct IbvProxy::InitConfig*)cfg;
// set numa allocation policy to local
set_mempolicy(MPOL_LOCAL, NULL, 0);
CudaCPUDeviceContext context(ibv_config->device_id_);
IbvProxy* proxy = new IbvProxy(ibv_config);
while (*(volatile int*)&proxy->destroy_ != 1) {
proxy->stm();
}
delete (proxy);
return NULL;
}
// Helpers
void IbComm::detect_ib_devs() {
// Init hwloc topology
hwloc_topology_init(&topo_);
hwloc_topology_set_io_types_filter(topo_, HWLOC_TYPE_FILTER_KEEP_ALL);
hwloc_topology_load(topo_);
ibv_device** dev_list;
int num_devices;
dev_list = ibv_get_device_list(&num_devices);
if ((!dev_list) || (num_devices == 0)) {
HCTR_LOG_S(ERROR, WORLD) << "Ibv get device list failed: " << num_devices << std::endl;
exit(-1);
}
// Get hwloc devices and final ib devs
for (int d = 0; d < num_devices; d++) {
if ((dev_list[d]->node_type != IBV_NODE_RNIC) && (dev_list[d]->node_type != IBV_NODE_CA)) {
continue;
}
const char* dev_name = ibv_get_device_name(dev_list[d]);
if (!dev_name) {
HCTR_LOG_S(ERROR, WORLD) << "Unable to get device name" << std::endl;
exit(-1);
}
ibv_context* context;
context = ibv_open_device(dev_list[d]);
if (!context) {
continue;
}
struct ibv_device_attr dev_attr;
memset(&dev_attr, 0, sizeof(dev_attr));
if (ibv_query_device(context, &dev_attr) != 0) {
HCTR_LOG_S(ERROR, WORLD) << "Unable to query device " << dev_name << std::endl;
exit(-1);
}
for (int port = 1; port <= dev_attr.phys_port_cnt; port++) {
struct ibv_port_attr port_attr;
if (ibv_query_port(context, port, &port_attr) != 0) {
HCTR_LOG_S(WARNING, WORLD)
<< "Unable to query port " << dev_name << ":" << port << std::endl;
continue;
}
if (port_attr.state != IBV_PORT_ACTIVE) continue;
if (port_attr.link_layer != IBV_LINK_LAYER_INFINIBAND) continue;
// TODO: Check against user specified device list.
ib_dev_list_.emplace_back();
ib_dev_list_.back().dev_name = dev_name;
ib_dev_list_.back().dev_port_id = port;
ib_dev_list_.back().hwloc_obj = hwloc_ibv_get_device_osdev(topo_, dev_list[d]);
if (!ib_dev_list_.back().hwloc_obj) {
HCTR_LOG_S(ERROR, WORLD) << "unable to get hwloc obj for ib device " << dev_name
<< std::endl;
exit(1);
}
}
ibv_close_device(context);
}
ibv_free_device_list(dev_list);
}
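// Note (added): only ports that are ACTIVE and whose link layer is InfiniBand survive the
// filter above, so RoCE ports (link_layer == IBV_LINK_LAYER_ETHERNET) are silently skipped.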
void IbComm::print_obj(size_t my_rank, std::string obj_name, hwloc_obj_t obj) {
if (my_rank != 0) return;
if (!obj) {
HCTR_LOG_S(INFO, WORLD) << obj_name << ":NULL" << std::endl;
return;
}
if (obj->type == HWLOC_OBJ_PCI_DEVICE) {
HCTR_LOG_S(INFO, WORLD) << obj_name << ":PCIeDevice " << obj->gp_index << " " << obj << " "
<< obj->depth << " " << obj->attr->pcidev.dev << std::endl;
} else if (obj->type == HWLOC_OBJ_OS_DEVICE) {
HCTR_LOG_S(INFO, WORLD) << obj_name << ":OSdev " << obj->gp_index << " " << obj << " "
<< obj->depth << " " << obj->name << " " << obj->attr->osdev.type
<< std::endl;
} else if (obj->type == HWLOC_OBJ_BRIDGE) {
HCTR_LOG_S(INFO, WORLD) << obj_name << ":PCIeBridge " << obj->gp_index << " " << obj << " "
<< obj->depth << std::endl;
} else {
HCTR_LOG_S(INFO, WORLD) << obj_name << ":Unknown " << obj->gp_index << " " << obj << " "
<< obj->depth << std::endl;
}
}
size_t IbComm::calculate_pcie_hier_distance(size_t my_rank, hwloc_obj_t obj1, hwloc_obj_t obj2) {
size_t distance = 0;
auto is_bridge = [](hwloc_obj_t obj) { return obj && (obj->type == HWLOC_OBJ_BRIDGE); };
auto are_bridges = [is_bridge](hwloc_obj_t obj1, hwloc_obj_t obj2) {
return is_bridge(obj1) && is_bridge(obj2);
};
while (!is_bridge(obj1)) {
obj1 = obj1->parent;
}
while (!is_bridge(obj2)) {
obj2 = obj2->parent;
}
while (are_bridges(obj1, obj2) && (obj1 != obj2)) {
while (are_bridges(obj1, obj2) && (obj1->attr->bridge.depth > obj2->attr->bridge.depth)) {
obj1 = obj1->parent;
distance++;
}
while (are_bridges(obj1, obj2) && (obj2->attr->bridge.depth > obj1->attr->bridge.depth)) {
obj2 = obj2->parent;
distance++;
}
if (are_bridges(obj1, obj2) && (obj1 != obj2)) {
obj1 = obj1->parent;
obj2 = obj2->parent;
distance += 2;
}
}
if (obj1 != obj2) { // No common PCIe ancestor found. Must be SYS.
distance = std::numeric_limits<size_t>::max();
}
return distance;
}
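// Note (added): the value returned above is the number of bridge-to-bridge hops between the
// two devices' upstream PCIe bridges: devices behind the same bridge get 0, devices on
// sibling bridges under a common switch get 2 (one hop up on each side), and devices with no
// common PCIe ancestor (e.g. different root complexes) get SIZE_MAX so they are never
// paired preferentially.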
void IbComm::print_distance_matrix(size_t my_rank, std::vector<std::vector<size_t>>& gpu_nic_dist) {
// Print distance matrix
if (my_rank == 0) {
{
auto log = HCTR_LOG_S(INFO, WORLD);
for (size_t n = 0; n < ib_dev_list_.size(); n++) {
log << std::setfill(' ') << std::setw(24) << ib_dev_list_[n].dev_name;
}
log << std::endl;
}
{
auto log = HCTR_LOG_S(INFO, WORLD);
for (size_t g = 0; g < num_gpus_; g++) {
for (size_t n = 0; n < ib_dev_list_.size(); n++) {
log << std::setfill(' ') << std::setw(24) << gpu_nic_dist[g][n];
}
log << std::endl;
}
}
}
}
void IbComm::calculate_gpu_nic_affinity() {
// get hwloc GPU objs
std::vector<hwloc_obj_t> gpu_list;
for (auto& g : device_list_) {
auto gpu_obj = hwloc_cudart_get_device_osdev_by_index(topo_, g);
if (!gpu_obj) {
HCTR_LOG_S(ERROR, WORLD) << "unable to get hwloc obj for cuda device " << g << std::endl;
exit(1);
}
gpu_list.push_back(gpu_obj);
}
// Find GPU-NIC distances
std::vector<std::vector<size_t>> gpu_nic_dist(num_gpus_);
for (size_t g = 0; g < num_gpus_; g++) {
gpu_nic_dist[g].resize(ib_dev_list_.size());
for (size_t n = 0; n < ib_dev_list_.size(); n++) {
hwloc_obj_t gpu_obj = gpu_list[g];
gpu_nic_dist[g][n] =
calculate_pcie_hier_distance(my_proc_, gpu_obj, ib_dev_list_[n].hwloc_obj);
}
}
// print_distance_matrix(my_proc_, gpu_nic_dist);
// Calculate affinities. Only supports at most one NIC per GPU
// If we need to support more than one NIC per GPU in future, we can replicate the gpu devs.
size_t max_nics = ib_dev_list_.size();
gpu_nic_affinity_.resize(num_gpus_, max_nics);
if (num_gpus_ >= ib_dev_list_.size()) {
size_t current_nic = 0;
for (size_t assigned_gpus = 0; assigned_gpus < num_gpus_; assigned_gpus++) {
// Greedy algorithm
// Find unassigned gpu with min distance
size_t min_distance = std::numeric_limits<size_t>::max();
size_t min_gpu = 0;
for (size_t g = 0; g < num_gpus_; g++) {
if ((gpu_nic_affinity_[g] == max_nics) && (gpu_nic_dist[g][current_nic] <= min_distance)) {
min_distance = gpu_nic_dist[g][current_nic];
min_gpu = g;
}
}
gpu_nic_affinity_[min_gpu] = current_nic;
current_nic = (current_nic + 1) % ib_dev_list_.size();
}
} else {
// still assigns at most one NIC per GPU; just iterate over NICs instead
for (size_t g = 0; g < num_gpus_; g++) {
size_t min_distance = std::numeric_limits<size_t>::max();
size_t min_nic = 0;
for (size_t n = 0; n < ib_dev_list_.size(); n++) {
if ((ib_dev_list_[n].num_gpus_assigned == 0) && (gpu_nic_dist[g][n] <= min_distance)) {
min_distance = gpu_nic_dist[g][n];
min_nic = n;
}
}
gpu_nic_affinity_[g] = min_nic;
}
}
// Print the GPU-NIC affinities that were picked
if (my_proc_ == 0) {
for (size_t g = 0; g < num_gpus_; g++) {
const auto& ib_dev = ib_dev_list_[gpu_nic_affinity_[g]];
HCTR_LOG_S(INFO, ROOT) << "GPU-NIC affinity " << g << "-" << ib_dev.dev_name << ":"
<< ib_dev.dev_port_id << std::endl;
}
}
// Check gpu nic affinities of other nodes and warn if mismatch
char(**gpu_nic_affinity_names)[IBV_SYSFS_NAME_MAX];
gpu_nic_affinity_names =
(char(**)[IBV_SYSFS_NAME_MAX])malloc(sizeof(char(*)[IBV_SYSFS_NAME_MAX]) * num_procs_);
for (size_t r = 0; r < num_procs_; r++) {
gpu_nic_affinity_names[r] =
(char(*)[IBV_SYSFS_NAME_MAX])malloc(sizeof(char[IBV_SYSFS_NAME_MAX]) * num_gpus_);
}
for (size_t g = 0; g < num_gpus_; g++) {
auto ib_dev = ib_dev_list_[gpu_nic_affinity_[g]];
std::ostringstream os;
os << ib_dev.dev_name << ":" << ib_dev.dev_port_id;
std::string ib_name = os.str();
ib_name = ib_name.substr(0, IBV_SYSFS_NAME_MAX - 1);  // leave room for the null terminator
std::strcpy(gpu_nic_affinity_names[my_proc_][g], ib_name.c_str());
}
for (size_t r = 0; r < num_procs_; r++) {
HCTR_MPI_THROW(MPI_Bcast(gpu_nic_affinity_names[r],
num_gpus_ * sizeof(char[IBV_SYSFS_NAME_MAX]), MPI_BYTE, r,
MPI_COMM_WORLD));
}
for (size_t r = 0; r < num_procs_; r++) {
for (size_t g = 0; g < num_gpus_; g++) {
std::string my_ib_name = std::string(gpu_nic_affinity_names[my_proc_][g]);
std::string remote_ib_name = std::string(gpu_nic_affinity_names[r][g]);
if (my_ib_name != remote_ib_name) {
HCTR_LOG_S(WARNING, WORLD)
<< "Mismatch in mellanox dev names. " << g << " " << my_proc_ << ":" << my_ib_name
<< " " << r << ":" << remote_ib_name << std::endl;
HCTR_LOG_S(WARNING, WORLD)
<< "Non uniform cluster detected. Performance maybe impacted" << std::endl;
}
}
}
for (size_t r = 0; r < num_procs_; r++) {
free(gpu_nic_affinity_names[r]);
}
free(gpu_nic_affinity_names);
HCTR_MPI_THROW(MPI_Barrier(MPI_COMM_WORLD));
}
void IbComm::init_proxy_threads() {
proxy_cmd_ = std::make_unique<ProxyCommand>(num_gpus_);
proxy_cmd_->reset();
proxy_thread_.resize(num_gpus_);
proxy_cfg_.resize(num_gpus_);
for (auto& cfg : proxy_cfg_) {
cfg = std::make_unique<IbvProxy::InitConfig>();
}
for (size_t g = 0; g < num_gpus_; g++) {
size_t device_id = device_list_[g];
auto& cfg = proxy_cfg_[g];
cfg->device_id_ = device_id;
cfg->global_id_ = my_proc_;
cfg->proxy_id_ = g;
cfg->ib_dev_ = ib_dev_list_[gpu_nic_affinity_[g]].dev_name;
cfg->ib_port_ = ib_dev_list_[gpu_nic_affinity_[g]].dev_port_id;
cfg->proxy_cmd_ = proxy_cmd_.get();
cfg->num_gpus_ = num_gpus_;
cfg->num_procs_ = num_procs_;
cfg->my_proc_ = my_proc_;
sched_param param;
pthread_attr_t attr;
pthread_attr_init(&attr);
pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
pthread_attr_getschedparam(&attr, &param);
param.sched_priority = sched_get_priority_max(SCHED_FIFO);
pthread_attr_setschedparam(&attr, &param);
int ret = pthread_create(&proxy_thread_[g], &attr, &proxy_thread_func, cfg.get());
PROXY_ASSERT(ret == 0);
}
}
// API implementation
int IbComm::init(size_t num_procs, size_t num_gpus, size_t my_proc,
const std::vector<int>& device_list) {
num_procs_ = num_procs;
num_gpus_ = num_gpus;
my_proc_ = my_proc;
device_list_ = device_list;
PROXY_ASSERT(num_procs > 1);
detect_ib_devs();
calculate_gpu_nic_affinity();
init_proxy_threads();
is_initialized_ = true;
return 0;
}
IbComm::HierA2ACollContext::HierA2ACollContext(IbComm* comm) {
HCTR_LIB_THROW(cudaMallocHost(&cmd_storage_, 2 * sizeof(size_t)));
h_recv_cmd_ptr_ = &cmd_storage_[0];
*h_recv_cmd_ptr_ = 1;
size_t num_gpus = comm->num_gpus_;
std::generate_n(std::back_inserter(ctx_), num_gpus,
[] { return std::make_unique<HierA2ACollContextPerGPU>(); });
d_send_cmd_ = new size_t*[num_gpus];
d_ibv_atomic_ = new size_t*[num_gpus];
d_ibv_atomic_recv_ = new size_t*[num_gpus];
for (size_t g = 0; g < num_gpus; g++) {
HCTR_LIB_THROW(cudaSetDevice(comm->device_list_[g]));
HCTR_LIB_THROW(cudaEventCreate(&ctx_[g]->event_));
// TODO: collate all storage
HCTR_LIB_THROW(cudaMalloc((void**)&d_send_cmd_[g], sizeof(size_t)));
size_t init_value = 2;
HCTR_LIB_THROW(cudaMemcpy(d_send_cmd_[g], &init_value, sizeof(size_t), cudaMemcpyHostToDevice));
HCTR_LIB_THROW(cudaMalloc((void**)&d_ibv_atomic_[g], MAX_IBV_DEST * sizeof(size_t)));
size_t atomic_init_values[MAX_IBV_DEST];
std::fill_n(atomic_init_values, MAX_IBV_DEST, 1);
HCTR_LIB_THROW(cudaMemcpy(d_ibv_atomic_[g], atomic_init_values, MAX_IBV_DEST * sizeof(size_t),
cudaMemcpyHostToDevice));
HCTR_LIB_THROW(cudaMalloc((void**)&d_ibv_atomic_recv_[g], MAX_IBV_DEST * sizeof(size_t)));
std::fill_n(atomic_init_values, MAX_IBV_DEST, 0);
HCTR_LIB_THROW(cudaMemcpy(d_ibv_atomic_recv_[g], atomic_init_values,
MAX_IBV_DEST * sizeof(size_t), cudaMemcpyHostToDevice));
}
barrier_ = std::make_unique<GPUBarrier>(comm->num_gpus_, comm->device_list_);
sync_helper_ = std::make_unique<CollSyncHelper>();
}
IbComm::HierA2ACollContext::~HierA2ACollContext() {
size_t num_gpus = ctx_.size();
if (d_ibv_atomic_recv_) {
for (size_t g = 0; g < num_gpus; g++) {
cudaFree(d_ibv_atomic_recv_[g]);
}
delete[] d_ibv_atomic_recv_;
}
if (d_ibv_atomic_) {
for (size_t g = 0; g < num_gpus; g++) {
cudaFree(d_ibv_atomic_[g]);
}
delete[] d_ibv_atomic_;
}
if (d_send_cmd_) {
for (size_t g = 0; g < num_gpus; g++) {
cudaFree(d_send_cmd_[g]);
}
delete[] d_send_cmd_;
}
if (cmd_storage_) {
cudaFreeHost(cmd_storage_);  // allocated with cudaMallocHost
}
}
IbComm::HierA2ACollContextPerGPU::~HierA2ACollContextPerGPU() {
if (d_send_ptrs_) {
free(d_send_ptrs_);
}
if (d_recv_ptrs_) {
free(d_recv_ptrs_);
}
if (d_send_sizes_copy_) {
cudaFree(d_send_sizes_copy_);
}
}
// TODO: Initialize these in the constructor for RAII
HierA2ACollHandle IbComm::register_hier_a2a_coll(bool skip_barrier) {
// std::unique_lock<std::mutex> lock(proxy_cmd_->mutex_);
hier_a2a_coll_ctx_.emplace_back(std::make_unique<HierA2ACollContext>(this));
HierA2ACollHandle coll_handle = (HierA2ACollHandle)(hier_a2a_coll_ctx_.size() - 1);
auto sync_helper = hier_a2a_coll_ctx_[coll_handle]->sync_helper_.get();
for (size_t g = 0; g < num_gpus_; g++) {
M2PHierA2ACollInit coll_init_cmd_(coll_handle, sync_helper, skip_barrier);
HierA2ACollInitCmd cmd = std::make_pair(std::move(coll_init_cmd_), std::move(P2MNull()));
proxy_cmd_->cmd_[g] = std::move(cmd);
}
proxy_cmd_->post_command();
proxy_cmd_->wait_for_completion();
proxy_cmd_->reset();
return coll_handle;
}
HierA2AvCollHandle IbComm::register_hier_a2a_v_coll(bool skip_barrier) {
// std::unique_lock<std::mutex> lock(proxy_cmd_->mutex_);
hier_a2a_v_coll_ctx_.emplace_back(std::make_unique<HierA2ACollContext>(this));
HierA2AvCollHandle coll_handle = (HierA2AvCollHandle)(hier_a2a_v_coll_ctx_.size() - 1);
auto sync_helper = hier_a2a_v_coll_ctx_[coll_handle]->sync_helper_.get();
for (size_t g = 0; g < num_gpus_; g++) {
M2PHierA2AvCollInit coll_init_cmd_(coll_handle, sync_helper, skip_barrier);
HierA2AvCollInitCmd cmd = std::make_pair(std::move(coll_init_cmd_), std::move(P2MNull()));
proxy_cmd_->cmd_[g] = std::move(cmd);
}
proxy_cmd_->post_command();
proxy_cmd_->wait_for_completion();
proxy_cmd_->reset();
return coll_handle;
}
void IbComm::set_a2a_coll_stream(HierA2ACollHandle coll, cudaStream_t stream, size_t device_id) {
hier_a2a_coll_ctx_[coll]->ctx_[device_id]->stream_ = stream;
}
void IbComm::set_a2a_coll_stream(HierA2AvCollHandle coll, cudaStream_t stream, size_t device_id) {
hier_a2a_v_coll_ctx_[coll]->ctx_[device_id]->stream_ = stream;
}
void IbComm::set_a2a_coll_buf(HierA2ACollHandle coll, void** send_ptrs, const size_t* send_max_size,
void** recv_ptrs, const size_t* recv_max_size, size_t device_id) {
auto& coll_ctx = *hier_a2a_coll_ctx_[coll];
if (proxy_cmd_->cmd_[device_id].which() != 0) {
HCTR_LOG_S(ERROR, WORLD) << "Proxy command is already populated. Don't mix up set API. "
<< HCTR_LOCATION() << std::endl;
exit(1);
}
proxy_cmd_->cmd_[device_id] = HierA2ABufInitCmd();
HierA2ABufInitCmd& cmd = boost::get<HierA2ABufInitCmd>(proxy_cmd_->cmd_[device_id]);
M2PHierA2ABufInit& buf_init = std::get<0>(cmd);
auto& gpu_ctx = *coll_ctx.ctx_[device_id];
gpu_ctx.d_send_ptrs_ = (void**)malloc(sizeof(void*) * num_procs_);
gpu_ctx.d_recv_ptrs_ = (void**)malloc(sizeof(void*) * num_procs_);
memcpy(gpu_ctx.d_send_ptrs_, send_ptrs, sizeof(void*) * num_procs_);
memcpy(gpu_ctx.d_recv_ptrs_, recv_ptrs, sizeof(void*) * num_procs_);
buf_init.coll_handle_ = coll;
buf_init.d_send_ptrs_ = send_ptrs;
buf_init.d_recv_ptrs_ = recv_ptrs;
buf_init.h_max_send_size_ = send_max_size;
buf_init.h_max_recv_size_ = recv_max_size;
buf_init.h_recv_cmd_ptr_ = coll_ctx.h_recv_cmd_ptr_;
buf_init.d_ibv_atomic_ = coll_ctx.d_ibv_atomic_[device_id];
buf_init.d_ibv_atomic_recv_ = coll_ctx.d_ibv_atomic_recv_[device_id];
}
void IbComm::set_a2a_coll_buf(HierA2AvCollHandle coll, void* send_ptrs, const size_t send_max_size,
void* recv_ptrs, const size_t recv_max_size, size_t device_id) {
auto& coll_ctx = *hier_a2a_v_coll_ctx_[coll];
if (proxy_cmd_->cmd_[device_id].which() != 0) {
HCTR_LOG_S(ERROR, WORLD) << "Proxy command is already populated. Don't mix up set API. "
<< HCTR_LOCATION() << std::endl;
exit(1);
}
proxy_cmd_->cmd_[device_id] = HierA2AvBufInitCmd();
HierA2AvBufInitCmd& cmd = boost::get<HierA2AvBufInitCmd>(proxy_cmd_->cmd_[device_id]);
M2PHierA2AvBufInit& buf_init = std::get<0>(cmd);
auto& gpu_ctx = *coll_ctx.ctx_[device_id];
gpu_ctx.d_send_ptrs_ = (void**)malloc(sizeof(void*));
gpu_ctx.d_recv_ptrs_ = (void**)malloc(sizeof(void*));
gpu_ctx.d_send_ptrs_[0] = send_ptrs;
gpu_ctx.d_recv_ptrs_[0] = recv_ptrs;
gpu_ctx.h_max_send_size_ = send_max_size;
HCTR_LIB_THROW(cudaSetDevice(device_list_[device_id]));
// Allocate A2Av send size copy storage
HCTR_LIB_THROW(
cudaMalloc((void**)(&gpu_ctx.d_send_sizes_copy_), sizeof(size_t) * num_gpus_ * num_procs_));
std::vector<size_t> send_sizes(num_gpus_ * num_procs_, send_max_size / (num_gpus_ * num_procs_));
HCTR_LIB_THROW(cudaMemcpy(gpu_ctx.d_send_sizes_copy_, send_sizes.data(),
sizeof(size_t) * num_gpus_ * num_procs_, cudaMemcpyHostToDevice));
buf_init.coll_handle_ = coll;
buf_init.d_send_ptrs_ = send_ptrs;
buf_init.d_recv_ptrs_ = recv_ptrs;
buf_init.h_max_send_size_ = send_max_size;
buf_init.h_max_recv_size_ = recv_max_size;
buf_init.h_recv_cmd_ptr_ = coll_ctx.h_recv_cmd_ptr_;
buf_init.d_ibv_atomic_ = coll_ctx.d_ibv_atomic_[device_id];
buf_init.d_ibv_atomic_recv_ = coll_ctx.d_ibv_atomic_recv_[device_id];
}
void IbComm::register_a2a_coll_buf(HierA2ACollHandle coll) {
// Init command pointers
auto& coll_ctx = *hier_a2a_coll_ctx_[coll];
proxy_cmd_->post_command();
proxy_cmd_->wait_for_completion();
for (size_t g = 0; g < num_gpus_; g++) {
HierA2ABufInitCmd& proxy_cmd = boost::get<HierA2ABufInitCmd>(proxy_cmd_->cmd_[g]);
auto& buf_init_out = std::get<1>(proxy_cmd);
coll_ctx.ctx_[g]->h_send_sizes_ = buf_init_out.h_send_size_;
coll_ctx.ctx_[g]->h_recv_sizes_ = buf_init_out.h_recv_size_;
}
proxy_cmd_->reset();
}
void IbComm::register_a2a_coll_buf(HierA2AvCollHandle coll) {
// Init command pointers
auto& coll_ctx = *hier_a2a_v_coll_ctx_[coll];
proxy_cmd_->post_command();
proxy_cmd_->wait_for_completion();
for (size_t g = 0; g < num_gpus_; g++) {
HierA2AvBufInitCmd& proxy_cmd = boost::get<HierA2AvBufInitCmd>(proxy_cmd_->cmd_[g]);
auto& buf_init_out = std::get<1>(proxy_cmd);
coll_ctx.ctx_[g]->h_send_sizes_ = buf_init_out.h_send_size_;
coll_ctx.ctx_[g]->h_recv_sizes_ = buf_init_out.h_recv_size_;
}
proxy_cmd_->reset();
}
static __global__ void update_sizes(size_t* __restrict__ h_send_sizes,
size_t* __restrict__ h_recv_sizes,
size_t* __restrict__ d_send_sizes_copy,
const size_t* __restrict__ d_send_sizes,
const size_t* __restrict__ d_recv_sizes, size_t size) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) {
size_t send_size = d_send_sizes[i];
h_send_sizes[i] = send_size;
d_send_sizes_copy[i] = send_size;
h_recv_sizes[i] = d_recv_sizes[i];
}
}
void IbComm::update_a2a_coll_sizes(HierA2AvCollHandle coll, const size_t* d_send_sizes,
const size_t* d_recv_sizes, cudaStream_t dep_stream,
size_t device_id) {
auto& ctx = *hier_a2a_v_coll_ctx_[coll];
auto& gpu_ctx = *ctx.ctx_[device_id];
HCTR_LIB_THROW(cudaEventRecord(gpu_ctx.event_, dep_stream));
HCTR_LIB_THROW(cudaStreamWaitEvent(gpu_ctx.stream_, gpu_ctx.event_));
constexpr size_t MAX_TPB = 256;
size_t n_blocks = ceildiv<size_t>(num_procs_ * num_gpus_, MAX_TPB);
update_sizes<<<n_blocks, MAX_TPB, 0, gpu_ctx.stream_>>>(
gpu_ctx.h_send_sizes_, gpu_ctx.h_recv_sizes_, gpu_ctx.d_send_sizes_copy_, d_send_sizes,
d_recv_sizes, num_procs_ * num_gpus_);
}
// Local-first distribution. TODO: node-first might be more efficient.
static __global__ void update_pre_intra_sizes(size_t* __restrict__ h_send_sizes,
size_t* __restrict__ d_send_sizes,
size_t** __restrict__ d_pre_intra_send_sizes,
size_t my_gpu_id, size_t num_gpus, size_t num_procs) {
// Thread blocks = num procs
// Threads = num gpus
int gpu_id = threadIdx.x;
int proc_id = blockIdx.x;
size_t send_size = d_pre_intra_send_sizes[gpu_id][proc_id * num_gpus + my_gpu_id];
size_t send_indx = proc_id * num_gpus + gpu_id;
h_send_sizes[send_indx] = send_size;
d_send_sizes[send_indx] = send_size;
// TODO: uncomment below for cuda graph support
// __threadfence_system();
}
void IbComm::pre_intra_update_a2a_coll_sizes(HierA2AvCollHandle coll,
size_t** d_pre_intra_send_sizes,
cudaStream_t dep_stream, size_t device_id) {
auto& ctx = *hier_a2a_v_coll_ctx_[coll];
auto& gpu_ctx = *ctx.ctx_[device_id];
HCTR_LIB_THROW(cudaEventRecord(gpu_ctx.event_, dep_stream));
HCTR_LIB_THROW(cudaStreamWaitEvent(gpu_ctx.stream_, gpu_ctx.event_));
ctx.barrier_->sync_all_gpus(gpu_ctx.stream_, device_id);
update_pre_intra_sizes<<<num_procs_, num_gpus_, 0, gpu_ctx.stream_>>>(
gpu_ctx.h_send_sizes_, gpu_ctx.d_send_sizes_copy_, d_pre_intra_send_sizes, device_id,
num_gpus_, num_procs_);
}
void IbComm::set_ready_to_transfer() {
PROXY_ASSERT_MSG(!is_ready_to_transfer_, "Ready to transfer is already set");
for (size_t g = 0; g < num_gpus_; g++) {
proxy_cmd_->cmd_[g] = ProxyStateTransitionCmd();
ProxyStateTransitionCmd& cmd_t = boost::get<ProxyStateTransitionCmd>(proxy_cmd_->cmd_[g]);
M2PStateTransition& cmd = std::get<0>(cmd_t);
cmd.state_ = IbvProxyState::READY_TO_TRANSFER;
}
proxy_cmd_->post_command();
proxy_cmd_->wait_for_completion();
proxy_cmd_->reset();
is_ready_to_transfer_ = true;
}
template <typename T>
static __global__ void copy_local(const T* __restrict__ input_, T* __restrict__ output_,
size_t size) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) {
output_[i] = input_[i];
}
}
template <typename T>
static __global__ void copy_local_segmented(const T* __restrict__ input_, T* __restrict__ output_,
const size_t* __restrict__ sizes, int num_segments,
size_t offset) {
for (int s = 0; s < num_segments; s++) {
int segment_offset = s * offset;
size_t num_elems = sizes[s] / sizeof(T);
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < num_elems;
i += blockDim.x * gridDim.x) {
output_[segment_offset + i] = input_[segment_offset + i];
}
}
}
static __global__ void wait_completion(size_t* d_ibv_cmd, size_t* atomic, int nDest, int myDest,
int device_id) {
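// One thread per destination spin-waits on its slot of the atomic array. d_ibv_cmd is
// the command counter that sync_all_gpus_report_host_and_inc appears to advance once
// per collective, and atomic[dest] is presumably bumped by the proxy thread as the
// corresponding IBV transfer completes, so the kernel returns once every remote
// destination has caught up to (current command - 1). The local rank (myDest) is
// skipped because its data is copied on-device by copy_local / copy_local_segmented.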
if ((threadIdx.x < nDest) && (threadIdx.x != myDest)) {
size_t curr_count = *(volatile size_t*)d_ibv_cmd;
// clock_t s=clock64();
while (*((volatile size_t*)&atomic[threadIdx.x]) < (curr_count - 1)) {
// if (clock64()-s > 2000000000) {
// HCTR_LOG(INFO, WORLD, "wait completion expected: %llu %llu, got %llu from_dest %d my_dest
// %d %d n_dest %d\n",
// curr_count, (curr_count - 1), atomic[threadIdx.x], threadIdx.x, myDest, device_id,
// nDest);
// s = clock64();
// }
}
}
__syncthreads();
}
template <typename T>
void IbComm::post_send_command_a2a<T>(HierA2ACollHandle coll, cudaStream_t dep_stream,
size_t device_id) {
auto& ctx = *hier_a2a_coll_ctx_[coll];
auto& gpu_ctx = *ctx.ctx_[device_id];
HCTR_LIB_THROW(cudaEventRecord(gpu_ctx.event_, dep_stream));
HCTR_LIB_THROW(cudaStreamWaitEvent(gpu_ctx.stream_, gpu_ctx.event_));
ctx.barrier_->sync_all_gpus_report_host_and_inc(ctx.d_send_cmd_[device_id], ctx.h_recv_cmd_ptr_,
gpu_ctx.stream_, device_id);
size_t num_elems = gpu_ctx.h_send_sizes_[my_proc_] / sizeof(T);
// TODO: This is not capturable as we are using sizes from the host
copy_local<T><<<96, 1024, 0, gpu_ctx.stream_>>>((T*)gpu_ctx.d_send_ptrs_[my_proc_],
(T*)gpu_ctx.d_recv_ptrs_[my_proc_], num_elems);
wait_completion<<<1, 32, 0, gpu_ctx.stream_>>>(
ctx.d_send_cmd_[device_id], ctx.d_ibv_atomic_[device_id], num_procs_, my_proc_, device_id);
}
template <typename T>
void IbComm::post_send_command_a2a<T>(HierA2AvCollHandle coll, cudaStream_t dep_stream,
size_t device_id) {
auto& ctx = *hier_a2a_v_coll_ctx_[coll];
auto& gpu_ctx = *ctx.ctx_[device_id];
HCTR_LIB_THROW(cudaEventRecord(gpu_ctx.event_, dep_stream));
HCTR_LIB_THROW(cudaStreamWaitEvent(gpu_ctx.stream_, gpu_ctx.event_));
ctx.barrier_->sync_all_gpus_report_host_and_inc(ctx.d_send_cmd_[device_id], ctx.h_recv_cmd_ptr_,
gpu_ctx.stream_, device_id);
// TODO: Change it to use max SMs
size_t* copy_sizes = &gpu_ctx.d_send_sizes_copy_[my_proc_ * num_gpus_];
size_t offset = gpu_ctx.h_max_send_size_ / (num_procs_ * num_gpus_) / sizeof(T);
// TODO: This is not good, we are reading the sizes from host, create a device copy!
copy_local_segmented<T><<<96, 1024, 0, gpu_ctx.stream_>>>(
(T*)gpu_ctx.d_send_ptrs_[0] + (my_proc_ * num_gpus_ * offset),
(T*)gpu_ctx.d_recv_ptrs_[0] + (my_proc_ * num_gpus_ * offset), copy_sizes, num_gpus_, offset);
wait_completion<<<1, 32, 0, gpu_ctx.stream_>>>(
ctx.d_send_cmd_[device_id], ctx.d_ibv_atomic_[device_id], num_procs_, my_proc_, device_id);
}
template <typename T>
void IbComm::post_a2a_send_command<T>(HierA2AvCollHandle coll, cudaStream_t dep_stream,
size_t device_id) {
auto& ctx = *hier_a2a_v_coll_ctx_[coll];
auto& gpu_ctx = *ctx.ctx_[device_id];
HCTR_LIB_THROW(cudaEventRecord(gpu_ctx.event_, dep_stream));
HCTR_LIB_THROW(cudaStreamWaitEvent(gpu_ctx.stream_, gpu_ctx.event_));
ctx.barrier_->sync_all_gpus_report_host_and_inc(ctx.d_send_cmd_[device_id], ctx.h_recv_cmd_ptr_,
gpu_ctx.stream_, device_id);
// TODO: Change it to use max SMs
size_t* copy_sizes = &gpu_ctx.d_send_sizes_copy_[my_proc_ * num_gpus_];
size_t offset = gpu_ctx.h_max_send_size_ / (num_procs_ * num_gpus_) / sizeof(T);
// TODO: This is not good, we are reading the sizes from host, create a device copy!
copy_local_segmented<T><<<96, 1024, 0, gpu_ctx.stream_>>>(
(T*)gpu_ctx.d_send_ptrs_[0] + (my_proc_ * num_gpus_ * offset),
(T*)gpu_ctx.d_recv_ptrs_[0] + (my_proc_ * num_gpus_ * offset), copy_sizes, num_gpus_, offset);
}
void IbComm::blocking_wait(HierA2AvCollHandle coll, cudaStream_t dep_stream, size_t device_id) {
auto& ctx = *hier_a2a_v_coll_ctx_[coll];
auto& gpu_ctx = *ctx.ctx_[device_id];
HCTR_LIB_THROW(cudaEventRecord(gpu_ctx.event_, dep_stream));
HCTR_LIB_THROW(cudaStreamWaitEvent(gpu_ctx.stream_, gpu_ctx.event_));
wait_completion<<<1, 32, 0, gpu_ctx.stream_>>>(
ctx.d_send_cmd_[device_id], ctx.d_ibv_atomic_[device_id], num_procs_, my_proc_, device_id);
}
static __global__ void wait_recv(size_t* d_ibv_cmd, size_t* atomic, int nDest, int myDest) {
if ((threadIdx.x < nDest) && (threadIdx.x != myDest)) {
size_t curr_count = *d_ibv_cmd;
while (*((volatile size_t*)&atomic[threadIdx.x]) < (curr_count - 2)) {
}
}
__syncthreads();
}
void IbComm::wait_global_recv_async(HierA2ACollHandle coll, size_t device_id) {
auto& ctx = *hier_a2a_coll_ctx_[coll];
auto& gpu_ctx = *ctx.ctx_[device_id];
wait_recv<<<1, 32, 0, gpu_ctx.stream_>>>(ctx.d_send_cmd_[device_id],
ctx.d_ibv_atomic_recv_[device_id], num_procs_, my_proc_);
}
void IbComm::wait_global_recv_async(HierA2AvCollHandle coll, size_t device_id) {
auto& ctx = *hier_a2a_v_coll_ctx_[coll];
auto& gpu_ctx = *ctx.ctx_[device_id];
wait_recv<<<1, 32, 0, gpu_ctx.stream_>>>(ctx.d_send_cmd_[device_id],
ctx.d_ibv_atomic_recv_[device_id], num_procs_, my_proc_);
}
template void IbComm::post_send_command_a2a<__half>(HierA2ACollHandle coll, cudaStream_t dep_stream,
size_t device_id);
template void IbComm::post_send_command_a2a<float>(HierA2ACollHandle coll, cudaStream_t dep_stream,
size_t device_id);
template void IbComm::post_send_command_a2a<uint32_t>(HierA2ACollHandle coll,
cudaStream_t dep_stream, size_t device_id);
template void IbComm::post_send_command_a2a<uint16_t>(HierA2ACollHandle coll,
cudaStream_t dep_stream, size_t device_id);
template void IbComm::post_send_command_a2a<__half>(HierA2AvCollHandle coll,
cudaStream_t dep_stream, size_t device_id);
template void IbComm::post_send_command_a2a<float>(HierA2AvCollHandle coll, cudaStream_t dep_stream,
size_t device_id);
template void IbComm::post_send_command_a2a<uint32_t>(HierA2AvCollHandle coll,
cudaStream_t dep_stream, size_t device_id);
template void IbComm::post_send_command_a2a<uint16_t>(HierA2AvCollHandle coll,
cudaStream_t dep_stream, size_t device_id);
template void IbComm::post_a2a_send_command<__half>(HierA2AvCollHandle coll,
cudaStream_t dep_stream, size_t device_id);
template void IbComm::post_a2a_send_command<float>(HierA2AvCollHandle coll, cudaStream_t dep_stream,
size_t device_id);
template void IbComm::post_a2a_send_command<uint32_t>(HierA2AvCollHandle coll,
cudaStream_t dep_stream, size_t device_id);
template void IbComm::post_a2a_send_command<uint16_t>(HierA2AvCollHandle coll,
cudaStream_t dep_stream, size_t device_id);
void IbComm::finalize() {
if (!is_initialized_) {
return;
}
if (!is_ready_to_transfer_) {
for (size_t g = 0; g < num_gpus_; g++) {
proxy_cmd_->cmd_[g] = ProxyStateTransitionCmd();
ProxyStateTransitionCmd& cmd_t = boost::get<ProxyStateTransitionCmd>(proxy_cmd_->cmd_[g]);
M2PStateTransition& cmd = std::get<0>(cmd_t);
cmd.state_ = IbvProxyState::DESTROY;
}
proxy_cmd_->post_command();
proxy_cmd_->wait_for_completion();
proxy_cmd_->reset();
}
proxy_cmd_->set_destroy();
for (size_t g = 0; g < num_gpus_; g++) {
int ret = pthread_join(proxy_thread_[g], NULL);
PROXY_ASSERT(ret == 0);
}
is_finalized_ = true;
}
IbComm::~IbComm() {
if (!is_finalized_) {
finalize();
}
}
} // namespace HugeCTR
#endif
#include "../util_ptx.cuh"
#include "../util_type.cuh"
#include "../util_math.cuh"
#include "../util_namespace.cuh"
CUB_NAMESPACE_BEGIN
// Implementation of the MergePath algorithm, as described in:
// Odeh et al, "Merge Path - Parallel Merging Made Simple"
// doi:10.1109/IPDPSW.2012.202
template <typename KeyT,
typename KeyIteratorT,
typename OffsetT,
typename BinaryPred>
__device__ __forceinline__ OffsetT MergePath(KeyIteratorT keys1,
KeyIteratorT keys2,
OffsetT keys1_count,
OffsetT keys2_count,
OffsetT diag,
BinaryPred binary_pred)
{
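  // Binary-search the diag-th cross diagonal of the (keys1 x keys2) merge matrix for the
  // partition point: the returned value is how many elements of keys1 belong to the first
  // `diag` outputs of the merge; the remaining (diag - result) elements come from keys2.
  // Worked example (assuming binary_pred is less-than): keys1 = {1,3,5}, keys2 = {2,4,6},
  // diag = 3 returns 2, i.e. the first three merged outputs {1,2,3} take two keys from
  // keys1 and one from keys2.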
OffsetT keys1_begin = diag < keys2_count ? 0 : diag - keys2_count;
OffsetT keys1_end = (cub::min)(diag, keys1_count);
while (keys1_begin < keys1_end)
{
OffsetT mid = cub::MidPoint<OffsetT>(keys1_begin, keys1_end);
KeyT key1 = keys1[mid];
KeyT key2 = keys2[diag - 1 - mid];
bool pred = binary_pred(key2, key1);
if (pred)
{
keys1_end = mid;
}
else
{
keys1_begin = mid + 1;
}
}
return keys1_begin;
}
template <typename KeyT, typename CompareOp, int ITEMS_PER_THREAD>
__device__ __forceinline__ void SerialMerge(KeyT *keys_shared,
int keys1_beg,
int keys2_beg,
int keys1_count,
int keys2_count,
KeyT (&output)[ITEMS_PER_THREAD],
int (&indices)[ITEMS_PER_THREAD],
CompareOp compare_op)
{
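  // Sequentially merge the two sorted runs [keys1_beg, keys1_end) and [keys2_beg, keys2_end)
  // living in shared memory, emitting exactly ITEMS_PER_THREAD keys plus the shared-memory
  // index each one came from, so the caller can later gather the matching values. Once a run
  // is exhausted, key1/key2 may be re-read one position past its end; the ITEMS_PER_TILE + 1
  // sizing of the caller's shared buffer keeps that read in bounds.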
int keys1_end = keys1_beg + keys1_count;
int keys2_end = keys2_beg + keys2_count;
KeyT key1 = keys_shared[keys1_beg];
KeyT key2 = keys_shared[keys2_beg];
#pragma unroll
for (int item = 0; item < ITEMS_PER_THREAD; ++item)
{
bool p = (keys2_beg < keys2_end) &&
((keys1_beg >= keys1_end)
|| compare_op(key2, key1));
output[item] = p ? key2 : key1;
indices[item] = p ? keys2_beg++ : keys1_beg++;
if (p)
{
key2 = keys_shared[keys2_beg];
}
else
{
key1 = keys_shared[keys1_beg];
}
}
}
/**
* \brief The BlockMergeSort class provides methods for sorting items partitioned across a CUDA thread block using a merge sorting method.
* \ingroup BlockModule
*
* \tparam KeyT KeyT type
* \tparam BLOCK_DIM_X The thread block length in threads along the X dimension
* \tparam ITEMS_PER_THREAD The number of items per thread
* \tparam ValueT <b>[optional]</b> ValueT type (default: cub::NullType, which indicates a keys-only sort)
* \tparam BLOCK_DIM_Y <b>[optional]</b> The thread block length in threads along the Y dimension (default: 1)
* \tparam BLOCK_DIM_Z <b>[optional]</b> The thread block length in threads along the Z dimension (default: 1)
*
* \par Overview
* BlockMergeSort arranges items into ascending order using a comparison
* functor with less-than semantics. Merge sort can handle arbitrary types
* and comparison functors, but is slower than BlockRadixSort when sorting
* arithmetic types into ascending/descending order.
*
* \par A Simple Example
* \blockcollective{BlockMergeSort}
* \par
* The code snippet below illustrates a sort of 512 integer keys that are
* partitioned across 128 threads where each thread owns 4 consecutive items.
* \par
* \code
* #include <cub/cub.cuh> // or equivalently <cub/block/block_merge_sort.cuh>
*
* struct CustomLess
* {
* template <typename DataType>
* __device__ bool operator()(const DataType &lhs, const DataType &rhs)
* {
* return lhs < rhs;
* }
* };
*
* __global__ void ExampleKernel(...)
* {
* // Specialize BlockMergeSort for a 1D block of 128 threads owning 4 integer items each
* typedef cub::BlockMergeSort<int, 128, 4> BlockMergeSort;
*
* // Allocate shared memory for BlockMergeSort
* __shared__ typename BlockMergeSort::TempStorage temp_storage_shuffle;
*
* // Obtain a segment of consecutive items that are blocked across threads
* int thread_keys[4];
* ...
*
* BlockMergeSort(temp_storage_shuffle).Sort(thread_keys, CustomLess());
* ...
* }
* \endcode
* \par
* Suppose the set of input \p thread_keys across the block of threads is
* <tt>{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }</tt>.
* The corresponding output \p thread_keys in those threads will be
* <tt>{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }</tt>.
*
* \par Re-using dynamically allocated shared memory
* The following example under the examples/block folder illustrates usage of
* dynamically shared memory with BlockReduce and how to re-purpose
* the same memory region:
* <a href="../../examples/block/example_block_reduce_dyn_smem.cu">example_block_reduce_dyn_smem.cu</a>
*
* This example can be easily adapted to the storage required by BlockMergeSort.
*/
template <
typename KeyT,
int BLOCK_DIM_X,
int ITEMS_PER_THREAD,
typename ValueT = NullType,
int BLOCK_DIM_Y = 1,
int BLOCK_DIM_Z = 1>
class BlockMergeSort
{
private:
// The thread block size in threads
static constexpr int BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z;
static constexpr int ITEMS_PER_TILE = ITEMS_PER_THREAD * BLOCK_THREADS;
// Whether or not there are values to be trucked along with keys
static constexpr bool KEYS_ONLY = Equals<ValueT, NullType>::VALUE;
/// Shared memory type required by this thread block
union _TempStorage
{
KeyT keys_shared[ITEMS_PER_TILE + 1];
ValueT items_shared[ITEMS_PER_TILE + 1];
}; // union TempStorage
/// Internal storage allocator
__device__ __forceinline__ _TempStorage& PrivateStorage()
{
__shared__ _TempStorage private_storage;
return private_storage;
}
/// Shared storage reference
_TempStorage &temp_storage;
/// Linear thread-id
unsigned int linear_tid;
public:
/// \smemstorage{BlockMergeSort}
struct TempStorage : Uninitialized<_TempStorage> {};
__device__ __forceinline__ BlockMergeSort()
: temp_storage(PrivateStorage())
, linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{}
__device__ __forceinline__ BlockMergeSort(TempStorage &temp_storage)
: temp_storage(temp_storage.Alias())
, linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
{}
private:
template <typename T>
__device__ __forceinline__ void Swap(T &lhs, T &rhs)
{
T temp = lhs;
lhs = rhs;
rhs = temp;
}
template <typename CompareOp>
__device__ __forceinline__ void
StableOddEvenSort(KeyT (&keys)[ITEMS_PER_THREAD],
ValueT (&items)[ITEMS_PER_THREAD],
CompareOp compare_op)
{
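    // Odd-even transposition sort over this thread's private items: ITEMS_PER_THREAD passes,
    // alternating between even and odd adjacent pairs (j starts at (1 & i)). Only adjacent
    // elements are swapped, and only when compare_op says they are strictly out of order, so
    // equal keys never cross each other and the per-thread sort is stable.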
#pragma unroll
for (int i = 0; i < ITEMS_PER_THREAD; ++i)
{
#pragma unroll
for (int j = 1 & i; j < ITEMS_PER_THREAD - 1; j += 2)
{
if (compare_op(keys[j + 1], keys[j]))
{
Swap(keys[j], keys[j + 1]);
if (!KEYS_ONLY)
{
Swap(items[j], items[j + 1]);
}
}
} // inner loop
} // outer loop
}
public:
/**
* \brief Sorts items partitioned across a CUDA thread block using a merge sorting method.
*
* \par
* - Sort is not guaranteed to be stable. That is, suppose that i and j are
* equivalent: neither one is less than the other. It is not guaranteed
* that the relative order of these two elements will be preserved by sort.
*
* \tparam CompareOp functor type having member <tt>bool operator()(KeyT lhs, KeyT rhs)</tt>
* CompareOp is a model of <a href="https://en.cppreference.com/w/cpp/concepts/strict_weak_order">Strict Weak Ordering</a>.
*/
template <typename CompareOp>
__device__ __forceinline__ void
Sort(KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort
CompareOp compare_op) ///< [in] Comparison function object which returns
///< true if the first argument is ordered before
///< the second
{
ValueT items[ITEMS_PER_THREAD];
Sort<CompareOp, false>(keys, items, compare_op, ITEMS_PER_TILE, keys[0]);
}
/**
* \brief Sorts items partitioned across a CUDA thread block using a merge sorting method.
*
* \par
* - Sort is not guaranteed to be stable. That is, suppose that i and j are
* equivalent: neither one is less than the other. It is not guaranteed
* that the relative order of these two elements will be preserved by sort.
* - The value of \p oob_default is assigned to all elements that are out of
* \p valid_items boundaries. It's expected that \p oob_default is ordered
* after any value in the \p valid_items boundaries. The algorithm always
* sorts a fixed amount of elements, which is equal to ITEMS_PER_THREAD * BLOCK_THREADS.
* If there is a value that is ordered after \p oob_default, it won't be
* placed within \p valid_items boundaries.
*
* \tparam CompareOp functor type having member <tt>bool operator()(KeyT lhs, KeyT rhs)</tt>
* CompareOp is a model of <a href="https://en.cppreference.com/w/cpp/concepts/strict_weak_order">Strict Weak Ordering</a>.
*/
template <typename CompareOp>
__device__ __forceinline__ void
Sort(KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort
CompareOp compare_op, ///< [in] Comparison function object which returns true if the first argument is ordered before the second
int valid_items, ///< [in] Number of valid items to sort
KeyT oob_default) ///< [in] Default value to assign out-of-bound items
{
ValueT items[ITEMS_PER_THREAD];
Sort<CompareOp, true>(keys, items, compare_op, valid_items, oob_default);
}
/**
* \brief Sorts items partitioned across a CUDA thread block using a merge sorting method.
*
* \par
* - Sort is not guaranteed to be stable. That is, suppose that i and j are
* equivalent: neither one is less than the other. It is not guaranteed
* that the relative order of these two elements will be preserved by sort.
*
* \tparam CompareOp functor type having member <tt>bool operator()(KeyT lhs, KeyT rhs)</tt>
* CompareOp is a model of <a href="https://en.cppreference.com/w/cpp/concepts/strict_weak_order">Strict Weak Ordering</a>.
*/
template <typename CompareOp>
__device__ __forceinline__ void
Sort(KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort
ValueT (&items)[ITEMS_PER_THREAD], ///< [in-out] Values to sort
CompareOp compare_op) ///< [in] Comparison function object which returns true if the first argument is ordered before the second
{
Sort<CompareOp, false>(keys, items, compare_op, ITEMS_PER_TILE, keys[0]);
}
/**
* \brief Sorts items partitioned across a CUDA thread block using a merge sorting method.
*
* \par
* - Sort is not guaranteed to be stable. That is, suppose that i and j are
* equivalent: neither one is less than the other. It is not guaranteed
* that the relative order of these two elements will be preserved by sort.
* - The value of \p oob_default is assigned to all elements that are out of
* \p valid_items boundaries. It's expected that \p oob_default is ordered
* after any value in the \p valid_items boundaries. The algorithm always
* sorts a fixed amount of elements, which is equal to ITEMS_PER_THREAD * BLOCK_THREADS.
* If there is a value that is ordered after \p oob_default, it won't be
* placed within \p valid_items boundaries.
*
* \tparam CompareOp functor type having member <tt>bool operator()(KeyT lhs, KeyT rhs)</tt>
* CompareOp is a model of <a href="https://en.cppreference.com/w/cpp/concepts/strict_weak_order">Strict Weak Ordering</a>.
* \tparam IS_LAST_TILE True if valid_items isn't equal to the ITEMS_PER_TILE
*/
template <typename CompareOp,
bool IS_LAST_TILE = true>
__device__ __forceinline__ void
Sort(KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort
ValueT (&items)[ITEMS_PER_THREAD], ///< [in-out] Values to sort
CompareOp compare_op, ///< [in] Comparison function object which returns true if the first argument is ordered before the second
int valid_items, ///< [in] Number of valid items to sort
KeyT oob_default) ///< [in] Default value to assign out-of-bound items
{
if (IS_LAST_TILE)
{
// if last tile, find valid max_key
// and fill the remaining keys with it
//
KeyT max_key = oob_default;
#pragma unroll
for (int item = 1; item < ITEMS_PER_THREAD; ++item)
{
if (ITEMS_PER_THREAD * linear_tid + item < valid_items)
{
max_key = compare_op(max_key, keys[item]) ? keys[item] : max_key;
}
else
{
keys[item] = max_key;
}
}
}
// if first element of thread is in input range, stable sort items
//
if (!IS_LAST_TILE || ITEMS_PER_THREAD * linear_tid < valid_items)
{
StableOddEvenSort(keys, items, compare_op);
}
// each thread has sorted keys
// merge sort keys in shared memory
//
#pragma unroll
for (int target_merged_threads_number = 2;
target_merged_threads_number <= BLOCK_THREADS;
target_merged_threads_number *= 2)
{
int merged_threads_number = target_merged_threads_number / 2;
int mask = target_merged_threads_number - 1;
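      // Each pass doubles the length of the sorted runs. A group of target_merged_threads_number
      // consecutive threads cooperatively merges two runs of merged_threads_number * ITEMS_PER_THREAD
      // keys each: (mask & linear_tid) is a thread's rank inside its group, (~mask & linear_tid)
      // identifies the group's first thread, start is the shared-memory offset of the group's first
      // run, and the MergePath search below splits the merge along this thread's diagonal so every
      // thread produces exactly ITEMS_PER_THREAD outputs.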
CTA_SYNC();
// store keys in shmem
//
#pragma unroll
for (int item = 0; item < ITEMS_PER_THREAD; ++item)
{
int idx = ITEMS_PER_THREAD * linear_tid + item;
temp_storage.keys_shared[idx] = keys[item];
}
CTA_SYNC();
int indices[ITEMS_PER_THREAD];
int first_thread_idx_in_thread_group_being_merged = ~mask & linear_tid;
int start = ITEMS_PER_THREAD * first_thread_idx_in_thread_group_being_merged;
int size = ITEMS_PER_THREAD * merged_threads_number;
int thread_idx_in_thread_group_being_merged = mask & linear_tid;
int diag =
(cub::min)(valid_items,
ITEMS_PER_THREAD * thread_idx_in_thread_group_being_merged);
int keys1_beg = (cub::min)(valid_items, start);
int keys1_end = (cub::min)(valid_items, keys1_beg + size);
int keys2_beg = keys1_end;
int keys2_end = (cub::min)(valid_items, keys2_beg + size);
int keys1_count = keys1_end - keys1_beg;
int keys2_count = keys2_end - keys2_beg;
int partition_diag = MergePath<KeyT>(&temp_storage.keys_shared[keys1_beg],
&temp_storage.keys_shared[keys2_beg],
keys1_count,
keys2_count,
diag,
compare_op);
int keys1_beg_loc = keys1_beg + partition_diag;
int keys1_end_loc = keys1_end;
int keys2_beg_loc = keys2_beg + diag - partition_diag;
int keys2_end_loc = keys2_end;
int keys1_count_loc = keys1_end_loc - keys1_beg_loc;
int keys2_count_loc = keys2_end_loc - keys2_beg_loc;
SerialMerge(&temp_storage.keys_shared[0],
keys1_beg_loc,
keys2_beg_loc,
keys1_count_loc,
keys2_count_loc,
keys,
indices,
compare_op);
if (!KEYS_ONLY)
{
CTA_SYNC();
// store keys in shmem
//
#pragma unroll
for (int item = 0; item < ITEMS_PER_THREAD; ++item)
{
int idx = ITEMS_PER_THREAD * linear_tid + item;
temp_storage.items_shared[idx] = items[item];
}
CTA_SYNC();
// gather items from shmem
//
#pragma unroll
for (int item = 0; item < ITEMS_PER_THREAD; ++item)
{
items[item] = temp_storage.items_shared[indices[item]];
}
}
}
} // func block_merge_sort
/**
* \brief Sorts items partitioned across a CUDA thread block using a merge sorting method.
*
* \par
* - StableSort is stable: it preserves the relative ordering of equivalent
* elements. That is, if x and y are elements such that x precedes y,
* and if the two elements are equivalent (neither x < y nor y < x) then
* a postcondition of stable_sort is that x still precedes y.
*
* \tparam CompareOp functor type having member <tt>bool operator()(KeyT lhs, KeyT rhs)</tt>
* CompareOp is a model of <a href="https://en.cppreference.com/w/cpp/concepts/strict_weak_order">Strict Weak Ordering</a>.
*/
template <typename CompareOp>
__device__ __forceinline__ void
StableSort(KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort
CompareOp compare_op) ///< [in] Comparison function object which returns true if the first argument is ordered before the second
{
Sort(keys, compare_op);
}
/**
* \brief Sorts items partitioned across a CUDA thread block using a merge sorting method.
*
* \par
* - StableSort is stable: it preserves the relative ordering of equivalent
* elements. That is, if x and y are elements such that x precedes y,
* and if the two elements are equivalent (neither x < y nor y < x) then
* a postcondition of stable_sort is that x still precedes y.
*
* \tparam CompareOp functor type having member <tt>bool operator()(KeyT lhs, KeyT rhs)</tt>
* CompareOp is a model of <a href="https://en.cppreference.com/w/cpp/concepts/strict_weak_order">Strict Weak Ordering</a>.
*/
template <typename CompareOp>
__device__ __forceinline__ void
StableSort(KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort
ValueT (&items)[ITEMS_PER_THREAD], ///< [in-out] Values to sort
CompareOp compare_op) ///< [in] Comparison function object which returns true if the first argument is ordered before the second
{
Sort(keys, items, compare_op);
}
/**
* \brief Sorts items partitioned across a CUDA thread block using a merge sorting method.
*
* \par
* - StableSort is stable: it preserves the relative ordering of equivalent
* elements. That is, if x and y are elements such that x precedes y,
* and if the two elements are equivalent (neither x < y nor y < x) then
* a postcondition of stable_sort is that x still precedes y.
* - The value of \p oob_default is assigned to all elements that are out of
* \p valid_items boundaries. It's expected that \p oob_default is ordered
* after any value in the \p valid_items boundaries. The algorithm always
* sorts a fixed amount of elements, which is equal to ITEMS_PER_THREAD * BLOCK_THREADS.
* If there is a value that is ordered after \p oob_default, it won't be
* placed within \p valid_items boundaries.
*
* \tparam CompareOp functor type having member <tt>bool operator()(KeyT lhs, KeyT rhs)</tt>
* CompareOp is a model of <a href="https://en.cppreference.com/w/cpp/concepts/strict_weak_order">Strict Weak Ordering</a>.
*/
template <typename CompareOp>
__device__ __forceinline__ void
StableSort(KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort
CompareOp compare_op, ///< [in] Comparison function object which returns true if the first argument is ordered before the second
int valid_items, ///< [in] Number of valid items to sort
KeyT oob_default) ///< [in] Default value to assign out-of-bound items
{
Sort(keys, compare_op, valid_items, oob_default);
}
/**
* \brief Sorts items partitioned across a CUDA thread block using a merge sorting method.
*
* \par
* - StableSort is stable: it preserves the relative ordering of equivalent
* elements. That is, if x and y are elements such that x precedes y,
* and if the two elements are equivalent (neither x < y nor y < x) then
* a postcondition of stable_sort is that x still precedes y.
* - The value of \p oob_default is assigned to all elements that are out of
* \p valid_items boundaries. It's expected that \p oob_default is ordered
* after any value in the \p valid_items boundaries. The algorithm always
* sorts a fixed amount of elements, which is equal to ITEMS_PER_THREAD * BLOCK_THREADS.
* If there is a value that is ordered after \p oob_default, it won't be
* placed within \p valid_items boundaries.
*
* \tparam CompareOp functor type having member <tt>bool operator()(KeyT lhs, KeyT rhs)</tt>
* CompareOp is a model of <a href="https://en.cppreference.com/w/cpp/concepts/strict_weak_order">Strict Weak Ordering</a>.
* \tparam IS_LAST_TILE True if valid_items isn't equal to the ITEMS_PER_TILE
*/
template <typename CompareOp,
bool IS_LAST_TILE = true>
__device__ __forceinline__ void
StableSort(KeyT (&keys)[ITEMS_PER_THREAD], ///< [in-out] Keys to sort
ValueT (&items)[ITEMS_PER_THREAD], ///< [in-out] Values to sort
CompareOp compare_op, ///< [in] Comparison function object which returns true if the first argument is ordered before the second
int valid_items, ///< [in] Number of valid items to sort
KeyT oob_default) ///< [in] Default value to assign out-of-bound items
{
Sort<CompareOp, IS_LAST_TILE>(keys,
items,
compare_op,
valid_items,
oob_default);
}
};
CUB_NAMESPACE_END
template<typename T, int BS> __global__
void phase_factor_kernel (T *kPoints, int *makeTwoCopies,
T *pos, T **phi_in, T **phi_out,
int num_splines, int num_walkers)
{
__shared__ T in_shared[2*BS+1], kPoints_s[BS][3],
pos_s[BS][3];
volatile __shared__ T out_shared[2*BS+1];
__shared__ T *phi_in_ptr[BS], *phi_out_ptr[BS];
int tid = threadIdx.x;
assert(warpSize == 32);
#pragma unroll
for (int i=0; i<3; i++)
{
int off = (3*blockIdx.x+i)*BS + tid;
if (off < 3*num_walkers)
pos_s[0][i*BS + tid] = pos[off];
}
if (blockIdx.x*BS+tid < num_walkers)
{
phi_in_ptr[tid] = phi_in[blockIdx.x*BS+tid];
phi_out_ptr[tid] = phi_out[blockIdx.x*BS+tid];
}
//__syncthreads();
int nb = (num_splines + BS-1)/BS;
int outIndex=0;
int outBlock=0;
int m2c;
volatile __shared__ int m2c_ps[BS];
int numWrite = min(BS, num_walkers-blockIdx.x*BS);
for (int block=0; block<nb; block++)
{
// Load kpoints into shared memory
for (int i=0; i<3; i++)
{
int off = (3*block+i)*BS + tid;
if (off < 3*num_splines)
kPoints_s[0][i*BS+tid] = kPoints[off];
}
// Load makeTwoCopies with coalesced reads
if (block*BS+tid < num_splines)
{
if(makeTwoCopies[block*BS + tid])
m2c = 1;
else
m2c = 0;
}
else
m2c = 0;
//prefix sum of m2c array
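// Hillis-Steele inclusive scan over (m2c + 1): each thread adds its predecessor at strides
// 1, 2, 4, 8, 16. The strides stop at 16, so this relies on BS <= warpSize (the launch below
// uses BS = 32), and the volatile shared array keeps the lock-step updates visible without
// __syncthreads(). outIndex then becomes the exclusive prefix, i.e. this thread's starting
// slot in the packed real/imag output for the current block of splines.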
m2c_ps[tid] = m2c+1;
if(tid >= 1)
m2c_ps[tid] += m2c_ps[tid-1];
if(tid >= 2)
m2c_ps[tid] += m2c_ps[tid-2];
if(tid >= 4)
m2c_ps[tid] += m2c_ps[tid-4];
if(tid >= 8)
m2c_ps[tid] += m2c_ps[tid-8];
if(tid >= 16)
m2c_ps[tid] += m2c_ps[tid-16];
if(tid > 0)
outIndex = m2c_ps[tid-1];
T s, c;
int end = min (BS, num_splines-block*BS);
if(tid < end)
for (int i=0; i<numWrite; i++)
{
if ((2*block)*BS+tid < 2*num_splines)
in_shared[tid ] = phi_in_ptr[i][(2*block+0)*BS+tid];
if ((2*block)*BS+tid + end < 2*num_splines)
in_shared[tid+end] = phi_in_ptr[i][(2*block)*BS+tid + end];
// Compute e^{-ikr}
T phase = -(pos_s[i][0]*kPoints_s[tid][0] +
pos_s[i][1]*kPoints_s[tid][1] +
pos_s[i][2]*kPoints_s[tid][2]);
sincosf(phase, &s, &c);
T phi_real = in_shared[2*tid]*c - in_shared[2*tid+1]*s;
T phi_imag = in_shared[2*tid]*s + in_shared[2*tid+1]*c;
out_shared[outIndex] = phi_real;
if(m2c)
{
out_shared[outIndex + 1] = phi_imag;
}
phi_out_ptr[i][outBlock+tid]= out_shared[tid];
if(tid + end < m2c_ps[end-1])
{
phi_out_ptr[i][outBlock + tid + end] = out_shared[tid+end];
}
}
outBlock+= m2c_ps[end-1];
}
}
template<typename T, int BS> __global__
void phase_factor_kernel_new (T *kPoints, int *makeTwoCopies,
T *pos, T **phi_in, T **phi_out,
int num_splines)
{
__shared__ T in_shared[2*BS], out_shared[2*BS], kPoints_s[BS][3],
pos_s[3];
__shared__ int m2c[BS];
__shared__ T *phi_in_ptr, *phi_out_ptr;
int tid = threadIdx.x;
if (tid < 3)
pos_s[tid] = pos[3*blockIdx.x+tid];
if (tid == 0)
{
phi_in_ptr = phi_in[blockIdx.x];
phi_out_ptr = phi_out[blockIdx.x];
}
int NB = (num_splines+BS-1)/BS;
int outIndex=0, outBlock=0;
for (int ib=0; ib<NB; ib++)
{
for (int i=0; i<3; i++)
kPoints_s[0][i*BS+tid] = kPoints[(3*ib+i)*BS+tid];
T phase = -(kPoints_s[tid][0]*pos_s[0] +
kPoints_s[tid][1]*pos_s[1] +
kPoints_s[tid][2]*pos_s[2]);
T s, c;
sincosf (phase, &s, &c);
int off = 2*ib*BS + tid;
in_shared[tid] = phi_in_ptr[off];
in_shared[tid+BS] = phi_in_ptr[off+BS];
T phi_real = in_shared[2*tid]*c - in_shared[2*tid+1]*s;
T phi_imag = in_shared[2*tid]*s + in_shared[2*tid+1]*c;
m2c[tid] = makeTwoCopies[ib*BS + tid];
int iend = min (BS, num_splines - ib*BS);
for (int i=0; i<iend; i++)
{
if (tid == i)
out_shared[outIndex] = phi_real;
outIndex++;
__syncthreads();
if (outIndex == BS)
{
phi_out_ptr[outBlock*BS+tid] = out_shared[tid];
outIndex = 0;
outBlock++;
}
__syncthreads();
if (m2c[i])
{
if (tid == i)
out_shared[outIndex] = phi_imag;
outIndex++;
}
__syncthreads();
if (outIndex == BS)
{
phi_out_ptr[outBlock*BS+tid] = out_shared[tid];
outIndex = 0;
outBlock++;
}
__syncthreads();
}
}
if (tid < outIndex)
phi_out_ptr[outBlock*BS+tid] = out_shared[tid];
}
template<typename T, int BS> __global__
void phase_factor_kernel (T *kPoints, int *makeTwoCopies,
T *pos, T **phi_in, T **phi_out,
T **grad_lapl_in, T **grad_lapl_out,
int num_splines, int num_walkers,
int row_stride)
{
volatile __shared__ T in_shared[5][2*BS+1], out_shared[5][BS+1], kPoints_s[BS][3];
__shared__ T pos_s[3];
__shared__ T *my_phi_in, *my_phi_out, *my_GL_in, *my_GL_out;
int tid = threadIdx.x;
if (tid == 0)
{
my_phi_in = phi_in[blockIdx.x];
my_phi_out = phi_out[blockIdx.x];
my_GL_in = grad_lapl_in[blockIdx.x];
my_GL_out = grad_lapl_out[blockIdx.x];
}
if (tid < 3)
pos_s[tid] = pos[3*blockIdx.x+tid];
//__syncthreads();
int nb = (num_splines + BS-1)/BS;
int outIndex=0;
int outBlock=0;
__shared__ int m2c[BS];
for (int block=0; block<nb; block++)
{
// Load kpoints into shared memory
for (int i=0; i<3; i++)
{
int off = (3*block+i)*BS + tid;
if (off < 3*num_splines)
kPoints_s[0][i*BS+tid] = kPoints[off];
}
// Load phi_in with coalesced reads
if ((2*block+0)*BS+tid < 2*num_splines)
{
in_shared[0][tid+ 0] = my_phi_in[(2*block+0)*BS+tid];
for (int j=0; j<4; j++)
in_shared[j+1][tid+ 0] = my_GL_in[2*j*num_splines+(2*block+0)*BS+tid];
}
if ((2*block+1)*BS+tid < 2*num_splines)
{
in_shared[0][tid+BS] = my_phi_in[(2*block+1)*BS+tid];
for (int j=0; j<4; j++)
in_shared[j+1][tid+BS] = my_GL_in[2*j*num_splines+(2*block+1)*BS+tid];
}
//__syncthreads();
// Now add on phase factors
T phase = -(pos_s[0]*kPoints_s[tid][0] +
pos_s[1]*kPoints_s[tid][1] +
pos_s[2]*kPoints_s[tid][2]);
T s, c;
sincosf (phase, &s, &c);
T u_re, u_im, gradu_re[3], gradu_im[3], laplu_re, laplu_im;
u_re = in_shared[0][2*tid+0];
u_im = in_shared[0][2*tid+1];
gradu_re[0] = in_shared[1][2*tid+0];
gradu_im[0] = in_shared[1][2*tid+1];
gradu_re[1] = in_shared[2][2*tid+0];
gradu_im[1] = in_shared[2][2*tid+1];
gradu_re[2] = in_shared[3][2*tid+0];
gradu_im[2] = in_shared[3][2*tid+1];
laplu_re = in_shared[4][2*tid+0];
laplu_im = in_shared[4][2*tid+1];
in_shared[0][2*tid+0] = u_re*c - u_im*s;
in_shared[0][2*tid+1] = u_re*s + u_im*c;
// Gradient = e^(-ikr)*(-i*u*k + gradu)
for (int dim=0; dim<3; dim++)
{
T gre, gim;
gre = gradu_re[dim] + kPoints_s[tid][dim]*u_im;
gim = gradu_im[dim] - kPoints_s[tid][dim]*u_re;
in_shared[dim+1][2*tid+0] = gre*c - gim*s;
in_shared[dim+1][2*tid+1] = gre*s + gim*c;
}
// Add phase contribution to laplacian
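// For the full orbital e^{-ik.r} u(r): lapl(e^{-ik.r} u) = e^{-ik.r} (lapl u - k^2 u - 2 i k.grad u).
// lre/lim below are the real and imaginary parts of that bracket, and the last two assignments
// multiply by (c, s) = e^{-ik.r}, mirroring the gradient case above.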
T k2 = (kPoints_s[tid][0]*kPoints_s[tid][0] +
kPoints_s[tid][1]*kPoints_s[tid][1] +
kPoints_s[tid][2]*kPoints_s[tid][2]);
T lre = laplu_re - k2*u_re + 2.0*(kPoints_s[tid][0]*gradu_im[0]+
kPoints_s[tid][1]*gradu_im[1]+
kPoints_s[tid][2]*gradu_im[2]);
T lim = laplu_im - k2*u_im - 2.0*(kPoints_s[tid][0]*gradu_re[0]+
kPoints_s[tid][1]*gradu_re[1]+
kPoints_s[tid][2]*gradu_re[2]);
in_shared[4][2*tid+0] = lre*c - lim*s;
in_shared[4][2*tid+1] = lre*s + lim*c;
// Load makeTwoCopies with coalesced reads
if (block*BS+tid < num_splines)
m2c[tid] = makeTwoCopies[block*BS + tid];
//__syncthreads();
// Now, serialize to output buffer
int end = min (BS, num_splines - block*BS);
for (int i=0; i<end; i++)
{
if (tid < 5)
out_shared[tid][outIndex] = in_shared[tid][2*i+0];
outIndex++;
//__syncthreads();
if (outIndex == BS)
{
// Write back to global memory
my_phi_out[ outBlock*BS+tid] = out_shared[0][tid];
my_GL_out[0*row_stride +outBlock*BS+tid] = out_shared[1][tid];
my_GL_out[1*row_stride +outBlock*BS+tid] = out_shared[2][tid];
my_GL_out[2*row_stride +outBlock*BS+tid] = out_shared[3][tid];
my_GL_out[3*row_stride +outBlock*BS+tid] = out_shared[4][tid];
outIndex = 0;
outBlock++;
}
if (m2c[i])
{
if (tid < 5)
out_shared[tid][outIndex] = in_shared[tid][2*i+1];
outIndex++;
//__syncthreads();
if (outIndex == BS)
{
// Write back to global memory
my_phi_out[ outBlock*BS+tid] = out_shared[0][tid];
my_GL_out[0*row_stride +outBlock*BS+tid] = out_shared[1][tid];
my_GL_out[1*row_stride +outBlock*BS+tid] = out_shared[2][tid];
my_GL_out[2*row_stride +outBlock*BS+tid] = out_shared[3][tid];
my_GL_out[3*row_stride +outBlock*BS+tid] = out_shared[4][tid];
outIndex = 0;
outBlock++;
//__syncthreads();
}
}
}
//__syncthreads();
}
if (tid < outIndex)
{
my_phi_out[ outBlock*BS+tid] = out_shared[0][tid];
my_GL_out[0*row_stride +outBlock*BS+tid] = out_shared[1][tid];
my_GL_out[1*row_stride +outBlock*BS+tid] = out_shared[2][tid];
my_GL_out[2*row_stride +outBlock*BS+tid] = out_shared[3][tid];
my_GL_out[3*row_stride +outBlock*BS+tid] = out_shared[4][tid];
}
}
// T s, c;
// int end = min (BS, num_splines-block*BS);
// for (int i=0; i<end; i++) {
// // Compute e^{-ikr}
// T phase = -(pos_s[tid][0]*kPoints_s[i][0] +
// pos_s[tid][1]*kPoints_s[i][1] +
// pos_s[tid][2]*kPoints_s[i][2]);
// sincosf(phase, &s, &c);
// T phi_real = in_shared[tid][0][2*i]*c - in_shared[tid][0][2*i+1]*s;
// T phi_imag = in_shared[tid][0][2*i]*s + in_shared[tid][0][2*i+1]*c;
// T grad_real[3], grad_imag[3], lapl_real, lapl_imag;
// // for (int dim=0; dim<3; dim++) {
// // T re, im;
// // re = grad_lapl_in_shared[tid][dim][2*i+0] + kPoints_s[i][dim]*in_shared[tid][2*i+1];
// // im = grad_lapl_in_shared[tid][dim][2*i+1] - kPoints_s[i][dim]*in_shared[tid][2*i+0];
// // grad_real[dim] = re*c - im*s;
// // grad_imag[dim] = re*s + im*c;
// // }
// // grad_lapl_out_shared[tid][0][outIndex] = grad_real[0];
// // grad_lapl_out_shared[tid][1][outIndex] = grad_real[1];
// // grad_lapl_out_shared[tid][2][outIndex] = grad_real[2];
// // out_shared[tid][outIndex++] = phi_real;
// __syncthreads();
// if (outIndex == BS) {
// for (int j=0; j<numWrite; j++)
// phi_out_ptr[j][outBlock*BS+tid]= out_shared[j][tid];
// outIndex = 0;
// outBlock++;
// }
// __syncthreads();
// if (m2c[i]) {
// grad_lapl_out_shared[tid][0][outIndex] = grad_imag[0];
// grad_lapl_out_shared[tid][1][outIndex] = grad_imag[1];
// grad_lapl_out_shared[tid][2][outIndex] = grad_imag[2];
// out_shared[tid][outIndex++] = phi_imag;
// __syncthreads();
// if (outIndex == BS) {
// for (int j=0; j<numWrite; j++)
// phi_out_ptr[j][outBlock*BS+tid] = out_shared[j][tid];
// outIndex = 0;
// outBlock++;
// }
// }
// __syncthreads();
// }
// }
// // Write remainining outputs
// for (int i=0; i<numWrite; i++)
// if (tid < outIndex)
// phi_out_ptr[i][outBlock*BS+tid] = out_shared[i][tid];
// }
#include <cstdio>
#include <complex>
#include <iostream>
#include "../CUDA/gpu_misc.h"
void apply_phase_factors(float kPoints[], int makeTwoCopies[],
float pos[], float *phi_in[], float *phi_out[],
int num_splines, int num_walkers)
{
// float kPoints_h[3*num_splines];
// int makeTwoCopies_h[num_splines];
// float pos_h[3*num_walkers];
// float *phi_in_ptr[num_walkers];
// float *phi_out_ptr[num_walkers];
// cudaMemcpy (kPoints_h, kPoints, 3*num_splines*sizeof(float), cudaMemcpyDeviceToHost);
// cudaMemcpy (makeTwoCopies_h, makeTwoCopies, num_splines*sizeof(int), cudaMemcpyDeviceToHost);
// cudaMemcpy (pos_h, pos, 3*num_walkers*sizeof(float), cudaMemcpyDeviceToHost);
// cudaMemcpy (phi_in_ptr, phi_in, num_walkers*sizeof(float*), cudaMemcpyDeviceToHost);
// cudaMemcpy (phi_out_ptr, phi_out, num_walkers*sizeof(float*), cudaMemcpyDeviceToHost);
// for (int iw=0; iw<num_walkers; iw++) {
// cudaMemcpy (kPoints_h, kPoints, 3*num_splines*sizeof(float), cudaMemcpyDeviceToHost);
// std::complex<float> phi_in_h[num_splines];
// float phi_out_h[num_splines*2];
// cudaMemcpy (phi_in_h, phi_in_ptr[iw], num_splines*2*sizeof(float), cudaMemcpyDeviceToHost);
// int iout = 0;
// for (int isp=0; isp < num_splines; isp++) {
// float phase = -(kPoints_h[3*isp+0] * pos_h[3*iw+0] +
// kPoints_h[3*isp+1] * pos_h[3*iw+1] +
// kPoints_h[3*isp+2] * pos_h[3*iw+2]);
// float s,c;
// sincosf(phase, &s, &c);
// std::complex<float> z(c,s);
// std::complex<float> out = z*phi_in_h[isp];
// phi_out_h[iout++] = out.real();
// if (makeTwoCopies_h[isp])
// phi_out_h[iout++] = out.imag();
// }
// cudaMemcpyAsync (phi_out_ptr[iw], phi_out_h, iout*sizeof(float), cudaMemcpyHostToDevice);
// }
// return;
const int BS = 32;
dim3 dimBlock(BS);
dim3 dimGrid ((num_walkers+BS-1)/BS);
phase_factor_kernel<float,BS><<<dimGrid,dimBlock>>>
(kPoints, makeTwoCopies, pos, phi_in, phi_out, num_splines, num_walkers);
// dim3 dimGrid (num_walkers);
// phase_factor_kernel_new<float,BS><<<dimGrid,dimBlock>>>
// (kPoints, makeTwoCopies, pos, phi_in, phi_out, num_splines);
}
void apply_phase_factors(float kPoints[], int makeTwoCopies[],
float pos[], float *phi_in[], float *phi_out[],
float *GL_in[], float *GL_out[],
int num_splines, int num_walkers, int row_stride)
{
const int BS = 32;
dim3 dimBlock(BS);
dim3 dimGrid (num_walkers);
phase_factor_kernel<float,BS><<<dimGrid,dimBlock, 0, gpu::kernelStream>>>
(kPoints, makeTwoCopies, pos, phi_in, phi_out,
GL_in, GL_out, num_splines, num_walkers, row_stride);
}
#include "caffe/fast_rcnn_layers.hpp"
using std::max;
using std::min;
#define PI 3.1415926
namespace caffe {
template <typename Dtype>
__global__ void RotateROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
// rois(oriented rectangle): [batch_idx, xmin, ymin, xmax, ymax, theta]
bottom_rois += n * 6;
int roi_batch_ind = bottom_rois[0];
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
Dtype roi_theta = bottom_rois[5] / 180.f * PI;
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
int roi_ctr_w = roi_start_w + roi_width * 0.5;
int roi_ctr_h = roi_start_h + roi_height * 0.5;
// affine matrix
Dtype aff_mat[2][2];
aff_mat[0][0] = static_cast<Dtype>(cos(roi_theta));
aff_mat[0][1] = static_cast<Dtype>(sin(roi_theta));
aff_mat[1][0] = static_cast<Dtype>(-sin(roi_theta));
aff_mat[1][1] = static_cast<Dtype>(cos(roi_theta));
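// aff_mat is the 2x2 rotation by roi_theta. The loops below walk the pooling bin in the
// axis-aligned (unrotated) ROI frame and, for every (w, h), rotate the offset from the ROI
// center through aff_mat to get (r_w, r_h), the actual feature-map location belonging to the
// rotated ROI; samples falling outside the feature map are skipped.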
// Force malformed ROIs to be 1x1
Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1) * bin_size_w));
hstart = hstart + roi_start_h;
hend = hend + roi_start_h;
wstart = wstart + roi_start_w;
wend = wend + roi_start_w;
Dtype maxval = 0;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
// rotate
int r_w = round(static_cast<Dtype>(w - roi_ctr_w) * aff_mat[0][0] +
static_cast<Dtype>(h - roi_ctr_h) * aff_mat[0][1]) + roi_ctr_w;
int r_h = round(static_cast<Dtype>(w - roi_ctr_w) * aff_mat[1][0] +
static_cast<Dtype>(h - roi_ctr_h) * aff_mat[1][1]) + roi_ctr_h;
// skip if [r_w, r_h] not inside
if ((r_w < 0) || (r_h < 0) || (r_w > (width - 1)) || (r_h > (height - 1)))
continue;
const int bottom_index = r_h * width + r_w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename Dtype>
void RotateROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data = max_idx_.mutable_gpu_data();
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
RotateROIPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data);
CUDA_POST_KERNEL_CHECK;
}
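// Note on the launch above: CAFFE_GET_BLOCKS(count) requests enough blocks of
// CAFFE_CUDA_NUM_THREADS threads to cover `count` pooled outputs, and
// CUDA_KERNEL_LOOP inside the kernel grid-strides over the indices, so each
// (n, c, ph, pw) output element and its argmax entry is written exactly once.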
template <typename Dtype>
__global__ void RotateROIPoolBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data, const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 6;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
Dtype roi_theta = static_cast<Dtype>(offset_bottom_rois[5] / 180.f * PI);
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
int roi_ctr_w = roi_start_w + roi_width * 0.5;
int roi_ctr_h = roi_start_h + roi_height * 0.5;
// get affine matrix
Dtype aff_mat[2][2];
aff_mat[0][0] = static_cast<Dtype>(cos(roi_theta));
aff_mat[0][1] = static_cast<Dtype>(sin(roi_theta));
aff_mat[1][0] = static_cast<Dtype>(-sin(roi_theta));
aff_mat[1][1] = static_cast<Dtype>(cos(roi_theta));
// point in polygon
// add +-1 so that points on the ROI boundary count as inside
int pt_a_w = roi_start_w - 1;
int pt_a_h = roi_start_h - 1;
int pt_b_w = roi_end_w + 1;
int pt_b_h = roi_start_h - 1;
int pt_c_w = roi_end_w + 1;
int pt_c_h = roi_end_h + 1;
int pt_d_w = roi_start_w - 1;
int pt_d_h = roi_end_h + 1;
int r_pt_a_w = round(static_cast<Dtype>(pt_a_w - roi_ctr_w) * aff_mat[0][0] +
static_cast<Dtype>(pt_a_h - roi_ctr_h) * aff_mat[0][1]) + roi_ctr_w;
int r_pt_a_h = round(static_cast<Dtype>(pt_a_w - roi_ctr_w) * aff_mat[1][0] +
static_cast<Dtype>(pt_a_h - roi_ctr_h) * aff_mat[1][1]) + roi_ctr_h;
int r_pt_b_w = round(static_cast<Dtype>(pt_b_w - roi_ctr_w) * aff_mat[0][0] +
static_cast<Dtype>(pt_b_h - roi_ctr_h) * aff_mat[0][1]) + roi_ctr_w;
int r_pt_b_h = round(static_cast<Dtype>(pt_b_w - roi_ctr_w) * aff_mat[1][0] +
static_cast<Dtype>(pt_b_h - roi_ctr_h) * aff_mat[1][1]) + roi_ctr_h;
int r_pt_c_w = round(static_cast<Dtype>(pt_c_w - roi_ctr_w) * aff_mat[0][0] +
static_cast<Dtype>(pt_c_h - roi_ctr_h) * aff_mat[0][1]) + roi_ctr_w;
int r_pt_c_h = round(static_cast<Dtype>(pt_c_w - roi_ctr_w) * aff_mat[1][0] +
static_cast<Dtype>(pt_c_h - roi_ctr_h) * aff_mat[1][1]) + roi_ctr_h;
int r_pt_d_w = round(static_cast<Dtype>(pt_d_w - roi_ctr_w) * aff_mat[0][0] +
static_cast<Dtype>(pt_d_h - roi_ctr_h) * aff_mat[0][1]) + roi_ctr_w;
int r_pt_d_h = round(static_cast<Dtype>(pt_d_w - roi_ctr_w) * aff_mat[1][0] +
static_cast<Dtype>(pt_d_h - roi_ctr_h) * aff_mat[1][1]) + roi_ctr_h;
Dtype aa = (r_pt_b_w - r_pt_a_w) * (h - r_pt_a_h) - (r_pt_b_h - r_pt_a_h) * (w - r_pt_a_w);
Dtype bb = (r_pt_c_w - r_pt_b_w) * (h - r_pt_b_h) - (r_pt_c_h - r_pt_b_h) * (w - r_pt_b_w);
Dtype cc = (r_pt_d_w - r_pt_c_w) * (h - r_pt_c_h) - (r_pt_d_h - r_pt_c_h) * (w - r_pt_c_w);
Dtype dd = (r_pt_a_w - r_pt_d_w) * (h - r_pt_d_h) - (r_pt_a_h - r_pt_d_h) * (w - r_pt_d_w);
// Skip if Rotate ROI doesn't include (h, w)
const bool in_roi = ((aa > Dtype(0.) && bb > Dtype(0.) && cc > Dtype(0.) && dd > Dtype(0.)) ||
(aa < Dtype(0.) && bb < Dtype(0.) && cc < Dtype(0.) && dd < Dtype(0.)));
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const int* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width);
// invert rotate (w, h) to align box
Dtype inv_aff_mat[2][2];
inv_aff_mat[0][0] = static_cast<Dtype>(cos(-roi_theta));
inv_aff_mat[0][1] = static_cast<Dtype>(sin(-roi_theta));
inv_aff_mat[1][0] = static_cast<Dtype>(-sin(-roi_theta));
inv_aff_mat[1][1] = static_cast<Dtype>(cos(-roi_theta));
int inv_w = round(static_cast<Dtype>(w - roi_ctr_w) * inv_aff_mat[0][0] +
static_cast<Dtype>(h - roi_ctr_h) * inv_aff_mat[0][1]) + roi_ctr_w;
int inv_h = round(static_cast<Dtype>(w - roi_ctr_w) * inv_aff_mat[1][0] +
static_cast<Dtype>(h - roi_ctr_h) * inv_aff_mat[1][1]) + roi_ctr_h;
int phstart = floor(static_cast<Dtype>(inv_h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(inv_h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(inv_w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(inv_w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
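// Illustrative sketch (not part of the original layer): aa, bb, cc and dd
// above are 2D cross products between each edge of the rotated (and slightly
// enlarged, via the +-1 offsets) rectangle and the vector from that edge's
// start corner to the point (w, h). For a convex quadrilateral the point is
// inside exactly when all four cross products share the same sign, which is
// what the in_roi test checks. A hypothetical helper for a single edge:
template <typename Dtype>
__host__ __device__ inline Dtype EdgeCrossProduct(const int ax, const int ay,
    const int bx, const int by, const int px, const int py) {
  // z-component of (B - A) x (P - A)
  return static_cast<Dtype>((bx - ax) * (py - ay) - (by - ay) * (px - ax));
}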
template <typename Dtype>
void RotateROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const int* argmax_data = max_idx_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
RotateROIPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(RotateROIPoolingLayer);
} // namespace caffe
#include <gunrock/app/snn/snn_app.cu>
#include <gunrock/graphio/labels.cuh>
#include <gunrock/app/test_base.cuh>
// KNN includes
// Gunrock KNN app
#include <gunrock/app/knn/knn_app.cu>
#include <gunrock/app/knn/knn_helpers.cuh>
#include <gunrock/app/knn/knn_problem.cuh>
#include <gunrock/app/knn/knn_enactor.cuh>
#include <gunrock/app/knn/knn_test.cuh>
// FAISS knn
#ifdef FAISS_FOUND
#include <faiss/gpu/GpuDistance.h>
#include <faiss/gpu/GpuIndexFlat.h>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/StandardGpuResources.h>
#include <faiss/utils/Heap.h>
#include <faiss/gpu/utils/Limits.cuh>
#include <faiss/gpu/utils/Select.cuh>
#endif
//#define SNN_DEBUG
#ifdef SNN_DEBUG
#define debug(a...) fprintf(stderr, a)
#else
#define debug(a...)
#endif
using namespace gunrock;
namespace APP_NAMESPACE = app::snn;
/******************************************************************************
* Main
******************************************************************************/
/**
* @brief Enclosure to the main function
*/
struct main_struct {
/**
* @brief the actual main function, after type switching
* @tparam VertexT Type of vertex identifier
* @tparam SizeT Type of graph size, i.e. type of edge identifier
* @tparam ValueT Type of edge values
* @param parameters Command line parameters
* @param v,s,val Place holders for type deduction
* \return cudaError_t error message(s), if any
*/
template <typename VertexT, // Use int as the vertex identifier
typename SizeT, // Use int as the graph size type
typename ValueT> // Use int as the value type
cudaError_t operator()(util::Parameters& parameters, VertexT v, SizeT s,
ValueT val) {
// CLI parameters
bool quick = parameters.Get<bool>("quick");
bool quiet = parameters.Get<bool>("quiet");
auto knn_version = parameters.Get<std::string>("knn-version");
// Get n dimension tuplets
std::string labels_file = parameters.Get<std::string>("labels-file");
util::PrintMsg("Points File Input: " + labels_file, !quiet);
std::ifstream lfile(labels_file.c_str());
if (labels_file == "" || !lfile.is_open()){
util::PrintMsg("file cannot be open\n", !quiet);
return (cudaError_t)1;
}
cudaError_t retval = cudaSuccess;
typedef typename app::TestGraph<VertexT, SizeT, ValueT, graph::HAS_CSR>
GraphT;
GraphT graph;
auto target = util::DEVICE;
// points initialization is moved to gunrock::graphio::labels::Read ... ReadLabelsStream
util::Array1D<SizeT, ValueT> points;
util::CpuTimer cpu_timer;
cpu_timer.Start();
// graphio::labels is setting "n" and "dim"
retval = gunrock::graphio::labels::Read(parameters, points);
if (retval){
util::PrintMsg("Reading error\n");
return retval;
}
cpu_timer.Stop();
parameters.Set("load-time", cpu_timer.ElapsedMillis());
// Get number of points
SizeT num_points = parameters.Get<SizeT>("n");
// Get dimensionality of the space
SizeT dim = parameters.Get<SizeT>("dim");
// Get number of nearest neighbors, default k = 10
SizeT k = parameters.Get<int>("k");
if (k >= num_points)
return util::GRError("K must be < N", __FILE__, __LINE__);
// Get the number of neighbors that two close points should share
SizeT eps = parameters.Get<SizeT>("eps");
// Get the min density
SizeT min_pts = parameters.Get<SizeT>("min-pts");
if (min_pts > k)
return util::GRError("Min-Pts must be <= K", __FILE__, __LINE__);
#ifdef SNN_DEBUG
// Debug of points:
debug("debug points\n");
for (SizeT i=0; i<num_points; ++i){
debug("for point %d: ", i);
for (SizeT j=0; j<dim; ++j){
if (typeid(ValueT) == typeid(double))
debug("%lf ", points[i*dim + j]);
else
debug("%d ", points[i*dim + j]);
}
debug("\n");
}
#endif
util::PrintMsg("num_points = " + std::to_string(num_points) +
", k = " + std::to_string(k) +
", eps = " + std::to_string(eps) +
", min-pts = " + std::to_string(min_pts), !quiet);
// Gunrock KNN results
SizeT* h_knns = (SizeT*) malloc(sizeof(SizeT)*num_points*k);
if (knn_version.compare("faiss") == 0){
util::PrintMsg("KNN version: " + knn_version);
#ifdef FAISS_FOUND
//* -------------------- FAISS KNN ------------------------*
long* res_I;
GUARD_CU(cudaMalloc((void**)&res_I, sizeof(long)*num_points*(k+1)));
float* res_D;
GUARD_CU(cudaMalloc((void**)&res_D, sizeof(float)*num_points*(k+1)));
ValueT *samples0 = (ValueT*)points.GetPointer(util::HOST);
float *samples = (float*)malloc(num_points * dim * sizeof(float));
for (int i = 0; i < num_points * dim; ++i) samples[i] = (float)samples0[i];
std::vector<float*> ptrs(1);
ptrs[0] = samples;
std::vector<int> sizes(1);
sizes[0] = num_points;
SizeT device = parameters.Get<SizeT>("device");
GUARD_CU(cudaSetDevice(device));
cudaStream_t stream;
GUARD_CU(cudaStreamCreate(&stream));
faiss::gpu::StandardGpuResources gpu_res;
gpu_res.noTempMemory();
gpu_res.setCudaMallocWarning(true);
gpu_res.setDefaultStream(device, stream);
cpu_timer.Start();
faiss::gpu::bruteForceKnn(&gpu_res, faiss::METRIC_L2, samples, true, num_points,
samples, true, num_points, dim, k+1, res_D, res_I);
cpu_timer.Stop();
util::PrintMsg("Faiss KNN Elapsed: "
+ std::to_string(cpu_timer.ElapsedMillis()), !quiet);
util::PrintMsg("__________________________", !quiet);
parameters.Set("knn-elapsed", cpu_timer.ElapsedMillis());
long* knn_res = (long*)malloc(sizeof(long)*num_points*(k+1));
GUARD_CU(cudaMemcpy(knn_res, res_I, sizeof(long)*num_points*(k+1), cudaMemcpyDeviceToHost));
cudaDeviceSynchronize();
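// FAISS was queried for k+1 neighbors because the query set is the data set
// itself, so each point's nearest neighbor is normally the point itself. The
// loop below strips that self-match and copies the remaining k indices into
// h_knns, the same layout the Gunrock KNN path produces.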
for (SizeT x = 0; x < num_points; ++x){
if (knn_res[x * (k+1)] != x){
h_knns[x*k] = knn_res[x * (k+1)];
}
for (int i=0; i<k; ++i){
if (knn_res[x * (k+1) + i + 1] == x)
continue;
h_knns[x*k + i] = knn_res[x * (k+1) + i + 1];
}
}
// samples and knn_res were allocated with malloc, so release them with free
free(samples);
free(knn_res);
cudaFree(res_I);
cudaFree(res_D);
#else
// FAISS_FOUND
util::PrintMsg("FAISS library not found.");
free(h_knns);
return util::GRError("FAISS library not found.", __FILE__, __LINE__);
#endif
}else{
util::PrintMsg("KNN version: gunrock");
/* -------------- Gunrock KNN ---------------------------------*/
typedef app::knn::Problem<GraphT> ProblemKNN;
typedef app::knn::Enactor<ProblemKNN> EnactorKNN;
ProblemKNN knn_problem(parameters);
EnactorKNN knn_enactor;
GUARD_CU(knn_problem.Init(graph, util::DEVICE));
GUARD_CU(knn_enactor.Init(knn_problem, util::DEVICE));
GUARD_CU(knn_problem.Reset(points.GetPointer(util::HOST), target));
GUARD_CU(knn_enactor.Reset(num_points, k, target));
// Computing k Nearest Neighbors
cpu_timer.Start();
GUARD_CU(knn_enactor.Enact());
cpu_timer.Stop();
util::PrintMsg("Gunrock KNN Elapsed: "
+ std::to_string(cpu_timer.ElapsedMillis()), !quiet);
util::PrintMsg("__________________________", !quiet);
parameters.Set("knn-elapsed", cpu_timer.ElapsedMillis());
// Extract kNN
GUARD_CU(knn_problem.Extract(h_knns));
}
#ifdef SNN_DEBUG
for (SizeT x = 0; x < 100;/*num_points;*/ ++x){
debug("knn[%d]: ", x);
for (int i = 0; i < k; ++i){
debug("%d ", h_knns[x * k + i]);
}
debug("\n");
}
#endif
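#ifdef SNN_DEBUG
// Illustrative sanity check (not part of the original test): after either KNN
// backend has run, every entry of h_knns should be a valid point index.
for (SizeT x = 0; x < num_points; ++x) {
for (SizeT i = 0; i < k; ++i) {
if (h_knns[x * k + i] >= num_points)
debug("suspicious neighbor index for point %d\n", (int)x);
}
}
#endif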
// Reference result on CPU
SizeT* ref_cluster = NULL;
SizeT* ref_core_point_counter = NULL;
SizeT* ref_noise_point_counter = NULL;
SizeT* ref_cluster_counter = NULL;
// Result on GPU
SizeT* h_cluster = (SizeT*)malloc(sizeof(SizeT) * num_points);
SizeT* h_core_point_counter = (SizeT*)malloc(sizeof(SizeT));
SizeT* h_noise_point_counter = (SizeT*)malloc(sizeof(SizeT));
SizeT* h_cluster_counter = (SizeT*)malloc(sizeof(SizeT));
if (!quick) {
// Init datastructures for reference result on GPU
ref_cluster = (SizeT*)malloc(sizeof(SizeT) * num_points);
for (auto i = 0; i < num_points; ++i) ref_cluster[i] = i;
ref_core_point_counter = (SizeT*)malloc(sizeof(SizeT));
ref_noise_point_counter = (SizeT*)malloc(sizeof(SizeT));
ref_cluster_counter = (SizeT*)malloc(sizeof(SizeT));
// If not in `quick` mode, compute CPU reference implementation
util::PrintMsg("__________________________", !quiet);
util::PrintMsg("______ CPU Reference _____", !quiet);
float elapsed = app::snn::CPU_Reference(graph.csr(), num_points, k,
eps, min_pts, h_knns, ref_cluster, ref_core_point_counter,
ref_noise_point_counter, ref_cluster_counter, !quiet);
util::PrintMsg("--------------------------\n Elapsed: "
+ std::to_string(elapsed), !quiet);
util::PrintMsg("__________________________", !quiet);
parameters.Set("cpu-elapsed", elapsed);
}
std::vector<std::string> switches{"advance-mode"};
util::PrintMsg("--------RunTests-------", !quiet);
GUARD_CU(app::Switch_Parameters(parameters, graph, switches,
[num_points, k, eps, min_pts, h_knns, h_cluster, h_core_point_counter,
h_noise_point_counter, h_cluster_counter, ref_core_point_counter,
ref_noise_point_counter, ref_cluster_counter, ref_cluster]
(util::Parameters& parameters, GraphT& graph) {
return app::snn::RunTests(parameters, graph, num_points, k, eps,
min_pts, h_knns, h_cluster, ref_cluster, h_core_point_counter,
ref_core_point_counter, h_noise_point_counter,
ref_noise_point_counter, h_cluster_counter,
ref_cluster_counter, util::DEVICE);
}));
if (!quick) {
// these buffers were allocated with malloc, so release them with free
free(ref_cluster);
free(ref_core_point_counter);
free(ref_noise_point_counter);
free(ref_cluster_counter);
}
free(h_knns);
free(h_cluster);
free(h_core_point_counter);
free(h_noise_point_counter);
free(h_cluster_counter);
return retval;
}
};
int main(int argc, char** argv) {
cudaError_t retval = cudaSuccess;
util::Parameters parameters("test snn");
GUARD_CU(graphio::UseParameters(parameters));
GUARD_CU(app::snn::UseParameters(parameters));
GUARD_CU(app::UseParameters_test(parameters));
GUARD_CU(parameters.Parse_CommandLine(argc, argv));
if (parameters.Get<bool>("help")) {
parameters.Print_Help();
return cudaSuccess;
}
GUARD_CU(parameters.Check_Required());
return app::Switch_Types<app::VERTEXT_U32B | app::VERTEXT_U64B |
app::SIZET_U32B | app::SIZET_U64B |
app::VALUET_F32B | app::VALUET_F64B | app::UNDIRECTED>(
parameters, main_struct());
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
// ISO2DFD: HIP Port of the 2D-Finite-Difference-Wave Propagation,
//
// ISO2DFD is a finite difference stencil kernel for solving the 2D acoustic
// isotropic wave equation. Kernels in this sample are implemented as 2nd order
// in space, 2nd order in time scheme without boundary conditions. Using HIP,
// the sample will explicitly run on the GPU as well as CPU to
// calculate a result. If successful, the output will include GPU device name.
//
// A complete online tutorial for this code sample can be found at :
// https://software.intel.com/en-us/articles/code-sample-two-dimensional-finite-difference-wave-propagation-in-isotropic-media-iso2dfd
#include <chrono>
#include <cmath>
#include <cstdio>
#include <fstream>
#include <iostream>
#include <string>
#include <hip/hip_runtime.h>
#include "iso2dfd.h"
#define BLOCK_SIZE 16
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
/*
* Host-Code
* Utility function to display input arguments
*/
void usage(std::string programName) {
std::cout << " Incorrect parameters " << std::endl;
std::cout << " Usage: ";
std::cout << programName << " n1 n2 Iterations " << std::endl
<< std::endl;
std::cout << " n1 n2 : Grid sizes for the stencil " << std::endl;
std::cout << " Iterations : No. of timesteps. " << std::endl;
}
/*
* Host-Code
* Function used for initialization
*/
void initialize(float* ptr_prev, float* ptr_next, float* ptr_vel, size_t nRows,
size_t nCols) {
std::cout << "Initializing ... " << std::endl;
// Define source wavelet
float wavelet[12] = {0.016387336, -0.041464937, -0.067372555, 0.386110067,
0.812723635, 0.416998396, 0.076488599, -0.059434419,
0.023680172, 0.005611435, 0.001823209, -0.000720549};
// Initialize arrays
for (size_t i = 0; i < nRows; i++) {
size_t offset = i * nCols;
for (int k = 0; k < nCols; k++) {
ptr_prev[offset + k] = 0.0f;
ptr_next[offset + k] = 0.0f;
// pre-compute squared value of sample wave velocity v*v (v = 1500 m/s)
ptr_vel[offset + k] = 2250000.0f;
}
}
// Add a source to initial wavefield as an initial condition
for (int s = 11; s >= 0; s--) {
for (int i = nRows / 2 - s; i < nRows / 2 + s; i++) {
size_t offset = i * nCols;
for (int k = nCols / 2 - s; k < nCols / 2 + s; k++) {
ptr_prev[offset + k] = wavelet[s];
}
}
}
}
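/*
 * Note: initialize() fills ptr_vel with v*v = 1500*1500 = 2,250,000 (m/s)^2 so
 * the kernels can multiply by it directly, and the nested loops above write
 * the 12-sample source wavelet as nested squares centred on the middle of the
 * grid to seed the initial wavefield in ptr_prev.
 */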
/*
* Host-Code
* Utility function to calculate L2-norm between resulting buffer and reference
* buffer
*/
bool within_epsilon(float* output, float* reference, const size_t dimx,
const size_t dimy, const unsigned int radius,
const float delta = 0.01f) {
FILE* fp = fopen("./error_diff.txt", "w");
if (!fp) fp = stderr;
bool error = false;
//float abs_delta = fabsf(delta);
double norm2 = 0;
for (size_t iy = 0; iy < dimy; iy++) {
for (size_t ix = 0; ix < dimx; ix++) {
if (ix >= radius && ix < (dimx - radius) && iy >= radius &&
iy < (dimy - radius)) {
float difference = fabsf(*reference - *output);
norm2 += difference * difference;
if (difference > delta) {
error = true;
fprintf(fp, " ERROR: (%zu,%zu)\t%e instead of %e (|e|=%e)\n", ix, iy,
*output, *reference, difference);
}
}
++output;
++reference;
}
}
if (fp != stderr) fclose(fp);
norm2 = sqrt(norm2);
if (error) printf("error (Euclidean norm): %.9e\n", norm2);
return error;
}
/*
* Host-Code
* CPU implementation for wavefield modeling
* Updates wavefield for the number of iterations given in nIteratons parameter
*/
void iso_2dfd_iteration_cpu(float* next, float* prev, float* vel,
const float dtDIVdxy, int nRows, int nCols,
int nIterations) {
for (unsigned int k = 0; k < nIterations; k += 1) {
for (unsigned int i = 1; i < nRows - HALF_LENGTH; i += 1) {
for (unsigned int j = 1; j < nCols - HALF_LENGTH; j += 1) {
// Stencil code to update grid
int gid = j + (i * nCols);
float value = 0.f;
value += prev[gid + 1] - 2.f * prev[gid] + prev[gid - 1];
value += prev[gid + nCols] - 2.f * prev[gid] + prev[gid - nCols];
value *= dtDIVdxy * vel[gid];
next[gid] = 2.0f * prev[gid] - next[gid] + value;
}
}
// Swap arrays
float* swap = next;
next = prev;
prev = swap;
}
}
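/*
 * The update applied above (and in the GPU kernel below) is the standard
 * 2nd-order-in-time, 2nd-order-in-space discretization of the acoustic wave
 * equation:
 *
 *   next[i,j] = 2*prev[i,j] - next_old[i,j]
 *             + vel[i,j] * dtDIVdxy *
 *               (prev[i,j+1] + prev[i,j-1] + prev[i+1,j] + prev[i-1,j] - 4*prev[i,j])
 *
 * where dtDIVdxy carries the dt^2/dx^2 factor and vel carries v^2; next_old is
 * the wavefield from two time steps back, which is what the buffer swap keeps
 * in `next` on entry to each iteration.
 */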
/*
 * Device-Code - GPU
 * HIP implementation of a single iteration of the iso2dfd kernel
 *
 * A 2D grid of thread blocks is launched so that one thread handles one
 * grid point in the x and y dimensions
 */
__global__ void iso_2dfd_kernel(float* next, const float* prev, const float* vel,
const float dtDIVdxy, const int nRows, const int nCols) {
// Compute global row/column indices from the block and thread indices.
// The 2D array is laid out in memory in row-major order.
int gidCol = blockDim.x * blockIdx.x + threadIdx.x;
int gidRow = blockDim.y * blockIdx.y + threadIdx.y;
if (gidRow < nRows && gidCol < nCols) {
size_t gid = (gidRow)*nCols + gidCol;
// Computation to solve wave equation in 2D
// First check if gid is inside the effective grid (not in halo)
if ((gidCol >= HALF_LENGTH && gidCol < nCols - HALF_LENGTH) &&
(gidRow >= HALF_LENGTH && gidRow < nRows - HALF_LENGTH)) {
// Stencil code to update grid point at position given by global id (gid)
// New time step for grid point is computed based on the values of the
// the immediate neighbors in both the horizontal and vertical
// directions, as well as the value of grid point at a previous time step
float value = 0.f;
value += prev[gid + 1] - 2.f * prev[gid] + prev[gid - 1];
value += prev[gid + nCols] - 2.f * prev[gid] + prev[gid - nCols];
value *= dtDIVdxy * vel[gid];
next[gid] = 2.f * prev[gid] - next[gid] + value;
}
}
}
int main(int argc, char* argv[]) {
// Arrays used to update the wavefield
float* prev_base;
float* next_base;
float* next_cpu;
// Array to store wave velocity
float* vel_base;
bool error = false;
size_t nRows, nCols;
unsigned int nIterations;
// Read parameters; bail out early if too few arguments were given, since
// calling std::stoi on a missing argv entry would be undefined behavior
if (argc < 4) {
usage(argv[0]);
return 1;
}
try {
nRows = std::stoi(argv[1]);
nCols = std::stoi(argv[2]);
nIterations = std::stoi(argv[3]);
}
catch (...) {
usage(argv[0]);
return 1;
}
// Compute the total size of grid
size_t nsize = nRows * nCols;
// Allocate arrays to hold wavefield and velocity
prev_base = new float[nsize];
next_base = new float[nsize];
next_cpu = new float[nsize];
vel_base = new float[nsize];
// Compute constant value (delta t)^2 (delta x)^2. To be used in wavefield
// update
float dtDIVdxy = (DT * DT) / (DXY * DXY);
// Initialize arrays and introduce initial conditions (source)
initialize(prev_base, next_base, vel_base, nRows, nCols);
std::cout << "Grid Sizes: " << nRows << " " << nCols << std::endl;
std::cout << "Iterations: " << nIterations << std::endl;
std::cout << std::endl;
// Start timer
auto start = std::chrono::steady_clock::now();
std::cout << "Computing wavefield in device .." << std::endl;
// Display info about device
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, 0);
std::cout << " Running on:: " << devProp.name << std::endl;
std::cout << " The Device Max Work Group Size is : " << devProp.maxThreadsPerBlock << std::endl;
float* d_next;
float* d_prev;
float* d_vel;
hipMalloc((void**)&d_next, sizeof(float)*nsize);
hipMemcpy(d_next, next_base, sizeof(float)*nsize, hipMemcpyHostToDevice);
hipMalloc((void**)&d_prev, sizeof(float)*nsize);
hipMemcpy(d_prev, prev_base, sizeof(float)*nsize, hipMemcpyHostToDevice);
hipMalloc((void**)&d_vel, sizeof(float)*nsize);
hipMemcpy(d_vel, vel_base, sizeof(float)*nsize, hipMemcpyHostToDevice);
unsigned int grid_cols = (nCols + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int grid_rows = (nRows + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_cols, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
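// The grid is sized with a ceiling division so the 16x16 blocks cover the
// whole nCols x nRows domain (e.g. a 1000 x 1000 grid gives 63 x 63 blocks);
// the bounds check inside the kernel discards threads that fall outside.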
// Iterate over time steps
for (unsigned int k = 0; k < nIterations; k += 1) {
// The next and prev buffers swap roles at every iteration
if (k % 2 == 0)
hipLaunchKernelGGL(iso_2dfd_kernel, dim3(dimGrid), dim3(dimBlock), 0, 0, d_next, d_prev, d_vel, dtDIVdxy, nRows, nCols);
else
hipLaunchKernelGGL(iso_2dfd_kernel, dim3(dimGrid), dim3(dimBlock), 0, 0, d_prev, d_next, d_vel, dtDIVdxy, nRows, nCols);
}
hipMemcpy(next_base, d_next, sizeof(float)*nsize, hipMemcpyDeviceToHost);
// Compute and display time used by device
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::milliseconds>(end - start)
.count();
std::cout << "Elapsed time: " << time << " ms" << std::endl;
std::cout << std::endl;
// Output final wavefield (computed by device) to binary file
std::ofstream outFile;
outFile.open("wavefield_snapshot.bin", std::ios::out | std::ios::binary);
outFile.write(reinterpret_cast<char*>(next_base), nsize * sizeof(float));
outFile.close();
// Compute wavefield on CPU (for validation)
std::cout << "Computing wavefield in CPU .." << std::endl;
// Re-initialize arrays
initialize(prev_base, next_cpu, vel_base, nRows, nCols);
// Compute wavefield on CPU
// Start timer for CPU
start = std::chrono::steady_clock::now();
iso_2dfd_iteration_cpu(next_cpu, prev_base, vel_base, dtDIVdxy, nRows, nCols,
nIterations);
// Compute and display time used by CPU
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::milliseconds>(end - start)
.count();
std::cout << "CPU time: " << time << " ms" << std::endl;
std::cout << std::endl;
// Compute error (difference between final wavefields computed in device and
// CPU)
error = within_epsilon(next_base, next_cpu, nRows, nCols, HALF_LENGTH, 0.1f);
// If error greater than threshold (last parameter in error function call),
// report
if (error)
std::cout << "Final wavefields from device and CPU are different: Error "
<< std::endl;
else
std::cout << "Final wavefields from device and CPU are equivalent: Success"
<< std::endl;
// Output final wavefield (computed by CPU) to binary file
outFile.open("wavefield_snapshot_cpu.bin", std::ios::out | std::ios::binary);
outFile.write(reinterpret_cast<char*>(next_cpu), nsize * sizeof(float));
outFile.close();
std::cout << "Final wavefields (from device and CPU) written to disk"
<< std::endl;
std::cout << "Finished. " << std::endl;
// Cleanup
delete[] prev_base;
delete[] next_base;
delete[] next_cpu;
delete[] vel_base;
hipFree(d_prev);
hipFree(d_next);
hipFree(d_vel);
return error ? 1 : 0;
}
#include "math_constants.h"
//Eigen includes
#include <Eigen/Dense>
#include <Eigen/Sparse>
//Boost
#include "boost/program_options.hpp"
#include <boost/filesystem/operations.hpp>
#include <boost/filesystem/path.hpp>
//My own includes
#include "add_model_info.h"
#include "logger.h"
#include "global_params.h"
#include "prev_states.h"
#include "input_file_prep.h"
#include "BZ_CUDA_UTIL.h"
#include "conv_char.h"
#include "encoder_multi_source.h"
#include "bi_encoder.h"
#include "attention_layer.h"
#include "attention_node.h"
#include "attention_combiner.h"
#include "decoder_model_wrapper.h"
#include "ensemble_factory.h"
#include "base_layer.h"
#include "NCE.h"
#include "gpu_info_struct.h"
#include "custom_kernels.h"
#include "Hidden_To_Hidden_Layer.h"
#include "LSTM_HH.h"
#include "model.h"
#include "fileHelper.h"
#include "fileHelper_source.h"
#include "Eigen_Util.h"
#include "model.hpp"
//#include "base_layer.hpp"
#include "LSTM.hpp"
#include "softmax.hpp"
#include "Input_To_Hidden_Layer.hpp"
#include "Hidden_To_Hidden_Layer.hpp"
#include "LSTM_HH.hpp"
#include "decoder_model_wrapper.hpp"
#include "ensemble_factory.hpp"
#include "attention_layer.hpp"
#include "attention_node.hpp"
#include "NCE.hpp"
#include "bi_encoder.hpp"
#include "encoder_multi_source.hpp"
#include "tree_LSTM.hpp"
#include "input_file_prep.hpp"
#include "attention_combiner.hpp"
#include "attention_combiner_node.hpp"
#include "conv_char.hpp"
#include "highway_network.hpp"
#include "memory_util.h"
boost::filesystem::path WORKING_DIRECTORY;
void clean_working_directory(void) {
boost::filesystem::remove_all(WORKING_DIRECTORY);
}
//parse the command line from the user
void command_line_parse(global_params &params, int argc, char **argv) {
//files for keeping the user input
//if not s, 1st source, 2nd target, 3rd output weights name
//if s, 1st target, 2nd output weights name
std::vector<std::string> train_files;
//files for force decoding
//if not s, 1. source input file 2. target input file 3. neural network file name 4. output file name
//if s, 1. target input file 2. neural network file name 3. output file name
std::vector<std::string> test_files;
//stuff for adaptive learning rate schedule
//if not seq , 1st is source dev, 2nd is target dev
//if seq 1st is target dev
std::vector<std::string> adaptive_learning_rate;
//lower and upper range for parameter initialization
std::vector<precision> lower_upper_range;
//for the kbest flag, 4 arguments must be entered for kbest: 1. number of best paths 2. input file name
//3. neural network file name (this is the output file you get after training the neural network) 4. output file name
std::vector<std::string> kbest_files;
//for stoic gen, 1st neural network file, 2nd is output file name
std::vector<std::string> stoicgen_files;
//truncated softmax
std::vector<std::string> trunc_info;
//for decoding ratios
std::vector<precision> decoding_ratio;
//for continuing to train
std::vector<std::string> cont_train;
//for multi gpu training
std::vector<int> gpu_indicies;
std::vector<precision> clip_cell_vals;
std::vector<double> NCE_vals;
//for multisource
std::vector<std::string> multi_source;
//for char-mt
std::vector<int> char_mt_vec;
//basic format setup
namespace po = boost::program_options;
po::options_description desc("Options");
desc.add_options()
("help,h", "Run to get help on how to use the program. This is version 1.0")
("train,t",po::value<std::vector<std::string> > (&train_files)->multitoken(),"Train a model with input data file(s) and a name for the neural network output file"\
". \nFORMAT (if sequence to sequence): <source file name> <target file name> <neural network output name> "\
" \nFORMAT (if sequence): <target file name> <neural network output name>")
("cont-train,C",po::value<std::vector<std::string>> (&cont_train)->multitoken(),"Resume training of a model (THIS WILL OVERWRITE THE MODEL FILE PASSED IN)\n"\
"FORMAT: (if sequence to sequence): <source file name> <target file name> <neural network file name>\n"\
"FORMAT: (if seq): <target file name> <neural network file name>")
("vocab-mapping-file",po::value<std::string> (¶ms.ensemble_train_file_name),"Train a model with the same integerization mappings as another model. This is needed to do ensemble decoding\n"\
"FORMAT: <neural network file name>")
("train-source-RNN",po::value<bool>(&deniz::train_source_RNN),"train source RNN. DEFAULT: True")
("train-target-RNN",po::value<bool>(&deniz::train_target_RNN),"train target RNN. DEFAULT: True")
("train-source-input-embedding",po::value<bool>(&deniz::train_source_input_embedding),"train source input embeddings. DEFAULT: True")
("train-target-input-embedding",po::value<bool>(&deniz::train_target_input_embedding),"train target input embeddings. DEFAULT: True")
("train-target-output-embedding",po::value<bool>(&deniz::train_target_output_embedding),"train target output embeddings. DEFAULT: True")
("train-attention-target-RNN",po::value<bool>(&deniz::train_attention_target_RNN),"train target attention. DEFAULT: True")
("vocab-mapping-file-multi-source",po::value<std::string> (¶ms.multi_src_params.ensemble_train_file_name),"specify multi-source mapping for vocab")
("multi-source",po::value<std::vector<std::string>> (&multi_source)->multitoken(),"Specify the second source training file and mapping file for the multi-source model")
//("multi-attention",po::value<bool>(¶ms.multi_src_params.multi_attention),"for attention model with multiple sources\n")
("multi-attention",po::value<bool>(¶ms.multi_src_params.multi_attention_v2),"Make the multi-source seq-to-seq model use attention\n")
//("char-mt",po::value<std::vector<int>> (&char_mt_vec)->multitoken(),"<filter_size> <char_emb_size> <num highway layers> \n")
//("add-ht",po::value<bool>(¶ms.multi_src_params.add_ht),"add hiddenstates for both attention models instead of sending through neural network\n")
//("print-norms",po::value<bool>(&BZ_CUDA::print_norms),"Print out norms of all matrices\n")
("lstm-combine",po::value<bool>(¶ms.multi_src_params.lstm_combine),"For multi source seq-to-seq model, use the child-sum combination method if set to true, else use the basic method. DEFAULT: false\n")
("num-layers,N",po::value<int>(¶ms.num_layers),"Set the number of LSTM layers you want for your model\n DEFAULT: 1")
("multi-gpu,M",po::value<std::vector<int>> (&gpu_indicies)->multitoken(), "Train the model on multiple gpus.\nFORMAT: <gpu for layer 1> <gpu for layer 2> ... <gpu for softmax>\n"\
"DEFAULT: all layers and softmax lie on gpu 0")
("force-decode,f",po::value<std::vector<std::string> > (&test_files)->multitoken(), "Get per line probability of dataset plus the perplexity\n"\
"FORMAT: (if sequence to sequence): <source file name> <target file name> <trained neural network file name> <output file name>\n"\
"FORMAT: (if sequence): <target file name> <trained neural network file name> <output file name>")
// ("stoch-gen,g", po::value<std::vector<std::string> > (&stoicgen_files)->multitoken(),"Do random generation for a sequence model, such as a language model\n"\
// "FORMAT: <neural network file name> <output file name>")
// ("stoch-gen-len",po::value<int>(¶ms.sg_length) ,"How many sentences to let stoch-gen run for\n"\
// "FORMAT: <num sentences>\n"
// "DEFAULT: 100")
//("dump-alignments",po::value<bool>(¶ms.attent_params.dump_alignments),"Dump the alignments to a file")
// ("temperature",po::value<double>(¶ms.temperature) ,"What should the temperature be for the stoch generation"\
// "FORMAT: <temperature> where temperature is typically between [0,1]. A lower temperature makes the model output less and less from what it memorized from training\n"\
// "DEFAULT: 1")
("sequence,s", "Train model that learns a sequence,such as language modeling. Default model is sequence to sequence model")
("tmp-dir-location",po::value<std::string>(¶ms.tmp_location),"For all modes in the code, a tmp directiory must be created for data preparation. Specify the location of where you want this to be created. DEFAULT: Current directory")
//("bi-directional",po::value<bool>(¶ms.bi_dir_params.bi_dir),"Have the source sequence be encoded bi-diretionally\n")
//("combine-bi-directional",po::value<bool>(¶ms.bi_dir_params.bi_dir_comb),"send a nonlinear tranformation of the rev and nonrev hidden states from the source encoders to the decoder\n")
//("share-embedding",po::value<bool>(¶ms.bi_dir_params.share_embeddings),"For the bidirectional encoder, share the embeddings")
("dropout,d",po::value<precision>(¶ms.dropout_rate),"Use dropout and set the dropout rate. This value is the probability of keeping a node. FORMAT: <dropout rate>. DEFAULT: 1.0")
("learning-rate,l",po::value<precision>(¶ms.learning_rate),"Set the learning rate. DEFAULT: 0.5")
("random-seed",po::value<int>(¶ms.random_seed_int),"Specify a random seed, instead of the model being seeded with the current time\n")
("longest-sent,L",po::value<int>(¶ms.longest_sent),"Set the maximum sentence length for training/force-decode/decode. DEFAULT: 100")
("hiddenstate-size,H",po::value<int>(¶ms.LSTM_size),"Set hiddenstate size. DEFAULT: 100")
//("UNK-replacement",po::value<int>(¶ms.unk_aligned_width),"Set unk replacement to be true and set the wideth\n FORMAT: <alignment width>")
// ("truncated-softmax,T",po::value<std::vector<std::string>> (&trunc_info)->multitoken(),"Use truncated softmax\n DEFAULT: not being used\n"\
// "FORMAT: <shortlist size> <sampled size>")
("UNK-decode",po::value<std::string>(&BZ_CUDA::unk_rep_file_name),"Use unk replacement at decoding time if you have an attention model. Specify a file that the system will output information to. \
This file will then need to be passed to the python script")
("NCE",po::value<int>(¶ms.num_negative_samples),"Use an NCE loss function, specify the number of noise samples you want (these are shared across the minibatch for speed). DEFAULT: uses MLE not NCE")
("NCE-share-samples",po::value<bool>(¶ms.share_samples),"Share the noise samples across the minibatch when using NCE for a speed increase. DEFAULT: True ")
//("NCE-leg-dump",po::value<bool>(&BZ_CUDA::nce_legacy_dump),"Dont use this option")
("NCE-score",po::value<bool>(&BZ_CUDA::nce_score),"Bool for using unnormalized softmax outputs for force decoding. This will make the probabilities not sum to 1, but makes decoding significanly faster. You must have trained the model with NCE for this to work. DEFAULT: false")
//("ASHISH-NCE-STATS",po::value<bool>(&BZ_CUDA::dump_NCE_stats),"for ashish")
("attention-model",po::value<bool>(¶ms.attent_params.attention_model),"Bool for whether you want to train with the attention mode. DEFAULT: False\n")
("attention-width",po::value<int>(¶ms.attent_params.D),"How many words do you want to look at around the alignment position on one half. DEFAULT: 10\n")
("feed-input",po::value<bool>(¶ms.attent_params.feed_input),"Bool for wether you want feed input for the attention model. DEFAULT: False\n")
("source-vocab-size,v",po::value<int>(¶ms.source_vocab_size),"Set source vocab size\n DEFAULT: number of unique words in source training corpus")
("target-vocab-size,V",po::value<int>(¶ms.target_vocab_size),"Set target vocab size\n DEFAULT: number of unique words in target training corpus")
("shuffle",po::value<bool>(¶ms.shuffle),"true if you want to shuffle the train data. DEFAULT: True")
("parameter-range,P",po::value<std::vector<precision> > (&lower_upper_range)->multitoken(),"parameter initialization range\n"\
"FORMAT: <Lower range value> <Upper range value>\n DEFAULT: -0.08 0.08")
("number-epochs,n",po::value<int>(¶ms.num_epochs),"Set number of epochs. DEFAULT: 10")
("matrix-clip-gradients,c",po::value<precision>(¶ms.norm_clip),"Set gradient clipping threshold\n DEFAULT: 5")
//("ind-clip-gradients,i",po::value<precision>(&BZ_CUDA::ind_norm_clip_thres),"CURRENT THIS DOES NOT WORK!!!!!!!!!!!!!!!!!!! \nSet gradient clipping threshold for individual elements\n DEFAULT: 0.1")
("whole-clip-gradients,w",po::value<precision>(¶ms.norm_clip),"Set gradient clipping threshold for all gradients\n DEFAULT: 5")
("adaptive-halve-lr,a",po::value<std::vector<std::string>> (&adaptive_learning_rate)->multitoken(),"change the learning rate"\
" when the perplexity on your specified dev set increases from the previous half epoch by some constant, so "\
" new_learning_rate = constant*old_learning rate, by default the constant is 0.5, but can be set using adaptive-decrease-factor\n"
"FORMAT: (if sequence to sequence): <source dev file name> <target dev file name>\n"\
"FORMAT: (if sequence): <target dev file name>")
("clip-cell",po::value<std::vector<precision>>(&clip_cell_vals)->multitoken(),"Specify the cell clip threshold and the error threshold in backprop.\n FORMAT: <Cell clip threshold> <Error clip Threshold> . Recommended values: <50> <1000>. DEFAULT: not used\n")
("adaptive-decrease-factor,A",po::value<precision>(¶ms.decrease_factor),"To be used with adaptive-halve-lr"\
" it\n DEFAULT: 0.5")
("fixed-halve-lr",po::value<int> (¶ms.epoch_to_start_halving),"Halve the learning rate"\
" after a certain epoch, every half epoch afterwards by a specific amount. FORMAT: <epoch number>")
("fixed-halve-lr-full",po::value<int> (¶ms.epoch_to_start_halving_full),"Halve the learning rate"\
" after a certain epoch, every epoch afterwards by a specific amount. FORMAT: <epoch number>")
("minibatch-size,m",po::value<int>(¶ms.minibatch_size),"Set minibatch size. DEFAULT: 8.")
("screen-print-rate",po::value<int>(¶ms.screen_print_rate),"Set after how many minibatches you want to print info to the stdout and/or the logfile\n DEFAULT: 5")
("logfile",po::value<std::string>(¶ms.HPC_output_file_name),"Dump the terminal output to a" \
"file \n FORMAT: <file name>")
("best-model,B",po::value<std::string>(¶ms.best_model_file_name),"During train have the best model (determined by validation perplexity) be written to a file\nFORMAT: <output file name>")
("save-all-models",po::value<bool>(&BZ_CUDA::dump_every_best),"Save the every model every half epoch")
("decode,k",po::value<std::vector<std::string> > (&kbest_files)->multitoken(),"Get top decoding outputs using beam search in sequence to sequence model. You can specify more than one model for ensemble decoding\n"\
"FORMAT: <how many outputs> <neural network file 1> <neural network file 2> ... <output file>")
("decode-main-data-files",po::value<std::vector<std::string> > (¶ms.decode_user_files)->multitoken(),"FORMAT: <data file 1> <data file 2> ... ")
("decode-multi-source-data-files",po::value<std::vector<std::string> > (¶ms.decode_user_files_additional)->multitoken(),"FORMAT: <multi-source data file 1> <multi-source data file 2> ... ")
("decode-multi-source-vocab-mappings",po::value<std::vector<std::string> > (¶ms.model_names_multi_src)->multitoken(),"FORMAT: <multi-source vocab mapping 1> <multi-source vocab mapping 2> ... ")
("pre-norm-ensemble",po::value<bool>(&BZ_CUDA::pre_norm),"For --decode, ensemble the models before they are normalized to probabilities")
("beam-size,b",po::value<int>(¶ms.beam_size),"Set beam size for --decode paths\n DEFAULT: 12")
("penalty,p",po::value<precision>(¶ms.penalty),"Set penalty for --decode decoding. The value entered"\
" will be added to the log probability score per target word decoded. This can make the model favor longer sentences for decoding\n DEFAULT: 0")
("print-score",po::value<bool>(¶ms.print_score),"Set if you want to print out the unnormalized log prob for each path when using --decode"\
"FORMAT: <bool> \nthe bool is 1 if you want to print the score or 0 otherwise.\n DEFAULT: false")
("dec-ratio",po::value<std::vector<precision>>(&decoding_ratio)->multitoken(),"Set the min and max decoding length rations when using --decode\n"\
"This means that a target decoded sentence must be at least min_dec_ratio*len(source sentence)"\
" and not longer than max_dec_ratio*len(source sentence)\nFORMAT: <min ration> <max ratio>\n"\
"DEFAULT: 0.5, 1.5")
// for fsa
("interactive",po::value<bool>(¶ms.interactive),"Interactive Mode. FORMAT: <bool> \n DEFAULT: false")
("interactive-line",po::value<bool>(¶ms.interactive_line),"Interactive line by line Mode. FORMAT: <bool> \n DEFAULT: false")
("print-beam",po::value<bool>(¶ms.print_beam),"Set if you want to print out the beam cells, mainly used for debug. FORMAT: <bool> \n DEFAULT: false")
("repeat-penalty",po::value<precision>(¶ms.repeat_penalty),"Set penalty for kbest decoding. The value entered will be added to the log probability score per target word decoded. This can make the model favor sentences for less repeating words\n DEFAULT: 0")
("adjacent-repeat-penalty",po::value<precision>(¶ms.adjacent_repeat_penalty),"Set penalty for kbest decoding. The value entered will be added to the log probability score per target word decoded. This will disencourage adjacent word copying.\n DEFAULT: 0")
("fsa",po::value<std::string>(¶ms.fsa_file),"the fsa file for the decoder, should be in carmel format\nFORMAT: <fsa file name>")
("fsa-weight",po::value<float>(¶ms.fsa_weight),"the fsa weight for the decoder, \nDEFAULT: 0.0")
("fsa-log",po::value<bool>(¶ms.fsa_log),"Whether the probability in fsa file is in log space, DEFAULT: false\n")
("encourage-list",po::value<std::vector<std::string>>(¶ms.encourage_list)->multitoken(),"provide encourage word list files for the decoding, each line should contain a encourage word \nFORMAT: <file1> <file2>")
("encourage-weight",po::value<std::string>(¶ms.encourage_weight_str)->multitoken(),"The encourage weights. The weight is in log(e) space, and will be added to the corresponding word probability during decoding\nFORMAT: <weight1>,<weight2> e.g. 1.0,-0.5\n DEFAULT: ")
("wordlen-weight",po::value<precision>(¶ms.wordlen_weight),"wordlen weight\n DEFAULT: 0")
("alliteration-weight",po::value<precision>(¶ms.alliteration_weight),"alliteration weight\n DEFAULT: 0")
// for LSH
("lsh-type",po::value<int>(¶ms.LSH_type),"0: no lsh; 1: WTA LSH; DEFAULT: 0")
("WTA-K",po::value<int>(¶ms.WTA_K),"interests scope; DEFAULT: 8")
("WTA-units-per-band",po::value<int>(¶ms.WTA_units_per_band),"units per band; DEFAULT: 2")
("WTA-W",po::value<int>(¶ms.WTA_W),"number of bands; DEFAULT: 100")
("WTA-m",po::value<int>(¶ms.WTA_m),"number of internal top m, not the same with beam_size; DEFAULT: 10")
("WTA-threshold",po::value<int>(¶ms.WTA_threshold),"threshold DEFAULT: 1")
("WTA-topn",po::value<int>(¶ms.WTA_topn),"topn DEFAULT: 0")
("show-debug-info",po::value<int>(¶ms.show_debug_info),"whether show LSH debug info; DEFAULT: 0")
// for target vocab shrink
("target-vocab-policy",po::value<int>(¶ms.target_vocab_policy),"0: full softmax, 1 top k vocab only, 2: using alignment; 3: using LSH; DEFAULT: 0")
("top-vocab-size",po::value<int>(¶ms.top_vocab_size),"valid only when target-vocab-policy==1; DEFAULT: 10")
("target-vocab-cap",po::value<int>(¶ms.target_vocab_cap),"when target-vocab-policy == 2, the cap value; DEFAULT: 1")
("f2e-file",po::value<std::string>(¶ms.alignment_file),"when target-vocab-policy == 2, the alignment file; DEFAULT: 1")
// to decode legacy model
("legacy-model",po::value<bool>(¶ms.legacy_model),"set when decoding with legacy model. If it's legacy model, it will need to have <START> as the first word in source sentence; DEFAULT: False");
// ("tsne-dump",po::value<bool>(&BZ_STATS::tsne_dump),"for dumping multi-source hiddenstates during decoding")
// ("Dump-LSTM",po::value<std::string>(¶ms.LSTM_dump_file),"Print the output at each timestep from the LSTM\nFORMAT: <output file name>\n"\
// "The file lines that are output are the following: 1.input word, embedding 2.Forget gate 3.input gate"\
// " 4.c_t 5.output gate 6.h_t 7.probabilities");
po::variables_map vm;
//kbest should be changed to decode. train-ensemble should be changed to vocab-mapping-file. screen-print-rate should be changed
//Declare license for the code. LGPL license or MIT license?
try {
po::store(po::parse_command_line(argc, argv, desc), vm);
po::notify(vm);
std::cout << "------------- Printing options that have currently being set by the user -------------\n";
//now try to loop over all the boost program options
for (auto it=vm.begin(); it != vm.end(); it++) {
std::cout << "Variable: " << it->first << " Value: ";
auto& value = it->second.value();
if (auto v = boost::any_cast<int>(&value)) {
std::cout << *v << "\n";
}
else if (auto v = boost::any_cast<bool>(&value)) {
std::cout << *v << "\n";
}
else if (auto v = boost::any_cast<float>(&value)) {
std::cout << *v << "\n";
}
else if(auto v = boost::any_cast<double>(&value)) {
std::cout << *v << "\n";
}
else if(auto v = boost::any_cast<std::string>(&value)) {
std::cout << *v << "\n";
}
else if(std::vector<std::string> *v = boost::any_cast<std::vector<std::string>>(&value)) {
std::vector<std::string> vv = *v;
for(int i=0; i<vv.size(); i++) {
std::cout << " " << vv[i] << " ";
}
std::cout << "\n";
}
else {
std::cout << "Not Printable\n";
}
}
std::cout << "--------------------------------------------------------------------------------------\n\n";
//see if the user specified the help flag
if ( vm.count("help") ) {
std::cout << "\n------------------------------\n";
std::cout << "This is Barret Zoph's GPU RNN library\n"
<< "The flags for the command line interface are below\n"
<< "Look at the README for an indepth tutorial and example commands\n"
<< "" << "\n";
std::cout << desc << "\n";
exit (EXIT_FAILURE);
}
if (vm.count("random-seed") ) {
params.random_seed = true;
}
if (vm.count("tmp-dir-location")) {
if (params.tmp_location != "") {
if (params.tmp_location[params.tmp_location.size()-1]!='/') {
params.tmp_location+="/";
}
}
}
WORKING_DIRECTORY = boost::filesystem::unique_path();
if(vm.count("tmp-dir-location")) {
WORKING_DIRECTORY = boost::filesystem::path(params.tmp_location + WORKING_DIRECTORY.string());
}
BZ_CUDA::logger << "Temp directory being created named: " << WORKING_DIRECTORY.string() << "\n\n";
boost::filesystem::create_directories(WORKING_DIRECTORY);
params.unique_dir = WORKING_DIRECTORY.string();
std::atexit(clean_working_directory);
if(vm.count("shuffle")) {
BZ_CUDA::shuffle_data = params.shuffle;
}
if(vm.count("logfile")) {
params.HPC_output = true;
//BZ_CUDA::HPC_output = true;
}
BZ_CUDA::logger.SetOutputLogger(params.HPC_output_file_name,params.HPC_output);
//error checks to be sure only once of these options is set
if (vm.count("train") && vm.count("decode")) {
BZ_CUDA::logger << "ERROR: you cannot train and get decode at the same time\n";
exit (EXIT_FAILURE);
}
if (vm.count("train") && vm.count("force-decode")) {
BZ_CUDA::logger << "ERROR: you cannot train and force-decode at the same time\n";
exit (EXIT_FAILURE);
}
if (vm.count("force-decode") && vm.count("decode")) {
BZ_CUDA::logger << "ERROR: you cannot force-decode and get decode at the same time\n";
exit (EXIT_FAILURE);
}
if (!(vm.count("train") || vm.count("force-decode") || vm.count("decode")||vm.count("stoch-gen") || vm.count("cont-train") )) {
BZ_CUDA::logger << "ERROR: you must either train,continue training,get decode,stoch generate data or force-decode\n";
exit (EXIT_FAILURE);
}
if(vm.count("parameter-range")) {
BZ_CUDA::lower = lower_upper_range[0];
BZ_CUDA::upper = lower_upper_range[1];
}
if(vm.count("cont-train")) {
BZ_CUDA::cont_train = true;
}
else {
BZ_CUDA::cont_train = false;
}
//this is for making sure dev_synch_all only loops over current GPU's specified
// if(vm.count("multi-gpu")) {
// if(gpu_indicies.size()==0) {
// gpu_info::device_numbers.push_back(0);
// }
// else {
// gpu_info::device_numbers = gpu_indicies;
// }
// }
if(vm.count("clip-cell")) {
if(clip_cell_vals.size()!=2) {
BZ_CUDA::logger << "ERROR: clip-cell must have exactly two arguement\n";
exit (EXIT_FAILURE);
}
BZ_CUDA::clip_cell = true;
BZ_CUDA::cell_clip_threshold = clip_cell_vals[0];
BZ_CUDA::error_clip_threshold = clip_cell_vals[1];
}
params.longest_sent+=4; //the usable maximum sentence length is 4 less than the value stored here
if(vm.count("UNK-decode")) {
BZ_CUDA::unk_replacement = true;
BZ_CUDA::unk_rep_file_stream.open(BZ_CUDA::unk_rep_file_name.c_str());
for(int i=0; i<params.beam_size; i++) {
BZ_CUDA::viterbi_alignments.push_back(-1);
}
for(int i=0; i<params.beam_size * params.longest_sent; i++) {
BZ_CUDA::alignment_scores.push_back(0);
}
BZ_CUDA::h_align_indicies = (int*)malloc((2*params.attent_params.D+1)*params.beam_size*sizeof(int));
BZ_CUDA::h_alignment_values = (precision*)malloc((2*params.attent_params.D+1)*params.beam_size*sizeof(precision));
}
if(vm.count("char-mt")) {
params.char_params.char_cnn = true;
params.char_params.filter_size = char_mt_vec[0];
params.char_params.char_emb_size = char_mt_vec[1];
params.char_params.num_highway_layers = char_mt_vec[2];
extract_char_info(params.char_params.longest_word,params.char_params.num_unique_chars_source,
params.char_params.num_unique_chars_target,params.source_vocab_size,params.target_vocab_size,
params.char_params.char_mapping_file,params.char_params.word_mapping_file);
}
if(vm.count("train") || vm.count("cont-train")) {
if(vm.count("multi-source")) {
if(multi_source.size()!=2) {
BZ_CUDA::logger << "ERROR only two arguements for the multi-source flag\n";
exit (EXIT_FAILURE);
}
params.multi_src_params.multi_source = true;
params.multi_src_params.file_name = multi_source[0];
params.multi_src_params.source_model_name = multi_source[1];
}
//some basic error checks to parameters
if(params.learning_rate<=0) {
BZ_CUDA::logger << "ERROR: you cannot have a learning rate <=0\n";
exit (EXIT_FAILURE);
}
if(params.minibatch_size<=0) {
BZ_CUDA::logger << "ERROR: you cannot have a minibatch of size <=0\n";
exit (EXIT_FAILURE);
}
if(params.LSTM_size<=0) {
BZ_CUDA::logger << "ERROR: you cannot have a hiddenstate of size <=0\n";
exit (EXIT_FAILURE);
}
if(params.source_vocab_size<=0) {
if(params.source_vocab_size!=-1) {
BZ_CUDA::logger << "ERROR: you cannot have a source_vocab_size <=0\n";
exit (EXIT_FAILURE);
}
}
if(params.target_vocab_size<=0) {
if(params.target_vocab_size!=-1) {
BZ_CUDA::logger << "ERROR: you cannot have a target_vocab_size <=0\n";
exit (EXIT_FAILURE);
}
}
if(params.norm_clip<=0) {
BZ_CUDA::logger << "ERROR: you cannot have your norm clip <=0\n";
exit (EXIT_FAILURE);
}
if(params.num_epochs<=0) {
BZ_CUDA::logger << "ERROR: you cannot have num_epochs <=0\n";
exit (EXIT_FAILURE);
}
// if(vm.count("logfile")) {
// params.HPC_output = true;
// BZ_CUDA::HPC_output = true;
// }
if(vm.count("dropout")) {
params.dropout = true;
if(params.dropout_rate < 0 || params.dropout_rate > 1) {
BZ_CUDA::logger << "ERROR: dropout rate must be between 0 and 1\n";
exit (EXIT_FAILURE);
}
}
if(vm.count("matrix-clip-gradients")) {
BZ_CUDA::global_clip_flag = false;
params.clip_gradient = true;
BZ_CUDA::individual_grad_clip = false;
}
if(vm.count("whole-clip-gradients")) {
BZ_CUDA::global_clip_flag = true;
params.clip_gradient = false;
BZ_CUDA::individual_grad_clip = false;
}
if(vm.count("ind-clip-gradients")) {
BZ_CUDA::global_clip_flag = false;
params.clip_gradient = false;
BZ_CUDA::individual_grad_clip = true;
}
if(vm.count("NCE")) {
params.NCE = true;
params.softmax = false;
//BZ_CUDA::print_partition_function = true;
}
if(vm.count("UNK-replacement")) {
params.unk_replace = true;
}
//BZ_CUDA::logger << "Unique_dir: " << params.unique_dir << "\n";
params.train_file_name = params.unique_dir+"/train.txt";
//number of layers
//error checking is done when initializing model
if(vm.count("multi-gpu")) {
params.gpu_indicies = gpu_indicies;
}
if(vm.count("cont-train")) {
//sequence model
if(vm.count("sequence")) {
if(cont_train.size()!=2) {
BZ_CUDA::logger << (int)cont_train.size() << "\n";
BZ_CUDA::logger << "ERROR: two arguements to be supplied to the continue train flag\n"\
" 1. train data file name, 2. neural network file name\n";
exit (EXIT_FAILURE);
}
params.attent_params.attention_model = false;
params.target_file_name = cont_train[0];
params.input_weight_file = cont_train[1];
params.output_weight_file = cont_train[1];
params.LM = true;
params.load_model_train = true;
params.load_model_name = params.input_weight_file;
input_file_prep input_helper;
input_helper.integerize_file_LM(params.input_weight_file,params.target_file_name,params.train_file_name,
params.longest_sent,params.minibatch_size,true,params.LSTM_size,params.target_vocab_size,params.num_layers);
}
else {
if(cont_train.size()!=3) {
BZ_CUDA::logger << "ERROR: three arguements to be supplied to the continue train flag\n"\
" 1. source train data file name 2. target train data file name 3. neural network file name \n";
exit (EXIT_FAILURE);
}
params.LM = false;
params.source_file_name = cont_train[0];
params.target_file_name = cont_train[1];
params.input_weight_file = cont_train[2];
params.output_weight_file = cont_train[2];
params.load_model_train = true;
params.load_model_name = params.input_weight_file;
BZ_CUDA::logger << "Load model name: " << params.load_model_name << "\n";
if(params.source_file_name == params.target_file_name) {
BZ_CUDA::logger << "ERROR: do not use the same file for source and target data\n";
exit (EXIT_FAILURE);
}
input_file_prep input_helper;
if(vm.count("multi-source")) {
params.multi_src_params.int_file_name = params.unique_dir + params.multi_src_params.int_file_name;
}
if(params.char_params.char_cnn) {
params.train_file_name = params.char_params.word_train_file;
params.test_file_name = params.char_params.word_dev_file;
params.output_weight_file = params.char_params.word_mapping_file;
}
else {
input_helper.integerize_file_nonLM(params.input_weight_file,params.source_file_name,
params.target_file_name,params.train_file_name,params.longest_sent,params.minibatch_size,params.LSTM_size,
params.source_vocab_size,params.target_vocab_size,params.num_layers,params.attent_params.attention_model,
params.multi_src_params.multi_source,params.multi_src_params.file_name,params.multi_src_params.int_file_name,
params.multi_src_params.source_model_name);
}
}
}
else {
if(vm.count("num-layers")) {
if(params.num_layers <=0) {
BZ_CUDA::logger << "ERROR: you must have >= 1 layer for your model\n";
exit (EXIT_FAILURE);
}
}
//now create the necessary files
if(vm.count("sequence")) {
if(train_files.size()!=2) {
BZ_CUDA::logger << "ERROR: two arguements to be supplied to the train flag"\
" 1. train data file name, 2. neural network output name\n";
exit (EXIT_FAILURE);
}
params.attent_params.attention_model = false;
params.LM = true;
params.target_file_name = train_files[0];
params.output_weight_file = train_files[1];
input_file_prep input_helper;
if(vm.count("vocab-mapping-file")) {
params.ensemble_train = true;
}
//this outputs the train.txt file along with the mappings and first line
bool success=true;
if(!params.ensemble_train) {
success = input_helper.prep_files_train_LM(params.minibatch_size,params.longest_sent,
params.target_file_name,
params.train_file_name,params.target_vocab_size,
params.shuffle,params.output_weight_file,params.LSTM_size,params.num_layers);
}
else {
success = input_helper.prep_files_train_LM_ensemble(params.minibatch_size,params.longest_sent,
params.target_file_name,
params.train_file_name,params.target_vocab_size,
params.shuffle,params.output_weight_file,params.LSTM_size,params.num_layers,params.ensemble_train_file_name);
}
if(!success) {
exit (EXIT_FAILURE);
}
}
else {
//then sequence to sequence model
if(train_files.size()!=3) {
BZ_CUDA::logger << (int)train_files.size() <<"\n";
BZ_CUDA::logger << "ERROR: three arguements to be supplied to the train flag for the sequence to sequence model\n"\
" 1. source train data file name\n 2. target train data file name \n3. neural network output name\n";
exit (EXIT_FAILURE);
}
params.LM = false;
params.source_file_name = train_files[0];
params.target_file_name = train_files[1];
params.output_weight_file = train_files[2];
if(params.source_file_name == params.target_file_name) {
BZ_CUDA::logger << "ERROR: do not use the same file for source and target data\n";
exit (EXIT_FAILURE);
}
//see if ensemble training
if(vm.count("vocab-mapping-file")) {
params.ensemble_train = true;
}
input_file_prep input_helper;
bool success=true;
//check if char
if(params.char_params.char_cnn) {
params.train_file_name = params.char_params.word_train_file;
params.test_file_name = params.char_params.word_dev_file;
params.output_weight_file = params.char_params.word_mapping_file;
}
else {
if(params.multi_src_params.multi_source) {
params.multi_src_params.int_file_name = params.unique_dir + params.multi_src_params.int_file_name;
if(params.ensemble_train) {
input_helper.prep_files_train_nonLM_multi_source_ensemble(params.minibatch_size,params.longest_sent,
params.source_file_name,params.target_file_name,
params.train_file_name,params.source_vocab_size,params.target_vocab_size,
params.shuffle,params.output_weight_file,params.LSTM_size,
params.num_layers,params.multi_src_params.file_name,params.multi_src_params.int_file_name,
params.multi_src_params.source_model_name,params.ensemble_train_file_name,params.multi_src_params.ensemble_train_file_name);
}
else {
input_helper.prep_files_train_nonLM_multi_source(params.minibatch_size,params.longest_sent,
params.source_file_name,params.target_file_name,
params.train_file_name,params.source_vocab_size,params.target_vocab_size,
params.shuffle,params.output_weight_file,params.LSTM_size,
params.num_layers,params.multi_src_params.file_name,params.multi_src_params.int_file_name,
params.multi_src_params.source_model_name);
}
}
else if(!params.ensemble_train) {
success = input_helper.prep_files_train_nonLM(params.minibatch_size,params.longest_sent,
params.source_file_name,params.target_file_name,
params.train_file_name,params.source_vocab_size,params.target_vocab_size,
params.shuffle,params.output_weight_file,params.LSTM_size,params.num_layers,params.unk_replace,params.unk_aligned_width,params.attent_params.attention_model);
}
else {
success = input_helper.prep_files_train_nonLM_ensemble(params.minibatch_size,params.longest_sent,
params.source_file_name,params.target_file_name,
params.train_file_name,params.source_vocab_size,params.target_vocab_size,
params.shuffle,params.output_weight_file,params.LSTM_size,params.num_layers,params.ensemble_train_file_name,params.attent_params.attention_model);
}
}
if(!success) {
exit (EXIT_FAILURE);
}
}
}
if(vm.count("parameter-range")) {
if(lower_upper_range.size()!=2) {
BZ_CUDA::logger << "ERROR: you must have two inputs to parameter-range\n1.lower bound\n2. upper bound\n";
exit (EXIT_FAILURE);
}
BZ_CUDA::lower = lower_upper_range[0];
BZ_CUDA::upper = lower_upper_range[1];
if(BZ_CUDA::lower >= BZ_CUDA::upper) {
BZ_CUDA::logger << "ERROR: the lower parameter range cannot be greater than the upper range\n";
exit (EXIT_FAILURE);
}
}
if(vm.count("fixed-halve-lr-full")) {
params.stanford_learning_rate = true;
}
if(vm.count("fixed-halve-lr")) {
params.google_learning_rate = true;
if(params.epoch_to_start_halving<=0) {
BZ_CUDA::logger << "ERROR: cannot halve learning rate until 1st epoch \n";
exit (EXIT_FAILURE);
}
}
if(vm.count("adaptive-halve-lr")) {
params.learning_rate_schedule = true;
if(vm.count("sequence")) {
if(adaptive_learning_rate.size()!=1) {
BZ_CUDA::logger << "ERROR: adaptive-halve-lr takes one arguement\n1.dev file name\n";
exit (EXIT_FAILURE);
}
params.dev_target_file_name = adaptive_learning_rate[0];
params.test_file_name = params.unique_dir + "/validation.txt";
input_file_prep input_helper;
if(!params.char_params.char_cnn) {
input_helper.integerize_file_LM(params.output_weight_file,params.dev_target_file_name,params.test_file_name,
params.longest_sent,params.minibatch_size,true,params.LSTM_size,params.target_vocab_size,params.num_layers);
}
}
else {
if(adaptive_learning_rate.size()!=2 && !params.multi_src_params.multi_source) {
BZ_CUDA::logger << "ERROR: adaptive-halve-lr takes two arguements\n1.source dev file name\n2.target dev file name\n";
exit (EXIT_FAILURE);
}
if(adaptive_learning_rate.size()!=3 && params.multi_src_params.multi_source) {
BZ_CUDA::logger << "ERROR: adaptive-halve-lr takes three arguements with multi-source\n1.source dev file name\n2.target dev file name\n3.other source dev file name\n";
exit (EXIT_FAILURE);
}
if(params.multi_src_params.multi_source) {
params.multi_src_params.test_file_name = adaptive_learning_rate[2];
}
params.dev_source_file_name = adaptive_learning_rate[0];
params.dev_target_file_name = adaptive_learning_rate[1];
params.test_file_name = params.unique_dir + "/validation.txt";
params.multi_src_params.int_file_name_test = params.unique_dir + params.multi_src_params.int_file_name_test;
if(params.char_params.char_cnn) {
params.train_file_name = params.char_params.word_train_file;
params.test_file_name = params.char_params.word_dev_file;
}
if(params.dev_source_file_name == params.dev_target_file_name) {
BZ_CUDA::logger << "ERROR: do not use the same file for source and target data\n";
exit (EXIT_FAILURE);
}
input_file_prep input_helper;
if(!params.char_params.char_cnn) {
input_helper.integerize_file_nonLM(params.output_weight_file,params.dev_source_file_name,
params.dev_target_file_name,params.test_file_name,
params.longest_sent,params.minibatch_size,params.LSTM_size,params.source_vocab_size,params.target_vocab_size,params.num_layers,
params.attent_params.attention_model,params.multi_src_params.multi_source,params.multi_src_params.test_file_name,params.multi_src_params.int_file_name_test,params.multi_src_params.source_model_name);
}
}
if(vm.count("best-model")) {
params.best_model = true;
}
}
if(vm.count("truncated-softmax")) {
params.shortlist_size = std::stoi(trunc_info[0]);
params.sampled_size = std::stoi(trunc_info[1]);
params.truncated_softmax = true;
if(params.shortlist_size + params.sampled_size > params.target_vocab_size) {
BZ_CUDA::logger << "ERROR: you cannot have shortlist size + sampled size >= target vocab size\n";
exit (EXIT_FAILURE);
}
}
//put in the first line of the model file with the correct info
//format:
//0: num_layers
//1: LSTM_size
//2: target_vocab_size
//3: source_vocab_size
//4: attention_model
//5: feed_input
//6: multi_source
//7: combine_LSTM
//8: char_cnn
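//illustrative example only (all values hypothetical): a 2-layer model with LSTM size 1000,
//a 40k target vocab, a 30k source vocab, attention with feed-input, and no multi-source,
//no combine-LSTM and no char-cnn would get the first line:
//2 1000 40000 30000 1 1 0 0 0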
add_model_info(params.num_layers,params.LSTM_size,params.target_vocab_size,params.source_vocab_size,params.attent_params.attention_model,params.attent_params.feed_input,\
params.multi_src_params.multi_source,params.multi_src_params.lstm_combine,params.char_params.char_cnn,params.output_weight_file);
params.train= true;
params.decode=false;
params.test = false;
params.stochastic_generation = false;
return;
}
else { //checks here for things that should only be specified during training
if(vm.count("train-source-RNN")) {
std::cout << "Error train-source-RNN should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("train-target-RNN")) {
std::cout << "Error train-target-RNN should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("train-source-input-embedding")) {
std::cout << "Error train-source-input-embedding should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("train-target-input-embedding")) {
std::cout << "Error train-target-input-embedding should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("train-target-output-embedding")) {
std::cout << "Error train-target-output-embedding should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("train-train-attention-target-RNN")) {
std::cout << "Error train-train-attention-target-RNN should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("vocab-mapping-file-multi-source")) {
std::cout << "Error vocab-mapping-file-multi-source should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("multi-source")) {
std::cout << "Error train-target-RNN should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("train-target-RNN")) {
std::cout << "Error train-target-RNN should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("multi-attention")) {
std::cout << "Error multi-attention should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("lstm-combine")) {
std::cout << "Error lstm-combine should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("num-layers")) {
std::cout << "Error num-layers should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("dropout")) {
std::cout << "Error dropout should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("learning-rate")) {
std::cout << "Error learning-rate should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("random-seed")) {
std::cout << "Error random-seed should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("hiddenstate-size")) {
std::cout << "Error hiddenstate-size should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("NCE")) {
std::cout << "Error NCE should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("NCE-share-samples")) {
std::cout << "Error NCE-share-samples should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("attention-model")) {
std::cout << "Error attention-model should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("attention-width")) {
std::cout << "Error attention-width should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("feed-input")) {
std::cout << "Error feed-input should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("source-vocab-size")) {
std::cout << "Error source-vocab-size should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("target-vocab-size")) {
std::cout << "Error target-vocab-size should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("parameter-range")) {
std::cout << "Error parameter-range should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("number-epochs")) {
std::cout << "Error number-epochs should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("matrix-clip-gradients")) {
std::cout << "Error matrix-clip-gradients should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("whole-clip-gradients")) {
std::cout << "Error whole-clip-gradients should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("adaptive-halve-lr")) {
std::cout << "Error adaptive-halve-lr should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("clip-cell")) {
std::cout << "Error clip-cell should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("adaptive-decrease-factor")) {
std::cout << "Error adaptive-decrease-factor should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("fixed-halve-lr")) {
std::cout << "Error fixed-halve-lr should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("fixed-halve-lr-full")) {
std::cout << "Error fixed-halve-lr-full should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("screen-print-rate")) {
std::cout << "Error screen-print-rate should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
if(vm.count("best-model")) {
std::cout << "Error best-model should only be used during training (-t) or continue-training (-C)\n";
exit (EXIT_FAILURE);
}
}
if(vm.count("decode")) {
if (kbest_files.size()<3) {
BZ_CUDA::logger << "ERROR: at least 4 arguements must be entered for --decode, 1. number of best outputs\n"\
" 2. neural network file name (this is the output file you get after training the neural network)\n"\
" 3. output file name\n"\
"Additionally more neural network file names can be added to do ensemble decoding\n";
exit (EXIT_FAILURE);
}
//fill in NULL if the user did not specify anything
if(params.decode_user_files_additional.size()==0) {
for(int i=0; i<params.decode_user_files.size(); i++) {
params.decode_user_files_additional.push_back("NULL");
}
}
//once again fill in NULL if user did not specify
if(params.model_names_multi_src.size()==0) {
for(int i=0; i<params.decode_user_files.size(); i++) {
params.model_names_multi_src.push_back("NULL");
}
}
//for ensembles
for(int i=1; i<kbest_files.size()-1; i++) {
params.model_names.push_back(kbest_files[i]);
std::string temp_path = params.unique_dir+ "/kbest_tmp_" + std::to_string(i-1);
params.decode_temp_files.push_back(temp_path);
temp_path = params.unique_dir+ "/kbest_tmp_additional_" + std::to_string(i-1);
params.decode_temp_files_additional.push_back(temp_path);
}
//BZ_CUDA::logger << "params.model_names: " << (int)params.model_names.size() << "\n";
//BZ_CUDA::logger << "decode_user_files: " << (int)params.decode_user_files.size() << "\n";
//BZ_CUDA::logger << "model_names_multi_src: " << (int)params.model_names_multi_src.size() << "\n";
if(params.model_names.size() != params.decode_user_files.size() || params.model_names.size() != params.model_names_multi_src.size()) {
BZ_CUDA::logger << "ERROR: the same number of inputs must be specified as models\n";
exit (EXIT_FAILURE);
}
//params.decode_file_name = params.unique_dir+"/decoder_input.txt";
params.decoder_output_file = params.unique_dir+"/decoder_output.txt";
params.num_hypotheses =std::stoi(kbest_files[0]);
//params.decode_tmp_file = kbest_files[1];
//params.input_weight_file = model_names[0];
params.decoder_final_file = kbest_files.back();
input_file_prep input_helper;
// input_helper.integerize_file_LM(params.input_weight_file,params.decode_tmp_file,"tmp/decoder_input.txt",
// params.longest_sent,1,false,params.LSTM_size,params.target_vocab_size,true,params.source_vocab_size);
for(int i=0; i<params.decode_temp_files.size(); i++) {
input_helper.integerize_file_kbest(params.model_names[i],params.decode_user_files[i],params.decode_temp_files[i],
params.longest_sent,params.target_vocab_size,false,"NULL", params.legacy_model);
if(params.decode_user_files_additional[i]!= "NULL") {
input_helper.integerize_file_kbest(params.model_names[i],params.decode_user_files_additional[i],params.decode_temp_files_additional[i],
params.longest_sent,params.target_vocab_size,true,params.model_names_multi_src[i]);
}
}
if(vm.count("multi-gpu")) {
if(gpu_indicies.size()!=params.model_names.size()) {
BZ_CUDA::logger << "ERROR: for decoding, each model must be specified a gpu\n";
exit (EXIT_FAILURE);
}
params.gpu_indicies = gpu_indicies;
}
else {
for(int i=0; i<params.model_names.size(); i++) {
params.gpu_indicies.push_back(0);
}
}
if(params.beam_size<=0) {
BZ_CUDA::logger << "ERROR: beam size cannot be <=0\n";
exit (EXIT_FAILURE);
}
if(params.penalty<0) {
BZ_CUDA::logger << "ERROR: penalty cannot be less than zero\n";
exit (EXIT_FAILURE);
}
if(vm.count("Dump-LSTM")) {
params.dump_LSTM=true;
}
if(vm.count("dec-ratio")) {
if(decoding_ratio.size()!=2) {
BZ_CUDA::logger << "Decoding ratio size: " << (int)decoding_ratio.size() << "\n";
BZ_CUDA::logger << decoding_ratio[0] << "\n";
BZ_CUDA::logger << "ERROR: only two inputs for decoding ratio\n";
exit (EXIT_FAILURE);
}
params.min_decoding_ratio = decoding_ratio[0];
params.max_decoding_ratio = decoding_ratio[1];
if(params.min_decoding_ratio >= params.max_decoding_ratio) {
BZ_CUDA::logger << "ERROR: min decoding ratio must be <= max_decoding_ratio\n";
exit (EXIT_FAILURE);
}
}
params.train = false;
params.decode = true;
params.test = false;
params.stochastic_generation = false;
params.LM = false;
return;
}
if(vm.count("force-decode")) {
BZ_CUDA::force_decode = true;
if(vm.count("multi-gpu")) {
params.gpu_indicies = gpu_indicies;
}
params.test_file_name = params.unique_dir + "/validation.txt";
if(vm.count("sequence")) {
if(test_files.size()!=3) {
BZ_CUDA::logger << "ERROR: force-decode takes three arguements 1.input file name (input sentences)"\
"2. neural network file name 3.output file name \n";
exit (EXIT_FAILURE);
}
params.attent_params.attention_model = false;
params.target_file_name = test_files[0];
params.input_weight_file = test_files[1];
params.output_force_decode = test_files[2];
params.LM = true;
input_file_prep input_helper;
input_helper.integerize_file_LM(params.input_weight_file,params.target_file_name,params.test_file_name,
params.longest_sent,params.minibatch_size,false,params.LSTM_size,params.target_vocab_size,params.num_layers);
}
else {
if(test_files.size()!=4) {
BZ_CUDA::logger << "ERROR: force-decode takes four arguements: 1. source input file"\
" 2. target input file 3. neural network file name 4. output file name\n";
exit (EXIT_FAILURE);
}
params.LM = false;
params.source_file_name = test_files[0];
params.target_file_name = test_files[1];
params.input_weight_file = test_files[2];
params.output_force_decode = test_files[3];
//stuff for attention model alignments
params.attent_params.tmp_alignment_file = params.unique_dir + "/alignments.txt";
if(params.source_file_name == params.target_file_name) {
BZ_CUDA::logger << "ERROR: do not use the same file for source and target data\n";
exit (EXIT_FAILURE);
}
if(vm.count("multi-source")) {
if(multi_source.size()!=2) {
BZ_CUDA::logger << "ERROR only two arguements for the multi-source flag\n";
exit (EXIT_FAILURE);
}
params.multi_src_params.multi_source = true;
params.multi_src_params.test_file_name = multi_source[0];
params.multi_src_params.source_model_name = multi_source[1];
params.multi_src_params.int_file_name_test = params.unique_dir + params.multi_src_params.int_file_name_test;
}
if(!params.char_params.char_cnn) {
input_file_prep input_helper;
input_helper.integerize_file_nonLM(params.input_weight_file,params.source_file_name,
params.target_file_name,params.test_file_name,params.longest_sent,1,params.LSTM_size,
params.source_vocab_size,params.target_vocab_size,params.num_layers,params.attent_params.attention_model,
params.multi_src_params.multi_source,params.multi_src_params.test_file_name,params.multi_src_params.int_file_name_test,
params.multi_src_params.source_model_name);
}
else {
params.test_file_name = params.char_params.word_dev_file;
}
params.minibatch_size=1;
}
std::ifstream tmp_if_stream(params.input_weight_file.c_str());
std::string tmp_str;
std::string tmp_word;
std::getline(tmp_if_stream,tmp_str);
std::istringstream my_ss(tmp_str,std::istringstream::in);
std::vector<std::string> tmp_model_params;
while(my_ss >> tmp_word) {
tmp_model_params.push_back(tmp_word);
}
if(tmp_model_params.size() != 9) {
BZ_CUDA::logger << "Error: the model file is not in the correct format for force-decode\n";
exit (EXIT_FAILURE);
}
params.num_layers = std::stoi(tmp_model_params[0]);
params.LSTM_size = std::stoi(tmp_model_params[1]);
params.target_vocab_size = std::stoi(tmp_model_params[2]);
params.source_vocab_size = std::stoi(tmp_model_params[3]);
params.attent_params.attention_model = std::stoi(tmp_model_params[4]);
params.attent_params.feed_input = std::stoi(tmp_model_params[5]);
params.multi_src_params.multi_source = std::stoi(tmp_model_params[6]);
params.multi_src_params.lstm_combine = std::stoi(tmp_model_params[7]);
params.char_params.char_cnn = std::stoi(tmp_model_params[8]);
params.train= false;
params.decode=false;
params.test = true;
// params.minibatch_size=1;
params.stochastic_generation = false;
return;
}
if(vm.count("stoch-gen")) {
if(!vm.count("sequence")) {
BZ_CUDA::logger << "ERROR: you can only do stoch-gen on the sequence model\n";
exit (EXIT_FAILURE);
}
if(stoicgen_files.size()!=2) {
BZ_CUDA::logger << "ERROR: stoch-gen takes two inputs"\
" 1. neural network file name 2. output file name\n";
exit (EXIT_FAILURE);
}
params.sg_output_file_temp = params.unique_dir + "/sg.txt";
params.input_weight_file = stoicgen_files[0];
params.sg_output_file = stoicgen_files[1];
std::ifstream weights_file;
std::vector<std::string> info;
std::string str;
std::string word;
weights_file.open(params.input_weight_file.c_str());
weights_file.seekg(0, std::ios::beg);
std::getline(weights_file, str); //model info from the first line
std::istringstream iss(str, std::istringstream::in);
while(iss >> word) {
info.push_back(word);
}
weights_file.close();
params.LSTM_size = std::stoi(info[1]);
params.target_vocab_size = std::stoi(info[2]);
params.LM = true;
params.train= false;
params.decode = false;
params.test = false;
params.minibatch_size = 1;
params.stochastic_generation = true;
return;
}
}
catch(po::error& e) {
std::cerr << "ERROR: " << e.what() << std::endl << std::endl;
//std::cerr << desc << std::endl;
exit (EXIT_FAILURE);
}
}
int main(int argc, char **argv) {
//Timing stuff
std::chrono::time_point<std::chrono::system_clock> start_total,
end_total, begin_minibatch,end_minibatch,begin_decoding,end_decoding,begin_epoch;
std::chrono::duration<double> elapsed_seconds;
start_total = std::chrono::system_clock::now();
//Initializing the model
global_params params; //Declare all of the global parameters
//file_helper file_info(params.train_file_name,params.minibatch_size,params.train_num_lines_in_file); //Initialize the file information
BZ_CUDA::curr_seed = static_cast<unsigned int>(std::time(0));
BZ_CUDA::curr_seed = std::min((unsigned int)100000000,BZ_CUDA::curr_seed);//to prevent overflow
//get the command line arguments
command_line_parse(params,argc,argv);
// if(params.HPC_output) {
// std::cout << "Opening logfile: " << params.HPC_output_file_name << "\n";
// HPC_output.open(params.HPC_output_file_name);
// }
//randomize the seed
if(params.random_seed) {
BZ_CUDA::gen.seed(static_cast<unsigned int>(params.random_seed_int));
}
else {
BZ_CUDA::gen.seed(static_cast<unsigned int>(std::time(0)));
}
neuralMT_model<precision> model; //This is the model
printIntroMessage(params);
if(!params.decode) {
model.initModel(params.LSTM_size,params.minibatch_size,params.source_vocab_size,params.target_vocab_size,
params.longest_sent,params.debug,params.learning_rate,params.clip_gradient,params.norm_clip,
params.input_weight_file,params.output_weight_file,params.softmax_scaled,params.train_perplexity,params.truncated_softmax,
params.shortlist_size,params.sampled_size,params.LM,params.num_layers,params.gpu_indicies,params.dropout,
params.dropout_rate,params.attent_params,params);
}
if(params.load_model_train) {
std::string temp_swap_weights = model.input_weight_file;
model.input_weight_file = params.load_model_name;
model.load_weights();
model.input_weight_file = temp_swap_weights;
}
////////////////////////////////////Train the model//////////////////////////////////////
if(params.train) {
//info for averaging the speed
int curr_batch_num_SPEED = 0;
const int thres_batch_num_SPEED = params.screen_print_rate;//set this to whatever
int total_words_batch_SPEED = 0;
double total_batch_time_SPEED = 0;
//File info for the training file
file_helper file_info(params.train_file_name,params.minibatch_size,params.train_num_lines_in_file,params.longest_sent,
params.source_vocab_size,params.target_vocab_size,params.train_total_words,params.truncated_softmax,
params.shortlist_size,params.sampled_size,params.char_params,params.char_params.char_train_file); //Initialize the file information
//model.initFileInfo(&file_info);
params.half_way_count = params.train_total_words/2;
if(params.google_learning_rate) {
BZ_CUDA::logger << "Number of words at which to start halving the learning rate: " << params.half_way_count << "\n";
// if(params.HPC_output) {
// HPC_output << "Words at which to start halving the learning rate: " << params.half_way_count << "\n";
// HPC_output.flush();
// }
}
int current_epoch = 1;
BZ_CUDA::logger << "Starting model training\n";
BZ_CUDA::logger << "-----------------------------------" << "\n";
BZ_CUDA::logger << "Starting epoch 1\n";
BZ_CUDA::logger << "-----------------------------------" << "\n";
// if(params.HPC_output) {
// HPC_output << "Starting model training\n";
// HPC_output << "Starting epoch 1\n";
// HPC_output.flush();
// }
//stuff for learning rate schedule
int total_words = 0;
precision temp_learning_rate = params.learning_rate; //This is only for the google learning rate
bool learning_rate_flag =true;//used for google learning rate for halving at every 0.5 epochs
double old_perplexity = 0;
model.train_perplexity = 0; //set the model perplexity to zero
begin_epoch = std::chrono::system_clock::now();
while(current_epoch <= params.num_epochs) {
begin_minibatch = std::chrono::system_clock::now();
bool success = file_info.read_minibatch();
if(model.multi_source) {
model.src_fh.read_minibatch();
}
end_minibatch = std::chrono::system_clock::now();
elapsed_seconds = end_minibatch-begin_minibatch;
//std::cout << "File I/O time: " << elapsed_seconds.count()/60.0 << " minutes\n";
total_batch_time_SPEED+= elapsed_seconds.count();
begin_minibatch = std::chrono::system_clock::now();
//cudaProfilerStart();
model.initFileInfo(&file_info);
model.compute_gradients(file_info.minibatch_tokens_source_input,file_info.minibatch_tokens_source_output,
file_info.minibatch_tokens_target_input,file_info.minibatch_tokens_target_output,
file_info.h_input_vocab_indicies_source,file_info.h_output_vocab_indicies_source,
file_info.h_input_vocab_indicies_target,file_info.h_output_vocab_indicies_target,
file_info.current_source_length,file_info.current_target_length,
file_info.h_input_vocab_indicies_source_Wgrad,file_info.h_input_vocab_indicies_target_Wgrad,
file_info.len_source_Wgrad,file_info.len_target_Wgrad,file_info.h_sampled_indices,
file_info.len_unique_words_trunc_softmax,file_info.h_batch_info,&file_info);
//cudaProfilerStop();
//return;
// return 0;
end_minibatch = std::chrono::system_clock::now();
elapsed_seconds = end_minibatch-begin_minibatch;
total_batch_time_SPEED+= elapsed_seconds.count();
total_words_batch_SPEED+=file_info.words_in_minibatch;
if(curr_batch_num_SPEED>=thres_batch_num_SPEED) {
BZ_CUDA::logger << "Recent batch gradient L2 norm size (if using -w): " << BZ_CUDA::global_norm << "\n";
BZ_CUDA::logger << "Time to compute gradients for previous " << params.screen_print_rate << " minibatches: " << total_batch_time_SPEED/60.0 << " minutes\n";
BZ_CUDA::logger << "Number of words in previous " << params.screen_print_rate << " minibatches: " << total_words_batch_SPEED << "\n";
BZ_CUDA::logger << "Throughput for previous " << params.screen_print_rate << " minibatches: " << (total_words_batch_SPEED)/(total_batch_time_SPEED) << " words per second\n";
BZ_CUDA::logger << total_words << " words out of " << params.train_total_words << " epoch: " << current_epoch << "\n\n";
// if(params.HPC_output) {
// HPC_output << "Recent batch gradient L2 norm size: " << BZ_CUDA::global_norm << "\n";
// HPC_output << "Batched Minibatch time: " << total_batch_time_SPEED/60.0 << " minutes\n";
// HPC_output << "Batched Words in minibatch: " << total_words_batch_SPEED << "\n";
// HPC_output << "Batched Throughput: " << (total_words_batch_SPEED)/(total_batch_time_SPEED) << " words per second\n";
// HPC_output << total_words << " out of " << params.train_total_words << " epoch: " << current_epoch << "\n\n";
// HPC_output.flush();
// }
total_words_batch_SPEED = 0;
total_batch_time_SPEED = 0;
curr_batch_num_SPEED = 0;
}
curr_batch_num_SPEED++;
total_words += file_info.words_in_minibatch;
//stuff for google learning rate
if(params.google_learning_rate && current_epoch>=params.epoch_to_start_halving && total_words>=params.half_way_count &&
learning_rate_flag) {
temp_learning_rate = temp_learning_rate/2;
BZ_CUDA::logger << "New Learning Rate: " << temp_learning_rate << "\n";
model.update_learning_rate(temp_learning_rate);
learning_rate_flag = false;
// if(params.HPC_output) {
// HPC_output << "New Learning Rate: " << temp_learning_rate << "\n";
// HPC_output.flush();
// }
}
//stuff for perplexity based learning schedule
if(params.learning_rate_schedule && total_words>=params.half_way_count &&learning_rate_flag) {
learning_rate_flag = false;
double new_perplexity = model.get_perplexity(params.test_file_name,params.minibatch_size,params.test_num_lines_in_file,params.longest_sent,
params.source_vocab_size,params.target_vocab_size,false,params.test_total_words,params.HPC_output,false,"");
BZ_CUDA::logger << "Old dev set Perplexity: " << old_perplexity << "\n";
BZ_CUDA::logger << "New dev set Perplexity: " << new_perplexity << "\n";
// if(params.HPC_output) {
// HPC_output << "Old dev set Perplexity: " << old_perplexity << "\n";
// HPC_output << "New dev set Perplexity: " << new_perplexity << "\n";
// HPC_output.flush();
// }
if ( (new_perplexity + params.margin >= old_perplexity) && current_epoch!=1) {
temp_learning_rate = temp_learning_rate*params.decrease_factor;
model.update_learning_rate(temp_learning_rate);
BZ_CUDA::logger << "New learning rate:" << temp_learning_rate <<"\n\n";
// if(params.HPC_output) {
// HPC_output << "New learning rate:" << temp_learning_rate <<"\n\n";
// HPC_output.flush();
// }
}
//perplexity is better so output the best model file
if((params.best_model && params.best_model_perp > new_perplexity) || BZ_CUDA::dump_every_best) {
//BZ_CUDA::logger << "Writing model file: "<< params.best_model_file_name <<"\n";
model.dump_best_model(params.best_model_file_name,params.output_weight_file);
// if(params.HPC_output) {
// HPC_output << "Now outputting the new best model\n";
// HPC_output.flush();
// }
params.best_model_perp = new_perplexity;
}
old_perplexity = new_perplexity;
}
if(!success) {
current_epoch+=1;
//stuff for google learning rate schedule
if(params.google_learning_rate && current_epoch>=params.epoch_to_start_halving) {
temp_learning_rate = temp_learning_rate/2;
BZ_CUDA::logger << "New learning rate:" << temp_learning_rate <<"\n\n";
model.update_learning_rate(temp_learning_rate);
learning_rate_flag = true;
// if(params.HPC_output) {
// HPC_output << "New learning rate:" << temp_learning_rate <<"\n\n";
// HPC_output.flush();
// }
}
//stuff for stanford learning rate schedule
if(params.stanford_learning_rate && current_epoch>=params.epoch_to_start_halving_full) {
temp_learning_rate = temp_learning_rate/2;
BZ_CUDA::logger << "New learning rate:" << temp_learning_rate <<"\n\n";
model.update_learning_rate(temp_learning_rate);
learning_rate_flag = true;
// if(params.HPC_output) {
// HPC_output << "New learning rate:" << temp_learning_rate <<"\n\n";
// HPC_output.flush();
// }
}
double new_perplexity;
if(params.learning_rate_schedule) {
new_perplexity = model.get_perplexity(params.test_file_name,params.minibatch_size,params.test_num_lines_in_file,params.longest_sent,
params.source_vocab_size,params.target_vocab_size,false,params.test_total_words,params.HPC_output,false,"");
}
//stuff for perplexity based learning schedule
if(params.learning_rate_schedule) {
BZ_CUDA::logger << "Old dev set Perplexity: " << old_perplexity << "\n";
BZ_CUDA::logger << "New dev set Perplexity: " << new_perplexity << "\n";
// if(params.HPC_output) {
// HPC_output << "Old dev set Perplexity: " << old_perplexity << "\n";
// HPC_output << "New dev set Perplexity: " << new_perplexity << "\n";
// HPC_output.flush();
// }
if ( (new_perplexity + params.margin >= old_perplexity) && current_epoch!=1) {
temp_learning_rate = temp_learning_rate*params.decrease_factor;
model.update_learning_rate(temp_learning_rate);
BZ_CUDA::logger << "New learning rate:" << temp_learning_rate <<"\n\n";
// if(params.HPC_output) {
// HPC_output << "New learning rate:" << temp_learning_rate <<"\n\n";
// HPC_output.flush();
// }
}
//perplexity is better so output the best model file
if( (params.best_model && params.best_model_perp > new_perplexity) || BZ_CUDA::dump_every_best) {
//BZ_CUDA::logger << "Now outputting the new best model\n";
model.dump_best_model(params.best_model_file_name,params.output_weight_file);
// if(params.HPC_output) {
// HPC_output << "Now outputting the new best model\n";
// HPC_output.flush();
// }
params.best_model_perp = new_perplexity;
}
learning_rate_flag = true;
old_perplexity = new_perplexity;
}
if(params.train_perplexity) {
model.train_perplexity = model.train_perplexity/std::log(2.0);
BZ_CUDA::logger << "PData on train set: " << model.train_perplexity << "\n";
BZ_CUDA::logger << "Total target words: " << file_info.total_target_words << "\n";
BZ_CUDA::logger << "Training set perplexity: " << std::pow(2,-1*model.train_perplexity/file_info.total_target_words) << "\n";
// if(params.HPC_output) {
// HPC_output << "Training set perplexity: " << std::pow(2,-1*model.train_perplexity/file_info.total_target_words) << "\n";
// HPC_output.flush();
// }
model.train_perplexity = 0;
}
total_words=0;
if(current_epoch <= params.num_epochs) {
elapsed_seconds = std::chrono::system_clock::now() - begin_epoch;
BZ_CUDA::logger << "Previous Epoch time (minutes): " << (double)elapsed_seconds.count()/60.0 << "\n";
begin_epoch = std::chrono::system_clock::now();
BZ_CUDA::logger << "-----------------------------------" << "\n";
BZ_CUDA::logger << "Starting epoch " << current_epoch << "\n";
BZ_CUDA::logger << "-----------------------------------" << "\n";
// if(params.HPC_output) {
// HPC_output << "-----------------------------------" << std::endl;
// HPC_output << "Starting epoch " << current_epoch << std::endl;
// HPC_output << "-----------------------------------" << std::endl;
// HPC_output.flush();
// }
}
}
devSynchAll();
}
//Now that training is done, dump the weights
devSynchAll();
model.dump_weights();
}
/////////////////////////////////Get perplexity on test set////////////////////////////////
if(params.test) {
model.get_perplexity(params.test_file_name,params.minibatch_size,params.test_num_lines_in_file,params.longest_sent,
params.source_vocab_size,params.target_vocab_size,true,params.test_total_words,params.HPC_output,true,params.output_force_decode);
//now unint alignments
if(model.attent_params.dump_alignments) {
input_file_prep input_helper;
model.output_alignments.close();
//input_helper.unint_alignments(params.input_weight_file,params.attent_params.tmp_alignment_file,params.attent_params.alignment_file);
}
}
if(params.LM && params.stochastic_generation) {
model.stoicastic_generation(params.sg_length,params.sg_output_file_temp,params.temperature);
input_file_prep input_helper;
input_helper.unint_file(params.input_weight_file,params.sg_output_file_temp,params.sg_output_file,true,false);
}
///////////////////////////////////////////decode the model////////////////////////////////////////////
if(params.decode) {
//std::cout << "-----------------Starting Decoding----------------\n";
ensemble_factory<precision> ensemble_decode(params.model_names,params.num_hypotheses,params.beam_size, params.min_decoding_ratio,
params.penalty, params.longest_sent,params.print_score,
params.decoder_output_file,params.gpu_indicies,params.max_decoding_ratio,
params.target_vocab_size,params);
if (params.fsa_file != ""){
fsa* fsa_model = new fsa(params.fsa_file);
input_file_prep input_helper;
input_helper.load_word_index_mapping(params.model_names[0],false,true);
ensemble_decode.model_decoder->init_fsa(fsa_model, input_helper.tgt_mapping, params);
// encourage list
params.encourage_weight.clear();
std::vector<std::string> ll = split(params.encourage_weight_str,',');
for (std::string s: ll){
float f = std::stof(s);
params.encourage_weight.push_back(f);
}
ensemble_decode.model_decoder->init_encourage_lists(params.encourage_list, params.encourage_weight);
}
begin_decoding = std::chrono::system_clock::now();
BZ_CUDA::logger << "-----------------Starting Decoding----------------\n";
ensemble_decode.decode_file();
end_decoding = std::chrono::system_clock::now();
std::chrono::duration<double> elapsed_seconds = end_decoding-begin_decoding;
BZ_CUDA::logger << "Decoding time: " << elapsed_seconds.count()/60.0 << " minutes\n";
//now unintegerize the file
input_file_prep input_helper;
//use model_names[0] since all models must have the same target vocab mapping and size
//BZ_CUDA::logger << "Uninting file\n";
input_helper.unint_file(params.model_names[0],params.decoder_output_file,params.decoder_final_file,false,true);
if(BZ_CUDA::unk_replacement) {
//BZ_CUDA::logger << "Closing unk rep file stream\n";
BZ_CUDA::unk_rep_file_stream.close();
}
}
//Compute the final runtime
end_total = std::chrono::system_clock::now();
elapsed_seconds = end_total-start_total;
BZ_CUDA::logger << "\n\n\n";
BZ_CUDA::logger << "Total Program Runtime: " << (double)elapsed_seconds.count()/60.0 << " minutes" << "\n";
}
/**
* ___ _ _ ___ _ _ ___ ___ ___ ___
* / __| | | | \ /_\ | | ___| _ ) __/ __/ __|
* | (__| |_| | |) / _ \ | |_|___| _ \ _| (_ \__ \
* \___|\___/|___/_/ \_\ |____| |___/_| \___|___/
* 2012
* by Jens Wetzl (jens.wetzl@fau.de)
* and Oliver Taubmann (oliver.taubmann@fau.de)
*
* This work is licensed under a Creative Commons
* Attribution 3.0 Unported License. (CC-BY)
* http://creativecommons.org/licenses/by/3.0/
*
* File lbfgs.cu: Implementation of class lbfgs (except cpu_lbfgs).
*
**/
#pragma once
#include "lbfgs.h"
#include "timer.h"
#include <iostream>
#include <limits>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <fstream>
#include <sstream>
using namespace std;
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))
namespace gpu_lbfgs {
// Variables
__device__ float fkm1;
__device__ float fk;
__device__ float tmp;
__device__ float alpha[HISTORY_SIZE];
__device__ float rho [HISTORY_SIZE];
__device__ float H0;
__device__ float step;
__device__ float tmp2;
__device__ int status;
// Small helper kernels for scalar operations in device memory needed during updates.
// What they're used for is documented by comments in the places they are executed.
// *** Use with a single thread only! ***
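// They are launched as update{1,2,3}<<<1, 1>>>(...) in gpu_lbfgs() below, so the scalar
// bookkeeping stays entirely in device memory and no host round trip is required.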
__global__ void update1 (float *alpha_out, const float *sDotZ, const float *rho, float *minusAlpha_out); // first update loop
__global__ void update2 (float *alphaMinusBeta_out, const float *rho, const float *yDotZ, const float *alpha); // second update loop
__global__ void update3 (float *rho_out, float *H0_out, const float *yDotS, const float *yDotY); // after line search
}
// linesearch_gpu.h is no real header, it contains
// part of the implementation and must be included
// after the variables above have been declared.
#include "linesearch_gpu.h"
lbfgs::lbfgs(cost_function& cf, cublasHandle_t cublasHandle)
: m_costFunction(cf)
, m_maxIter(100)
, m_maxEvals(std::numeric_limits<size_t>::max())
, m_gradientEps(1e-4f)
,m_cublasHandle (cublasHandle)
{
}
lbfgs::~lbfgs()
{
}
std::string lbfgs::statusToString(lbfgs::status stat)
{
switch (stat)
{
case LBFGS_BELOW_GRADIENT_EPS:
return "Below gradient epsilon";
case LBFGS_REACHED_MAX_ITER:
return "Reached maximum number of iterations";
case LBFGS_REACHED_MAX_EVALS:
return "Reached maximum number of function/gradient evaluations";
case LBFGS_LINE_SEARCH_FAILED:
return "Line search failed";
default:
return "Unknown status";
}
}
lbfgs::status lbfgs::minimize(float *d_x)
{
return gpu_lbfgs(d_x);
}
lbfgs::status lbfgs::minimize_with_host_x(float *h_x)
{
const size_t NX = m_costFunction.getNumberOfUnknowns();
float *d_x;
cudaMalloc((void**)&d_x, NX * sizeof(float));
cudaMemcpy(d_x, h_x, NX * sizeof(float), cudaMemcpyHostToDevice);
status ret = minimize(d_x);
cudaMemcpy(h_x, d_x, NX * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_x);
return ret;
}
lbfgs::status lbfgs::gpu_lbfgs(float *d_x)
{
#ifdef LBFGS_TIMING
timer timer_total ("GPU_LBFGS_total" );
timer timer_evals ("GPU_LBFGS_evals" );
timer timer_updates ("GPU_LBFGS_updates" );
timer timer_linesearch("GPU_LBFGS_linesearch");
timer_total.start();
#endif
using namespace gpu_lbfgs;
const size_t NX = m_costFunction.getNumberOfUnknowns();
float *d_fkm1, *d_fk; // f_{k-1}, f_k, function values at x_{k-1} and x_k
float *d_gkm1, *d_gk; // g_{k-1}, g_k, gradients at x_{k-1} and x_k
float *d_z; // z, search direction
float *d_H0; // H_0, initial inverse Hessian (diagonal, same value for all elements)
float *d_step; // step current step length
float *d_tmp, *d_tmp2; // tmp, tmp2 temporary storage for intermediate results
int *d_status; // status return code for communication device -> host
// Ring buffers for history
float *d_s; // s, history of solution updates
float *d_y; // y, history of gradient updates
float *d_alpha; // alpha, history of alphas (needed for z updates)
float *d_rho; // rho, history of rhos (needed for z updates)
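// index(k) (defined in lbfgs.h) is assumed to map iteration k onto a slot in
// [0, HISTORY_SIZE), so the newest HISTORY_SIZE (s, y) pairs overwrite the oldest ones.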
// Allocations
CudaSafeCall( cudaMalloc(&d_gk, NX * sizeof(float)) );
CudaSafeCall( cudaMalloc(&d_gkm1, NX * sizeof(float)) );
CudaSafeCall( cudaMalloc(&d_z, NX * sizeof(float)) );
CudaSafeCall( cudaMalloc(&d_s, HISTORY_SIZE * NX * sizeof(float)) );
CudaSafeCall( cudaMalloc(&d_y, HISTORY_SIZE * NX * sizeof(float)) );
// Addresses of global symbols
CudaSafeCall( cudaGetSymbolAddress((void**)&d_fkm1, gpu_lbfgs::fkm1 ) );
CudaSafeCall( cudaGetSymbolAddress((void**)&d_fk, gpu_lbfgs::fk ) );
CudaSafeCall( cudaGetSymbolAddress((void**)&d_tmp, gpu_lbfgs::tmp ) );
CudaSafeCall( cudaGetSymbolAddress((void**)&d_tmp2, gpu_lbfgs::tmp2 ) );
CudaSafeCall( cudaGetSymbolAddress((void**)&d_H0, gpu_lbfgs::H0 ) );
CudaSafeCall( cudaGetSymbolAddress((void**)&d_alpha, gpu_lbfgs::alpha ) );
CudaSafeCall( cudaGetSymbolAddress((void**)&d_rho, gpu_lbfgs::rho ) );
CudaSafeCall( cudaGetSymbolAddress((void**)&d_step, gpu_lbfgs::step ) );
CudaSafeCall( cudaGetSymbolAddress((void**)&d_status, gpu_lbfgs::status) );
// Initialize
#ifdef LBFGS_TIMING
timer_evals.start();
#endif
m_costFunction.f_gradf(d_x, d_fk, d_gk);
CudaCheckError();
cudaDeviceSynchronize();
#ifdef LBFGS_TIMING
timer_evals.stop();
#endif
size_t evals = 1;
status stat = LBFGS_REACHED_MAX_ITER;
#ifdef LBFGS_VERBOSE
std::cout << "lbfgs::gpu_lbfgs()" << std::endl;
#endif
// H0 = 1.0f;
const float one = 1.0f;
CudaSafeCall( cudaMemcpy(d_H0, &one, sizeof(float), cudaMemcpyHostToDevice) );
size_t it;
for (it = 0; it < m_maxIter; ++it)
{
#ifdef LBFGS_VERBOSE
float h_y;
CudaSafeCall( cudaMemcpy(&h_y, d_fk, sizeof(float), cudaMemcpyDeviceToHost) );
float gknorm2;
dispatch_dot(NX, &gknorm2, d_gk, d_gk, false);
printf("f(x) = % 12e, ||grad||_2 = % 12e\n", h_y, std::sqrt(gknorm2));
#endif
// Check for convergence
// ---------------------
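// Stop as soon as ||g_k||^2 < eps^2 * max(||x_k||^2, 1), i.e. the gradient is small
// relative to the size of the current iterate (with a floor of 1 for small x).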
float gkNormSquared;
float xkNormSquared;
dispatch_dot(NX, &xkNormSquared, d_x, d_x, false);
dispatch_dot(NX, &gkNormSquared, d_gk, d_gk, false);
if (gkNormSquared < (m_gradientEps * m_gradientEps) * max(xkNormSquared, 1.0f))
{
stat = LBFGS_BELOW_GRADIENT_EPS;
break;
}
// Find search direction
// ---------------------
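// Standard L-BFGS two-loop recursion: start from z = -g_k, sweep backwards over the
// stored (s_i, y_i) pairs computing alpha_i = rho_i * s_i^T z and z -= alpha_i * y_i,
// scale by the initial inverse Hessian H0, then sweep forwards adding
// (alpha_i - beta_i) * s_i with beta_i = rho_i * y_i^T z, yielding z ~ -H_k * g_k.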
#ifdef LBFGS_TIMING
timer_updates.start();
#endif
const float minusOne = -1.0f;
dispatch_scale(NX, d_z, d_gk, &minusOne, false); // z = -gk
const size_t MAX_IDX = MIN(it, HISTORY_SIZE);
for (size_t i = 1; i <= MAX_IDX; ++i)
{
size_t idx = index(it - i);
dispatch_dot(NX, d_tmp, d_s + idx * NX, d_z); // tmp = sDotZ
// alpha = tmp * rho
// tmp = -alpha
update1<<<1, 1>>>(d_alpha + idx, d_tmp, d_rho + idx, d_tmp);
CudaCheckError();
cudaDeviceSynchronize();
// z += tmp * y
dispatch_axpy(NX, d_z, d_z, d_y + idx * NX, d_tmp);
}
dispatch_scale(NX, d_z, d_z, d_H0); // z = H0 * z
for (size_t i = MAX_IDX; i > 0; --i)
{
size_t idx = index(it - i);
dispatch_dot(NX, d_tmp, d_y + idx * NX, d_z); // tmp = yDotZ
// beta = rho * tmp
// tmp = alpha - beta
update2<<<1, 1>>>(d_tmp, d_rho + idx, d_tmp, d_alpha + idx);
CudaCheckError();
cudaDeviceSynchronize();
// z += tmp * s
dispatch_axpy(NX, d_z, d_z, d_s + idx * NX, d_tmp);
}
#ifdef LBFGS_TIMING
timer_updates.stop();
timer_linesearch.start();
#endif
CudaSafeCall( cudaMemcpy(d_fkm1, d_fk, 1 * sizeof(float), cudaMemcpyDeviceToDevice) ); // fkm1 = fk;
CudaSafeCall( cudaMemcpy(d_gkm1, d_gk, NX * sizeof(float), cudaMemcpyDeviceToDevice) ); // gkm1 = gk;
timer *t_evals = NULL, *t_linesearch = NULL;
#ifdef LBFGS_TIMING
t_evals = &timer_evals;
t_linesearch = &timer_linesearch;
#endif
// (line search defined in linesearch_gpu.h)
if (!gpu_linesearch(d_x, d_z, d_fk, d_gk, evals, d_gkm1, d_fkm1, stat, d_step,
m_maxEvals, t_evals, t_linesearch, d_tmp, d_status))
{
break;
}
#ifdef LBFGS_TIMING
timer_linesearch.stop();
timer_updates.start();
#endif
// Update s, y, rho and H_0
// ------------------------
// s = x_k - x_{k-1} = step * z
// y = g_k - g_{k-1}
// rho = 1 / (y^T s)
// H_0 = (y^T s) / (y^T y)
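// The H0 = (y^T s) / (y^T y) scaling is the usual choice of initial inverse Hessian in
// L-BFGS (see Nocedal & Wright, Numerical Optimization); update3 below only applies it
// when y^T y is not too small, guarding against division by (near) zero.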
float *d_curS = d_s + index(it) * NX;
float *d_curY = d_y + index(it) * NX;
dispatch_scale(NX, d_curS, d_z, d_step); // s = step * z
dispatch_axpy (NX, d_curY, d_gk, d_gkm1, &minusOne, false); // y = gk - gkm1
dispatch_dot(NX, d_tmp, d_curY, d_curS); // tmp = yDotS
dispatch_dot(NX, d_tmp2, d_curY, d_curY); // tmp2 = yDotY
// rho = 1 / tmp
// if (tmp2 > 1e-5)
// H0 = tmp / tmp2
update3<<<1, 1>>>(d_rho + index(it), d_H0, d_tmp, d_tmp2);
CudaCheckError();
cudaDeviceSynchronize();
#ifdef LBFGS_TIMING
timer_updates.stop();
#endif
}
// Deallocations
CudaSafeCall( cudaFree(d_gk) );
CudaSafeCall( cudaFree(d_gkm1) );
CudaSafeCall( cudaFree(d_z) );
CudaSafeCall( cudaFree(d_s) );
CudaSafeCall( cudaFree(d_y) );
#ifdef LBFGS_TIMING
timer_total.stop();
timer_total.saveMeasurement();
timer_evals.saveMeasurement();
timer_updates.saveMeasurement();
timer_linesearch.saveMeasurement();
#endif
#ifdef LBFGS_VERBOSE
std::cout << "Number of iterations: " << it << std::endl;
std::cout << "Number of function/gradient evaluations: " << evals << std::endl;
std::cout << "Reason for termination: " << statusToString(stat) << std::endl;
#endif
return stat;
}
// Vector operations
// -----------------
void lbfgs::dispatch_axpy(const size_t n, float *d_dst, const float *d_y, const float *d_x, const float *a, bool aDevicePointer) const
{
const cublasPointerMode_t mode = aDevicePointer ? CUBLAS_POINTER_MODE_DEVICE
: CUBLAS_POINTER_MODE_HOST;
CublasSafeCall( cublasSetPointerMode(m_cublasHandle, mode) );
if (d_dst != d_y)
CudaSafeCall( cudaMemcpy(d_dst, d_y, n * sizeof(float), cudaMemcpyDeviceToDevice) );
CublasSafeCall( cublasSaxpy(m_cublasHandle, int(n), a, d_x, 1, d_dst, 1) );
}
void lbfgs::dispatch_scale(const size_t n, float *d_dst, const float *d_x, const float *a, bool aDevicePointer) const
{
const cublasPointerMode_t mode = aDevicePointer ? CUBLAS_POINTER_MODE_DEVICE
: CUBLAS_POINTER_MODE_HOST;
CublasSafeCall( cublasSetPointerMode(m_cublasHandle, mode) );
if (d_dst != d_x)
CudaSafeCall( cudaMemcpy(d_dst, d_x, n * sizeof(float), cudaMemcpyDeviceToDevice) );
CublasSafeCall( cublasSscal(m_cublasHandle, int(n), a, d_dst, 1) );
}
void lbfgs::dispatch_dot(const size_t n, float *dst, const float *d_x, const float *d_y, bool dstDevicePointer) const
{
const cublasPointerMode_t mode = dstDevicePointer ? CUBLAS_POINTER_MODE_DEVICE
: CUBLAS_POINTER_MODE_HOST;
CublasSafeCall( cublasSetPointerMode(m_cublasHandle, mode) );
CublasSafeCall( cublasSdot(m_cublasHandle, int(n), d_x, 1, d_y, 1, dst) );
}
// -----------------
// Device / kernel functions
// -------------------------
namespace gpu_lbfgs
{
__global__ void update1(float *alpha_out, const float *sDotZ, const float *rho, float *minusAlpha_out)
{
*alpha_out = *sDotZ * *rho;
*minusAlpha_out = -*alpha_out;
}
__global__ void update2(float *alphaMinusBeta_out, const float *rho, const float *yDotZ, const float *alpha)
{
const float beta = *rho * *yDotZ;
*alphaMinusBeta_out = *alpha - beta;
}
__global__ void update3(float *rho_out, float *H0_out, const float *yDotS, const float *yDotY)
{
*rho_out = 1.0f / *yDotS;
if (*yDotY > 1e-5)
*H0_out = *yDotS / *yDotY;
}
}
// ------------------
namespace arboretum {
namespace core {
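// hist_gain_kernel: grid-stride loop over candidate split positions i inside the
// concatenated per-node histograms (hist_size bins per segment/node). The left-child
// sum and count are read off the histogram prefix sums relative to the parent's
// offsets, the split gain is evaluated with gain_func, and the best (gain, position)
// pair per segment is kept via an atomic max on res[segment].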
template <typename SUM_T>
__global__ void hist_gain_kernel(
const SUM_T *const __restrict__ hist_prefix_sum,
const unsigned *const __restrict__ hist_prefix_count,
const SUM_T *const __restrict__ parent_sum,
const unsigned int *const __restrict__ parent_count, const unsigned hist_size,
const size_t n, const GainFunctionParameters parameters, my_atomics *res) {
for (unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; i < n;
i += gridDim.x * blockDim.x) {
unsigned segment = i / hist_size;
const SUM_T left_sum_offset = parent_sum[segment];
const SUM_T left_sum_value = hist_prefix_sum[i] - left_sum_offset;
const unsigned left_count_offset = parent_count[segment];
const unsigned left_count_value = hist_prefix_count[i] - left_count_offset;
const SUM_T total_sum = parent_sum[segment + 1] - left_sum_offset;
const unsigned total_count = parent_count[segment + 1] - left_count_offset;
const float gain = gain_func(left_sum_value, total_sum, left_count_value,
total_count, parameters);
if (gain > 0.0) {
updateAtomicMax(&(res[segment].ulong), gain, i);
}
}
}
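// hist_apply_candidates: grid-stride loop over tree nodes i. If this feature's best
// candidate split improves on the currently stored (gain, feature) pair, the kernel
// records the new left sum/count and threshold, relabels the node's rows with
// apply_split on a temporary stream, and writes the child boundaries into the
// prefix-sum arrays for the next level. Empty nodes and the explicit no-split case
// simply propagate the parent's boundaries.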
template <typename NODE_T, typename SUM_T, typename BIN_T>
__global__ void hist_apply_candidates(
my_atomics *gain_feature, SUM_T *sum, unsigned *split, unsigned *count,
unsigned *node_size_prefix_sum_next, SUM_T *node_sum_prefix_sum_next,
const my_atomics *candidates, const SUM_T *split_sum,
const unsigned *split_count, const BIN_T *fvalue, NODE_T *row2Node,
const unsigned *node_size_prefix_sum, const SUM_T *node_sum_prefix_sum,
const int feature, const unsigned level, const unsigned hist_size,
const unsigned n) {
for (unsigned i = blockDim.x * blockIdx.x + threadIdx.x; i < n;
i += gridDim.x * blockDim.x) {
const unsigned node_start = node_size_prefix_sum[i];
const unsigned node_end = node_size_prefix_sum[i + 1];
const unsigned node_size = node_end - node_start;
const float gain_ = candidates[i].floats[0];
const unsigned idx = candidates[i].ints[1];
const unsigned split_count_value = split_count[idx];
const SUM_T node_start_sum = node_sum_prefix_sum[i];
const SUM_T node_end_sum = node_sum_prefix_sum[i + 1];
if (node_size > 0) {
const my_atomics current = gain_feature[i];
if (current.Gain() < gain_ ||
(current.Gain() == gain_ && feature < current.Feature())) {
my_atomics val;
val.floats[0] = gain_;
val.ints[1] = feature;
gain_feature[i] = val;
sum[i] = split_sum[idx] - node_start_sum;
count[i] = split_count_value - node_start;
BIN_T threshold = idx % hist_size;
split[i] = threshold;
unsigned block_size = MAX_THREADS > node_size ? node_size : MAX_THREADS;
unsigned grid_size =
unsigned((node_size + block_size - 1) / block_size);
cudaStream_t s;
DEVICE_OK(cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking));
apply_split<NODE_T, BIN_T><<<grid_size, block_size, 0, s>>>(
row2Node + node_start, fvalue + node_start, threshold + 1, level,
node_size);
DEVICE_OK(cudaDeviceSynchronize());
DEVICE_OK(cudaStreamDestroy(s));
node_size_prefix_sum_next[2 * i + 1] = split_count_value;
node_size_prefix_sum_next[2 * i + 2] = node_end;
node_sum_prefix_sum_next[2 * i + 1] = split_sum[idx];
node_sum_prefix_sum_next[2 * i + 2] = node_end_sum;
} else if (current.Gain() == 0 &&
current.Feature() == -1) { // no split, all to left
sum[i] = node_end_sum - node_start_sum;
split[i] = (unsigned)-1;
count[i] = node_size;
node_size_prefix_sum_next[2 * i + 1] =
node_size_prefix_sum_next[2 * i + 2] = node_end;
node_sum_prefix_sum_next[2 * i + 1] =
node_sum_prefix_sum_next[2 * i + 2] = node_end_sum;
}
// ignore non-optimal splits
} else {
node_size_prefix_sum_next[2 * i + 1] =
node_size_prefix_sum_next[2 * i + 2] = node_end;
node_sum_prefix_sum_next[2 * i + 1] =
node_sum_prefix_sum_next[2 * i + 2] = node_end_sum;
}
}
}
template <typename NODE_T, typename BIN_T, typename GRAD_T, typename SUM_T>
HistTreeGrower<NODE_T, BIN_T, GRAD_T, SUM_T>::HistTreeGrower(
const size_t size, const unsigned depth, const unsigned hist_size,
const BestSplit<SUM_T> *best, Histogram<SUM_T> *features_histogram,
const InternalConfiguration *config)
: BaseGrower<NODE_T, BIN_T, GRAD_T, SUM_T>(size, depth, best,
features_histogram, config),
hist_size(hist_size) {
assert(hist_size > 0);
unsigned index = hist_size;
hist_size_bits = 1;
while (index >>= 1) ++hist_size_bits;
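// hist_size_bits ends up as floor(log2(hist_size)) + 1, e.g. hist_size = 256 gives 9.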
const size_t total_hist_size = hist_size * 2 * ((1 << depth) - 1);
this->sum.resize(total_hist_size);
hist_prefix_sum.resize(total_hist_size);
hist_bin_count.resize(total_hist_size);
hist_prefix_count.resize(total_hist_size);
// TODO: fix BIN_TYPE
cudaFuncSetCacheConfig(hist_sum_node<SUM_T, GRAD_T, BIN_T>,
cudaFuncCachePreferShared);
cudaFuncSetCacheConfig(hist_sum_multi_node<SUM_T, GRAD_T, BIN_T, true>,
cudaFuncCachePreferShared);
cudaFuncSetCacheConfig(hist_sum_multi_node<SUM_T, GRAD_T, BIN_T, false>,
cudaFuncCachePreferShared);
size_t temp_storage_bytes = 0;
PartitioningLeafs<NODE_T> conversion_op(0);
cub::TransformInputIterator<bool, PartitioningLeafs<NODE_T>, NODE_T *>
partition_itr((NODE_T *)nullptr, conversion_op);
cub::DiscardOutputIterator<unsigned> discard_itr;
OK(cub::DevicePartition::Flagged(NULL, temp_storage_bytes, (GRAD_T *)nullptr,
partition_itr, (GRAD_T *)nullptr,
discard_itr, size));
this->temp_bytes_allocated =
std::max(this->temp_bytes_allocated, temp_storage_bytes);
temp_storage_bytes = 0;
OK(cub::DevicePartition::Flagged(NULL, temp_storage_bytes, (NODE_T *)nullptr,
partition_itr, (NODE_T *)nullptr,
discard_itr, size));
this->temp_bytes_allocated =
std::max(this->temp_bytes_allocated, temp_storage_bytes);
temp_storage_bytes = 0;
OK(cub::DevicePartition::Flagged(NULL, temp_storage_bytes, (GRAD_T *)nullptr,
partition_itr, (GRAD_T *)nullptr,
discard_itr, size / (1 << this->depth)));
this->temp_bytes_allocated = std::max(
this->temp_bytes_allocated, temp_storage_bytes * (1 << this->depth));
temp_storage_bytes = 0;
OK(cub::DevicePartition::Flagged(NULL, temp_storage_bytes, (NODE_T *)nullptr,
partition_itr, (NODE_T *)nullptr,
discard_itr, size / (1 << this->depth)));
this->temp_bytes_allocated = std::max(
this->temp_bytes_allocated, temp_storage_bytes * (1 << this->depth));
temp_storage_bytes = 0;
cub::Sum sum_op;
OK(cub::DeviceScan::InclusiveScan(NULL, temp_storage_bytes, (SUM_T *)nullptr,
(SUM_T *)nullptr, sum_op,
(1 << this->depth) * this->hist_size));
this->temp_bytes_allocated =
std::max(this->temp_bytes_allocated, temp_storage_bytes);
temp_storage_bytes = 0;
OK(cub::DeviceScan::InclusiveSum(NULL, temp_storage_bytes,
(unsigned *)nullptr, (unsigned *)nullptr,
(1 << this->depth) * this->hist_size));
this->temp_bytes_allocated =
std::max(this->temp_bytes_allocated, temp_storage_bytes);
OK(cudaMalloc(&this->temp_bytes, this->temp_bytes_allocated));
}
template <typename NODE_T, typename BIN_T, typename GRAD_T, typename SUM_T>
void HistTreeGrower<NODE_T, BIN_T, GRAD_T, SUM_T>::HistSum(
SUM_T *sum, unsigned *bin_count, const SUM_T *hist_sum_parent,
const unsigned *hist_count_parent, const GRAD_T *grad,
const unsigned *node_size, const BIN_T *fvalue, const unsigned hist_size_bits,
const unsigned hist_size, const unsigned size, const bool use_trick,
cudaStream_t stream) {
unsigned min_grid_size = 256;
int blocks_per_node = 4;
while ((size << blocks_per_node) < min_grid_size) blocks_per_node++;
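// Each node is covered by 2^blocks_per_node blocks; blocks_per_node grows until the
// total grid size (size << blocks_per_node, i.e. size * 2^blocks_per_node) reaches
// min_grid_size.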
constexpr unsigned blockSize = HIST_SUM_BLOCK_DIM;
const unsigned gridSize = size * (1 << blocks_per_node);
if (use_trick) {
hist_sum_multi_node<SUM_T, GRAD_T, BIN_T, true>
<<<gridSize / 2, blockSize, 0, stream>>>(
sum, bin_count, hist_sum_parent, hist_count_parent, grad, node_size,
fvalue, hist_size, hist_size_bits, blocks_per_node);
const unsigned block_size = 1024;
const unsigned grid_size =
(hist_size * size / 2 + block_size - 1) / block_size;
update_multi_node<SUM_T><<<grid_size, block_size, 0, stream>>>(
sum, bin_count, hist_sum_parent, hist_count_parent, sum, bin_count,
node_size, hist_size, hist_size * size / 2);
} else {
hist_sum_multi_node<SUM_T, GRAD_T, BIN_T, false>
<<<gridSize, blockSize, 0, stream>>>(
sum, bin_count, hist_sum_parent, hist_count_parent, grad, node_size,
fvalue, hist_size, hist_size_bits, blocks_per_node);
}
}
template <typename NODE_T, typename BIN_T, typename GRAD_T, typename SUM_T>
void HistTreeGrower<NODE_T, BIN_T, GRAD_T, SUM_T>::HistSumStatic(
SUM_T *sum, unsigned *bin_count, const SUM_T *hist_sum_parent,
const unsigned *hist_count_parent, const GRAD_T *grad,
const unsigned *node_size, const BIN_T *fvalue, const unsigned hist_size_bits,
const unsigned hist_size, const unsigned size, const bool use_trick,
cudaStream_t stream) {
if (use_trick) {
assert(size % 2 == 0);
for (unsigned i = 0; i < size / 2; ++i) {
unsigned left_segment_id = i * 2;
unsigned right_segment_id = i * 2 + 1;
unsigned smaller_segment_id = right_segment_id;
unsigned larger_segment_id = left_segment_id;
if (node_size[left_segment_id + 1] - node_size[left_segment_id] <=
node_size[right_segment_id + 1] - node_size[right_segment_id]) {
smaller_segment_id = left_segment_id;
larger_segment_id = right_segment_id;
}
unsigned segment_start = node_size[smaller_segment_id];
unsigned segment_size =
node_size[smaller_segment_id + 1] - node_size[smaller_segment_id];
if (segment_size != 0)
HistTreeGrower<NODE_T, BIN_T, GRAD_T, SUM_T>::HistSumSingleNode(
sum + smaller_segment_id * hist_size,
bin_count + smaller_segment_id * hist_size, grad + segment_start,
node_size + smaller_segment_id, fvalue + segment_start,
hist_size_bits, segment_size, stream);
const unsigned block_size = std::min(unsigned(1024), hist_size);
const unsigned grid_size = (hist_size + block_size - 1) / block_size;
update<SUM_T><<<grid_size, block_size, 0, stream>>>(
sum + larger_segment_id * hist_size,
bin_count + larger_segment_id * hist_size,
hist_sum_parent + i * hist_size, hist_count_parent + i * hist_size,
sum + smaller_segment_id * hist_size,
bin_count + smaller_segment_id * hist_size, hist_size);
}
} else {
for (unsigned i = 0; i < size; ++i) {
unsigned segment_start = node_size[i];
unsigned segment_size = node_size[i + 1] - node_size[i];
if (segment_size != 0)
HistTreeGrower<NODE_T, BIN_T, GRAD_T, SUM_T>::HistSumSingleNode(
sum + i * hist_size, bin_count + i * hist_size, grad + segment_start,
node_size + i, fvalue + segment_start, hist_size_bits, segment_size,
stream);
}
}
}
template <typename NODE_T, typename BIN_T, typename GRAD_T, typename SUM_T>
void HistTreeGrower<NODE_T, BIN_T, GRAD_T, SUM_T>::HistSumSingleNode(
SUM_T *sum, unsigned *bin_count, const GRAD_T *grad,
const unsigned *node_size, const BIN_T *fvalue, const unsigned hist_size_bits,
const unsigned size, cudaStream_t stream) {
constexpr unsigned blockSize = HIST_SUM_BLOCK_DIM;
constexpr unsigned items_per_thread = ITEMS_PER_THREAD_FOR_TYPE<GRAD_T, 96>();
const unsigned gridSize = (size + (blockSize * items_per_thread) - 1) /
(blockSize * items_per_thread);
hist_sum_node<SUM_T, GRAD_T, BIN_T><<<gridSize, blockSize, 0, stream>>>(
sum, bin_count, grad, fvalue, hist_size_bits, 0, size);
}
template <typename NODE_T, typename BIN_T, typename GRAD_T, typename SUM_T>
void HistTreeGrower<NODE_T, BIN_T, GRAD_T, SUM_T>::HistSumNaive(
SUM_T *sum, unsigned *bin_count, const GRAD_T *grad,
const unsigned *node_size, const BIN_T *fvalue,
const unsigned hist_size, const unsigned size, cudaStream_t stream) {
constexpr unsigned blockSize = 1024;
const unsigned gridSize = (size + blockSize - 1) / blockSize;
hist_sum<SUM_T, GRAD_T><<<gridSize, blockSize, 0, stream>>>(
sum, bin_count, grad, node_size, fvalue, hist_size, size);
}
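// ProcessDenseFeature pipeline for a single dense feature at one tree level:
// zero the per-node result and histogram buffers, stage the binned feature
// values on the device if they only exist on the host, re-partition them by
// node (for levels > 0), accumulate per-node gradient/count histograms
// (reusing the parent histograms when the subtraction trick applies), run
// inclusive prefix scans over the histogram sums and counts, and evaluate the
// split gain of every bin with hist_gain_kernel.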
template <typename NODE_T, typename BIN_T, typename GRAD_T, typename SUM_T>
void HistTreeGrower<NODE_T, BIN_T, GRAD_T, SUM_T>::ProcessDenseFeature(
const device_vector<unsigned> &partitioning_index,
const device_vector<NODE_T> &row2Node, const device_vector<GRAD_T> &grad_d,
device_vector<BIN_T> &fvalue_d, BIN_T *fvalue_h,
const device_vector<SUM_T> &parent_node_sum,
const device_vector<unsigned int> &parent_node_count,
const unsigned char fvalue_size, const unsigned level, const unsigned depth,
const GainFunctionParameters gain_param, const bool partition_only,
const int fid) {
const unsigned length = 1 << level;
OK(cudaMemsetAsync(thrust::raw_pointer_cast(this->result_d.data()), 0,
length * sizeof(my_atomics), this->stream));
OK(cudaMemsetAsync(thrust::raw_pointer_cast(this->sum.data()), 0,
this->hist_size * length * sizeof(SUM_T), this->stream));
OK(cudaMemsetAsync(thrust::raw_pointer_cast(this->hist_bin_count.data()), 0,
this->hist_size * length * sizeof(unsigned),
this->stream));
BIN_T *fvalue_tmp = NULL;
if (!fvalue_d.empty()) {
fvalue_tmp = thrust::raw_pointer_cast(fvalue_d.data());
} else {
OK(cudaMemcpyAsync(thrust::raw_pointer_cast(this->fvalue.data()), fvalue_h,
this->size * sizeof(BIN_T), cudaMemcpyHostToDevice,
this->stream));
fvalue_tmp = thrust::raw_pointer_cast(this->fvalue.data());
}
if (level != 0) {
this->PartitionByIndex(thrust::raw_pointer_cast(this->fvalue_dst.data()),
fvalue_tmp, partitioning_index);
OK(cudaEventRecord(this->event, this->stream));
OK(cudaStreamWaitEvent(this->copy_d2h_stream, this->event, 0));
if (fvalue_d.empty()) {
OK(cudaMemcpyAsync(fvalue_h,
thrust::raw_pointer_cast(this->fvalue_dst.data()),
this->size * sizeof(BIN_T), cudaMemcpyDeviceToHost,
this->copy_d2h_stream));
this->d_fvalue_partitioned =
thrust::raw_pointer_cast(this->fvalue_dst.data());
} else {
this->fvalue_dst.swap(fvalue_d);
this->d_fvalue_partitioned = thrust::raw_pointer_cast(fvalue_d.data());
}
} else {
this->d_fvalue_partitioned = fvalue_tmp;
}
if (partition_only) return;
if (level != 0) {
HistTreeGrower<NODE_T, BIN_T, GRAD_T, SUM_T>::HistSum(
thrust::raw_pointer_cast(this->sum.data()),
thrust::raw_pointer_cast(this->hist_bin_count.data()),
thrust::raw_pointer_cast(this->features_histogram->grad_hist[fid].data()),
thrust::raw_pointer_cast(
this->features_histogram->count_hist[fid].data()),
thrust::raw_pointer_cast(grad_d.data()),
thrust::raw_pointer_cast(parent_node_count.data()),
this->d_fvalue_partitioned, unsigned(fvalue_size), hist_size, length,
this->features_histogram->CanUseTrick(fid, level), this->stream);
} else {
HistTreeGrower<NODE_T, BIN_T, GRAD_T, SUM_T>::HistSumSingleNode(
thrust::raw_pointer_cast(this->sum.data()),
thrust::raw_pointer_cast(this->hist_bin_count.data()),
thrust::raw_pointer_cast(grad_d.data()),
thrust::raw_pointer_cast(parent_node_count.data()),
this->d_fvalue_partitioned, unsigned(fvalue_size), this->size,
this->stream);
}
cub::Sum sum_op;
OK(cub::DeviceScan::InclusiveScan(
this->temp_bytes, this->temp_bytes_allocated,
thrust::raw_pointer_cast(this->sum.data()),
thrust::raw_pointer_cast(this->hist_prefix_sum.data()), sum_op,
length * this->hist_size, this->stream));
OK(cub::DeviceScan::InclusiveSum(
this->temp_bytes, this->temp_bytes_allocated,
thrust::raw_pointer_cast(this->hist_bin_count.data()),
thrust::raw_pointer_cast(this->hist_prefix_count.data()),
length * this->hist_size, this->stream));
int grid_size = 0;
int block_size = 0;
compute1DInvokeConfig(length * this->hist_size, &grid_size, &block_size,
hist_gain_kernel<SUM_T>, 0, 1024);
hist_gain_kernel<SUM_T><<<grid_size, block_size, 0, this->stream>>>(
thrust::raw_pointer_cast(this->hist_prefix_sum.data()),
thrust::raw_pointer_cast(this->hist_prefix_count.data()),
thrust::raw_pointer_cast(parent_node_sum.data()),
thrust::raw_pointer_cast(parent_node_count.data()), this->hist_size,
length * this->hist_size, gain_param,
thrust::raw_pointer_cast(this->result_d.data()));
}
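// FindBest applies the per-node winning candidates gathered in result_d: it
// fills the BestSplit buffers (gain/feature, sums, split values and the child
// counts/sums for the next level) and, when the subtraction trick is enabled,
// caches this feature's histograms for reuse at the next level.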
template <typename NODE_T, typename BIN_T, typename GRAD_T, typename SUM_T>
void HistTreeGrower<NODE_T, BIN_T, GRAD_T, SUM_T>::FindBest(
BestSplit<SUM_T> &best, device_vector<NODE_T> &row2Node,
const device_vector<SUM_T> &parent_node_sum,
const device_vector<unsigned int> &parent_node_count, unsigned fid,
const unsigned level, const unsigned depth, const unsigned size) {
int gridSize = 0;
int blockSize = 0;
compute1DInvokeConfig(size, &gridSize, &blockSize,
hist_apply_candidates<NODE_T, SUM_T, BIN_T>);
hist_apply_candidates<NODE_T, SUM_T, BIN_T>
<<<gridSize, blockSize, 0, this->stream>>>(
thrust::raw_pointer_cast(best.gain_feature.data()),
thrust::raw_pointer_cast(best.sum.data()),
thrust::raw_pointer_cast(best.split_value.data()),
thrust::raw_pointer_cast(best.count.data()),
thrust::raw_pointer_cast(best.parent_node_count_next.data()),
thrust::raw_pointer_cast(best.parent_node_sum_next.data()),
thrust::raw_pointer_cast(this->result_d.data()),
thrust::raw_pointer_cast(this->hist_prefix_sum.data()),
thrust::raw_pointer_cast(this->hist_prefix_count.data()),
this->d_fvalue_partitioned, thrust::raw_pointer_cast(row2Node.data()),
thrust::raw_pointer_cast(parent_node_count.data()),
thrust::raw_pointer_cast(parent_node_sum.data()), fid, depth - level - 2,
this->hist_size, size);
if (this->config->use_hist_subtraction_trick) {
this->features_histogram->Update(this->sum, this->hist_bin_count, fid,
level, this->stream);
}
}
// clang-format off
/*[[[cog
import cog
for t in [('float', 'float'), ('float', 'double'), ('float2', 'float2'), ('float2', 'mydouble2')]:
for node_type in ['unsigned int', 'unsigned short', 'unsigned char']:
for bin_type in [ 'unsigned short', 'unsigned char']:
cog.outl("template class HistTreeGrower<{0}, {3}, {1}, {2}>;".format(
node_type, t[0], t[1], bin_type))
]]]*/
template class HistTreeGrower<unsigned int, unsigned short, float, float>;
template class HistTreeGrower<unsigned int, unsigned char, float, float>;
template class HistTreeGrower<unsigned short, unsigned short, float, float>;
template class HistTreeGrower<unsigned short, unsigned char, float, float>;
template class HistTreeGrower<unsigned char, unsigned short, float, float>;
template class HistTreeGrower<unsigned char, unsigned char, float, float>;
template class HistTreeGrower<unsigned int, unsigned short, float, double>;
template class HistTreeGrower<unsigned int, unsigned char, float, double>;
template class HistTreeGrower<unsigned short, unsigned short, float, double>;
template class HistTreeGrower<unsigned short, unsigned char, float, double>;
template class HistTreeGrower<unsigned char, unsigned short, float, double>;
template class HistTreeGrower<unsigned char, unsigned char, float, double>;
template class HistTreeGrower<unsigned int, unsigned short, float2, float2>;
template class HistTreeGrower<unsigned int, unsigned char, float2, float2>;
template class HistTreeGrower<unsigned short, unsigned short, float2, float2>;
template class HistTreeGrower<unsigned short, unsigned char, float2, float2>;
template class HistTreeGrower<unsigned char, unsigned short, float2, float2>;
template class HistTreeGrower<unsigned char, unsigned char, float2, float2>;
template class HistTreeGrower<unsigned int, unsigned short, float2, mydouble2>;
template class HistTreeGrower<unsigned int, unsigned char, float2, mydouble2>;
template class HistTreeGrower<unsigned short, unsigned short, float2, mydouble2>;
template class HistTreeGrower<unsigned short, unsigned char, float2, mydouble2>;
template class HistTreeGrower<unsigned char, unsigned short, float2, mydouble2>;
template class HistTreeGrower<unsigned char, unsigned char, float2, mydouble2>;
//[[[end]]] (checksum: a79a13b900ad9b058327388aff588e28)
// clang-format on
} // namespace core
} // namespace arboretum
using ::testing::ElementsAreArray;
class UpdatesTest : public ::testing::TestWithParam<::testing::tuple<FloatT, FloatT>> {
protected:
template <typename T>
T* create_storage(const FloatT value,
const size_t first_size,
const size_t second_size,
Streams* const streams) const {
T* const storage = new T(first_size, second_size, streams);
dynamic_cast<Storage<FloatT>*>(storage)->initialize_with_constant(value);
return storage;
}
FloatT scaled_regularization_lambda() const {
return std::get<0>(GetParam());
}
FloatT learning_rate() const {
return std::get<1>(GetParam());
}
};
INSTANTIATE_TEST_CASE_P(Regularization,
UpdatesTest,
::testing::Combine(
::testing::Values<FloatT>(0.0, 0.1),
::testing::Values<FloatT>(1.0, 0.5)));
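// The SGD expectations below encode the plain update
//   w <- w + learning_rate * (g - scaled_regularization_lambda * w)
// applied element-wise to parameters initialized at 5.0; the bias is updated
// without the regularization term.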
TEST_P(UpdatesTest, SGDTransformGradientUpdater) {
std::unique_ptr<TransformStorage<FloatT>> storage(
create_storage<TransformStorage<FloatT>>(
5.0, /* initial value */
8, 3, DefaultStream::get()));
SGDTransformGradientUpdater<FloatT> updater;
device_matrix<FloatT> grad_matrix(3, 8, NULL /* stream */);
to_device({1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0}, &grad_matrix);
device_matrix<FloatT> grad_bias(3, 1, NULL /* stream */);
to_device({25.0, 26.0, 27.0}, &grad_bias);
TransformStorage<FloatT>::GradientType gradient_desc = std::forward_as_tuple(
grad_matrix, grad_bias);
updater.update(storage.get(), &gradient_desc,
learning_rate(),
scaled_regularization_lambda(),
DefaultStream::get());
EXPECT_THAT(
to_host(*std::get<0>(storage->get())),
ElementsAreArray({
5.0 + learning_rate() * (1.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (2.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (3.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (4.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (5.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (6.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (7.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (8.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (9.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (10.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (11.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (12.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (13.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (14.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (15.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (16.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (17.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (18.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (19.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (20.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (21.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (22.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (23.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (24.0 - scaled_regularization_lambda() * 5.0)
}));
EXPECT_THAT(
to_host(*std::get<1>(storage->get())),
ElementsAreArray({
5.0 + learning_rate() * 25.0,
5.0 + learning_rate() * 26.0,
5.0 + learning_rate() * 27.0
}));
}
TEST_P(UpdatesTest, SGDRepresentationsGradientUpdater) {
const size_t repr_size = 4;
std::unique_ptr<RepresentationsStorage<FloatT, IdxType>> storage(
create_storage<RepresentationsStorage<FloatT, IdxType>>(
5.0, /* initial value */
10, repr_size, DefaultStream::get()));
SGDRepresentationsGradientUpdater<FloatT, IdxType> updater;
device_matrix<FloatT> first_grad_repr(repr_size, 1, NULL /* stream */);
to_device({2.0, 2.5, 3.0, 4.0}, // identity: (2.0 + 2.5 + 3.0 + 4.0) / 4 = 2.875
// squared: (4.0 + 6.25 + 9.0 + 16.0) / 4 = 35.25 / 4 = 8.8125
&first_grad_repr);
device_matrix<FloatT> second_grad_repr(repr_size, 1, NULL /* stream */);
to_device({10.0, 11.0, 12.0, 13.0}, // identity: (10.0 + 11.0 + 12.0 + 13.0) / 4 = 11.5
// squared: (100.0 + 121.0 + 144.0 + 169.0) / 4 = 534.0 / 4 = 133.5
&second_grad_repr);
device_matrix<IdxType> first_repr_idx(1, 3, NULL /* stream */);
to_device({9, 0, 1},
&first_repr_idx);
device_matrix<IdxType> second_repr_idx(1, 3, NULL /* stream */);
to_device({5, 1, 8},
&second_repr_idx);
const size_t window_size = 3;
RepresentationsStorage<FloatT, IdxType>::GradientType gradient_desc = {
std::forward_as_tuple(first_grad_repr, first_repr_idx, window_size, nullptr),
std::forward_as_tuple(second_grad_repr, second_repr_idx, window_size, nullptr),
};
updater.update(storage.get(), &gradient_desc,
learning_rate(),
scaled_regularization_lambda(),
DefaultStream::get());
EXPECT_THAT(
to_host(*storage->get()),
ElementsAreArray({5.0 + learning_rate() * (2.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (2.5 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (3.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (4.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (2.0 + 10.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (2.5 + 11.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (3.0 + 12.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (4.0 + 13.0 - scaled_regularization_lambda() * 5.0),
(1.0 - learning_rate() * scaled_regularization_lambda()) * 5.0, (1.0 - learning_rate() * scaled_regularization_lambda()) * 5.0, (1.0 - learning_rate() * scaled_regularization_lambda()) * 5.0, (1.0 - learning_rate() * scaled_regularization_lambda()) * 5.0,
(1.0 - learning_rate() * scaled_regularization_lambda()) * 5.0, (1.0 - learning_rate() * scaled_regularization_lambda()) * 5.0, (1.0 - learning_rate() * scaled_regularization_lambda()) * 5.0, (1.0 - learning_rate() * scaled_regularization_lambda()) * 5.0,
(1.0 - learning_rate() * scaled_regularization_lambda()) * 5.0, (1.0 - learning_rate() * scaled_regularization_lambda()) * 5.0, (1.0 - learning_rate() * scaled_regularization_lambda()) * 5.0, (1.0 - learning_rate() * scaled_regularization_lambda()) * 5.0,
5.0 + learning_rate() * (10.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (11.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (12.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (13.0 - scaled_regularization_lambda() * 5.0),
(1.0 - learning_rate() * scaled_regularization_lambda()) * 5.0, (1.0 - learning_rate() * scaled_regularization_lambda()) * 5.0, (1.0 - learning_rate() * scaled_regularization_lambda()) * 5.0, (1.0 - learning_rate() * scaled_regularization_lambda()) * 5.0,
(1.0 - learning_rate() * scaled_regularization_lambda()) * 5.0, (1.0 - learning_rate() * scaled_regularization_lambda()) * 5.0, (1.0 - learning_rate() * scaled_regularization_lambda()) * 5.0, (1.0 - learning_rate() * scaled_regularization_lambda()) * 5.0,
5.0 + learning_rate() * (10.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (11.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (12.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (13.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (2.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (2.5 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (3.0 - scaled_regularization_lambda() * 5.0),
5.0 + learning_rate() * (4.0 - scaled_regularization_lambda() * 5.0)}));
}
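// Adagrad expectations: the updater accumulates squared gradients (per weight
// for the transform storage, per object as the mean over the representation
// for the representations storage) and rescales every gradient by
// 1 / sqrt(accumulated squared gradient + epsilon) before the SGD step, which
// is exactly what the accumulator and gradient checks below encode.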
TEST_P(UpdatesTest, AdagradTransformGradientUpdater) {
const FloatT epsilon = 1e-6;
std::unique_ptr<TransformStorage<FloatT>> storage(
create_storage<TransformStorage<FloatT>>(
5.0, /* initial value */
8, 3, DefaultStream::get()));
AdagradTransformGradientUpdater<FloatT> updater(
8, /* source_vector_dim */
3, /* dest_vector_dim */
DefaultStream::get(),
epsilon);
device_matrix<FloatT> grad_matrix(3, 8, NULL /* stream */);
to_device({1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0}, &grad_matrix);
device_matrix<FloatT> grad_bias(3, 1, NULL /* stream */);
to_device({25.0, 26.0, 27.0}, &grad_bias);
TransformStorage<FloatT>::GradientType gradient_desc = std::forward_as_tuple(
grad_matrix, grad_bias);
updater.update(storage.get(), &gradient_desc,
learning_rate(),
scaled_regularization_lambda(),
DefaultStream::get());
EXPECT_THAT(
to_host(*updater.storages_[0]->get_data()["transform"]),
ElementsAreArray({1.0, 4.0, 9.0, 16.0, 25.0, 36.0, 49.0, 64.0,
81.0, 100.0, 121.0, 144.0, 169.0, 196.0, 225.0, 256.0,
289.0, 324.0, 361.0, 400.0, 441.0, 484.0, 529.0, 576.0}));
EXPECT_THAT(
to_host(*updater.storages_[0]->get_data()["bias"]),
ElementsAreArray({625.0,
676.0,
729.0}));
EXPECT_THAT(
to_host(grad_matrix),
ElementsAreArray({FPHelper<FloatT>::eq(1.0 / sqrt(1.0 + epsilon)),
FPHelper<FloatT>::eq(2.0 / sqrt(4.0 + epsilon)),
FPHelper<FloatT>::eq(3.0 / sqrt(9.0 + epsilon)),
FPHelper<FloatT>::eq(4.0 / sqrt(16.0 + epsilon)),
FPHelper<FloatT>::eq(5.0 / sqrt(25.0 + epsilon)),
FPHelper<FloatT>::eq(6.0 / sqrt(36.0 + epsilon)),
FPHelper<FloatT>::eq(7.0 / sqrt(49.0 + epsilon)),
FPHelper<FloatT>::eq(8.0 / sqrt(64.0 + epsilon)),
FPHelper<FloatT>::eq(9.0 / sqrt(81.0 + epsilon)),
FPHelper<FloatT>::eq(10.0 / sqrt(100.0 + epsilon)),
FPHelper<FloatT>::eq(11.0 / sqrt(121.0 + epsilon)),
FPHelper<FloatT>::eq(12.0 / sqrt(144.0 + epsilon)),
FPHelper<FloatT>::eq(13.0 / sqrt(169.0 + epsilon)),
FPHelper<FloatT>::eq(14.0 / sqrt(196.0 + epsilon)),
FPHelper<FloatT>::eq(15.0 / sqrt(225.0 + epsilon)),
FPHelper<FloatT>::eq(16.0 / sqrt(256.0 + epsilon)),
FPHelper<FloatT>::eq(17.0 / sqrt(289.0 + epsilon)),
FPHelper<FloatT>::eq(18.0 / sqrt(324.0 + epsilon)),
FPHelper<FloatT>::eq(19.0 / sqrt(361.0 + epsilon)),
FPHelper<FloatT>::eq(20.0 / sqrt(400.0 + epsilon)),
FPHelper<FloatT>::eq(21.0 / sqrt(441.0 + epsilon)),
FPHelper<FloatT>::eq(22.0 / sqrt(484.0 + epsilon)),
FPHelper<FloatT>::eq(23.0 / sqrt(529.0 + epsilon)),
FPHelper<FloatT>::eq(24.0 / sqrt(576.0 + epsilon))}));
EXPECT_THAT(
to_host(grad_bias),
ElementsAreArray({FPHelper<FloatT>::eq(25.0 / sqrt(625.0 + epsilon)),
FPHelper<FloatT>::eq(26.0 / sqrt(676.0 + epsilon)),
FPHelper<FloatT>::eq(27.0 / sqrt(729.0 + epsilon))}));
}
TEST_P(UpdatesTest, AdagradRepresentationsGradientUpdater) {
const FloatT epsilon = 1e-6;
std::unique_ptr<RepresentationsStorage<FloatT, IdxType>> storage(
create_storage<RepresentationsStorage<FloatT, IdxType>>(
5.0, /* initial value */
10, 4, DefaultStream::get()));
AdagradRepresentationsGradientUpdater<FloatT, IdxType> updater(
10, /* num_objects */
DefaultStream::get(),
epsilon);
device_matrix<FloatT> grad_repr(4, 2, NULL /* stream */);
to_device({2.0, 2.5, 3.0, 4.0, // (4.0 + 6.25 + 9.0 + 16.0) / 4 = 35.25 / 4 = 8.8125
10.0, 11.0, 12.0, 13.0}, // (100.0 + 121.0 + 144.0 + 169.0)/ 4 = 534.0 / 4 = 133.5
&grad_repr);
device_matrix<IdxType> repr_idx(1, 6, NULL /* stream */);
to_device({9, 0, 1, 5, 1, 8},
&repr_idx);
const size_t window_size = 3;
RepresentationsStorage<FloatT, IdxType>::GradientType gradient_desc = {
std::forward_as_tuple(grad_repr, repr_idx, window_size, nullptr)
};
updater.update(storage.get(), &gradient_desc,
learning_rate(),
scaled_regularization_lambda(),
DefaultStream::get());
EXPECT_THAT(
to_host(*updater.storages_[0]->get_data()["representations"]),
ElementsAreArray({8.8125, 142.3125, 0.0, 0.0, 0.0, 133.5, 0.0, 0.0, 133.5, 8.8125}));
EXPECT_THAT(
to_host(grad_repr),
ElementsAreArray({FPHelper<FloatT>::eq(2.0 / sqrt(((8.8125 + 8.8125 + 142.3125) / 3.0) + epsilon)),
FPHelper<FloatT>::eq(2.5 / sqrt(((8.8125 + 8.8125 + 142.3125) / 3.0) + epsilon)),
FPHelper<FloatT>::eq(3.0 / sqrt(((8.8125 + 8.8125 + 142.3125) / 3.0) + epsilon)),
FPHelper<FloatT>::eq(4.0 / sqrt(((8.8125 + 8.8125 + 142.3125) / 3.0) + epsilon)),
FPHelper<FloatT>::eq(10.0 / sqrt(((133.5 + 142.3125 + 133.5) / 3.0) + epsilon)),
FPHelper<FloatT>::eq(11.0 / sqrt(((133.5 + 142.3125 + 133.5) / 3.0) + epsilon)),
FPHelper<FloatT>::eq(12.0 / sqrt(((133.5 + 142.3125 + 133.5) / 3.0) + epsilon)),
FPHelper<FloatT>::eq(13.0 / sqrt(((133.5 + 142.3125 + 133.5) / 3.0) + epsilon))}));
}
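// The Adam expectations follow the standard moment updates
//   m_t = beta1 * m_{t-1} + (1 - beta1) * g
//   v_t = beta2 * v_{t-1} + (1 - beta2) * g^2
// with the rescaled gradient
//   sqrt(1 - beta2^t) / (1 - beta1^t) * m_t / (sqrt(v_t) + epsilon).
// The first block below checks step t = 1, the second block step t = 2.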
TEST_P(UpdatesTest, AdamTransformGradientUpdater) {
const FloatT epsilon = 1e-5;
const FloatT beta1 = 0.9;
const FloatT beta2 = 0.999;
std::unique_ptr<TransformStorage<FloatT>> storage(
create_storage<TransformStorage<FloatT>>(
5.0, /* initial value */
8, 3, DefaultStream::get()));
AdamTransformGradientUpdater<FloatT> updater(
8, /* source_vector_dim */
3, /* dest_vector_dim */
DefaultStream::get(),
beta1,
beta2,
epsilon);
{
device_matrix<FloatT> grad_matrix(3, 8, NULL /* stream */);
to_device({1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0}, &grad_matrix);
device_matrix<FloatT> grad_bias(3, 1, NULL /* stream */);
to_device({25.0, 26.0, 27.0}, &grad_bias);
TransformStorage<FloatT>::GradientType gradient_desc = std::forward_as_tuple(
grad_matrix, grad_bias);
updater.update(storage.get(), &gradient_desc,
learning_rate(),
scaled_regularization_lambda(),
DefaultStream::get());
const FloatT bias_correction_t1 = sqrt(1.0 - pow(beta2, 1)) / (1.0 - pow(beta1, 1));
for (size_t raw_g_idx = 0; raw_g_idx < 24; ++raw_g_idx) {
const FloatT raw_g = static_cast<FloatT>(raw_g_idx + 1);
const FloatT g = raw_g - scaled_regularization_lambda() * 5.0;
EXPECT_THAT(
to_host(grad_matrix)[raw_g_idx],
FPHelper<FloatT>::eq(
bias_correction_t1 *
((1.0 - beta1) * g) /
(sqrt((1.0 - beta2) * pow(g, 2)) + epsilon))
);
}
EXPECT_THAT(
to_host(grad_bias),
ElementsAreArray({FPHelper<FloatT>::eq(0.9999873510493572093),
FPHelper<FloatT>::eq(0.99998783754154196846),
FPHelper<FloatT>::eq(0.99998828799769046149)}));
EXPECT_THAT(
to_host(*updater.storages_[0]->get_data()["bias"]),
ElementsAreArray({FPHelper<FloatT>::eq(2.5),
FPHelper<FloatT>::eq(2.6),
FPHelper<FloatT>::eq(2.7)}));
EXPECT_THAT(
to_host(*updater.storages_[1]->get_data()["bias"]),
ElementsAreArray({FPHelper<FloatT>::eq(0.62500000000000055511),
FPHelper<FloatT>::eq(0.67600000000000060041),
FPHelper<FloatT>::eq(0.72900000000000064748)}));
}
{
const std::vector<FloatT> param_transform_before_update =
to_host(*std::get<0>(storage->get()));
device_matrix<FloatT> grad_matrix(3, 8, NULL /* stream */);
to_device({1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0}, &grad_matrix);
device_matrix<FloatT> grad_bias(3, 1, NULL /* stream */);
to_device({25.0, 26.0, 27.0}, &grad_bias);
TransformStorage<FloatT>::GradientType gradient_desc = std::forward_as_tuple(
grad_matrix, grad_bias);
updater.update(storage.get(), &gradient_desc,
learning_rate(),
scaled_regularization_lambda(),
DefaultStream::get());
const FloatT bias_correction_t2 = sqrt(1.0 - pow(beta2, 2)) / (1.0 - pow(beta1, 2));
for (size_t raw_g_idx = 0; raw_g_idx < 24; ++raw_g_idx) {
const FloatT raw_g = static_cast<FloatT>(raw_g_idx + 1);
const FloatT g = raw_g - scaled_regularization_lambda() * param_transform_before_update[raw_g_idx];
const FloatT m_t1 = (1.0 - beta1) * (raw_g - scaled_regularization_lambda() * 5.0);
const FloatT v_t1 = (1.0 - beta2) * pow(raw_g - scaled_regularization_lambda() * 5.0, 2);
EXPECT_THAT(
to_host(grad_matrix)[raw_g_idx],
FPHelper<FloatT>::eq(
bias_correction_t2 *
(beta1 * m_t1 + (1.0 - beta1) * g) /
(sqrt(beta2 * v_t1 + (1.0 - beta2) * pow(g, 2)) + epsilon))
);
}
EXPECT_THAT(
to_host(grad_bias),
ElementsAreArray({FPHelper<FloatT>::eq(1.0523589755648365962),
FPHelper<FloatT>::eq(1.0523593375842164033),
FPHelper<FloatT>::eq(1.0523596727875677015)}));
EXPECT_THAT(
to_host(*updater.storages_[0]->get_data()["bias"]),
ElementsAreArray({FPHelper<FloatT>::eq(4.9999999999999991118),
FPHelper<FloatT>::eq(5.1999999999999992895),
FPHelper<FloatT>::eq(5.3999999999999985789)}));
EXPECT_THAT(
to_host(*updater.storages_[1]->get_data()["bias"]),
ElementsAreArray({FPHelper<FloatT>::eq(1.2500000000000011102),
FPHelper<FloatT>::eq(1.3520000000000012008),
FPHelper<FloatT>::eq(1.458000000000001295)}));
}
}
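// The remaining tests exercise the Adam updater's SPARSE, DENSE_UPDATE and
// DENSE_UPDATE_DENSE_VARIANCE modes (selected via AdamConf). As the
// expectations encode, the modes differ mainly in the granularity of the
// second-moment statistics (per object vs. per element) and in whether the
// regularization term enters before or after the moment updates.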
TEST_P(UpdatesTest, AdamRepresentationsGradientUpdater_SPARSE) {
const size_t repr_size = 4;
const FloatT epsilon = 1e-5;
const FloatT beta1 = 0.9;
const FloatT beta2 = 0.999;
std::unique_ptr<RepresentationsStorage<FloatT, IdxType>> storage(
create_storage<RepresentationsStorage<FloatT, IdxType>>(
5.0, /* initial value */
5, 4, DefaultStream::get()));
AdamRepresentationsGradientUpdater<FloatT, IdxType> updater(
5, /* num_objects */
repr_size,
ParseProto<AdamConf>("mode: SPARSE"), /* conf */
DefaultStream::get(),
beta1,
beta2,
epsilon);
device_matrix<FloatT> grad_repr(repr_size, 2, NULL /* stream */);
to_device({2.0, 2.5, 3.0, 4.0, // identity: (2.0 + 2.5 + 3.0 + 4.0) / 4 = 2.875
// squared: (4.0 + 6.25 + 9.0 + 16.0) / 4 = 35.25 / 4 = 8.8125
10.0, 11.0, 12.0, 13.0}, // identity: (10.0 + 11.0 + 12.0 + 13.0) / 4 = 11.5
// squared: (100.0 + 121.0 + 144.0 + 169.0)/ 4 = 534.0 / 4 = 133.5
&grad_repr);
device_matrix<IdxType> repr_idx(1, 6, NULL /* stream */);
to_device({4, 0, 1, 3, 1, 2},
&repr_idx);
const size_t window_size = 3;
RepresentationsStorage<FloatT, IdxType>::GradientType gradient_desc = {
std::forward_as_tuple(grad_repr, repr_idx, window_size, nullptr),
};
updater.update(storage.get(), &gradient_desc,
learning_rate(),
scaled_regularization_lambda(),
DefaultStream::get());
EXPECT_THAT(
to_host(*updater.storages_[0]->get_data()["representations"]),
ElementsAreArray({(1.0 - beta1) * 2.0, (1.0 - beta1) * 2.5, (1.0 - beta1) * 3.0, (1.0 - beta1) * 4.0,
(1.0 - beta1) * (2.0 + 10.0), (1.0 - beta1) * (2.5 + 11.0), (1.0 - beta1) * (3.0 + 12.0), (1.0 - beta1) * (4.0 + 13.0),
(1.0 - beta1) * 10.0, (1.0 - beta1) * 11.0, (1.0 - beta1) * 12.0, (1.0 - beta1) * 13.0,
(1.0 - beta1) * 10.0, (1.0 - beta1) * 11.0, (1.0 - beta1) * 12.0, (1.0 - beta1) * 13.0,
(1.0 - beta1) * 2.0, (1.0 - beta1) * 2.5, (1.0 - beta1) * 3.0, (1.0 - beta1) * 4.0}));
EXPECT_THAT(
to_host(*updater.storages_[1]->get_data()["representations"]),
ElementsAreArray({(1.0 - beta2) * 8.8125,
(1.0 - beta2) * (8.8125 + 133.5),
(1.0 - beta2) * 133.5,
(1.0 - beta2) * 133.5,
(1.0 - beta2) * 8.8125}));
const FloatT bias_correction = sqrt(1.0 - pow(beta2, 1)) / (1.0 - pow(beta1, 1));
EXPECT_THAT(
to_host(grad_repr),
ElementsAreArray({FPHelper<FloatT>::eq(
bias_correction *
(((1.0 - beta1) * 2.0 + (1.0 - beta1) * 2.0 + (1.0 - beta1) * (2.0 + 10.0)) / window_size) /
(sqrt(((1.0 - beta2) * 8.8125 + (1.0 - beta2) * 8.8125 + (1.0 - beta2) * (8.8125 + 133.5)) / window_size) + epsilon)),
FPHelper<FloatT>::eq(
bias_correction *
((1.0 - beta1) * (2.5 + 2.5 + (2.5 + 11.0)) / window_size) /
(sqrt((1.0 - beta2) * (8.8125 + 8.8125 + 133.5 + 8.8125) / window_size) + epsilon)),
FPHelper<FloatT>::eq(
bias_correction *
((1.0 - beta1) * (3.0 + 3.0 + (3.0 + 12.0)) / window_size) /
(sqrt((1.0 - beta2) * (8.8125 + 8.8125 + 133.5 + 8.8125) / window_size) + epsilon)),
FPHelper<FloatT>::eq(
bias_correction *
((1.0 - beta1) * (4.0 + 4.0 + (4.0 + 13.0)) / window_size) /
(sqrt((1.0 - beta2) * (8.8125 + 8.8125 + 133.5 + 8.8125) / window_size) + epsilon)),
FPHelper<FloatT>::eq(
bias_correction *
((1.0 - beta1) * (10.0 + (2.0 + 10.0) + 10.0) / window_size) /
(sqrt((1.0 - beta2) * (133.5 + (8.8125 + 133.5) + 133.5) / window_size) + epsilon)),
FPHelper<FloatT>::eq(
bias_correction *
((1.0 - beta1) * (11.0 + (2.5 + 11.0) + 11.0) / window_size) /
(sqrt((1.0 - beta2) * (133.5 + (8.8125 + 133.5) + 133.5) / window_size) + epsilon)),
FPHelper<FloatT>::eq(
bias_correction *
((1.0 - beta1) * (12.0 + (3.0 + 12.0) + 12.0) / window_size) /
(sqrt((1.0 - beta2) * (133.5 + (8.8125 + 133.5) + 133.5) / window_size) + epsilon)),
FPHelper<FloatT>::eq(
bias_correction *
((1.0 - beta1) * (13.0 + (4.0 + 13.0) + 13.0) / window_size) /
(sqrt((1.0 - beta2) * (133.5 + (8.8125 + 133.5) + 133.5) / window_size) + epsilon)),
}));
}
TEST_P(UpdatesTest, AdamRepresentationsGradientUpdater_DENSE_UPDATE) {
const size_t repr_size = 4;
const FloatT epsilon = 1e-5;
const FloatT beta1 = 0.9;
const FloatT beta2 = 0.999;
std::unique_ptr<RepresentationsStorage<FloatT, IdxType>> storage(
create_storage<RepresentationsStorage<FloatT, IdxType>>(
5.0, /* initial value */
5, 4, DefaultStream::get()));
AdamRepresentationsGradientUpdater<FloatT, IdxType> updater(
5, /* num_objects */
repr_size,
ParseProto<AdamConf>("mode: DENSE_UPDATE"), /* conf */
DefaultStream::get(),
beta1,
beta2,
epsilon);
device_matrix<FloatT> first_grad_repr(repr_size, 1, NULL /* stream */);
to_device({2.0, 2.5, 3.0, 4.0}, // identity: (2.0 + 2.5 + 3.0 + 4.0) / 4 = 2.875
// squared: (4.0 + 6.25 + 9.0 + 16.0) / 4 = 35.25 / 4 = 8.8125
&first_grad_repr);
device_matrix<FloatT> second_grad_repr(repr_size, 1, NULL /* stream */);
to_device({10.0, 11.0, 12.0, 13.0}, // identity: (10.0 + 11.0 + 12.0 + 13.0) / 4 = 11.5
// squared: (100.0 + 121.0 + 144.0 + 169.0) / 4 = 534.0 / 4 = 133.5
&second_grad_repr);
device_matrix<IdxType> first_repr_idx(1, 3, NULL /* stream */);
to_device({4, 0, 1},
&first_repr_idx);
device_matrix<IdxType> second_repr_idx(1, 3, NULL /* stream */);
to_device({3, 1, 2},
&second_repr_idx);
const size_t window_size = 3;
RepresentationsStorage<FloatT, IdxType>::GradientType gradient_desc = {
std::forward_as_tuple(first_grad_repr, first_repr_idx, window_size, nullptr),
std::forward_as_tuple(second_grad_repr, second_repr_idx, window_size, nullptr),
};
EXPECT_THAT(
to_host(*storage->get()),
ElementsAreArray({5.0, 5.0, 5.0, 5.0,
5.0, 5.0, 5.0, 5.0,
5.0, 5.0, 5.0, 5.0,
5.0, 5.0, 5.0, 5.0,
5.0, 5.0, 5.0, 5.0}));
updater.update(storage.get(), &gradient_desc,
learning_rate(),
scaled_regularization_lambda(),
DefaultStream::get());
EXPECT_THAT(
to_host(*updater.storages_[0]->get_data()["representations"]),
ElementsAreArray({(1.0 - beta1) * 2.0, (1.0 - beta1) * 2.5, (1.0 - beta1) * 3.0, (1.0 - beta1) * 4.0,
(1.0 - beta1) * (2.0 + 10.0), (1.0 - beta1) * (2.5 + 11.0), (1.0 - beta1) * (3.0 + 12.0), (1.0 - beta1) * (4.0 + 13.0),
(1.0 - beta1) * 10.0, (1.0 - beta1) * 11.0, (1.0 - beta1) * 12.0, (1.0 - beta1) * 13.0,
(1.0 - beta1) * 10.0, (1.0 - beta1) * 11.0, (1.0 - beta1) * 12.0, (1.0 - beta1) * 13.0,
(1.0 - beta1) * 2.0, (1.0 - beta1) * 2.5, (1.0 - beta1) * 3.0, (1.0 - beta1) * 4.0}));
EXPECT_THAT(
to_host(*updater.storages_[1]->get_data()["representations"]),
ElementsAreArray({(1.0 - beta2) * 8.8125,
(1.0 - beta2) * (8.8125 + 133.5),
(1.0 - beta2) * 133.5,
(1.0 - beta2) * 133.5,
(1.0 - beta2) * 8.8125}));
const FloatT bias_correction = sqrt(1.0 - pow(beta2, 1)) / (1.0 - pow(beta1, 1));
EXPECT_THAT(
to_host(*storage->get()),
ElementsAreArray({FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * 2.0 / (sqrt((1.0 - beta2) * 8.8125) + epsilon) - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * 2.5 / (sqrt((1.0 - beta2) * 8.8125) + epsilon) - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * 3.0/ (sqrt((1.0 - beta2) * 8.8125) + epsilon) - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * 4.0 / (sqrt((1.0 - beta2) * 8.8125) + epsilon) - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * (2.0 + 10.0) / (sqrt((1.0 - beta2) * (8.8125 + 133.5)) + epsilon) - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * (2.5 + 11.0) / (sqrt((1.0 - beta2) * (8.8125 + 133.5)) + epsilon) - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * (3.0 + 12.0) / (sqrt((1.0 - beta2) * (8.8125 + 133.5)) + epsilon) - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * (4.0 + 13.0) / (sqrt((1.0 - beta2) * (8.8125 + 133.5)) + epsilon) - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * 10.0 / (sqrt((1.0 - beta2) * 133.5) + epsilon) - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * 11.0 / (sqrt((1.0 - beta2) * 133.5) + epsilon) - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * 12.0 / (sqrt((1.0 - beta2) * 133.5) + epsilon) - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * 13.0 / (sqrt((1.0 - beta2) * 133.5) + epsilon) - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * 10.0 / (sqrt((1.0 - beta2) * 133.5) + epsilon) - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * 11.0 / (sqrt((1.0 - beta2) * 133.5) + epsilon) - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * 12.0 / (sqrt((1.0 - beta2) * 133.5) + epsilon) - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * 13.0 / (sqrt((1.0 - beta2) * 133.5) + epsilon) - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * 2.0 / (sqrt((1.0 - beta2) * 8.8125) + epsilon) - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * 2.5 / (sqrt((1.0 - beta2) * 8.8125) + epsilon) - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * 3.0 / (sqrt((1.0 - beta2) * 8.8125) + epsilon) - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * 4.0 / (sqrt((1.0 - beta2) * 8.8125) + epsilon) - scaled_regularization_lambda() * 5.0))}));
}
TEST_P(UpdatesTest, AdamRepresentationsGradientUpdater_DENSE_UPDATE_DENSE_VARIANCE) {
const size_t repr_size = 4;
const FloatT epsilon = 1e-5;
const FloatT beta1 = 0.9;
const FloatT beta2 = 0.999;
std::unique_ptr<RepresentationsStorage<FloatT, IdxType>> storage(
create_storage<RepresentationsStorage<FloatT, IdxType>>(
5.0, /* initial value */
5, 4, DefaultStream::get()));
AdamRepresentationsGradientUpdater<FloatT, IdxType> updater(
5, /* num_objects */
repr_size,
ParseProto<AdamConf>("mode: DENSE_UPDATE_DENSE_VARIANCE"), /* conf */
DefaultStream::get(),
beta1,
beta2,
epsilon);
device_matrix<FloatT> first_grad_repr(repr_size, 1, NULL /* stream */);
to_device({2.0, 2.5, 3.0, 4.0}, // identity: (2.0 + 2.5 + 3.0 + 4.0) / 4 = 2.875
// squared: (4.0 + 6.25 + 9.0 + 16.0) / 4 = 35.25 / 4 = 8.8125
&first_grad_repr);
device_matrix<FloatT> second_grad_repr(repr_size, 1, NULL /* stream */);
to_device({10.0, 11.0, 12.0, 13.0}, // identity: (10.0 + 11.0 + 12.0 + 13.0) / 4 = 11.5
// squared: (100.0 + 121.0 + 144.0 + 169.0) / 4 = 534.0 / 4 = 133.5
&second_grad_repr);
device_matrix<IdxType> first_repr_idx(1, 3, NULL /* stream */);
to_device({4, 0, 1},
&first_repr_idx);
device_matrix<IdxType> second_repr_idx(1, 3, NULL /* stream */);
to_device({3, 1, 2},
&second_repr_idx);
const size_t window_size = 3;
RepresentationsStorage<FloatT, IdxType>::GradientType gradient_desc = {
std::forward_as_tuple(first_grad_repr, first_repr_idx, window_size, nullptr),
std::forward_as_tuple(second_grad_repr, second_repr_idx, window_size, nullptr),
};
EXPECT_THAT(
to_host(*storage->get()),
ElementsAreArray({5.0, 5.0, 5.0, 5.0,
5.0, 5.0, 5.0, 5.0,
5.0, 5.0, 5.0, 5.0,
5.0, 5.0, 5.0, 5.0,
5.0, 5.0, 5.0, 5.0}));
updater.update(storage.get(), &gradient_desc,
learning_rate(),
scaled_regularization_lambda(),
DefaultStream::get());
EXPECT_THAT(
to_host(*updater.storages_[0]->get_data()["representations"]),
ElementsAreArray({FPHelper<FloatT>::eq((1.0 - beta1) * (2.0 - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq((1.0 - beta1) * (2.5 - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq((1.0 - beta1) * (3.0 - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq((1.0 - beta1) * (4.0 - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq((1.0 - beta1) * (2.0 + 10.0 - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq((1.0 - beta1) * (2.5 + 11.0 - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq((1.0 - beta1) * (3.0 + 12.0 - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq((1.0 - beta1) * (4.0 + 13.0 - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq((1.0 - beta1) * (10.0 - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq((1.0 - beta1) * (11.0 - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq((1.0 - beta1) * (12.0 - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq((1.0 - beta1) * (13.0 - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq((1.0 - beta1) * (10.0 - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq((1.0 - beta1) * (11.0 - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq((1.0 - beta1) * (12.0 - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq((1.0 - beta1) * (13.0 - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq((1.0 - beta1) * (2.0 - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq((1.0 - beta1) * (2.5 - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq((1.0 - beta1) * (3.0 - scaled_regularization_lambda() * 5.0)),
FPHelper<FloatT>::eq((1.0 - beta1) * (4.0 - scaled_regularization_lambda() * 5.0))}));
EXPECT_THAT(
to_host(*updater.storages_[1]->get_data()["representations"]),
ElementsAreArray({(1.0 - beta2) * pow(2.0 - scaled_regularization_lambda() * 5.0, 2),
(1.0 - beta2) * pow(2.5 - scaled_regularization_lambda() * 5.0, 2),
(1.0 - beta2) * pow(3.0 - scaled_regularization_lambda() * 5.0, 2),
(1.0 - beta2) * pow(4.0 - scaled_regularization_lambda() * 5.0, 2),
(1.0 - beta2) * pow(2.0 + 10.0 - scaled_regularization_lambda() * 5.0, 2),
(1.0 - beta2) * pow(2.5 + 11.0 - scaled_regularization_lambda() * 5.0, 2),
(1.0 - beta2) * pow(3.0 + 12.0 - scaled_regularization_lambda() * 5.0, 2),
(1.0 - beta2) * pow(4.0 + 13.0 - scaled_regularization_lambda() * 5.0, 2),
(1.0 - beta2) * pow(10.0 - scaled_regularization_lambda() * 5.0, 2),
(1.0 - beta2) * pow(11.0 - scaled_regularization_lambda() * 5.0, 2),
(1.0 - beta2) * pow(12.0 - scaled_regularization_lambda() * 5.0, 2),
(1.0 - beta2) * pow(13.0 - scaled_regularization_lambda() * 5.0, 2),
(1.0 - beta2) * pow(10.0 - scaled_regularization_lambda() * 5.0, 2),
(1.0 - beta2) * pow(11.0 - scaled_regularization_lambda() * 5.0, 2),
(1.0 - beta2) * pow(12.0 - scaled_regularization_lambda() * 5.0, 2),
(1.0 - beta2) * pow(13.0 - scaled_regularization_lambda() * 5.0, 2),
(1.0 - beta2) * pow(2.0 - scaled_regularization_lambda() * 5.0, 2),
(1.0 - beta2) * pow(2.5 - scaled_regularization_lambda() * 5.0, 2),
(1.0 - beta2) * pow(3.0 - scaled_regularization_lambda() * 5.0, 2),
(1.0 - beta2) * pow(4.0 - scaled_regularization_lambda() * 5.0, 2)}));
const FloatT bias_correction = sqrt(1.0 - pow(beta2, 1)) / (1.0 - pow(beta1, 1));
EXPECT_THAT(
to_host(*storage->get()),
ElementsAreArray({FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * (2.0 - scaled_regularization_lambda() * 5.0) / (sqrt((1.0 - beta2) * pow(2.0 - scaled_regularization_lambda() * 5.0, 2)) + epsilon))),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * (2.5 - scaled_regularization_lambda() * 5.0) / (sqrt((1.0 - beta2) * pow(2.5 - scaled_regularization_lambda() * 5.0, 2)) + epsilon))),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * (3.0 - scaled_regularization_lambda() * 5.0) / (sqrt((1.0 - beta2) * pow(3.0 - scaled_regularization_lambda() * 5.0, 2)) + epsilon))),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * (4.0 - scaled_regularization_lambda() * 5.0) / (sqrt((1.0 - beta2) * pow(4.0 - scaled_regularization_lambda() * 5.0, 2)) + epsilon))),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * (2.0 + 10.0 - scaled_regularization_lambda() * 5.0) / (sqrt((1.0 - beta2) * pow(2.0 + 10.0 - scaled_regularization_lambda() * 5.0, 2)) + epsilon))),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * (2.5 + 11.0 - scaled_regularization_lambda() * 5.0) / (sqrt((1.0 - beta2) * pow(2.5 + 11.0 - scaled_regularization_lambda() * 5.0, 2)) + epsilon))),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * (3.0 + 12.0 - scaled_regularization_lambda() * 5.0) / (sqrt((1.0 - beta2) * pow(3.0 + 12.0 - scaled_regularization_lambda() * 5.0, 2)) + epsilon))),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * (4.0 + 13.0 - scaled_regularization_lambda() * 5.0) / (sqrt((1.0 - beta2) * pow(4.0 + 13.0 - scaled_regularization_lambda() * 5.0, 2)) + epsilon))),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * (10.0 - scaled_regularization_lambda() * 5.0) / (sqrt((1.0 - beta2) * pow(10.0 - scaled_regularization_lambda() * 5.0, 2)) + epsilon))),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * (11.0 - scaled_regularization_lambda() * 5.0) / (sqrt((1.0 - beta2) * pow(11.0 - scaled_regularization_lambda() * 5.0, 2)) + epsilon))),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * (12.0 - scaled_regularization_lambda() * 5.0) / (sqrt((1.0 - beta2) * pow(12.0 - scaled_regularization_lambda() * 5.0, 2)) + epsilon))),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * (13.0 - scaled_regularization_lambda() * 5.0) / (sqrt((1.0 - beta2) * pow(13.0 - scaled_regularization_lambda() * 5.0, 2)) + epsilon))),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * (10.0 - scaled_regularization_lambda() * 5.0) / (sqrt((1.0 - beta2) * pow(10.0 - scaled_regularization_lambda() * 5.0, 2)) + epsilon))),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * (11.0 - scaled_regularization_lambda() * 5.0) / (sqrt((1.0 - beta2) * pow(11.0 - scaled_regularization_lambda() * 5.0, 2)) + epsilon))),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * (12.0 - scaled_regularization_lambda() * 5.0) / (sqrt((1.0 - beta2) * pow(12.0 - scaled_regularization_lambda() * 5.0, 2)) + epsilon))),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * (13.0 - scaled_regularization_lambda() * 5.0) / (sqrt((1.0 - beta2) * pow(13.0 - scaled_regularization_lambda() * 5.0, 2)) + epsilon))),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * (2.0 - scaled_regularization_lambda() * 5.0) / (sqrt((1.0 - beta2) * pow(2.0 - scaled_regularization_lambda() * 5.0, 2)) + epsilon))),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * (2.5 - scaled_regularization_lambda() * 5.0) / (sqrt((1.0 - beta2) * pow(2.5 - scaled_regularization_lambda() * 5.0, 2)) + epsilon))),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * (3.0 - scaled_regularization_lambda() * 5.0) / (sqrt((1.0 - beta2) * pow(3.0 - scaled_regularization_lambda() * 5.0, 2)) + epsilon))),
FPHelper<FloatT>::eq(5.0 + learning_rate() * (bias_correction * (1.0 - beta1) * (4.0 - scaled_regularization_lambda() * 5.0) / (sqrt((1.0 - beta2) * pow(4.0 - scaled_regularization_lambda() * 5.0, 2)) + epsilon)))}));
}
namespace cuHE {
// Initialization
static uint32 **dhBuffer_; // pinned memory for ZZX-Raw conversions
void initCuHE(ZZ *coeffMod_, ZZX modulus) {
dhBuffer_ = new uint32 *[numDevices()];
for (int i=0; i<numDevices(); i++) {
CSC(cudaSetDevice(i));
CSC(cudaMallocHost(&dhBuffer_[i],
param.rawLen*param._wordsCoeff(0)*sizeof(uint32)));
for (int j=0; j<numDevices(); j++) {
if (i != j)
CSC(cudaDeviceEnablePeerAccess(j, 0));
}
}
initNtt();
initCrt(coeffMod_);
initBarrett(modulus);
}
void startAllocator() {
bootDeviceAllocator(param.numCrtPrime*param.nttLen*sizeof(uint64));
}
void stopAllocator() {
haltDeviceAllocator();
}
void multiGPUs(int num) {
setNumDevices(num);
}
int numGPUs() {
return numDevices();
}
void setParameters(int d, int p, int w, int min, int cut, int m) {
setParam(d, p, w, min, cut, m);
}
void resetParameters() {
resetParam();
}
void initRelinearization(ZZX* evalkey) {
initRelin(evalkey);
}
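// A minimal setup sketch using the entry points above (illustrative only; the
// argument names are placeholders, not recommended parameter values):
//   setParameters(d, p, w, min, cut, m);          // ring/modulus configuration
//   multiGPUs(2);                                 // number of devices to use
//   initCuHE(coeffModuli, cyclotomicPolynomial);  // NTT/CRT/Barrett setup
//   initRelinearization(evalKeys);                // if relinearization is used
//   startAllocator();                             // optional pooled allocator
//   ...                                           // cAnd/cXor/cNot, modSwitch, relin
//   stopAllocator();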
// Operations: CuCtxt & CuPtxt
void copy(CuCtxt& dst, CuCtxt src, cudaStream_t st) {
if (&dst != &src) {
dst.reset();
dst.setLevel(src.level(), src.domain(), src.device(), st);
dst.isProd(src.isProd());
CSC(cudaSetDevice(dst.device()));
if (dst.domain() == 0)
dst.zRep(src.zRep());
else if (dst.domain() == 1)
CSC(cudaMemcpyAsync(dst.rRep(), src.rRep(),
dst.rRepSize(), cudaMemcpyDeviceToDevice, st));
else if (dst.domain() == 2)
CSC(cudaMemcpyAsync(dst.cRep(), src.cRep(),
dst.cRepSize(), cudaMemcpyDeviceToDevice, st));
else if (dst.domain() == 3)
CSC(cudaMemcpyAsync(dst.nRep(), src.nRep(),
dst.nRepSize(), cudaMemcpyDeviceToDevice, st));
CSC(cudaStreamSynchronize(st));
}
}
void cAnd(CuCtxt& out, CuCtxt& in0, CuCtxt& in1, cudaStream_t st) {
if (in0.device() != in1.device()) {
cout<<"Error: Multiplication of different devices!"<<endl;
terminate();
}
if (in0.domain() != 3 || in1.domain() != 3) {
cout<<"Error: Multiplication of non-NTT domain!"<<endl;
terminate();
}
if (in0.logq() != in1.logq()) {
cout<<"Error: Multiplication of different levels!"<<endl;
terminate();
}
if (&out != &in0) {
out.reset();
out.setLevel(in0.level(), 3, in0.device(), st);
}
CSC(cudaSetDevice(out.device()));
nttMul(out.nRep(), in0.nRep(), in1.nRep(), out.logq(), out.device(), st);
out.isProd(true);
CSC(cudaStreamSynchronize(st));
}
void cAnd(CuCtxt& out, CuCtxt& inc, CuPtxt& inp, cudaStream_t st) {
if (inc.device() != inp.device()) {
cout<<"Error: Multiplication of different devices!"<<endl;
terminate();
}
if (inc.domain() != 3 || inp.domain() != 3) {
cout<<"Error: Multiplication of non-NTT domain!"<<endl;
terminate();
}
if (&out != &inc) {
out.reset();
out.setLevel(inc.level(), 3, inc.device(), st);
}
CSC(cudaSetDevice(out.device()));
nttMulNX1(out.nRep(), inc.nRep(), inp.nRep(), out.logq(), out.device(), st);
out.isProd(true);
CSC(cudaStreamSynchronize(st));
}
void cXor(CuCtxt& out, CuCtxt& in0, CuCtxt& in1, cudaStream_t st) {
if (in0.device() != in1.device()) {
cout<<"Error: Addition of different devices!"<<endl;
terminate();
}
if (in0.logq() != in1.logq()) {
cout<<"Error: Addition of different levels!"<<endl;
terminate();
}
if (in0.domain() == 2 && in1.domain() == 2) {
if (&out != &in0) {
out.reset();
out.setLevel(in0.level(), 2, in0.device(), st);
}
CSC(cudaSetDevice(out.device()));
crtAdd(out.cRep(), in0.cRep(), in1.cRep(), out.logq(), out.device(), st);
CSC(cudaStreamSynchronize(st));
}
else if (in0.domain() == 3 && in1.domain() == 3) {
if (&out != &in0) {
out.reset();
out.setLevel(in0.level(), 3, in0.device(), st);
out.isProd(in0.isProd()||in1.isProd());
}
CSC(cudaSetDevice(out.device()));
nttAdd(out.nRep(), in0.nRep(), in1.nRep(), out.logq(), out.device(), st);
CSC(cudaStreamSynchronize(st));
}
else {
cout<<"Error: Addition of non-CRT-nor-NTT domain!"<<endl;
terminate();
}
}
void cXor(CuCtxt& out, CuCtxt& in0, CuPtxt& in1, cudaStream_t st) {
if (in0.device() != in1.device()) {
cout<<"Error: Addition of different devices!"<<endl;
terminate();
}
if (in0.domain() == 2 && in1.domain() == 2) {
if (&out != &in0) {
out.reset();
out.setLevel(in0.level(), 2, in0.device(), st);
}
CSC(cudaSetDevice(out.device()));
crtAddNX1(out.cRep(), in0.cRep(), in1.cRep(), out.logq(), out.device(), st);
CSC(cudaStreamSynchronize(st));
}
else if (in0.domain() == 3 && in1.domain() == 3) {
if (&out != &in0) {
out.reset();
out.setLevel(in0.level(), 3, in0.device(), st);
out.isProd(in0.isProd()||in1.isProd());
}
CSC(cudaSetDevice(out.device()));
nttAddNX1(out.nRep(), in0.nRep(), in1.nRep(), out.logq(), out.device(), st);
CSC(cudaStreamSynchronize(st));
}
else {
cout<<"Error: Addition of non-CRT-nor-NTT domain!"<<endl;
terminate();
}
}
void cNot(CuCtxt& out, CuCtxt& in, cudaStream_t st) {
if (in.domain() != 2) {
cout<<"Error: cNot of non-CRT domain!"<<endl;
terminate();
}
if (&out != &in) {
out.reset();
out.setLevel(in.level(), in.domain(), in.device(), st);
}
CSC(cudaSetDevice(out.device()));
crtAddInt(out.cRep(), in.cRep(), (unsigned)param.modMsg-1, out.logq(),
out.device(), st);
CSC(cudaStreamSynchronize(st));
}
void moveTo(CuCtxt& tar, int dstDev, cudaStream_t st) {
if (dstDev != tar.device()) {
void *ptr;
if (tar.domain() == 1) {
CSC(cudaSetDevice(dstDev));
ptr = deviceMalloc(tar.rRepSize());
CSC(cudaSetDevice(tar.device()));
CSC(cudaMemcpyPeerAsync(ptr, dstDev, tar.rRep(), tar.device(),
tar.rRepSize(), st));
tar.rRepFree();
tar.rRep((uint32 *)ptr);
CSC(cudaStreamSynchronize(st));
}
else if (tar.domain() == 2) {
CSC(cudaSetDevice(dstDev));
ptr = deviceMalloc(tar.cRepSize());
CSC(cudaSetDevice(tar.device()));
CSC(cudaMemcpyPeerAsync(ptr, dstDev, tar.cRep(), tar.device(),
tar.cRepSize(), st));
tar.cRepFree();
tar.cRep((uint32 *)ptr);
CSC(cudaStreamSynchronize(st));
}
else if (tar.domain() == 3) {
CSC(cudaSetDevice(dstDev));
ptr = deviceMalloc(tar.nRepSize());
CSC(cudaSetDevice(tar.device()));
CSC(cudaMemcpyPeerAsync(ptr, dstDev, tar.nRep(), tar.device(),
tar.nRepSize(), st));
tar.nRepFree();
tar.nRep((uint64 *)ptr);
CSC(cudaStreamSynchronize(st));
}
tar.device(dstDev);
}
}
void copyTo(CuCtxt& dst, CuCtxt& src, int dstDev, cudaStream_t st) {
copy(dst, src, st);
moveTo(dst, dstDev, st);
}
// NTL Interface
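// mulZZX is a convenience wrapper that multiplies two NTL polynomials on the
// GPU: both operands are converted to the NTT domain at the requested level,
// multiplied with cAnd, and the product is converted back to a ZZX.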
void mulZZX(ZZX& out, ZZX in0, ZZX in1, int lvl, int dev, cudaStream_t st) {
CuCtxt cin0, cin1;
cin0.setLevel(lvl, dev, in0);
cin1.setLevel(lvl, dev, in1);
cin0.x2n(st);
cin1.x2n(st);
cAnd(cin0, cin0, cin1, st);
cin0.x2z(st);
out = cin0.zRep();
}
// @class CuPolynomial
// Constructor
CuPolynomial::CuPolynomial() {
logq_ = -1;
domain_ = -1;
device_ = -1;
clear(zRep_);
rRep_ = NULL;
cRep_ = NULL;
nRep_ = NULL;
isProd_ = 0;
}
CuPolynomial::~CuPolynomial() {
reset();
}
void CuPolynomial::reset() {
clear(zRep_);
if (rRep_ != NULL)
rRepFree();
if (cRep_ != NULL)
cRepFree();
if (nRep_ != NULL)
nRepFree();
isProd_ = 0;
logq_ = -1;
domain_ = -1;
device_ = -1;
}
// Set Methods
void CuPolynomial::logq(int val) { logq_ = val;}
void CuPolynomial::domain(int val) { domain_ = val;}
void CuPolynomial::device(int val) { device_ = val;}
void CuPolynomial::isProd(bool val) { isProd_ = val;}
void CuPolynomial::zRep(ZZX val) { zRep_ = val;}
void CuPolynomial::rRep(uint32* val) { rRep_ = val;}
void CuPolynomial::cRep(uint32* val) { cRep_ = val;}
void CuPolynomial::nRep(uint64* val) { nRep_ = val;}
// Get Methods
int CuPolynomial::logq() { return logq_;}
int CuPolynomial::device() { return device_;}
int CuPolynomial::domain() { return domain_;}
bool CuPolynomial::isProd() { return isProd_;}
ZZX CuPolynomial::zRep() { return zRep_;}
uint32 * CuPolynomial::rRep() { return rRep_;}
uint32 * CuPolynomial::cRep() { return cRep_;}
uint64 * CuPolynomial::nRep() { return nRep_;}
// Domain Conversions
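// The domain_ field encodes the current representation of the polynomial:
//   0 = ZZX (NTL polynomial held on the host in zRep_)
//   1 = RAW (packed coefficient words on the device, rRep_)
//   2 = CRT (residues modulo the CRT primes, cRep_)
//   3 = NTT (NTT-transformed residues, nRep_)
// z2r/r2z, r2c/c2r and c2n/n2c convert between neighboring domains; the x2z,
// x2r, x2c and x2n helpers chain these steps to reach a target domain from
// whatever the current domain is.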
void CuPolynomial::z2r(cudaStream_t st) {
if (domain_ != 0) {
printf("Error: Not in domain ZZX!\n");
terminate();
}
CSC(cudaSetDevice(device_));
rRepCreate(st);
for(int i=0; i<param.rawLen; i++)
BytesFromZZ((uint8 *)(dhBuffer_[device_]+i*coeffWords()),
coeff(zRep_, i), coeffWords()*sizeof(uint32));
CSC(cudaMemcpyAsync(rRep_, dhBuffer_[device_], rRepSize(),
cudaMemcpyHostToDevice, st));
cudaStreamSynchronize(st);
clear(zRep_);
domain_ = 1;
}
void CuPolynomial::r2z(cudaStream_t st) {
if (domain_ != 1) {
printf("Error: Not in domain RAW!\n");
terminate();
}
CSC(cudaSetDevice(device_));
CSC(cudaMemcpyAsync(dhBuffer_[device_], rRep_, rRepSize(),
cudaMemcpyDeviceToHost, st));
cudaStreamSynchronize(st);
clear(zRep_);
for(int i=0; i<param.modLen; i++)
SetCoeff( zRep_, i, ZZFromBytes( (uint8 *)(dhBuffer_[device_]
+i*coeffWords() ), coeffWords()*sizeof(uint32)) );
rRepFree();
domain_ = 0;
}
void CuPolynomial::r2c(cudaStream_t st) {
if (domain_ != 1) {
printf("Error: Not in domain RAW!\n");
terminate();
}
if (logq_ > param.logCrtPrime) {
CSC(cudaSetDevice(device_));
cRepCreate(st);
crt(cRep_, rRep_, logq_, device_, st);
rRepFree();
}
else {
cRep_ = rRep_;
rRep_ = NULL;
}
domain_ = 2;
}
void CuPolynomial::c2r(cudaStream_t st) {
if (domain_ != 2) {
printf("Error: Not in domain CRT!\n");
terminate();
}
if (logq_ > param.logCrtPrime) {
CSC(cudaSetDevice(device_));
rRepCreate(st);
icrt(rRep_, cRep_, logq_, device_, st);
cRepFree();
}
else {
rRep_ = cRep_;
cRep_ = NULL;
}
domain_ = 1;
}
void CuPolynomial::c2n(cudaStream_t st) {
if (domain_ != 2) {
printf("Error: Not in domain CRT!\n");
terminate();
}
CSC(cudaSetDevice(device_));
nRepCreate(st);
ntt(nRep_, cRep_, logq_, device_, st);
cRepFree();
domain_ = 3;
}
void CuPolynomial::n2c(cudaStream_t st) {
if (domain_ != 3) {
printf("Error: Not in domain NTT!\n");
terminate();
}
CSC(cudaSetDevice(device_));
cRepCreate(st);
if (isProd_) {
inttMod(cRep_, nRep_, logq_, device_, st);
}
else {
intt(cRep_, nRep_, logq_, device_, st);
}
isProd_ = false;
nRepFree();
domain_ = 2;
}
void CuPolynomial::x2z(cudaStream_t st) {
if (domain_ == 0)
return;
else if (domain_ == 1)
r2z(st);
else if (domain_ == 2) {
c2r(st);
r2z(st);
}
else if (domain_ == 3) {
n2c(st);
c2r(st);
r2z(st);
}
}
void CuPolynomial::x2r(cudaStream_t st) {
if (domain_ == 1)
return;
else if (domain_ == 0)
z2r(st);
else if (domain_ == 2)
c2r(st);
else if (domain_ == 3) {
n2c(st);
c2r(st);
}
}
void CuPolynomial::x2c(cudaStream_t st) {
if (domain_ == 2)
return;
else if (domain_ == 0) {
z2r(st);
r2c(st);
}
else if (domain_ == 1)
r2c(st);
else if (domain_ == 3)
n2c(st);
}
void CuPolynomial::x2n(cudaStream_t st) {
if (domain_ == 3)
return;
else if (domain_ == 0) {
z2r(st);
r2c(st);
c2n(st);
}
else if (domain_ == 1) {
r2c(st);
c2n(st);
}
else if (domain_ == 2)
c2n(st);
}
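// Minimal usage sketch (not part of the original library): shows how the x2*
// helpers above move a polynomial into the NTT domain and back to ZZX from
// whichever domain it currently holds. The helper name is hypothetical and the
// body only calls the conversion routines defined in this file.
static inline void cuPolynomialNttRoundTrip(CuPolynomial& p, cudaStream_t st) {
p.x2n(st); // routes through z2r -> r2c -> c2n as needed
// ... NTT-domain arithmetic (e.g. coefficient-wise products) would go here ...
p.x2z(st); // n2c -> c2r -> r2z back to the ZZX representation
}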
// Memory management
void CuPolynomial::rRepCreate(cudaStream_t st) {
CSC(cudaSetDevice(device_));
if (deviceAllocatorIsOn())
rRep_ = (uint32 *)deviceMalloc(param.numCrtPrime*param.nttLen*sizeof(uint64));
else
CSC(cudaMalloc(&rRep_, rRepSize()));
CSC(cudaMemsetAsync(rRep_, 0, rRepSize(), st));
}
void CuPolynomial::cRepCreate(cudaStream_t st) {
CSC(cudaSetDevice(device_));
if (deviceAllocatorIsOn())
cRep_ = (uint32 *)deviceMalloc(param.numCrtPrime*param.nttLen*sizeof(uint64));
else
CSC(cudaMalloc(&cRep_, cRepSize()));
CSC(cudaMemsetAsync(cRep_, 0, cRepSize(), st));
}
void CuPolynomial::nRepCreate(cudaStream_t st) {
CSC(cudaSetDevice(device_));
if (deviceAllocatorIsOn())
nRep_ = (uint64 *)deviceMalloc(param.numCrtPrime*param.nttLen*sizeof(uint64));
else
CSC(cudaMalloc(&nRep_, nRepSize()));
CSC(cudaMemsetAsync(nRep_, 0, nRepSize(), st));
}
void CuPolynomial::rRepFree() {
CSC(cudaSetDevice(device_));
if (deviceAllocatorIsOn())
deviceFree(rRep_);
else
CSC(cudaFree(rRep_));
rRep_ = NULL;
}
void CuPolynomial::cRepFree() {
CSC(cudaSetDevice(device_));
if (deviceAllocatorIsOn())
deviceFree(cRep_);
else
CSC(cudaFree(cRep_));
cRep_ = NULL;
}
void CuPolynomial::nRepFree() {
CSC(cudaSetDevice(device_));
if (deviceAllocatorIsOn())
deviceFree(nRep_);
else
CSC(cudaFree(nRep_));
nRep_ = NULL;
}
// Utilities
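// coeffWords() is the number of 32-bit words needed per coefficient of a
// logq_-bit modulus, i.e. ceil(logq_ / 32); e.g. logq_ = 125 gives
// (125 + 31) / 32 = 4 words. rRepSize() is rawLen coefficients of that width.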
int CuPolynomial::coeffWords() { return (logq_+31)/32;}
size_t CuPolynomial::rRepSize() { return param.rawLen*coeffWords()*sizeof(uint32);}
// @class CuCtxt
// Set and Get Methods
void CuCtxt::setLevel(int lvl, int domain, int device, cudaStream_t st) {
level_ = lvl;
logq_ = param._logCoeff(lvl);
domain_ = domain;
device_ = device;
if (domain_ == 0)
clear (zRep_);
else if (domain_ == 1)
rRepCreate(st);
else if (domain_ == 2)
cRepCreate(st);
else if (domain_ == 3)
nRepCreate(st);
}
void CuCtxt::setLevel(int lvl, int device, ZZX val) {
level_ = lvl;
logq_ = param._logCoeff(lvl);
domain_ = 0;
device_ = device;
zRep_ = val;
}
int CuCtxt::level() { return level_;}
// Noise Control
void CuCtxt::modSwitch(cudaStream_t st) {
if (logq_ < param.logCoeffMin+param.logCoeffCut) {
printf("Error: Cannot do modSwitch on last level!\n");
terminate();
}
x2c();
CSC(cudaSetDevice(device_));
crtModSwitch(cRep_, cRep_, logq_, device_, st);
CSC(cudaStreamSynchronize(st));
logq_ -= param.logCoeffCut;
level_ ++;
}
void CuCtxt::modSwitch(int lvl, cudaStream_t st) {
if (lvl < level_ || lvl >= param.depth) {
printf("Error: ModSwitch to unavailable level!\n");
terminate();
}
else if (lvl == level_)
return;
x2c();
CSC(cudaSetDevice(device_));
while (lvl > level_) {
crtModSwitch(cRep_, cRep_, logq_, device_, st);
logq_ -= param.logCoeffCut;
level_++; // advance one level per switch, matching the single-level modSwitch above
}
CSC(cudaStreamSynchronize(st));
}
void CuCtxt::relin(cudaStream_t st) {
CSC(cudaSetDevice(device_));
x2r();
nRepCreate(st);
relinearization(nRep_, rRep_, level_, device_, st);
CSC(cudaStreamSynchronize(st));
rRepFree();
isProd_ = true;
domain_ = 3;
n2c();
CSC(cudaStreamSynchronize(st));
}
size_t CuCtxt::cRepSize() { return param._numCrtPrime(level_)*param.crtLen*sizeof(uint32);}
size_t CuCtxt::nRepSize() { return param._numCrtPrime(level_)*param.nttLen*sizeof(uint64);}
// @class CuPtxt
void CuPtxt::setLogq(int logq, int domain, int device, cudaStream_t st) {
logq_ = logq;
domain_ = domain;
device_ = device;
if (domain_ == 0)
clear (zRep_);
else if (domain_ == 1)
rRepCreate(st);
else if (domain_ == 2)
cRepCreate(st);
else if (domain_ == 3)
nRepCreate(st);
}
void CuPtxt::setLogq(int logq, int device, ZZX val) {
logq_ = logq;
domain_ = 0;
device_ = device;
zRep_ = val;
}
size_t CuPtxt::cRepSize() { return param.crtLen*sizeof(uint32);}
size_t CuPtxt::nRepSize() { return param.nttLen*sizeof(uint64);}
} // namespace cuHE
#include "cudapoa_structs.cuh"
#include <claraparabricks/genomeworks/utils/cudautils.hpp>
#include <claraparabricks/genomeworks/utils/limits.cuh>
#include <stdio.h>
namespace claraparabricks
{
namespace genomeworks
{
namespace cudapoa
{
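// The helpers below address a banded score matrix: each row stores only
// band_width + CUDAPOA_BANDED_MATRIX_RIGHT_PADDING cells, so a global column
// index is first translated into a band-relative offset before indexing.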
template <typename ScoreT>
__device__ __forceinline__ ScoreT* get_score_ptr(ScoreT* scores, int32_t row, int32_t column, int32_t band_start, int32_t band_width)
{
column = column == -1 ? 0 : column - band_start;
int64_t score_index = static_cast<int64_t>(column) + static_cast<int64_t>(row) * static_cast<int64_t>(band_width + CUDAPOA_BANDED_MATRIX_RIGHT_PADDING);
return &scores[score_index];
}
template <typename ScoreT>
__device__ __forceinline__ void set_score(ScoreT* scores,
int32_t row,
int32_t column,
int32_t value,
int32_t band_start,
int32_t band_width)
{
if (column == -1)
{
column = band_start;
}
else
{
column = column - band_start;
}
int64_t score_index = static_cast<int64_t>(column) + static_cast<int64_t>(row) * static_cast<int64_t>(band_width + CUDAPOA_BANDED_MATRIX_RIGHT_PADDING);
scores[score_index] = value;
}
__device__ __forceinline__ int32_t get_band_start_for_row(int32_t row, float gradient, int32_t band_width, int32_t band_shift, int32_t max_column)
{
int32_t diagonal_index = int32_t(row * gradient);
int32_t start_pos = max(0, diagonal_index - band_shift);
if (max_column < start_pos + band_width)
{
start_pos = max(0, max_column - band_width + CUDAPOA_CELLS_PER_THREAD);
}
start_pos = start_pos - (start_pos % CUDAPOA_CELLS_PER_THREAD);
return start_pos;
}
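// Interpretation: with band_shift = band_width / 2 (its initial value in
// needlemanWunschBanded below) the band is roughly centred on the matrix
// diagonal (row * gradient), clamped so it does not run past max_column, and
// rounded down to a multiple of CUDAPOA_CELLS_PER_THREAD so each thread always
// handles whole per-thread cell chunks.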
template <typename ScoreT>
__device__ __forceinline__ ScoreT get_score(ScoreT* scores,
int32_t row,
int32_t column,
int32_t band_width,
int32_t band_shift,
float gradient,
int32_t max_column,
const ScoreT min_score_value)
{
int32_t band_start = get_band_start_for_row(row, gradient, band_width, band_shift, max_column);
int32_t band_end = band_start + band_width;
band_end = min(band_end, max_column);
if ((column > band_end || column < band_start) && column != -1)
{
return min_score_value;
}
else
{
return *get_score_ptr(scores, row, column, band_start, band_width);
}
}
template <typename ScoreT>
__device__ __forceinline__ ScoreT4<ScoreT> get_scores(ScoreT* scores,
int32_t row,
int32_t column,
int32_t band_width,
int32_t band_shift,
float gradient,
int32_t max_column,
ScoreT default_value,
int32_t gap_score,
ScoreT4<ScoreT>& char_profile)
{
// The load instructions typically load data in 4B or 8B chunks.
// If the data is 16b (2B), then a 4B chunk is loaded into a register
// and the necessary bits are extracted before returning. This wastes cycles,
// as each read of 16b issues a separate load command.
// Instead it is better to load a 4B or 8B chunk into a register
// using a single load instruction, and then extract the necessary part
// of the data using bit arithmetic. This also reduces the register count.
int32_t band_start = get_band_start_for_row(row, gradient, band_width, band_shift, max_column);
// subtract by CELLS_PER_THREAD to ensure score4_next is not pointing out of the corresponding band bounds
int32_t band_end = band_start + band_width - CUDAPOA_CELLS_PER_THREAD;
band_end = min(band_end, max_column);
if ((column > band_end || column < band_start) && column != -1)
{
return ScoreT4<ScoreT>{default_value, default_value, default_value, default_value};
}
else
{
ScoreT4<ScoreT>* pred_scores = (ScoreT4<ScoreT>*)get_score_ptr(scores, row, column, band_start, band_width);
// loads 8/16 consecutive bytes (4 ScoreTs)
ScoreT4<ScoreT> score4 = pred_scores[0];
// need to load the next chunk of memory as well
ScoreT4<ScoreT> score4_next = pred_scores[1];
ScoreT4<ScoreT> score;
score.s0 = max(score4.s0 + char_profile.s0,
score4.s1 + gap_score);
score.s1 = max(score4.s1 + char_profile.s1,
score4.s2 + gap_score);
score.s2 = max(score4.s2 + char_profile.s2,
score4.s3 + gap_score);
score.s3 = max(score4.s3 + char_profile.s3,
score4_next.s0 + gap_score);
return score;
}
}
template <typename ScoreT>
__device__ __forceinline__ void initialize_band(ScoreT* scores,
int32_t row,
int32_t min_score_value,
int32_t band_start,
int32_t band_width,
int32_t lane_idx)
{
int32_t band_end = band_start + band_width;
band_start = max(1, band_start);
set_score(scores, row, band_start, min_score_value, band_start, band_width);
if (lane_idx < CUDAPOA_BANDED_MATRIX_RIGHT_PADDING)
{
set_score(scores, row, lane_idx + band_end, min_score_value, band_start, band_width);
}
}
template <typename SeqT,
typename ScoreT,
typename SizeT,
bool Adaptive = true>
__device__ __forceinline__
int32_t
needlemanWunschBanded(SeqT* nodes,
SizeT* graph,
SizeT* node_id_to_pos,
int32_t graph_count,
uint16_t* incoming_edge_count,
SizeT* incoming_edges,
uint16_t* outgoing_edge_count,
SeqT* read,
int32_t read_length,
ScoreT* scores,
float max_buffer_size,
SizeT* alignment_graph,
SizeT* alignment_read,
int32_t band_width,
int32_t gap_score,
int32_t mismatch_score,
int32_t match_score,
int32_t rerun)
{
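// Half of the smallest representable score serves as "minus infinity" so that
// adding a gap or mismatch penalty to it cannot underflow the ScoreT type.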
const ScoreT min_score_value = numeric_limits<ScoreT>::min() / 2;
int32_t lane_idx = threadIdx.x % WARP_SIZE;
// Calculate aspect ratio for the scores matrix
float gradient = float(read_length + 1) / float(graph_count + 1);
int32_t max_column = read_length + 1;
// Set band-width based on scores matrix aspect ratio
//---------------------------------------------------------
if (Adaptive)
{
if (gradient > 1.1) // ad-hoc rule 1.a
{
// ad-hoc rule 1.b
band_width = max(band_width, cudautils::align<int32_t, CUDAPOA_MIN_BAND_WIDTH>(max_column * 0.08 * gradient));
}
if (gradient < 0.8) // ad-hoc rule 2.a
{
// ad-hoc rule 2.b
band_width = max(band_width, cudautils::align<int32_t, CUDAPOA_MIN_BAND_WIDTH>(max_column * 0.1 / gradient));
}
// limit band-width for very large reads, ad-hoc rule 3
band_width = min(band_width, CUDAPOA_MAX_ADAPTIVE_BAND_WIDTH);
if (band_width == CUDAPOA_MAX_ADAPTIVE_BAND_WIDTH && rerun != 0)
{
// already we have tried with maximum allowed band-width, rerun won't help
return rerun;
}
}
// band_shift defines distance of band_start from the scores matrix diagonal, ad-hoc rule 4
int32_t band_shift = band_width / 2;
if (Adaptive)
{
// rerun code is defined in backtracking loop from previous alignment try
// SHIFT_ADAPTIVE_BAND_TO_LEFT means traceback path was too close to the left bound of band
// SHIFT_ADAPTIVE_BAND_TO_RIGHT means traceback path was too close to the right bound of band
// Therefore we rerun alignment of the same read, but this time with double band-width and band_shift further to
// the left for rerun == SHIFT_ADAPTIVE_BAND_TO_LEFT, and further to the right for rerun == SHIFT_ADAPTIVE_BAND_TO_RIGHT.
if (rerun == CUDAPOA_SHIFT_ADAPTIVE_BAND_TO_LEFT && band_width <= CUDAPOA_MAX_ADAPTIVE_BAND_WIDTH / 2)
{
// ad-hoc rule 5
band_width *= 2;
band_shift *= 2.5;
}
if (rerun == CUDAPOA_SHIFT_ADAPTIVE_BAND_TO_RIGHT && band_width <= CUDAPOA_MAX_ADAPTIVE_BAND_WIDTH / 2)
{
// ad-hoc rule 6
band_width *= 2;
band_shift *= 1.5;
}
// check required memory and return error if exceeding max_buffer_size
// using float to avoid 64-bit
float required_buffer_size = static_cast<float>(graph_count) * static_cast<float>(band_width + CUDAPOA_BANDED_MATRIX_RIGHT_PADDING);
if (required_buffer_size > max_buffer_size)
{
return CUDAPOA_KERNEL_NW_ADAPTIVE_STORAGE_FAILED;
}
}
//---------------------------------------------------------
// Initialise the horizontal boundary of the score matrix; initialisation of the vertical boundary is done within the main for loop below.
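// Row 0, cell j corresponds to aligning the first j read characters against an
// empty graph prefix, hence a cost of j * gap_score.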
for (int32_t j = lane_idx; j < band_width + CUDAPOA_BANDED_MATRIX_RIGHT_PADDING; j += WARP_SIZE)
{
scores[j] = static_cast<ScoreT>(j * gap_score);
}
#ifdef NW_VERBOSE_PRINT
if (lane_idx == 0)
{
printf("graph %d, read %d\n", graph_count, read_length);
}
#endif
__syncwarp();
// compute vertical and diagonal values in parallel.
for (int32_t graph_pos = 0; graph_pos < graph_count; graph_pos++)
{
int32_t node_id = graph[graph_pos];
int32_t score_gIdx = graph_pos + 1;
int32_t band_start = get_band_start_for_row(score_gIdx, gradient, band_width, band_shift, max_column);
int32_t pred_node_id = incoming_edges[node_id * CUDAPOA_MAX_NODE_EDGES];
initialize_band(scores, score_gIdx, min_score_value, band_start, band_width, lane_idx);
int32_t first_element_prev_score = 0;
uint16_t pred_count = 0;
int32_t pred_idx = 0;
if (lane_idx == 0)
{
int32_t penalty;
pred_count = incoming_edge_count[node_id];
if (pred_count == 0)
{
set_score(scores, score_gIdx, -1, gap_score, band_start, band_width);
}
else
{
pred_idx = node_id_to_pos[pred_node_id] + 1;
if (band_start > CUDAPOA_CELLS_PER_THREAD && pred_count == 1)
{
first_element_prev_score = min_score_value + gap_score;
}
else
{
penalty = max(min_score_value, get_score(scores, pred_idx, -1, band_width, band_shift, gradient, max_column, min_score_value));
// if pred_num > 1 keep checking to find max score as penalty
for (int32_t p = 0; p < pred_count; p++)
{
pred_node_id = incoming_edges[node_id * CUDAPOA_MAX_NODE_EDGES + p];
int32_t pred_idx_tmp = node_id_to_pos[pred_node_id] + 1;
penalty = max(penalty, get_score(scores, pred_idx_tmp, -1, band_width, band_shift, gradient, max_column, min_score_value));
}
first_element_prev_score = penalty + gap_score;
}
set_score(scores, score_gIdx, -1, first_element_prev_score, band_start, band_width);
}
}
pred_count = __shfl_sync(FULL_MASK, pred_count, 0);
pred_idx = __shfl_sync(FULL_MASK, pred_idx, 0);
//-------------------------------------------------------------
SeqT graph_base = nodes[node_id];
for (int32_t read_pos = lane_idx * CUDAPOA_CELLS_PER_THREAD + band_start; read_pos < band_start + band_width; read_pos += WARP_SIZE * CUDAPOA_CELLS_PER_THREAD)
{
SeqT4<SeqT>* d_read4 = (SeqT4<SeqT>*)read;
SeqT4<SeqT> read4 = d_read4[read_pos / CUDAPOA_CELLS_PER_THREAD];
ScoreT4<ScoreT> char_profile;
char_profile.s0 = (graph_base == read4.r0 ? match_score : mismatch_score);
char_profile.s1 = (graph_base == read4.r1 ? match_score : mismatch_score);
char_profile.s2 = (graph_base == read4.r2 ? match_score : mismatch_score);
char_profile.s3 = (graph_base == read4.r3 ? match_score : mismatch_score);
ScoreT4<ScoreT> score = get_scores(scores, pred_idx, read_pos, band_width, band_shift, gradient, max_column, min_score_value, gap_score, char_profile);
// Perform same score updates as above, but for rest of predecessors.
for (int32_t p = 1; p < pred_count; p++)
{
int32_t pred_idx2 = node_id_to_pos[incoming_edges[node_id * CUDAPOA_MAX_NODE_EDGES + p]] + 1;
ScoreT4<ScoreT> scores_4 = get_scores(scores, pred_idx2, read_pos, band_width, band_shift, gradient, max_column, min_score_value, gap_score, char_profile);
score.s0 = max(score.s0, scores_4.s0);
score.s1 = max(score.s1, scores_4.s1);
score.s2 = max(score.s2, scores_4.s2);
score.s3 = max(score.s3, scores_4.s3);
}
// While there are changes to the horizontal score values, keep updating the matrix.
// So the loop only runs as many times as there are corrections in the matrix.
// The any_sync warp primitive lets us easily check if any of the threads had an update.
bool loop = true;
while (__any_sync(FULL_MASK, loop))
{
loop = false;
// Note: computation of s3 depends on s2, s2 depends on s1 and s1 on s0.
// If we reverse the order of computation in this loop from s3 to s0, it will increase
// ILP. However, in longer reads where indels are more frequent, this reversed computation
// results in a larger number of iterations, since if s0 changes, the values of s1, s2 and s3 that
// have already been computed in parallel need to be updated again.
// The shfl_up lets us grab a value from the lane below.
int32_t last_score = __shfl_up_sync(FULL_MASK, score.s3, 1);
if (lane_idx == 0)
{
last_score = first_element_prev_score;
}
score.s0 = max(last_score + gap_score, score.s0);
score.s1 = max(score.s0 + gap_score, score.s1);
score.s2 = max(score.s1 + gap_score, score.s2);
int32_t tscore = max(score.s2 + gap_score, score.s3);
if (tscore > score.s3)
{
score.s3 = tscore;
loop = true;
}
}
// Copy over the last element score of the last lane into a register of first lane
// which can be used to compute the first cell of the next warp.
first_element_prev_score = __shfl_sync(FULL_MASK, score.s3, WARP_SIZE - 1);
int64_t score_index = static_cast<int64_t>(read_pos + 1 - band_start) + static_cast<int64_t>(score_gIdx) * static_cast<int64_t>(band_width + CUDAPOA_BANDED_MATRIX_RIGHT_PADDING);
scores[score_index] = score.s0;
scores[score_index + 1L] = score.s1;
scores[score_index + 2L] = score.s2;
scores[score_index + 3L] = score.s3;
__syncwarp();
}
}
int32_t aligned_nodes = 0;
if (lane_idx == 0)
{
// Find location of the maximum score in the matrix.
int32_t i = 0;
int32_t j = read_length;
int32_t mscore = min_score_value;
for (int32_t idx = 1; idx <= graph_count; idx++)
{
if (outgoing_edge_count[graph[idx - 1]] == 0)
{
int32_t s = get_score(scores, idx, j, band_width, band_shift, gradient, max_column, min_score_value);
if (mscore < s)
{
mscore = s;
i = idx;
}
}
}
// Fill in traceback
int32_t prev_i = 0;
int32_t prev_j = 0;
int32_t next_node_id = i > 0 ? graph[i - 1] : 0;
int32_t loop_count = 0;
while (!(i == 0 && j == 0) && loop_count < static_cast<int32_t>(read_length + graph_count + 2))
{
loop_count++;
int32_t scores_ij = get_score(scores, i, j, band_width, band_shift, gradient, max_column, min_score_value);
bool pred_found = false;
// Check if move is diagonal.
if (i != 0 && j != 0)
{
if (Adaptive)
{
// no need to request rerun if (a) it's not the first run, (b) band_width == CUDAPOA_MAX_ADAPTIVE_BAND_WIDTH already
if (rerun == 0 && band_width < CUDAPOA_MAX_ADAPTIVE_BAND_WIDTH)
{
// check if traceback gets too close or hits the band limits, if so stop and rerun with extended band-width
// threshold for proximity to band limits works better if defined proportionate to the sequence length
int32_t threshold = max(1, max_column / 1024); // ad-hoc rule 7
if (j > threshold && j < max_column - threshold)
{
int32_t band_start = get_band_start_for_row(i, gradient, band_width, band_shift, max_column);
if (j <= band_start + threshold) // ad-hoc rule 8-a, too close to left bound
{
aligned_nodes = CUDAPOA_SHIFT_ADAPTIVE_BAND_TO_LEFT;
break;
}
if (j >= (band_start + band_width - threshold)) // ad-hoc rule 8-b, too close to right bound
{
aligned_nodes = CUDAPOA_SHIFT_ADAPTIVE_BAND_TO_RIGHT;
break;
}
}
}
}
int32_t node_id = next_node_id;
int32_t match_cost = (nodes[node_id] == read[j - 1] ? match_score : mismatch_score);
uint16_t pred_count = incoming_edge_count[node_id];
int32_t pred_i = (pred_count == 0 ? 0 : (node_id_to_pos[incoming_edges[node_id * CUDAPOA_MAX_NODE_EDGES]] + 1));
if (scores_ij == (get_score(scores, pred_i, j - 1, band_width, band_shift, gradient, max_column, min_score_value) + match_cost))
{
prev_i = pred_i;
prev_j = j - 1;
pred_found = true;
}
if (!pred_found)
{
for (int32_t p = 1; p < pred_count; p++)
{
pred_i = (node_id_to_pos[incoming_edges[node_id * CUDAPOA_MAX_NODE_EDGES + p]] + 1);
if (scores_ij == (get_score(scores, pred_i, j - 1, band_width, band_shift, gradient, max_column, min_score_value) + match_cost))
{
prev_i = pred_i;
prev_j = j - 1;
pred_found = true;
break;
}
}
}
}
// Check if move is vertical.
if (!pred_found && i != 0)
{
int32_t node_id = graph[i - 1];
uint16_t pred_count = incoming_edge_count[node_id];
int32_t pred_i = (pred_count == 0 ? 0 : node_id_to_pos[incoming_edges[node_id * CUDAPOA_MAX_NODE_EDGES]] + 1);
if (scores_ij == get_score(scores, pred_i, j, band_width, band_shift, gradient, max_column, min_score_value) + gap_score)
{
prev_i = pred_i;
prev_j = j;
pred_found = true;
}
if (!pred_found)
{
for (int32_t p = 1; p < pred_count; p++)
{
pred_i = node_id_to_pos[incoming_edges[node_id * CUDAPOA_MAX_NODE_EDGES + p]] + 1;
if (scores_ij == get_score(scores, pred_i, j, band_width, band_shift, gradient, max_column, min_score_value) + gap_score)
{
prev_i = pred_i;
prev_j = j;
pred_found = true;
break;
}
}
}
}
// Check if move is horizontal.
if (!pred_found && scores_ij == get_score(scores, i, j - 1, band_width, band_shift, gradient, max_column, min_score_value) + gap_score)
{
prev_i = i;
prev_j = j - 1;
pred_found = true;
}
next_node_id = graph[prev_i - 1];
alignment_graph[aligned_nodes] = (i == prev_i ? -1 : graph[i - 1]);
alignment_read[aligned_nodes] = (j == prev_j ? -1 : j - 1);
aligned_nodes++;
i = prev_i;
j = prev_j;
}
if (loop_count >= (read_length + graph_count + 2))
{
aligned_nodes = CUDAPOA_KERNEL_NW_BACKTRACKING_LOOP_FAILED;
}
#ifdef NW_VERBOSE_PRINT
printf("aligned nodes %d, loop count %d\n", aligned_nodes, loop_count);
#endif
}
aligned_nodes = __shfl_sync(FULL_MASK, aligned_nodes, 0);
return aligned_nodes;
}
// global kernel used in testing, hence uses int16_t for SizeT and ScoreT,
// may need to change if test inputs change to long reads
template <typename SizeT, bool Adaptive>
__global__ void runNeedlemanWunschBandedKernel(uint8_t* nodes,
SizeT* graph,
SizeT* node_id_to_pos,
int32_t graph_count,
uint16_t* incoming_edge_count,
SizeT* incoming_edges,
uint16_t* outgoing_edge_count,
uint8_t* read,
int32_t read_length,
int16_t* scores,
int32_t scores_width,
int32_t max_nodes_per_graph,
SizeT* alignment_graph,
SizeT* alignment_read,
int32_t band_width,
int32_t gap_score,
int32_t mismatch_score,
int32_t match_score,
SizeT* aligned_nodes)
{
static_assert(std::is_same<SizeT, int16_t>::value, "This function only accepts int16_t as SizeT.");
float banded_buffer_size = static_cast<float>(max_nodes_per_graph) * static_cast<float>(scores_width);
*aligned_nodes = needlemanWunschBanded<uint8_t, int16_t, int16_t, Adaptive>(nodes,
graph,
node_id_to_pos,
graph_count,
incoming_edge_count,
incoming_edges,
outgoing_edge_count,
read,
read_length,
scores,
banded_buffer_size,
alignment_graph,
alignment_read,
band_width,
gap_score,
mismatch_score,
match_score,
0);
}
// Host function that calls the kernel
template <typename SizeT>
void runNWbanded(uint8_t* nodes,
SizeT* graph,
SizeT* node_id_to_pos,
int32_t graph_count,
uint16_t* incoming_edge_count,
SizeT* incoming_edges,
uint16_t* outgoing_edge_count,
uint8_t* read,
int32_t read_length,
int16_t* scores,
int32_t scores_width,
int32_t max_nodes_per_graph,
SizeT* alignment_graph,
SizeT* alignment_read,
int32_t band_width,
int32_t gap_score,
int32_t mismatch_score,
int32_t match_score,
SizeT* aligned_nodes,
bool adaptive)
{
if (adaptive)
{
runNeedlemanWunschBandedKernel<SizeT, true><<<1, CUDAPOA_BANDED_THREADS_PER_BLOCK>>>(nodes,
graph,
node_id_to_pos,
graph_count,
incoming_edge_count,
incoming_edges,
outgoing_edge_count,
read,
read_length,
scores,
scores_width,
max_nodes_per_graph,
alignment_graph,
alignment_read,
band_width,
gap_score,
mismatch_score,
match_score,
aligned_nodes);
}
else
{
runNeedlemanWunschBandedKernel<SizeT, false><<<1, CUDAPOA_BANDED_THREADS_PER_BLOCK>>>(nodes,
graph,
node_id_to_pos,
graph_count,
incoming_edge_count,
incoming_edges,
outgoing_edge_count,
read,
read_length,
scores,
scores_width,
max_nodes_per_graph,
alignment_graph,
alignment_read,
band_width,
gap_score,
mismatch_score,
match_score,
aligned_nodes);
}
GW_CU_CHECK_ERR(cudaPeekAtLastError());
}
} // namespace cudapoa
} // namespace genomeworks
} // namespace claraparabricks
#include <algorithm>
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <sys/stat.h>
namespace pqt {
PerturbationProTree::PerturbationProTree(uint _dim, uint _p, uint _p2) :
ProTree(_dim, _p, _p2), d_multiCodeBook(NULL), d_multiCodeBook2(NULL), d_codeBookDistL2(
NULL), d_codeBookDistL1L2(NULL), d_lineIdx(NULL), d_lineLambda(
NULL), d_l2Idx(NULL), d_lineParts(0) {
// d_dimBits is the bit position of the most significant bit of a dimension
// index, i.e. log2(_dim) - 1
int y;
for (y = 0; y < 64; y++)
if (!((_dim - 1) >> y))
break;
d_dimBits = y - 1;
d_dimBits = 7 - 1; // hard-coded override, assumes _dim == 128
std::cout << "dimBits " << d_dimBits << std::endl;
d_nDBs = 1;
}
PerturbationProTree::~PerturbationProTree() {
if (d_lineIdx)
cudaFree(d_lineIdx);
if (d_lineLambda)
cudaFree(d_lineLambda);
if (d_l2Idx)
cudaFree(d_l2Idx);
if (d_codeBookDistL1L2)
cudaFree(d_codeBookDistL1L2);
if (d_codeBookDistL2)
cudaFree(d_codeBookDistL2);
if (d_multiCodeBook)
cudaFree(d_multiCodeBook);
if (d_multiCodeBook2)
cudaFree(d_multiCodeBook2);
}
void PerturbationProTree::writeTreeToFile(const std::string& _name) {
ofstream f(_name.c_str(), std::ofstream::out | std::ofstream::binary);
f << d_dim << std::endl;
f << d_p << std::endl;
f << d_p2 << std::endl;
f << d_nClusters << std::endl;
f << d_nClusters2 << std::endl;
f << d_nDBs << std::endl;
float * cb1Host = new float[d_nDBs * d_nClusters * d_dim];
float * cb2Host = new float[d_nDBs * d_nClusters * d_nClusters2 * d_dim];
cudaMemcpy(cb1Host, d_multiCodeBook,
d_nDBs * d_nClusters * d_dim * sizeof(float),
cudaMemcpyDeviceToHost);
cudaMemcpy(cb2Host, d_multiCodeBook2,
d_nDBs * d_nClusters * d_nClusters2 * d_dim * sizeof(float),
cudaMemcpyDeviceToHost);
char* cc = (char*) cb1Host;
for (int i = 0; i < 10; i++) {
std::cout << int(cc[i]) << " ";
}
std::cout << std::endl;
std::cout << "cb1[12]: " << cb1Host[12] << std::endl;
std::cout << "cb2[12]: " << cb2Host[12] << std::endl;
f.write((char*) cb1Host, d_nDBs * d_nClusters * d_dim * sizeof(float));
f.write((char*) cb2Host,
d_nDBs * d_nClusters * d_nClusters2 * d_dim * sizeof(float));
f.close();
delete[] cb2Host;
delete[] cb1Host;
if (d_sparseBin) {
uint nBins = pow(d_nClusters, d_p);
ofstream fs((_name + string("_sparse")).c_str(),
std::ofstream::out | std::ofstream::binary);
uint* sparseHost = new uint[nBins];
cudaMemcpy(sparseHost, d_sparseBin, nBins * sizeof(uint),
cudaMemcpyDeviceToHost);
fs.write((char*) sparseHost, nBins * sizeof(uint));
fs.close();
delete[] sparseHost;
}
}
void PerturbationProTree::readTreeFromFile(const std::string& _name) {
ifstream f(_name.c_str(), std::ofstream::in | std::ofstream::binary);
f >> d_dim;
f >> d_p;
f >> d_p2;
f >> d_nClusters;
f >> d_nClusters2;
f >> d_nDBs;
f.ignore(1);
std::cout << d_dim << std::endl;
std::cout << d_p << std::endl;
std::cout << d_p2 << std::endl;
std::cout << d_nClusters << std::endl;
std::cout << d_nClusters2 << std::endl;
std::cout << d_nDBs << std::endl;
d_vl = d_dim / d_p;
d_vl2 = d_dim / d_p2;
if (d_multiCodeBook)
cudaFree(d_multiCodeBook);
if (d_multiCodeBook2)
cudaFree(d_multiCodeBook2);
if (d_distSeq)
cudaFree(d_distSeq);
float * cb1Host = new float[d_nDBs * d_nClusters * d_dim];
float * cb2Host = new float[d_nDBs * d_nClusters * d_nClusters2 * d_dim];
cudaMalloc(&d_multiCodeBook, d_nDBs * d_nClusters * d_dim * sizeof(float));
cudaMalloc(&d_multiCodeBook2,
d_nDBs * d_nClusters * d_nClusters2 * d_dim * sizeof(float));
f.read((char*) cb1Host, d_nDBs * d_nClusters * d_dim * sizeof(float));
f.read((char*) cb2Host,
d_nDBs * d_nClusters * d_nClusters2 * d_dim * sizeof(float));
char* cc = (char*) cb1Host;
for (int i = 0; i < 10; i++) {
std::cout << int(cc[i]) << " ";
}
std::cout << std::endl;
std::cout << "cb perturbation 0:" << std::endl;
for (int i = 0; i < 100; i++)
std::cout << "\t" << cb1Host[i];
std::cout << std::endl;
std::cout << std::endl;
std::cout << "cb perturbation 1: " << std::endl;
for (int i = 0; i < 100; i++)
std::cout << "\t" << cb1Host[i + d_nClusters * d_dim];
std::cout << std::endl;
std::cout << std::endl;
std::cout << "cb1[12]: " << cb1Host[12] << std::endl;
std::cout << "cb2[12]: " << cb2Host[12] << std::endl;
cudaMemcpy(d_multiCodeBook, cb1Host,
d_nDBs * d_nClusters * d_dim * sizeof(float),
cudaMemcpyHostToDevice);
cudaMemcpy(d_multiCodeBook2, cb2Host,
d_nDBs * d_nClusters * d_nClusters2 * d_dim * sizeof(float),
cudaMemcpyHostToDevice);
checkCudaErrors(cudaDeviceSynchronize());
f.close();
prepareDistSequence(d_nClusters2 * NUM_NEIGHBORS, d_groupParts);
delete[] cb2Host;
delete[] cb1Host;
string sparseName(_name + string("_sparse"));
struct stat buffer;
if (stat(sparseName.c_str(), &buffer) == 0) {
uint nBins = pow(d_nClusters, d_p);
ifstream fs((_name + string("_sparse")).c_str(),
std::ifstream::in | std::ifstream::binary);
uint* sparseHost = new uint[nBins];
fs.read((char*) sparseHost, nBins * sizeof(uint));
cudaMalloc(&d_sparseBin, nBins * sizeof(uint));
cudaMemcpy(d_sparseBin, sparseHost, nBins * sizeof(uint),
cudaMemcpyHostToDevice);
fs.close();
delete[] sparseHost;
}
}
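// pertIdx() permutes a dimension index per perturbed codebook: for _cb == 0 it
// is the identity; otherwise the top bit of the index (bit _dimBits) is
// reinserted at bit position (_cb - 1) while the higher bits of the remainder
// are shifted up by one place. Worked example (assuming _dimBits == 6, _cb == 1):
// index 64 maps to 1, index 1 maps to 2.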
__device__ uint pertIdx(uint _i, uint _dimBits, uint _cb) {
//!! TODO
// _cb = 1;
if (_cb == 0)
return _i;
// if (_i ==0) printf("bits: %d \n", _dimBits);
_cb -= 1;
uint maxBit = _i >> _dimBits;
uint mask = (1 << _dimBits) - 1;
uint remain = _i & mask;
mask = (1 << _cb) - 1;
return (maxBit << _cb) + ((remain >> _cb) << (_cb + 1)) + (remain & mask);
}
__global__ void perturbationKernel(float* _pertA, const float* _A, uint _N,
uint _dimBits, uint _pert) {
extern __shared__ float shm[];
for (int iter = blockIdx.x; iter < _N; iter += gridDim.x) {
__syncthreads();
shm[threadIdx.x] = _A[iter * blockDim.x + threadIdx.x];
uint pIdx = pertIdx(threadIdx.x, _dimBits, _pert);
__syncthreads();
_pertA[iter * blockDim.x + threadIdx.x] = shm[pIdx];
}
}
void PerturbationProTree::perturbVectors(float* _pertA, const float* _A,
uint _N, uint _pert) {
dim3 block(d_dim, 1, 1);
dim3 grid((_N < 1024) ? _N : 1024, 1, 1);
uint shm = (d_dim) * sizeof(float);
perturbationKernel<<<grid, block, shm>>>(_pertA, _A, _N, d_dimBits, _pert);
checkCudaErrors(cudaDeviceSynchronize());
}
void PerturbationProTree::createTree(uint _nClusters1, uint _nClusters2,
const float* _A, uint _N) {
if (!d_multiCodeBook)
cudaMalloc(&d_multiCodeBook,
d_nDBs * d_p * _nClusters1 * d_vl * sizeof(float));
if (!d_multiCodeBook2)
cudaMalloc(&d_multiCodeBook2,
d_nDBs * d_p * _nClusters1 * _nClusters2 * d_vl
* sizeof(float));
float* pertA;
cudaMalloc(&pertA, _N * d_dim * sizeof(float));
// fill the working buffer with the input vectors (perturbation 0 is the identity permutation)
perturbVectors(pertA, _A, _N, 0);
ProTree::createTree(_nClusters1, _nClusters2, pertA, _N);
cudaMemcpy(d_multiCodeBook , d_codeBook, _nClusters1 * d_dim * sizeof(float), cudaMemcpyDeviceToDevice);
cudaMemcpy(d_multiCodeBook2 , d_codeBook2, _nClusters1 * _nClusters2 * d_dim * sizeof(float), cudaMemcpyDeviceToDevice);
checkCudaErrors(cudaDeviceSynchronize());
cudaFree(pertA);
cudaFree(d_codeBook);
cudaFree(d_codeBook2);
}
void PerturbationProTree::createTreeSplitSparse(uint _nClusters1,
uint _nClusters2, const float* _A, uint _N, bool _sparse) {
if (!d_multiCodeBook)
cudaMalloc(&d_multiCodeBook,
d_nDBs * d_p * _nClusters1 * d_vl * sizeof(float));
if (!d_multiCodeBook2)
cudaMalloc(&d_multiCodeBook2,
d_nDBs * d_p * _nClusters1 * _nClusters2 * d_vl
* sizeof(float));
float* pertA;
cudaMalloc(&pertA, _N * d_dim * sizeof(float));
for (int pert = 0; pert < d_nDBs; pert++) {
perturbVectors(pertA, _A, _N, pert);
// ProTree::createTree(_nClusters1, _nClusters2, pertA, _N);
ProTree::createTreeSplitSparse(_nClusters1, _nClusters2, pertA, _N, 0.3,
_sparse);
cudaMemcpy(d_multiCodeBook + d_dim * _nClusters1 * pert, d_codeBook,
_nClusters1 * d_dim * sizeof(float), cudaMemcpyDeviceToDevice);
cudaMemcpy(d_multiCodeBook2 + d_dim * _nClusters1 * _nClusters2 * pert,
d_codeBook2, _nClusters1 * _nClusters2 * d_dim * sizeof(float),
cudaMemcpyDeviceToDevice);
}
checkCudaErrors(cudaDeviceSynchronize());
cudaFree(pertA);
cudaFree(d_codeBook);
cudaFree(d_codeBook2);
}
// each block is responsible for one vector, blockDim.x should be _dim
// requires _dim * float shared memory
// looping multiple times to process all B vectors
// _vl is the length of the _p vector segments (should be 2^n)
__global__ void assignPerturbationClusterKernel(uint *_assign, const float* _A,
const float* _B, uint _Arows, uint _Brows, uint _dim, uint _p, uint _vl,
uint _dimBits) {
extern __shared__ float shmb[];
float *shm = shmb + _dim;
float minVal;
uint minIdx;
// if ((blockIdx.x == 0) && (threadIdx.x == 0)) {
// for (int i = 0; i < _dim; i++)
// printf("%d: %d %d %d \n", i, pertIdx(i, _dimBits, 0),
// pertIdx(i, _dimBits, 1), pertIdx(i, _dimBits, 2)) ;
// }
//
// loop over all corresponding vectors in B
for (int iter = blockIdx.x; iter < _Brows; iter += gridDim.x) {
__syncthreads();
// load the vector to shared mem
shmb[threadIdx.x] = _B[iter * _dim + threadIdx.x];
__syncthreads();
for (uint pert = 0; pert < 1; pert++) {
if (threadIdx.x < _p) {
minVal = 10000000.;
minIdx = 0;
}
uint pIdx = pertIdx(threadIdx.x, _dimBits, pert);
// load perturbed vector
float b = shmb[pIdx];
const float* A = _A + pert * _Arows * _dim;
// loop over all vectors of A
for (int a = 0; a < _Arows; a++) {
shm[threadIdx.x] = sqr(b - A[a * _dim + threadIdx.x]);
// compute the sum of differences for the vector segments
// i.e. all _p segments in parallel
for (uint stride = _vl >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if ((threadIdx.x) < _p * stride) {
uint p = threadIdx.x / stride * _vl;
uint bias = threadIdx.x % stride;
shm[p + bias] += shm[p + bias + stride];
}
}
__syncthreads();
if (threadIdx.x < _p) {
// select the minimum of each segment
float val = shm[threadIdx.x * _vl];
if (val < minVal) {
minVal = val;
minIdx = a;
}
}
__syncthreads();
}
// write out decision
if (threadIdx.x < _p) {
_assign[(iter * 1 + pert) * _p + threadIdx.x] =
minIdx;
}
} // perturbation
} // iter
}
void PerturbationProTree::getAssignment(uint *_assign, const float* _A,
const float* _B, uint _Arows, uint _Brows) const {
dim3 block(d_dim, 1, 1);
dim3 grid((_Brows < 1024) ? _Brows : 1024, 1, 1);
uint shm = 2 * d_dim * sizeof(float);
assignPerturbationClusterKernel<<<grid, block, shm>>>(_assign, _A, _B,
_Arows, _Brows, d_dim, d_p, d_vl, d_dimBits);
checkCudaErrors(cudaDeviceSynchronize());
}
// each block is responsible for one vector, blockDim.x should be _dim
// requires _dim * float shared memory
// looping multiple times to process all B vectors
// _vl is the length of the _p vector segments (should be 2^n)
__global__ void assignPerturbationClusterKernel2(uint *_assign,
const float* _cb2, const float* _B, uint _nClusters2, uint _Brows,
uint _dim, uint _p, uint _vl, const uint* _assign1, uint _nClusters1,
uint _dimBits) {
extern __shared__ float shmb[];
float* shm = shmb + _dim;
uint* code1 = (uint*) shm + _dim;
float minVal;
uint minIdx;
// loop over all corresponding vectors in B
for (int iter = blockIdx.x; iter < _Brows; iter += gridDim.x) {
__syncthreads();
// load the vector to shared mem
shmb[threadIdx.x] = _B[iter * _dim + threadIdx.x];
for (uint pert = 0; pert < 1; pert++) {
__syncthreads();
if (threadIdx.x < _p) {
minVal = 10000000.;
minIdx = 0;
code1[threadIdx.x] = _assign1[(iter * 1 + pert)
* _p + threadIdx.x];
}
__syncthreads();
uint pIdx = pertIdx(threadIdx.x, _dimBits, pert);
// load perturbed vector
float b = shmb[pIdx];
const float* A = _cb2 + pert * _nClusters1 * _nClusters2 * _dim;
// each segment needs a different codebook
uint p = threadIdx.x / _vl;
const float* cb = A
+ getCBIdx(p, code1[p], _nClusters1, _vl, _nClusters2)
+ (threadIdx.x % _vl);
// loop over all vectors of A
for (int a = 0; a < _nClusters2; a++) {
float s = sqr(b - cb[a * _vl]);
shm[threadIdx.x] = s; // sqr(b - cb[a * _vl]);
// compute the sum of differences for the vector segments
// i.e. all _p segments in parallel
for (uint stride = _vl >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if ((threadIdx.x) < _p * stride) {
uint p = threadIdx.x / stride * _vl;
uint bias = threadIdx.x % stride;
shm[p + bias] += shm[p + bias + stride];
}
}
__syncthreads();
if (threadIdx.x < _p) {
// select the minimum of each segment
float val = shm[threadIdx.x * _vl];
if (val < minVal) {
minVal = val;
minIdx = a;
}
}
__syncthreads();
}
// write out decision
if (threadIdx.x < _p) {
_assign[(iter * 1 + pert) * _p + threadIdx.x] =
minIdx;
}
} // perturbation
} // iter
}
void PerturbationProTree::getAssignment2(uint *_assign2, const float* _A,
const float* _B, uint _Arows, uint _Brows, const uint *_assign1,
uint _nClusters1) const {
dim3 block(d_dim, 1, 1);
dim3 grid((_Brows < 1024) ? _Brows : 1024, 1, 1);
uint shm = (2 * d_dim + d_p) * sizeof(float);
assignPerturbationClusterKernel2<<<grid, block, shm>>>(_assign2, _A, _B,
_Arows, _Brows, d_dim, d_p, d_vl, _assign1, _nClusters1, d_dimBits);
checkCudaErrors(cudaDeviceSynchronize());
}
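// calcIdx() packs the per-segment (level-1, level-2) cluster assignments into a
// single compact bin index: each segment contributes assign1 * _nClusters2 + assign2,
// the _p segment values are then combined in mixed radix with base
// _nClusters * _nClusters2, offset by _nBins per perturbation, and reduced
// modulo HASH_SIZE when USE_HASH is enabled.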
__device__ void calcIdx(volatile uint* _shm, const uint* _assign,
const uint* _assign2, uint _p, uint _nClusters, uint _nClusters2,
uint _iter, uint _pert, uint _nBins) {
// load assignment vector into shm;
if (threadIdx.x < _p) {
uint offs = (_iter * 1 + _pert) * _p + threadIdx.x;
_shm[threadIdx.x] = _assign[offs] * _nClusters2 + _assign2[offs];
}
// assume implicit synchronization as the number of threads is smaller than a warp
if (threadIdx.x == 0) {
for (int p = 1; p < _p; p++)
_shm[0] = _shm[0] * _nClusters * _nClusters2 + _shm[p];
_shm[0] += _nBins * _pert;
#if USE_HASH
_shm[0] = _shm[0] % HASH_SIZE;
#endif
}
__syncthreads();
}
__global__ void countBinsKernel(uint* _bins, const uint* _assign,
uint* _assign2, uint _N, uint _p, uint _nClusters, uint _nClusters2,
uint _nBins) {
extern __shared__ float shmf[];
uint* shm = (uint*) shmf;
for (int iter = blockIdx.x; iter < _N; iter += gridDim.x) {
__syncthreads();
for (int pert = 0; pert < 1; pert++) {
calcIdx(shm, _assign, _assign2, _p, _nClusters, _nClusters2, iter,
pert, _nBins);
if (threadIdx.x == 0) {
atomicInc(_bins + shm[0], _N);
if (iter == 0) {
printf("bin: %d %d \n", pert, shm[0]);
}
}
}
}
}
void PerturbationProTree::countBins(uint* _bins, const uint* _assign,
uint* _assign2, uint _N) {
dim3 block(d_p + d_p, 1, 1);
dim3 grid((_N < 1024) ? _N : 1024, 1, 1);
uint shmsize = (d_p + d_p) * sizeof(uint);
if (!_bins) {
std::cout << "did not get binCount array " << std::endl;
exit(1);
}
#if USE_HASH
cudaMemset(_bins, 0, HASH_SIZE * sizeof(uint));
#else
cudaMemset(_bins, 0, d_nDBs * d_nBins * sizeof(uint));
#endif
countBinsKernel<<<grid, block, shmsize>>>(_bins, _assign, _assign2, _N, d_p,
d_nClusters, d_nClusters2, d_nBins);
checkCudaErrors(cudaDeviceSynchronize());
}
__global__ void countBinsKernel(uint* _bins, const int* _assignedBins, uint _N) {
for (int iter = blockIdx.x * blockDim.x + threadIdx.x;
iter < _N * 1; iter += gridDim.x * blockDim.x) {
if (_assignedBins[iter] >= 0)
atomicInc(_bins + _assignedBins[iter], _N);
}
}
void PerturbationProTree::countBins(uint* _bins, const int* _assignedBins,
uint _N) {
uint n = _N * d_nDBs;
uint nThreads = (n < 256) ? n : 256;
uint nBlocks = idiv(n, nThreads);
nBlocks = (nBlocks < 65000) ? nBlocks : 65000;
dim3 block(nThreads, 1, 1);
dim3 grid(nBlocks, 1, 1);
if (!_bins) {
std::cout << "did not get binCount array " << std::endl;
exit(1);
}
#if USE_HASH
cudaMemset(_bins, 0, HASH_SIZE * sizeof(uint));
#else
cudaMemset(_bins, 0, d_nDBs * d_nBins * sizeof(uint));
#endif
countBinsKernel<<<grid, block>>>(_bins, _assignedBins, _N);
checkCudaErrors(cudaDeviceSynchronize());
}
__global__ void sortIdxKernel(uint* _dbIdx, uint* _binCount,
const uint* _prefix, const uint* _assign, const uint* _assign2, uint _N,
uint _p, uint _nClusters, uint _nClusters2,
uint _nBins) {
extern __shared__ float shmf[];
uint* shm = (uint*) shmf;
for (int iter = blockIdx.x; iter < _N; iter += gridDim.x) {
for (int pert = 0; pert < 1; pert++) {
calcIdx(shm, _assign, _assign2, _p, _nClusters, _nClusters2, iter,
pert, _nBins);
if (threadIdx.x == 0) {
uint pos = atomicInc(_binCount + shm[0], _N);
if ((_prefix[shm[0]] + pos) > 1 * _N) {
printf("out of range!: %d, %d, %d \n", _prefix[shm[0]], pos,
shm[0]);
}
_dbIdx[_prefix[shm[0]] + pos] = iter;
}
}
}
}
void PerturbationProTree::sortIdx(uint* _dbIdx, const uint* _assign,
const uint* _assign2, uint _N) {
dim3 block(d_p + d_p, 1, 1);
dim3 grid((_N < 1024) ? _N : 1024, 1, 1);
uint shmsize = (d_p + d_p) * sizeof(uint);
#if USE_HASH
cudaMemset(d_binCounts, 0, HASH_SIZE * sizeof(uint));
#else
cudaMemset(d_binCounts, 0, d_nDBs * d_nBins * sizeof(uint));
#endif
sortIdxKernel<<<grid, block, shmsize>>>(_dbIdx, d_binCounts, d_binPrefix,
_assign, _assign2, _N, d_p, d_nClusters, d_nClusters2,
d_nBins);
checkCudaErrors(cudaDeviceSynchronize());
}
__global__ void sortIdxKernel(uint* _dbIdx, uint* _binCount,
const uint* _prefix, const int* _assignedBins, uint _N) {
for (int iter = blockIdx.x * blockDim.x + threadIdx.x;
iter < _N * 1; iter += gridDim.x * blockDim.x) {
if (_assignedBins[iter] >= 0) {
uint pos = atomicInc(_binCount + _assignedBins[iter], _N);
_dbIdx[_prefix[_assignedBins[iter]] + pos] = iter / 1;
}
}
}
void PerturbationProTree::sortIdx(uint* _dbIdx, const int* _assignedBins,
uint _N) {
uint n = _N * d_nDBs;
uint nThreads = (n < 1024) ? n : 1024;
uint nBlocks = idiv(n, nThreads);
nBlocks = (nBlocks < 65000) ? nBlocks : 65000;
dim3 block(nThreads, 1, 1);
dim3 grid(nBlocks, 1, 1);
#if USE_HASH
cudaMemset(d_binCounts, 0, HASH_SIZE * sizeof(uint));
#else
cudaMemset(d_binCounts, 0, d_nDBs * d_nBins * sizeof(uint));
#endif
sortIdxKernel<<<grid, block>>>(_dbIdx, d_binCounts, d_binPrefix,
_assignedBins, _N);
checkCudaErrors(cudaDeviceSynchronize());
}
void PerturbationProTree::buildDB(const float* _A, uint _N) {
uint* assignd;
uint* assignd2;
cudaMalloc(&assignd, _N * d_p * d_nDBs * sizeof(uint));
cudaMalloc(&assignd2, _N * d_p * d_nDBs * sizeof(uint));
if ((assignd == NULL) || (assignd2 == NULL)) {
std::cout << "buildDB did not get memory!" << std::endl;
exit(1);
}
getAssignment(assignd, d_multiCodeBook, _A, d_nClusters, _N);
// outputVecUint("assign", assignd, 256);
getAssignment2(assignd2, d_multiCodeBook2, _A, d_nClusters2, _N, assignd,
d_nClusters);
// outputVecUint("assignd2", assignd2, 200);
std::cout << "clusters: " << d_nClusters << " " << d_nClusters2 << std::endl;
std::cout << "number of data bases " << d_nDBs << std::endl;
d_nBins = pow(d_nClusters, d_p) * pow(d_nClusters2, d_p);
std::cout << "number of bins: " << d_nBins << std::endl;
#if USE_HASH
cudaMalloc(&d_binPrefix, HASH_SIZE * sizeof(uint));
cudaMalloc(&d_binCounts, HASH_SIZE * sizeof(uint));
cudaMalloc(&d_dbIdx, d_nDBs * _N * sizeof(uint));
countBins(d_binCounts, assignd, assignd2, _N);
histogram(HASH_SIZE);
cudaMemset(d_binPrefix, 0, HASH_SIZE * sizeof(uint));
scan(d_binPrefix, d_binCounts, HASH_SIZE, false);
#else
cudaMalloc(&d_binPrefix, d_nDBs * d_nBins * sizeof(uint));
cudaMalloc(&d_binCounts, d_nDBs * d_nBins * sizeof(uint));
cudaMalloc(&d_dbIdx, d_nDBs * _N * sizeof(uint));
countBins(d_binCounts, assignd, assignd2, _N);
histogram(d_nBins);
// outputVecUint("binCounts1: ", d_binCounts + 11000, 1000);
// outputVecUint("binCounts2: ", d_binCounts + 11000 + 16777216, 1000);
cudaMemset(d_binPrefix, 0, d_nDBs * d_nBins * sizeof(uint));
scan(d_binPrefix, d_binCounts, d_nDBs * d_nBins, false);
#endif
cudaMemset(d_dbIdx, 0, d_nDBs * _N * sizeof(uint));
sortIdx(d_dbIdx, assignd, assignd2, _N);
// store references to original vectors
d_dbVec = _A;
d_NdbVec = _N; // store number of original vectors
cudaFree(assignd2);
cudaFree(assignd);
std::cout << "done with buildDB " << std::endl;
}
__global__ void assignPerturbationBestBinKernel2(int *_assignBin,
const float* _cb2, const float* _B, uint _nClusters2, uint _Brows,
uint _dim, uint _p, uint _vl, const uint* _assign1, uint _nClusters1,
uint _k1, uint _NP2, uint _c1scale, uint _dimBits, uint _nBins,
uint* _binIdx = NULL, float* _binDist = NULL) {
extern __shared__ float shmb[];
float* shmIter = shmb + _dim;
float* shm = shmIter;
shmIter += blockDim.x;
uint* binL1 = (uint*) shmIter;
shmIter += _p;
float* val = shmIter;
shmIter += _p;
uint* idx = (uint*) shmIter;
shmIter += _p;
// loop over all corresponding vectors in B
for (int iter = blockIdx.x; iter < _Brows; iter += gridDim.x) {
__syncthreads();
// load the vector to shared mem
if (threadIdx.x < _dim)
shmb[threadIdx.x] = _B[iter * _dim + threadIdx.x];
for (uint pert = 0; pert < 1; pert++) {
__syncthreads();
uint pIdx = pertIdx(threadIdx.x, _dimBits, pert);
// load perturbed vector
float b = shmb[pIdx];
// loop over the best k1 first-level bins
for (int k = 0; k < _k1; k++) {
if (threadIdx.x < _p) {
binL1[threadIdx.x] =
_assign1[(iter * 1 + pert) * _k1 * _p
+ k * _p + threadIdx.x];
}
__syncthreads();
const float* A = _cb2 + pert * _nClusters1 * _nClusters2 * _dim;
// each segment needs a different codebook
const float* cb;
if (threadIdx.x < _dim) {
uint p = threadIdx.x / _vl;
cb = A
+ getCBIdx(p, binL1[p], _nClusters1, _vl,
_nClusters2) + (threadIdx.x % _vl);
}
// loop over all vectors of A
for (int binL2 = 0; binL2 < _nClusters2; binL2++) {
if (threadIdx.x < _dim)
shm[threadIdx.x] = sqr(b - cb[binL2 * _vl]);
// compute the sum of differences for the vector segments
// i.e. all _p segments in parallel
for (uint stride = _vl >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if ((threadIdx.x) < _p * stride) {
uint p = threadIdx.x / stride * _vl;
uint bias = threadIdx.x % stride;
shm[p + bias] += shm[p + bias + stride];
}
}
__syncthreads();
// store the best result
if (threadIdx.x < _p) {
if ((val[threadIdx.x] > shm[threadIdx.x * _vl])
|| ((k + binL2) == 0)) {
val[threadIdx.x] = shm[threadIdx.x * _vl];
idx[threadIdx.x] = binL2
+ binL1[threadIdx.x] * _c1scale;
}
}
__syncthreads();
}
}
if (_binDist) {
if ((pert == 0) && (threadIdx.x < _p)) {
_binIdx[iter * _p + threadIdx.x] = idx[threadIdx.x];
_binDist[iter * _p + threadIdx.x] = val[threadIdx.x];
}
}
__syncthreads();
// write out the compact best bin index
if (threadIdx.x == 0) {
for (int p = 1; p < _p; p++)
idx[0] = idx[0] * _nClusters1 * _nClusters2 + idx[p];
idx[0] += _nBins * pert;
#if USE_HASH
idx[0] = idx[0] % HASH_SIZE;
#endif
_assignBin[(iter * 1 + pert)] = idx[0];
}
}
}
}
__global__ void assignPerturbationBestBinKernel2Sparse(int *_assignBin,
const float* _cb2, const float* _B, uint _nClusters2, uint _Brows,
uint _dim, uint _p, uint _vl, const uint* _assign1, uint _nClusters1,
uint _k1, uint _NP2, uint _c1scale, uint _dimBits, uint _nBins,
const uint *_sparseBin, bool _sparse,
uint* _binIdx = NULL, float* _binDist = NULL) {
extern __shared__ float shmb[];
float* shmIter = shmb + _dim;
float* shm = shmIter;
shmIter += blockDim.x;
uint* binL1 = (uint*) shmIter;
shmIter += _p;
float* val = shmIter;
shmIter += _p;
uint* idx = (uint*) shmIter;
shmIter += _p;
bool &process = *(bool*) shmIter;
shmIter += 1;
// loop over all corresponding vectors in B
for (int iter = blockIdx.x; iter < _Brows; iter += gridDim.x) {
__syncthreads();
// check if this vector is actually in a sparse / dense region
if (threadIdx.x < _p) {
binL1[threadIdx.x] = _assign1[(iter * 1) * _k1 * _p
+ threadIdx.x];
}
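// NOTE: the compact level-1 bin index computed below is unrolled for exactly _p == 4 segments.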
if (threadIdx.x == 0) {
uint binIdx;
binIdx = binL1[0];
binIdx = binL1[1] + binIdx * _nClusters1;
binIdx = binL1[2] + binIdx * _nClusters1;
binIdx = binL1[3] + binIdx * _nClusters1;
process = true;
if (_sparseBin[binIdx] != _sparse) {
for (uint pert = 0; pert < 1; pert++) {
_assignBin[(iter * 1 + pert)] = -1;
}
process = false;
}
}
__syncthreads();
if (!process)
continue;
// load the vector to shared mem
if (threadIdx.x < _dim)
shmb[threadIdx.x] = _B[iter * _dim + threadIdx.x];
for (uint pert = 0; pert < 1; pert++) {
__syncthreads();
uint pIdx = pertIdx(threadIdx.x, _dimBits, pert);
// load perturbed vector
float b = shmb[pIdx];
// loop over the best k1 first-level bins
for (int k = 0; k < _k1; k++) {
if (threadIdx.x < _p) {
binL1[threadIdx.x] =
_assign1[(iter * 1 + pert) * _k1 * _p
+ k * _p + threadIdx.x];
}
__syncthreads();
const float* A = _cb2 + pert * _nClusters1 * _nClusters2 * _dim;
// each segment needs a different codebook
const float* cb;
if (threadIdx.x < _dim) {
uint p = threadIdx.x / _vl;
cb = A
+ getCBIdx(p, binL1[p], _nClusters1, _vl,
_nClusters2) + (threadIdx.x % _vl);
}
// loop over all vectors of A
for (int binL2 = 0; binL2 < _nClusters2; binL2++) {
if (threadIdx.x < _dim)
shm[threadIdx.x] = sqr(b - cb[binL2 * _vl]);
// compute the sum of differences for the vector segments
// i.e. all _p segments in parallel
for (uint stride = _vl >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if ((threadIdx.x) < _p * stride) {
uint p = threadIdx.x / stride * _vl;
uint bias = threadIdx.x % stride;
shm[p + bias] += shm[p + bias + stride];
}
}
__syncthreads();
// store the best result
if (threadIdx.x < _p) {
if ((val[threadIdx.x] > shm[threadIdx.x * _vl])
|| ((k + binL2) == 0)) {
val[threadIdx.x] = shm[threadIdx.x * _vl];
idx[threadIdx.x] = binL2
+ binL1[threadIdx.x] * _c1scale;
}
}
__syncthreads();
}
}
if (_binDist) {
if ((pert == 0) && (threadIdx.x < _p)) {
_binIdx[iter * _p + threadIdx.x] = idx[threadIdx.x];
_binDist[iter * _p + threadIdx.x] = val[threadIdx.x];
}
}
__syncthreads();
// write out the compact best bin index
if (threadIdx.x == 0) {
for (int p = 1; p < _p; p++)
idx[0] = idx[0] * _nClusters1 * _nClusters2 + idx[p];
idx[0] += _nBins * pert;
#if USE_HASH
idx[0] = idx[0] % HASH_SIZE;
#endif
_assignBin[(iter * 1 + pert)] = idx[0];
}
}
}
}
void PerturbationProTree::getBestBinAssignment2(int *_assignBin,
const float* _cb2, const float* _B, uint _nClusters2, uint _Brows,
const uint *_assign1, uint _k1, uint _nClusters1) const {
uint NP2 = log2(_k1 * _nClusters2);
std::cout << "NP2 " << NP2 << std::endl;
// assert(d_dim >= (2 * NP2));
int nThreads = (d_dim > (2 * NP2)) ? d_dim : (2 * NP2);
dim3 block(nThreads, 1, 1);
dim3 grid((_Brows < 1024) ? _Brows : 1024, 1, 1);
uint shm = (d_dim + nThreads + 3 * d_p) * sizeof(float);
std::cout << "shm" << shm << std::endl;
std::cout << "d_nBins" << d_nBins << std::endl;
uint c1scale = _nClusters2;
assignPerturbationBestBinKernel2<<<grid, block, shm>>>(_assignBin, _cb2, _B,
_nClusters2, _Brows, d_dim, d_p, d_vl, _assign1, _nClusters1, _k1,
NP2, c1scale, d_dimBits, d_nBins);
checkCudaErrors(cudaDeviceSynchronize());
}
void PerturbationProTree::getBestBinAssignment2Sparse(int *_assignBin,
const float* _cb2, const float* _B, uint _nClusters2, uint _Brows,
const uint *_assign1, uint _k1, uint _nClusters1, bool _sparse) const {
uint NP2 = log2(_k1 * _nClusters2);
std::cout << "NP2 " << NP2 << std::endl;
// assert(d_dim >= (2 * NP2));
int nThreads = (d_dim > (2 * NP2)) ? d_dim : (2 * NP2);
dim3 block(nThreads, 1, 1);
dim3 grid((_Brows < 1024) ? _Brows : 1024, 1, 1);
uint shm = (d_dim + nThreads + 3 * d_p + 1) * sizeof(float);
std::cout << "shm" << shm << std::endl;
std::cout << "d_nBins" << d_nBins << std::endl;
uint c1scale = _nClusters2;
assignPerturbationBestBinKernel2Sparse<<<grid, block, shm>>>(_assignBin,
_cb2, _B, _nClusters2, _Brows, d_dim, d_p, d_vl, _assign1,
_nClusters1, _k1, NP2, c1scale, d_dimBits, d_nBins,
d_sparseBin, _sparse);
checkCudaErrors(cudaDeviceSynchronize());
}
void PerturbationProTree::getBestBinLineAssignment2(int *_assignBin,
uint* _l2Idx, float* _l2dist, const float* _cb2, const float* _B,
uint _nClusters2, uint _Brows, const uint *_assign1, uint _k1,
uint _nClusters1) const {
uint NP2 = log2(_k1 * _nClusters2);
std::cout << "NP2 " << NP2 << std::endl;
// assert(d_dim >= (2 * NP2));
int nThreads = (d_dim > (2 * NP2)) ? d_dim : (2 * NP2);
dim3 block(nThreads, 1, 1);
dim3 grid((_Brows < 1024) ? _Brows : 1024, 1, 1);
uint shm = (d_dim + nThreads + 2 * d_p) * sizeof(float);
std::cout << "shm" << shm << std::endl;
std::cout << "d_nBins" << d_nBins << std::endl;
uint c1scale = _nClusters2;
assignPerturbationBestBinKernel2<<<grid, block, shm>>>(_assignBin, _cb2, _B,
_nClusters2, _Brows, d_dim, d_p, d_vl, _assign1, _nClusters1, _k1,
NP2, c1scale, d_dimBits, d_nBins, _l2Idx, _l2dist);
checkCudaErrors(cudaDeviceSynchronize());
}
void PerturbationProTree::setDB(uint _N, const uint* _prefix,
const uint* _counts, const uint* _dbIdx) {
d_nBins = pow(d_nClusters, d_p) * pow(d_nClusters2, d_p);
d_dbVec = NULL;
d_NdbVec = _N;
if (!d_binPrefix) {
cudaMalloc(&d_binPrefix, HASH_SIZE * sizeof(uint));
std::cout << "got binPrefix: " << d_binPrefix << std::endl;
cudaMalloc(&d_binCounts, HASH_SIZE * sizeof(uint));
cudaMalloc(&d_dbIdx, d_nDBs * _N * sizeof(uint));
}
if ((d_binPrefix == NULL) || (d_binCounts == NULL) || (d_dbIdx == NULL)) {
std::cout << "setDB did not get memory!" << std::endl;
std::cout << "sizes: " << (HASH_SIZE * sizeof(uint)) << std::endl;
std::cout << "sizes: " << (_N * d_nDBs * sizeof(uint)) << std::endl;
exit(1);
}
if (HASH_SIZE < 100000000) {
cudaMemcpy(d_binPrefix, _prefix, HASH_SIZE * sizeof(uint),
cudaMemcpyHostToDevice);
cudaMemcpy(d_binCounts, _counts, HASH_SIZE * sizeof(uint),
cudaMemcpyHostToDevice);
} else {
uint chunk = 100000000;
int chunks = HASH_SIZE / chunk;
for (int c = 0; c < chunks; c++) {
cudaMemcpy(d_binPrefix + c * chunk, _prefix + c * chunk,
chunk * sizeof(uint), cudaMemcpyHostToDevice);
cudaMemcpy(d_binCounts + c * chunk, _counts + c * chunk,
chunk * sizeof(uint), cudaMemcpyHostToDevice);
}
}
cudaMemcpy(d_dbIdx, _dbIdx, _N * sizeof(uint), cudaMemcpyHostToDevice);
histogram(HASH_SIZE);
}
void PerturbationProTree::buildKBestDB(const float* _A, uint _N) {
uint* assignd;
int* assignedBins;
uint k1 = 16;
d_nBins = pow(d_nClusters, d_p) * pow(d_nClusters2, d_p);
cudaMalloc(&assignd, k1 * _N * d_p * d_nDBs * sizeof(uint));
cudaMalloc(&assignedBins, _N * d_nDBs * sizeof(uint));
if ((assignd == NULL) || (assignedBins == NULL)) {
std::cout << "buildDB did not get memory!" << std::endl;
std::cout << "sizes: " << (k1 * _N * d_p * d_nDBs * sizeof(uint)) << std::endl;
std::cout << "sizes: " << (_N * d_nDBs * sizeof(uint)) << std::endl;
exit(1);
}
getKBestAssignment(assignd, d_multiCodeBook, _A, d_nClusters, _N, k1);
getBestBinAssignment2(assignedBins, d_multiCodeBook2, _A, d_nClusters2, _N,
assignd, k1, d_nClusters);
std::cout << "clusters: " << d_nClusters << " " << d_nClusters2 << std::endl;
std::cout << "number of data bases " << d_nDBs << std::endl;
std::cout << "number of bins: " << d_nBins << std::endl;
#if USE_HASH
if (!d_binPrefix) {
cudaMalloc(&d_binPrefix, HASH_SIZE * sizeof(uint));
cudaMalloc(&d_binCounts, HASH_SIZE * sizeof(uint));
cudaMalloc(&d_dbIdx, d_nDBs * _N * sizeof(uint));
}
countBins(d_binCounts, assignedBins, _N);
histogram(HASH_SIZE);
cudaMemset(d_binPrefix, 0, HASH_SIZE * sizeof(uint));
scan(d_binPrefix, d_binCounts, HASH_SIZE, false);
#else
cudaMalloc(&d_binPrefix, d_nDBs * d_nBins * sizeof(uint));
cudaMalloc(&d_binCounts, d_nDBs * d_nBins * sizeof(uint));
cudaMalloc(&d_dbIdx, d_nDBs * _N * sizeof(uint));
countBins(d_binCounts, assignedBins, _N);
histogram(d_nBins);
// outputVecUint("binCounts1: ", d_binCounts, 1000);
// outputVecUint("binCounts2: ", d_binCounts + 11000 + 16777216, 1000);
cudaMemset(d_binPrefix, 0, d_nDBs * d_nBins * sizeof(uint));
scan(d_binPrefix, d_binCounts, d_nDBs * d_nBins, false);
#endif
cudaMemset(d_dbIdx, 0, d_nDBs * _N * sizeof(uint));
sortIdx(d_dbIdx, assignedBins, _N);
// store references to original vectors
d_dbVec = _A;
d_NdbVec = _N; // store number of original vectors
cudaFree(assignedBins);
cudaFree(assignd);
std::cout << "done with buildDB " << std::endl;
}
void PerturbationProTree::buildKBestDBSparse(const float* _A, uint _N,
bool _sparse) {
uint* assignd;
int* assignedBins;
// TODO
uint k1 = 1;
// uint k1 = 8;
// k1 = 32;
d_nBins = pow(d_nClusters, d_p) * pow(d_nClusters2, d_p);
cudaMalloc(&assignd, k1 * _N * d_p * d_nDBs * sizeof(uint));
cudaMalloc(&assignedBins, _N * d_nDBs * sizeof(uint));
if ((assignd == NULL) || (assignedBins == NULL)) {
std::cout << "buildDB did not get memory!" << std::endl;
std::cout << "sizes: " << (k1 * _N * d_p * d_nDBs * sizeof(uint)) << std::endl;
std::cout << "sizes: " << (_N * d_nDBs * sizeof(uint)) << std::endl;
exit(1);
}
getKBestAssignment(assignd, d_multiCodeBook, _A, d_nClusters, _N, k1);
// outputVecUint("assignd", assignd, 1000);
#if 0
/*** new block to test lines */
float* assignVal;
uint* assignIdx;
cudaMalloc(&assignVal,
k1 * d_nClusters2 * _N * d_p * d_nDBs * sizeof(float));
cudaMalloc(&assignIdx,
k1 * d_nClusters2 * _N * d_p * d_nDBs * sizeof(uint));
if ((assignVal == NULL) || (assignIdx == NULL)) {
std::cout << "buildDB assignVal/assignIdx did not get memory!" << std::endl;
exit(1);
}
getKBestAssignment2(assignVal, assignIdx, d_multiCodeBook2, _A,
d_nClusters2, _N, assignd, d_nClusters, k1);
testLineDist(assignVal, assignIdx, k1, _N);
/*** end of block */
#endif
getBestBinAssignment2Sparse(assignedBins, d_multiCodeBook2, _A,
d_nClusters2, _N, assignd, k1, d_nClusters, _sparse);
// outputVecUint("assignedBins", assignedBins + 900000, 200);
outputVecInt("assignedBins", assignedBins, 1000);
std::cout << "clusters: " << d_nClusters << " " << d_nClusters2 << std::endl;
std::cout << "number of data bases " << d_nDBs << std::endl;
std::cout << "number of bins: " << d_nBins << std::endl;
#if USE_HASH
if (!d_binPrefix) {
cudaMalloc(&d_binPrefix, HASH_SIZE * sizeof(uint));
cudaMalloc(&d_binCounts, HASH_SIZE * sizeof(uint));
cudaMalloc(&d_dbIdx, d_nDBs * _N * sizeof(uint));
}
countBins(d_binCounts, assignedBins, _N);
histogram(HASH_SIZE);
cudaMemset(d_binPrefix, 0, HASH_SIZE * sizeof(uint));
scan(d_binPrefix, d_binCounts, HASH_SIZE, false);
#else
cudaMalloc(&d_binPrefix, d_nDBs * d_nBins * sizeof(uint));
cudaMalloc(&d_binCounts, d_nDBs * d_nBins * sizeof(uint));
cudaMalloc(&d_dbIdx, d_nDBs * _N * sizeof(uint));
countBins(d_binCounts, assignedBins, _N);
histogram(d_nBins);
// outputVecUint("binCounts1: ", d_binCounts, 1000);
// outputVecUint("binCounts2: ", d_binCounts + 11000 + 16777216, 1000);
cudaMemset(d_binPrefix, 0, d_nDBs * d_nBins * sizeof(uint));
scan(d_binPrefix, d_binCounts, d_nDBs * d_nBins, false);
#endif
cudaMemset(d_dbIdx, 0, d_nDBs * _N * sizeof(uint));
sortIdx(d_dbIdx, assignedBins, _N);
// store references to original vectors
d_dbVec = _A;
d_NdbVec = _N; // store number of original vectors
cudaFree(assignedBins);
cudaFree(assignd);
std::cout << "done with buildDBSparse " << std::endl;
}
void PerturbationProTree::buildKBestLineDB(const float* _A, uint _N) {
uint* assignd;
float* l1Dist;
float* l2Dist;
uint* l2Idx;
int* assignedBins;
uint k1 = 8;
d_nBins = pow(d_nClusters, d_p) * pow(d_nClusters2, d_p);
cudaMalloc(&assignd, k1 * _N * d_p * d_nDBs * sizeof(uint));
cudaMalloc(&l1Dist, d_nClusters * _N * d_p * sizeof(float));
cudaMalloc(&l2Dist, _N * d_p * sizeof(float));
cudaMalloc(&l2Idx, _N * d_p * sizeof(uint));
cudaMalloc(&assignedBins, _N * d_nDBs * sizeof(uint));
if ((assignd == NULL) || (assignedBins == NULL)) {
std::cout << "buildDB did not get memory!" << std::endl;
exit(1);
}
getKBestLineAssignment(assignd, l1Dist, d_multiCodeBook, _A, d_nClusters,
_N, k1);
// outputVecUint("assignd", assignd, 1000);
getBestBinLineAssignment2(assignedBins, l2Idx, l2Dist, d_multiCodeBook2, _A,
d_nClusters2, _N, assignd, k1, d_nClusters);
outputVecUint("l2Idx", l2Idx, 1000);
assembleLines(l1Dist, l2Idx, l2Dist, _N);
// outputVecUint("assignedBins", assignedBins + 900000, 200);
std::cout << "clusters: " << d_nClusters << " " << d_nClusters2 << std::endl;
std::cout << "number of data bases " << d_nDBs << std::endl;
std::cout << "number of bins: " << d_nBins << std::endl;
#if USE_HASH
cudaMalloc(&d_binPrefix, HASH_SIZE * sizeof(uint));
cudaMalloc(&d_binCounts, HASH_SIZE * sizeof(uint));
cudaMalloc(&d_dbIdx, d_nDBs * _N * sizeof(uint));
countBins(d_binCounts, assignedBins, _N);
histogram(HASH_SIZE);
cudaMemset(d_binPrefix, 0, HASH_SIZE * sizeof(uint));
scan(d_binPrefix, d_binCounts, HASH_SIZE, false);
#else
cudaMalloc(&d_binPrefix, d_nDBs * d_nBins * sizeof(uint));
cudaMalloc(&d_binCounts, d_nDBs * d_nBins * sizeof(uint));
cudaMalloc(&d_dbIdx, d_nDBs * _N * sizeof(uint));
countBins(d_binCounts, assignedBins, _N);
histogram(d_nBins);
// outputVecUint("binCounts1: ", d_binCounts, 1000);
// outputVecUint("binCounts2: ", d_binCounts + 11000 + 16777216, 1000);
cudaMemset(d_binPrefix, 0, d_nDBs * d_nBins * sizeof(uint));
scan(d_binPrefix, d_binCounts, d_nDBs * d_nBins, false);
#endif
cudaMemset(d_dbIdx, 0, d_nDBs * _N * sizeof(uint));
sortIdx(d_dbIdx, assignedBins, _N);
// store references to original vectors
d_dbVec = _A;
d_NdbVec = _N; // store number of original vectors
// cudaFree(l2Idx); // do not free as this array is stored in d_l2Idx;
cudaFree(l2Dist);
cudaFree(l1Dist);
cudaFree(assignedBins);
cudaFree(assignd);
std::cout << "done with buildDB " << std::endl;
}
// each block is responsible for one vector; blockDim.x should be _dim
// requires at least _dim floats of shared memory
// loops multiple times to process all B vectors
// _vl is the length of the _p vector segments (should be a power of two)
__global__ void assignPerturbationKBestClusterKernel2(float *_assignVal,
uint* _assignIdx, const float* _cb2, const float* _B, uint _nClusters2,
uint _Brows, uint _dim, uint _p, uint _vl, const uint* _assign1,
uint _nClusters1, uint _k1, uint _NP2, uint _c1scale, uint _dimBits) {
extern __shared__ float shmb[];
float* shm = shmb + _dim;
float* shmIter = shm + blockDim.x;
uint* shmIdx = (uint*) shmIter;
shmIter += blockDim.x;
uint* binL1 = (uint*) shmIter;
shmIter += _p;
float* val = shmIter;
shmIter += _p * _k1 * _nClusters2;
uint* idx = (uint*) shmIter;
shmIter += _p * _k1 * _nClusters2;
// loop over all corresponding vectors in B
for (int iter = blockIdx.x; iter < _Brows; iter += gridDim.x) {
__syncthreads();
// load the vector to shared mem
if (threadIdx.x < _dim)
shmb[threadIdx.x] = _B[iter * _dim + threadIdx.x];
for (uint pert = 0; pert < 1; pert++) {
__syncthreads();
uint pIdx = pertIdx(threadIdx.x, _dimBits, pert);
// load perturbed vector
float b = shmb[pIdx];
// loop over the best k1 first-level bins
for (int k = 0; k < _k1; k++) {
if (threadIdx.x < _p) {
binL1[threadIdx.x] =
_assign1[(iter * 1 + pert) * _k1 * _p
+ k * _p + threadIdx.x];
// printf(" npert: %d tid: %d pert: %d binL1 %d \n", 1, threadIdx.x, pert, binL1[threadIdx.x]);
}
__syncthreads();
const float* A = _cb2 + pert * _nClusters1 * _nClusters2 * _dim;
// each segment needs a different codebook
const float* cb;
if (threadIdx.x < _dim) {
uint p = threadIdx.x / _vl;
cb = A
+ getCBIdx(p, binL1[p], _nClusters1, _vl,
_nClusters2) + (threadIdx.x % _vl);
}
// loop over all vectors of A
for (int binL2 = 0; binL2 < _nClusters2; binL2++) {
if (threadIdx.x < _dim)
shm[threadIdx.x] = sqr(b - cb[binL2 * _vl]);
// compute the sum of differences for the vector segments
// i.e. all _p segments in parallel
for (uint stride = _vl >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if ((threadIdx.x) < _p * stride) {
uint p = threadIdx.x / stride * _vl;
uint bias = threadIdx.x % stride;
shm[p + bias] += shm[p + bias + stride];
}
}
__syncthreads();
// store the result
if (threadIdx.x < _p) {
val[binL2 + k * _nClusters2
+ threadIdx.x * _k1 * _nClusters2] =
shm[threadIdx.x * _vl];
idx[binL2 + k * _nClusters2
+ threadIdx.x * _k1 * _nClusters2] = binL2
+ binL1[threadIdx.x] * _c1scale;
}
__syncthreads();
}
}
// sort the results
for (int i = 0; i < _p; i++) {
if (threadIdx.x < _NP2)
shm[threadIdx.x] = 1000000000.;
// copy to original shm
if (threadIdx.x < _k1 * _nClusters2) {
shm[threadIdx.x] = val[threadIdx.x + i * _k1 * _nClusters2];
shmIdx[threadIdx.x] = idx[threadIdx.x
+ i * _k1 * _nClusters2];
}
__syncthreads();
bitonic3(shm, shmIdx, _NP2);
if (threadIdx.x < _k1 * _nClusters2) {
val[threadIdx.x + i * _k1 * _nClusters2] = shm[threadIdx.x];
idx[threadIdx.x + i * _k1 * _nClusters2] =
shmIdx[threadIdx.x];
}
__syncthreads();
}
// write out the sorted bins
for (int p = 0; p < _p; p++) {
if (threadIdx.x < _k1 * _nClusters2) {
_assignVal[(iter * 1 + pert) * _k1 * _p
* _nClusters2 + p * _k1 * _nClusters2 + threadIdx.x] =
val[threadIdx.x + p * _k1 * _nClusters2];
_assignIdx[(iter * 1 + pert) * _k1 * _p
* _nClusters2 + p * _k1 * _nClusters2 + threadIdx.x] =
idx[threadIdx.x + p * _k1 * _nClusters2];
}
}
}
}
}
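/* Illustrative sketch (disabled, not part of the build): a host-side reference
 * of the quantity assignPerturbationKBestClusterKernel2 accumulates per part,
 * i.e. the squared L2 distance between one _vl-long segment of the (here
 * unperturbed) query vector and the matching codeword segment. The name
 * refSegmentDist and the flat argument layout are illustrative assumptions. */
#if 0
static float refSegmentDist(const float* b,  // query vector, length p * vl
		const float* cbSeg,                  // codeword segment, length vl
		uint part, uint vl) {
	float sum = 0.f;
	for (uint i = 0; i < vl; i++) {
		float d = b[part * vl + i] - cbSeg[i];
		sum += d * d; // same value the kernel reduces segment-wise in shared memory
	}
	return sum;
}
#endif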
void PerturbationProTree::getKBestAssignment2(float *_assignVal,
uint *_assignIdx, const float* _cb2, const float* _B, uint _nClusters2,
uint _Brows, const uint *_assign1, uint _nClusters1, uint _k1) const {
uint NP2 = log2(_k1 * _nClusters2);
std::cout << "NP2 " << NP2 << std::endl;
// assert(d_dim >= (2 * NP2));
int nThreads = (d_dim > (2 * NP2)) ? d_dim : (2 * NP2);
dim3 block(nThreads, 1, 1);
dim3 grid((_Brows < 1024) ? _Brows : 1024, 1, 1);
uint shm = (d_dim + 2 * nThreads + d_p + 2 * _k1 * d_p * d_nClusters2)
* sizeof(float);
std::cout << "shm" << shm << std::endl;
uint c1scale = d_nClusters2;
assignPerturbationKBestClusterKernel2<<<grid, block, shm>>>(_assignVal,
_assignIdx, _cb2, _B, _nClusters2, _Brows, d_dim, d_p, d_vl,
_assign1, _nClusters1, _k1, NP2, c1scale, d_dimBits);
checkCudaErrors(cudaDeviceSynchronize());
}
__device__ bool isTriangle(float _a2, float _b2, float _c2) {
float a, b, c;
a = sqrt(_a2);
b = sqrt(_b2);
c = sqrt(_c2);
if ((a + b) < c)
return false;
if ((b + c) < a)
return false;
if ((a + c) < b)
return false;
return true;
}
/** For each DB point the function projects the point onto the line between the best cluster2 center and each of the other selected cluster2 centers.
 * It returns the distance to the closest line and the index of the corresponding cluster.
 * This is done only for the first perturbation and for all parts individually.
 *
 * The kernel should only be called with blockDim.x <= 32!
 */
__global__ void lineProjectionKernel(float *_lineDist, float* _clusterDist,
const float* _assignVal, const uint* _assignIdx, const float* _cbDist,
uint _p, uint _k1, uint _nClusters1, uint _nClusters2, uint _N) {
extern __shared__ float shm[];
float* shmIter = shm;
float* val = shmIter;
shmIter += blockDim.x;
uint* idx = (uint*) shmIter;
shmIter += blockDim.x;
float* lambda = shmIter;
shmIter += blockDim.x;
float* dist = shmIter;
shmIter += blockDim.x;
for (int iter = blockIdx.x; iter < _N; iter += gridDim.x) {
uint pert = 0;
float clusterDist = 0.;
float lineDist = 0.;
__syncthreads();
for (int p = 0; p < _p; p++) {
// read the sorted assignment
val[threadIdx.x] = _assignVal[(iter * 1 + pert) * _k1
* _p * _nClusters2 + p * _k1 * _nClusters2 + threadIdx.x];
idx[threadIdx.x] = _assignIdx[(iter * 1 + pert) * _k1
* _p * _nClusters2 + p * _k1 * _nClusters2 + threadIdx.x];
__syncthreads();
#if 1
if (threadIdx.x > 0) {
// const float* cbdist = _cbDist
// + idx[0] * _nClusters1 * _nClusters2 * _p
// + idx[threadIdx.x] * _p + p;
const float* cbdist = _cbDist
+ p * _nClusters1 * _nClusters2 * _nClusters1
* _nClusters2;
float c2 = *(cbdist + idx[0] * _nClusters1 * _nClusters2
+ idx[threadIdx.x]);
lambda[threadIdx.x] = project(val[0], val[threadIdx.x], c2,
dist[threadIdx.x]);
if (!isTriangle(val[0], val[threadIdx.x], c2))
printf("tIdx: %d %f (abc) %f %f %f \n", threadIdx.x,
dist[threadIdx.x], (val[0]), (val[threadIdx.x]),
(c2));
// if (iter == 0) printf( "tIdx: %d %f == %d \n", threadIdx.x, c2, idx[threadIdx.x]);
// dist[threadIdx.x] = c2;
} else {
clusterDist += val[0];
dist[0] = 123451234.;
}
#else
dist[threadIdx.x] = 123451234.;
if (threadIdx.x == 0)
clusterDist += val[0];
const float* cbdist = _cbDist
+ p * _nClusters1 * _nClusters2 * _nClusters1 * _nClusters2;
for (int j = 0; j < blockDim.x; j++) {
if (threadIdx.x != j) {
float c2 = *(cbdist + idx[j] * _nClusters1 * _nClusters2
+ idx[threadIdx.x]);
float d2;
lambda[threadIdx.x] = project(val[j], val[threadIdx.x], c2,
d2);
if (d2 < dist[threadIdx.x])
dist[threadIdx.x] = d2;
}
}
#endif
__syncthreads();
// reduction to find best axis
for (int stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if (threadIdx.x < stride) {
if (dist[threadIdx.x] > dist[threadIdx.x + stride]) {
dist[threadIdx.x] = dist[threadIdx.x + stride];
idx[threadIdx.x] = idx[threadIdx.x + stride];
lambda[threadIdx.x] = lambda[threadIdx.x + stride];
}
}
}
__syncthreads();
if (threadIdx.x == 0) {
if (iter == 0)
printf("%d %f %f \n ", p, dist[0], lambda[0]);
lineDist += dist[0];
}
__syncthreads();
}
if (threadIdx.x == 0) {
_lineDist[iter] = lineDist;
_clusterDist[iter] = clusterDist;
}
}
}
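/* Illustrative sketch (disabled): the point-to-line projection that project()
 * presumably computes from squared distances only (consistent with the
 * triangle check above). With a2 = |q - c0|^2, b2 = |q - ci|^2 and
 * c2 = |c0 - ci|^2, the law of cosines yields the projection parameter of q
 * onto the line through c0 and ci and the squared distance of q to that line.
 * refProject is an illustrative name, not the project() used by the kernels. */
#if 0
static float refProject(float a2, float b2, float c2, float& lineDist2) {
	// dot(q - c0, ci - c0) = (a2 + c2 - b2) / 2
	float dot = 0.5f * (a2 + c2 - b2);
	float lambda = dot / c2;               // position along the c0 -> ci axis
	lineDist2 = a2 - lambda * lambda * c2; // squared distance to the line
	return lambda;
}
#endif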
void PerturbationProTree::computeCBDist() {
if (d_codeBookDistL2)
cudaFree(d_codeBookDistL2);
cudaMalloc(&d_codeBookDistL2,
d_p * d_nClusters * d_nClusters * d_nClusters2 * d_nClusters2
* sizeof(float));
// consider codebook layout: one codebook per part: p0 cb, p1 cb, ...
uint distSize = d_nClusters * d_nClusters * d_nClusters2 * d_nClusters2;
for (int p = 0; p < d_p; p++) {
calcDist(d_codeBookDistL2 + p * distSize,
d_multiCodeBook2 + d_nClusters * d_nClusters2 * d_vl * p,
d_multiCodeBook2 + d_nClusters * d_nClusters2 * d_vl * p,
d_nClusters * d_nClusters2, d_nClusters * d_nClusters2, d_vl,
1);
}
}
void PerturbationProTree::computeCBL1L2Dist() {
if (d_codeBookDistL1L2)
cudaFree(d_codeBookDistL1L2);
cudaMalloc(&d_codeBookDistL1L2,
d_p * d_nClusters * d_nClusters * d_nClusters2 * sizeof(float));
// consider codebook layout: one codebook per part: p0 cb, p1 cb, ...
uint distSize = d_nClusters * d_nClusters * d_nClusters2;
for (int p = 0; p < d_p; p++) {
calcDist(d_codeBookDistL1L2 + p * distSize,
d_multiCodeBook2 + d_nClusters * d_nClusters2 * d_vl * p,
d_multiCodeBook + d_nClusters * d_vl * p,
d_nClusters * d_nClusters2, d_nClusters, d_vl, 1);
}
}
#if 0
void PerturbationProTree::computeCBL1L1Dist(uint _nParts) {
if (d_codeBookDistL1L2)
cudaFree(d_codeBookDistL1L2);
cudaMalloc(&d_codeBookDistL1L2,
_nParts * d_nClusters * d_nClusters * sizeof(float));
// consider codebook layout: one codebook per part: p0 cb, p1 cb, ...
uint distSize = d_nClusters * d_nClusters;
uint vl = d_dim / _nParts;
for (int p = 0; p < _nParts; p++) {
calcDist(d_codeBookDistL1L2 + p * distSize,
d_multiCodeBook + d_nClusters * vl * p,
d_multiCodeBook + d_nClusters * vl * p, d_nClusters,
d_nClusters, vl, 1);
}
outputVec("cbDist", d_codeBookDistL1L2, 16 * 4);
}
#endif
void PerturbationProTree::computeCBL1L1Dist(uint _nParts) {
if (d_codeBookDistL1L2)
cudaFree(d_codeBookDistL1L2);
cudaMalloc(&d_codeBookDistL1L2,
_nParts * d_nClusters * d_nClusters * sizeof(float));
// consider codebook layout: one codebook per part: p0 cb, p1 cb, ...
// uint distSize = d_nClusters * d_nClusters;
// uint vl = d_dim / _nParts;
calcDist(d_codeBookDistL1L2, d_multiCodeBook, d_multiCodeBook, d_nClusters,
d_nClusters, d_dim, _nParts);
outputVec("cbDist", d_codeBookDistL1L2, 16 * 4);
}
void PerturbationProTree::testLineDist(const float* _assignVal,
const uint* _assignIdx, uint _k1, uint _N) {
computeCBDist();
outputVec("cbdist", d_codeBookDistL2, 1000);
float* clusterDist;
float* lineDist;
cudaMalloc(&clusterDist, _N * sizeof(float));
cudaMalloc(&lineDist, _N * sizeof(float));
uint nThreads = 256;
nThreads = (nThreads > _k1 * d_nClusters2) ? _k1 * d_nClusters2 : nThreads;
dim3 block(nThreads, 1, 1);
dim3 grid((_N > 1024) ? 1024 : _N, 1, 1);
uint shmSize = 4 * block.x * sizeof(float);
lineProjectionKernel<<<grid, block, shmSize>>>(lineDist, clusterDist,
_assignVal, _assignIdx, d_codeBookDistL2, d_p, _k1, d_nClusters,
d_nClusters2, _N);
checkCudaErrors(cudaDeviceSynchronize());
float* cdist = new float[_N];
float* ldist = new float[_N];
cudaMemcpy(cdist, clusterDist, _N * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(ldist, lineDist, _N * sizeof(float), cudaMemcpyDeviceToHost);
float minD, maxD, avgD;
minD = cdist[0];
maxD = cdist[0];
avgD = 0;
for (int i = 0; i < _N; i++) {
if (cdist[i] < minD)
minD = cdist[i];
if (cdist[i] > maxD)
maxD = cdist[i];
avgD += cdist[i];
}
std::cout << "cluster dist (min, max, avg) " << minD << " " << maxD << " "
<< (avgD / _N) << std::endl;
// for (int i = 0; i < 1000; i++)
// std::cout << "\t " << ldist[i];
//
// std::cout << std::endl;
minD = ldist[0];
maxD = ldist[0];
avgD = 0;
for (int i = 0; i < _N; i++) {
if (ldist[i] < minD)
minD = ldist[i];
if (ldist[i] > maxD)
maxD = ldist[i];
avgD += ldist[i];
}
std::cout << "line dist (min, max, avg) " << minD << " " << maxD << " "
<< (avgD / _N) << std::endl;
cudaFree(lineDist);
cudaFree(clusterDist);
}
/** For each DB point the function projects the point onto the line between the best cluster2 center and each of the other selected cluster2 centers.
 * It returns the distance to the closest line and the index of the corresponding cluster.
 * This is done only for the first perturbation and for all parts individually.
 *
 * The kernel should only be called with blockDim.x <= 32!
 */
__global__ void lineProjectionKernel(uint *_lineIdx, float* _lineLambda,
const float* _cbDist, const float* _l1Dist, const uint* _l2Idx,
const float* _l2Dist, uint _nClusters1, uint _nClusters2, uint _p,
uint _N) {
extern __shared__ float shm[];
float* shmIter = shm;
float* l2Dist = shmIter;
shmIter += _p;
uint* idx = (uint*) shmIter;
shmIter += _p;
float* lambda = shmIter;
shmIter += blockDim.x;
float* dist = shmIter;
shmIter += blockDim.x;
float* lIdx = shmIter;
shmIter += blockDim.x;
for (int iter = blockIdx.x; iter < _N; iter += gridDim.x) {
__syncthreads();
if (threadIdx.x < _p) {
l2Dist[threadIdx.x] = _l2Dist[iter * _p + threadIdx.x];
idx[threadIdx.x] = _l2Idx[iter * _p + threadIdx.x];
}
__syncthreads();
float l1Dist = _l1Dist[iter * _nClusters1 * _p + threadIdx.x];
uint p = threadIdx.x / _nClusters1;
uint pIdx = threadIdx.x % _nClusters1;
if (idx[p] > (8 * 8 * 8 * 4)) {
printf("%d %d", p, idx[p]);
}
float c2 = _cbDist[p * _nClusters1 * _nClusters2 + idx[p] * _nClusters1
+ pIdx];
lambda[threadIdx.x] = project(l2Dist[threadIdx.x], l1Dist, c2,
dist[threadIdx.x]);
lIdx[threadIdx.x] = pIdx;
__syncthreads();
// reduction to find best axis
for (int stride = _nClusters1 >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if (pIdx < stride) {
if (dist[threadIdx.x] > dist[threadIdx.x + stride]) {
dist[threadIdx.x] = dist[threadIdx.x + stride];
lIdx[threadIdx.x] = lIdx[threadIdx.x + stride];
lambda[threadIdx.x] = lambda[threadIdx.x + stride];
}
}
}
__syncthreads();
if (threadIdx.x < _p) { // write one result per part
_lineIdx[iter * _p + threadIdx.x] = lIdx[threadIdx.x * _nClusters1];
_lineLambda[iter * _p + threadIdx.x] = lambda[threadIdx.x
* _nClusters1];
}
}
}
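/* Illustrative sketch (disabled): serial reference of the shared-memory
 * min-reduction used by the lineProjectionKernel variants above, which keeps
 * the distance, cluster index, and lambda of the closest line. refArgMinLine
 * is an illustrative name. */
#if 0
static void refArgMinLine(const float* dist, const uint* idx,
		const float* lambda, uint n,
		float& bestDist, uint& bestIdx, float& bestLambda) {
	bestDist = dist[0];
	bestIdx = idx[0];
	bestLambda = lambda[0];
	for (uint i = 1; i < n; i++) {
		if (dist[i] < bestDist) { // same comparison as the strided reduction
			bestDist = dist[i];
			bestIdx = idx[i];
			bestLambda = lambda[i];
		}
	}
}
#endif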
void PerturbationProTree::assembleLines(const float* _l1Dist, uint* _l2Idx,
const float* _l2Dist, uint _N) {
computeCBL1L2Dist();
if (!d_lineLambda)
cudaMalloc(&d_lineLambda, d_p * _N * sizeof(float));
if (!d_lineIdx)
cudaMalloc(&d_lineIdx, d_p * _N * sizeof(uint));
// if (!d_l2Idx)
// cudaMalloc(&d_l2Idx, d_p * _N * sizeof(uint));
d_l2Idx = _l2Idx;
uint nThreads = d_p * d_nClusters;
dim3 block(nThreads, 1, 1);
dim3 grid((_N > 1024) ? 1024 : _N, 1, 1);
uint shmSize = (2 * d_p + 3 * block.x) * sizeof(float);
lineProjectionKernel<<<grid, block, shmSize>>>(d_lineIdx, d_lineLambda,
d_codeBookDistL1L2, _l1Dist, _l2Idx, _l2Dist, d_nClusters,
d_nClusters2, d_p, _N);
checkCudaErrors(cudaDeviceSynchronize());
}
#if 0
__global__ void selectBinKernel(uint* _assign, uint* _nBins,
const float *_assignVal, const uint* _assignIdx,
const uint* _nElemPerBin, uint _Arows, uint _Brows, uint _p, uint _vl,
uint _nClusters1, uint _nClusters2, uint _k1, uint _k, uint _maxTrials,
uint _maxOutBin, uint _c1scale, const uint *_distSeq, uint _numDistSeq,
uint _distCluster, uint _nBinsPerDB) {
// instead of the Dijkstra do the brute-force thing
extern __shared__ float shm[];
float* shmIter = shm;
float* val = shmIter;
shmIter += _p * _k1 * _Arows;
uint* idx = (uint*) shmIter;
shmIter += _p * _k1 * _Arows;
uint* numbers = (uint*) shmIter;
shmIter += _p;
uint* denom = (uint*) shmIter;
shmIter += _p;
uint* outIdx = (uint*) shmIter;
shmIter += blockDim.x;
float* dist = (float*) shmIter;
shmIter += blockDim.x;
uint* nElem = (uint*) shmIter;
shmIter += blockDim.x;
uint* outBin = (uint*) shmIter;
shmIter += _maxOutBin;
uint& nOutBins = *(uint*) shmIter;
shmIter += 1;
uint& nElements = *(uint*) shmIter;
shmIter += 1;
uint& nIter = *(uint*) shmIter;
shmIter += 1;
if (threadIdx.x < _p) {
numbers[threadIdx.x] = _distCluster;
}
__syncthreads();
if (threadIdx.x == 0) {
denom[0] = 1;
for (int i = 1; i < _p; i++) {
denom[i] = denom[i - 1] * numbers[i - 1];
}
}
__syncthreads();
// loop over all corresponding vectors in B
for (int iter = blockIdx.x; iter < _Brows; iter += gridDim.x) {
for (uint pert = 0; pert < 1; pert++) {
__syncthreads();
// read the sorted assignment
for (int p = 0; p < _p; p++) {
if (threadIdx.x < _k1 * _Arows) {
val[threadIdx.x + p * _k1 * _Arows] = _assignVal[(iter
* 1 + pert) * _k1 * _p * _Arows
+ p * _k1 * _Arows + threadIdx.x];
idx[threadIdx.x + p * _k1 * _Arows] = _assignIdx[(iter
* 1 + pert) * _k1 * _p * _Arows
+ p * _k1 * _Arows + threadIdx.x];
}
}
// TODO loop multiple times to include sufficiently many bins at the end
if (threadIdx.x == 0) {
nOutBins = 0;
nElements = 0;
nIter = 0;
}
__syncthreads();
while ((nElements < _k) && (nIter < _maxTrials)
&& (nOutBins < _maxOutBin)) {
// generate all possible bins within the bounds given by numbers[]
// calc the corresponding binIdx in the DB and the distance to the cluster center
dist[threadIdx.x] = 0.;
// TODO fix 4
uint bin[4];// maximum number for p
for (int p = 0; p < _p; p++) {
bin[p] = (_distSeq[nIter * blockDim.x + threadIdx.x]
/ denom[p]) % numbers[p];
dist[threadIdx.x] += val[bin[p] + p * _k1 * _Arows];
bin[p] = idx[bin[p] + p * _k1 * _Arows];
}
if (threadIdx.x >= _numDistSeq) {
dist[threadIdx.x] = 99999999.;
}
__syncthreads();
// TODO _p1 + _p2
outIdx[threadIdx.x] = calcIdxSequential(bin, _p, _nClusters1,
_nClusters2, _c1scale) + pert * _nBinsPerDB;
#if USE_HASH
outIdx[threadIdx.x] = outIdx[threadIdx.x] % HASH_SIZE;
#endif
// printf("%d --- %d \n", threadIdx.x, outIdx[threadIdx.x]);
// if (threadIdx.x < 100)
// printf("%d: %d %d === %f -- %d \n", threadIdx.x, bin[0],
// bin[1], dist[threadIdx.x], outIdx[threadIdx.x]);
__syncthreads();
// sort all cluster centers based on the distance
bitonic3(dist, outIdx, blockDim.x);
// if (outIdx[threadIdx.x] < )
nElem[threadIdx.x] = _nElemPerBin[outIdx[threadIdx.x]];
__syncthreads();
// collect the number of vectors in all the bins
// prepare output of bins with one or more vectors until the maximum number of vectors is reached
// (performs a sequential reduction)
if (threadIdx.x == 0) {
for (int i = 0; i < blockDim.x; i++) {
if ((nElements > _k) || (nOutBins > _maxOutBin))
break;
// if (i < 10)
// printf("outIdx: %d, %f, %d \n", outIdx[i], dist[i],
// nElem[i]);
int n = nElem[i];
if (n > 0) {
outBin[nOutBins] = outIdx[i];
nOutBins++;
nElements += n;
}
}
nIter++;
}
__syncthreads();
}
// write out result
for (int b = threadIdx.x; b < nOutBins; b += blockDim.x)
_assign[(iter * 1 + pert) * _maxOutBin + b] =
outBin[b];
__syncthreads();
if (threadIdx.x == 0) {
_nBins[iter * 1 + pert] = nOutBins;
}
}
}
}
#else
__global__ void selectBinKernel(uint* _assign, uint* _nBins,
const float *_assignVal, const uint* _assignIdx,
const uint* _nElemPerBin, uint _Arows, uint _Brows, uint _p, uint _vl,
uint _nClusters1, uint _nClusters2, uint _k1, uint _k, uint _maxTrials,
uint _maxOutBin, uint _c1scale, const uint *_distSeq, uint _numDistSeq,
uint _distCluster, uint _nBinsPerDB) {
// instead of the Dijkstra do the brute-force thing
extern __shared__ float shm[];
float* shmIter = shm;
float* val = shmIter;
shmIter += _p * _k1 * _Arows;
uint* idx = (uint*) shmIter;
shmIter += _p * _k1 * _Arows;
uint* numbers = (uint*) shmIter;
shmIter += _p;
uint* denom = (uint*) shmIter;
shmIter += _p;
uint* outIdx = (uint*) shmIter;
shmIter += blockDim.x;
float* dist = (float*) shmIter;
shmIter += blockDim.x;
uint* nElem = (uint*) shmIter;
shmIter += blockDim.x;
uint* sBins = (uint*) shmIter;
shmIter += 4 * blockDim.x;
uint& nOutBins = *(uint*) shmIter;
shmIter += 1;
uint& nElements = *(uint*) shmIter;
shmIter += 1;
uint& nIter = *(uint*) shmIter;
shmIter += 1;
if (threadIdx.x < _p) {
numbers[threadIdx.x] = _distCluster;
}
__syncthreads();
if (threadIdx.x == 0) {
denom[0] = 1;
for (int i = 1; i < _p; i++) {
denom[i] = denom[i - 1] * numbers[i - 1];
}
}
// if (threadIdx.x == 0)
// printf("string select Bin \n \n \n");
//
// __syncthreads();
__syncthreads();
// loop over all corresponding vectors in B
for (int iter = blockIdx.x; iter < _Brows; iter += gridDim.x) {
// if (threadIdx.x == 0)
// printf("iter %d \n", iter);
__syncthreads();
for (uint pert = 0; pert < 1; pert++) {
__syncthreads();
// read the sorted assignment
for (int p = 0; p < _p; p++) {
if (threadIdx.x < _k1 * _Arows) {
val[threadIdx.x + p * _k1 * _Arows] = _assignVal[(iter
* 1 + pert) * _k1 * _p * _Arows
+ p * _k1 * _Arows + threadIdx.x];
idx[threadIdx.x + p * _k1 * _Arows] = _assignIdx[(iter
* 1 + pert) * _k1 * _p * _Arows
+ p * _k1 * _Arows + threadIdx.x];
}
}
// TODO loop multiple times to include sufficiently many bins at the end
if (threadIdx.x == 0) {
nOutBins = 0;
nElements = 0;
nIter = 0;
}
// if (threadIdx.x == 0)
// printf("before iter %d \n", nIter);
__syncthreads();
while ((nElements < _k) && (nIter < _maxTrials)
&& (nOutBins < _maxOutBin)) {
__syncthreads();
// if (threadIdx.x == 0)
// printf("iter %d \n", nIter);
// __syncthreads();
// generate all possible bins within the bounds given by numbers[]
// calc the corresponding binIdx in the DB and the distance to the cluster center
// dist[threadIdx.x] = 0.;
// TODO fix 4
// uint bin[4]; // maximum number for p
uint* bin = sBins + _p * threadIdx.x;
float ddd = 0.;
// for (int p = 0; p < _p; p++) {
// uint bp = (_distSeq[nIter * blockDim.x + threadIdx.x] / denom[p]) % numbers[p];
//
// uint ii = idx[bp + p * _k1 * _Arows];
// ddd += val[bp + p * _k1 * _Arows];
//
// bin[p] = ii;
//
// }
uint bp;
// explicitly unrolled to circumvent compiler segmentation fault
bp = (_distSeq[nIter * blockDim.x + threadIdx.x] / denom[0])
% numbers[0];
ddd += val[bp + 0 * _k1 * _Arows];
bin[0] = idx[bp + 0 * _k1 * _Arows];
bp = (_distSeq[nIter * blockDim.x + threadIdx.x] / denom[1])
% numbers[1];
ddd += val[bp + 1 * _k1 * _Arows];
bin[1] = idx[bp + 1 * _k1 * _Arows];
bp = (_distSeq[nIter * blockDim.x + threadIdx.x] / denom[2])
% numbers[2];
ddd += val[bp + 2 * _k1 * _Arows];
bin[2] = idx[bp + 2 * _k1 * _Arows];
bp = (_distSeq[nIter * blockDim.x + threadIdx.x] / denom[3])
% numbers[3];
ddd += val[bp + 3 * _k1 * _Arows];
bin[3] = idx[bp + 3 * _k1 * _Arows];
dist[threadIdx.x] = ddd;
//
// if (threadIdx.x >= _numDistSeq) {
// dist[threadIdx.x] = 99999999.;
// }
__syncthreads();
#if 1
// TODO _p1 + _p2
outIdx[threadIdx.x] = calcIdxSequential(bin, _p, _nClusters1,
_nClusters2, _c1scale) + pert * _nBinsPerDB;
#if USE_HASH
outIdx[threadIdx.x] = outIdx[threadIdx.x] % HASH_SIZE;
#endif
// printf("%d --- %d \n", threadIdx.x, outIdx[threadIdx.x]);
// if (threadIdx.x < 100)
// printf("%d: %d %d === %f -- %d \n", threadIdx.x, bin[0],
// bin[1], dist[threadIdx.x], outIdx[threadIdx.x]);
__syncthreads();
// sort all cluster centers based on the distance
bitonic3(dist, outIdx, blockDim.x);
// if (outIdx[threadIdx.x] < )
nElem[threadIdx.x] = 0;
nElem[threadIdx.x] = _nElemPerBin[outIdx[threadIdx.x]];
uint maxVecPB = 2048;
nElem[threadIdx.x] =
(nElem[threadIdx.x] < maxVecPB) ?
nElem[threadIdx.x] : maxVecPB;
// nElem[threadIdx.x] = 1;
uint nElReg = nElem[threadIdx.x];
// if ((blockIdx.x == 2) && (threadIdx.x == 0)) {
// for (int k = 0; k < blockDim.x; k++)
// printf("ne %d %d %d %5d \n ", _k, outIdx[k], nElem[k], nElements );
//
// printf("\n");
// }
__syncthreads();
scan_block2(nElem, true);
if ((threadIdx.x > 0)
&& (nElem[threadIdx.x - 1] + nElements) >= _k) {
nElReg = 0;
}
__syncthreads();
if (threadIdx.x == 0)
nElements += nElem[blockDim.x - 1];
__syncthreads();
if (nElReg)
nElem[threadIdx.x] = 1;
else
nElem[threadIdx.x] = 0;
__syncthreads();
scan_block2(nElem, false);
if (nElReg) {
uint pos = nElem[threadIdx.x] + nOutBins;
if (pos < _maxOutBin) {
_assign[(iter * 1 + pert) * _maxOutBin
+ pos] = outIdx[threadIdx.x];
// if (iter == 3) {
// printf("outputBin %d at pos %d %d \n", outIdx[threadIdx.x], pos, ((iter * 1 + pert) * _maxOutBin
// + pos) );
// }
}
}
__syncthreads();
if (threadIdx.x == 0) {
nOutBins += nElem[blockDim.x - 1];
nIter++;
// printf("iter %d, nOutBins %d, nEelm: %d \n", nIter,
// nOutBins, nElements);
}
#endif
__syncthreads();
}
__syncthreads();
if (threadIdx.x == 0) {
_nBins[iter * 1 + pert] =
(nOutBins > _maxOutBin) ? _maxOutBin : nOutBins;
// printf("iter %d, nBins %d, nElements: %d \n ", nIter, nOutBins,
// nElements);
}
}
}
}
#endif
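/* Illustrative sketch (disabled): the mixed-radix decoding used by the
 * selectBin kernels above. One entry of _distSeq encodes a candidate index per
 * part; part i is recovered as (seq / denom) % numbers[i], with denom the
 * running product of numbers[0..i-1] (exactly what denom[] holds in shared
 * memory). refDecodeDistSeq is an illustrative name. */
#if 0
static void refDecodeDistSeq(uint seq, const uint* numbers, uint p,
		uint* binPerPart) {
	uint denom = 1;
	for (uint i = 0; i < p; i++) {
		binPerPart[i] = (seq / denom) % numbers[i];
		denom *= numbers[i];
	}
}
#endif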
__global__ void binSortKernel(uint* _assign, const float* _dists,
const uint* _nBins, uint _maxBin, uint _N) {
extern __shared__ float shm[];
float* shmIter = shm;
float* dist = shmIter;
shmIter += 4096;
uint* idx = (uint*) shmIter;
shmIter += 4096;
if ((blockIdx.x == 0) && (threadIdx.x == 0))
printf("binSortKernel \n");
// loop over all corresponding vectors in B
for (int iter = blockIdx.x; iter < _N; iter += gridDim.x) {
uint nB = (_nBins[iter] < 4096) ? _nBins[iter] : 4096;
// load bin sequence;
uint p = threadIdx.x;
for (; p < nB; p += blockDim.x) {
dist[p] = _dists[iter * _maxBin + p];
idx[p] = _assign[iter * _maxBin + p];
}
for (; p < 4096; p += blockDim.x) {
dist[p] = 10000000000.;
idx[p] = 0;
}
__syncthreads();
bitonicLarge(dist, idx, 4096);
// write out the result
// write back only the first nB (sorted) entries
for (int p = threadIdx.x; p < nB; p += blockDim.x) {
_assign[iter * _maxBin + p] = idx[p];
}
__syncthreads();
}
}
__global__ void selectBinKernelUnsorted(uint* _assign, float* _dists,
uint* _nBins, const float *_assignVal, const uint* _assignIdx,
const uint* _nElemPerBin, uint _Arows, uint _Brows, uint _p, uint _vl,
uint _nClusters1, uint _nClusters2, uint _k1, uint _kMax, uint _k,
uint _maxTrials, uint _maxOutBin, uint _c1scale, const uint *_distSeq,
uint _numDistSeq, uint _distCluster,
uint _nBinsPerDB) {
// instead of the Dijkstra do the brute-force thing
extern __shared__ float shm[];
float* shmIter = shm;
float* val = shmIter;
shmIter += _p * _kMax;
uint* idx = (uint*) shmIter;
shmIter += _p * _kMax;
uint* numbers = (uint*) shmIter;
shmIter += _p;
uint* denom = (uint*) shmIter;
shmIter += _p;
uint* outIdx = (uint*) shmIter;
shmIter += blockDim.x;
float* dist = (float*) shmIter;
shmIter += blockDim.x;
uint* nElem = (uint*) shmIter;
shmIter += blockDim.x;
uint* sBins = (uint*) shmIter;
shmIter += 4 * blockDim.x;
uint& nOutBins = *(uint*) shmIter;
shmIter += 1;
uint& nElements = *(uint*) shmIter;
shmIter += 1;
uint& nIter = *(uint*) shmIter;
shmIter += 1;
if (threadIdx.x < _p) {
numbers[threadIdx.x] = _distCluster;
}
__syncthreads();
if (threadIdx.x == 0) {
denom[0] = 1;
for (int i = 1; i < _p; i++) {
denom[i] = denom[i - 1] * numbers[i - 1];
}
}
// if (threadIdx.x == 0)
// printf("string select Bin \n \n \n");
//
// __syncthreads();
__syncthreads();
// loop over all corresponding vectors in B
for (int iter = blockIdx.x; iter < _Brows; iter += gridDim.x) {
// if (threadIdx.x == 0)
// printf("iter %d \n", iter);
__syncthreads();
for (uint pert = 0; pert < 1; pert++) {
__syncthreads();
// read the sorted assignment
for (int p = 0; p < _p; p++) {
if (threadIdx.x < _kMax) {
val[threadIdx.x + p * _kMax] = _assignVal[(iter
* 1 + pert) * _k1 * _p * _Arows
+ p * _k1 * _Arows + threadIdx.x];
idx[threadIdx.x + p * _kMax] = _assignIdx[(iter
* 1 + pert) * _k1 * _p * _Arows
+ p * _k1 * _Arows + threadIdx.x];
}
}
// TODO loop multiple times to include sufficiently many bins at the end
if (threadIdx.x == 0) {
nOutBins = 0;
nElements = 0;
nIter = 0;
}
// if (threadIdx.x == 0)
// printf("before iter %d \n", nIter);
__syncthreads();
while ((nElements < _k) && (nIter < _maxTrials)
&& (nOutBins < _maxOutBin)) {
__syncthreads();
// if (threadIdx.x == 0)
// printf("iter %d \n", nIter);
// __syncthreads();
// generate all possible bins within the bounds given by numbers[]
// calc the corresponding binIdx in the DB and the distance to the cluster center
// dist[threadIdx.x] = 0.;
// TODO fix 4
// uint bin[4]; // maximum number for p
uint* bin = sBins + _p * threadIdx.x;
float ddd = 0.;
// for (int p = 0; p < _p; p++) {
// uint bp = (_distSeq[nIter * blockDim.x + threadIdx.x] / denom[p]) % numbers[p];
//
// uint ii = idx[bp + p * _k1 * _Arows];
// ddd += val[bp + p * _k1 * _Arows];
//
// bin[p] = ii;
//
// }
uint bp;
// explicitly unrolled to circumvent compiler segmentation fault
bp = (_distSeq[nIter * blockDim.x + threadIdx.x] / denom[0])
% numbers[0];
ddd += val[bp + 0 * _kMax];
bin[0] = idx[bp + 0 * _kMax];
bp = (_distSeq[nIter * blockDim.x + threadIdx.x] / denom[1])
% numbers[1];
ddd += val[bp + 1 * _kMax];
bin[1] = idx[bp + 1 * _kMax];
bp = (_distSeq[nIter * blockDim.x + threadIdx.x] / denom[2])
% numbers[2];
ddd += val[bp + 2 * _kMax];
bin[2] = idx[bp + 2 * _kMax];
bp = (_distSeq[nIter * blockDim.x + threadIdx.x] / denom[3])
% numbers[3];
ddd += val[bp + 3 * _kMax];
bin[3] = idx[bp + 3 * _kMax];
dist[threadIdx.x] = ddd;
//
// if (threadIdx.x >= _numDistSeq) {
// dist[threadIdx.x] = 99999999.;
// }
__syncthreads();
#if 1
// TODO _p1 + _p2
outIdx[threadIdx.x] = calcIdxSequential(bin, _p, _nClusters1,
_nClusters2, _c1scale) + pert * _nBinsPerDB;
__syncthreads();
#if USE_HASH
outIdx[threadIdx.x] = outIdx[threadIdx.x] % HASH_SIZE;
#endif
if ((blockIdx.x == 0) && (threadIdx.x == 0)) {
printf("binIdx: %d %d %d %d -> %d \n", bin[0], bin[1],
bin[2], bin[3], outIdx[threadIdx.x]);
}
// printf("%d --- %d \n", threadIdx.x, outIdx[threadIdx.x]);
// if (threadIdx.x < 100)
// printf("%d: %d %d === %f -- %d \n", threadIdx.x, bin[0],
// bin[1], dist[threadIdx.x], outIdx[threadIdx.x]);
__syncthreads();
// if (outIdx[threadIdx.x] < )
nElem[threadIdx.x] = 0;
nElem[threadIdx.x] = _nElemPerBin[outIdx[threadIdx.x]];
uint maxVecPB = 2;
nElem[threadIdx.x] =
(nElem[threadIdx.x] < maxVecPB) ?
nElem[threadIdx.x] : maxVecPB;
// nElem[threadIdx.x] = 1;
uint nElReg = nElem[threadIdx.x];
// if ((blockIdx.x == 2) && (threadIdx.x == 0)) {
// for (int k = 0; k < blockDim.x; k++)
// printf("ne %d %d %d %5d \n ", _k, outIdx[k], nElem[k], nElements );
//
// printf("\n");
// }
__syncthreads();
scan_block2(nElem, true);
if ((threadIdx.x > 0)
&& (nElem[threadIdx.x - 1] + nElements) >= _k) {
nElReg = 0;
}
__syncthreads();
if (threadIdx.x == 0)
nElements += nElem[blockDim.x - 1];
__syncthreads();
if (nElReg)
nElem[threadIdx.x] = 1;
else
nElem[threadIdx.x] = 0;
__syncthreads();
scan_block2(nElem, false);
if (nElReg) {
uint pos = nElem[threadIdx.x] + nOutBins;
if (pos < _maxOutBin) {
_assign[(iter * 1 + pert) * _maxOutBin
+ pos] = outIdx[threadIdx.x];
_dists[(iter * 1 + pert) * _maxOutBin
+ pos] = dist[threadIdx.x];
// if (iter == 3) {
// printf("outputBin %d at pos %d %d \n", outIdx[threadIdx.x], pos, ((iter * 1 + pert) * _maxOutBin
// + pos) );
// }
}
}
__syncthreads();
if (threadIdx.x == 0) {
nOutBins += nElem[blockDim.x - 1];
nIter++;
// printf("iter %d, nOutBins %d, nEelm: %d \n", nIter,
// nOutBins, nElements);
}
#endif
__syncthreads();
}
__syncthreads();
if (threadIdx.x == 0) {
_nBins[iter * 1 + pert] =
(nOutBins > _maxOutBin) ? _maxOutBin : nOutBins;
printf("iter %d, nBins %d, nElements: %d \n ", nIter, nOutBins,
nElements);
}
}
}
}
/** Approximates the slope of the triangular region that contains the smallest elements of the tensor-product matrix spanned by _val0 and _val1.
 * The slope is sampled at positions p = sqrt(2.f * _N) and p-1 for robustness.
 * The resulting slope index is stored in *_slopeIdx.
 */
inline __device__ void computeSlopeIdx(uint* _slopeIdx, const float *_val0,
const float* _val1, uint _N) {
// estimates the slope from the two last rows / column
if (threadIdx.x == 0) {
uint sampleIdx = sqrtf(2.f * _N);
float slope = (_val1[sampleIdx] + _val1[sampleIdx - 1] - 2 * _val1[0])
/ (_val0[sampleIdx] + _val0[sampleIdx - 1] - 2 * _val0[0]);
// slope = 1./slope;
int si = roundf(logf(slope) / logf(ANISO_BASE)) + (NUM_ANISO_DIR / 2);
si = (si >= NUM_ANISO_DIR) ? (NUM_ANISO_DIR - 1) : si;
si = (si < 0) ? 0 : si;
// si = 5;
*_slopeIdx = si;
// printf("slope %d %f -> %d \n ", _N, slope, si) ;
}
__syncthreads();
}
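/* Illustrative sketch (disabled): host-side reference of the slope heuristic
 * above. ANISO_BASE and NUM_ANISO_DIR are passed as parameters here because
 * their values are defined elsewhere in this project; refSlopeIdx is an
 * illustrative name and relies on the same math functions as the device code. */
#if 0
static uint refSlopeIdx(const float* val0, const float* val1, uint n,
		float anisoBase, int numAnisoDir) {
	uint s = (uint) sqrtf(2.f * n);
	float slope = (val1[s] + val1[s - 1] - 2.f * val1[0])
			/ (val0[s] + val0[s - 1] - 2.f * val0[0]);
	int si = (int) roundf(logf(slope) / logf(anisoBase)) + (numAnisoDir / 2);
	si = (si >= numAnisoDir) ? (numAnisoDir - 1) : si;
	si = (si < 0) ? 0 : si;
	return (uint) si;
}
#endif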
/** Given the two vectors _val0 and _val1 with corresponding index data _idx0 and _idx1, the function returns an almost sorted, ascending list of _N elements of the tensor-product matrix and the corresponding index information.
 * It uses the predefined heuristic _distSeq (selected for the actual slope) with padding _distCluster.
 * Indices are computed as _idx0[i] * _factor + _idx1[j] for multi-dimensional addressing.
 */
inline __device__ void generate2DBins(float* _dist, uint* _bin, uint _N,
const float *_val0, const float* _val1, const uint* _idx0,
const uint* _idx1, const uint* _distSeq, uint _distCluster,
uint _factor) {
if (threadIdx.x < _N) {
uint seqIdx = _distSeq[threadIdx.x];
_dist[threadIdx.x] = _val0[seqIdx % _distCluster]
+ _val1[seqIdx / _distCluster];
_bin[threadIdx.x] = (_idx0[seqIdx % _distCluster] * _factor
+ _idx1[seqIdx / _distCluster]);
}
__syncthreads();
}
inline __device__ void generate2DBins(float* _dist, uint* _bin, uint _N,
const float *_val0, const float* _val1, const uint* _idx0,
const uint* _idx1, const uint* _distSeq, uint _distCluster,
uint _factor, uint _maxCoord) {
if (threadIdx.x < _N) {
uint seqIdx = _distSeq[threadIdx.x];
uint x = seqIdx % _distCluster;
uint y = seqIdx / _distCluster;
if ((x < _maxCoord) && (y < _maxCoord)) {
_dist[threadIdx.x] = _val0[x] + _val1[y];
_bin[threadIdx.x] = (_idx0[x] * _factor + _idx1[y]);
} else {
_dist[threadIdx.x] = 99999999999.;
_bin[threadIdx.x] = 0.;
}
}
__syncthreads();
}
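/* Illustrative sketch (disabled): serial reference of generate2DBins. _distSeq
 * enumerates cells of the _distCluster x _distCluster tensor-product matrix in
 * roughly ascending distance order for the selected slope; each cell's
 * distance is the sum of the two per-part distances and its bin index combines
 * the two cluster indices via _factor. refGenerate2DBins is an illustrative
 * name. */
#if 0
static void refGenerate2DBins(float* dist, uint* bin, uint n,
		const float* val0, const float* val1,
		const uint* idx0, const uint* idx1,
		const uint* distSeq, uint distCluster, uint factor) {
	for (uint t = 0; t < n; t++) {
		uint seq = distSeq[t];
		uint x = seq % distCluster; // column in the tensor-product matrix
		uint y = seq / distCluster; // row
		dist[t] = val0[x] + val1[y];
		bin[t] = idx0[x] * factor + idx1[y];
	}
}
#endif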
__global__ void selectBinKernel2D2Parts(uint* _assign, float* _dists,
uint _maxOutBin, uint _nOutBin, const float *_assignVal,
const uint* _assignIdx, uint _Arows, uint _Brows, uint _p,
uint _nClusters1, uint _nClusters2, uint _k1, uint _kMax,
const uint *_distSeq, uint _distCluster) {
assert((2 * _nOutBin) < _maxOutBin);
// prepare two-times 2D sorted lists using anisotropic heuristics and sorting
extern __shared__ float shm[];
float* shmIter = shm;
float* val = shmIter;
shmIter += 2 * _kMax;
uint* idx = (uint*) shmIter;
shmIter += 2 * _kMax;
uint* outIdx = (uint*) shmIter;
shmIter += blockDim.x;
float* dist = (float*) shmIter;
shmIter += blockDim.x;
uint* slopeIdx = (uint*) shmIter;
shmIter += 1;
// loop over all corresponding vectors in B
for (int iter = blockIdx.x; iter < _Brows; iter += gridDim.x) {
// if (threadIdx.x == 0)
// printf("iter %d \n", iter);
__syncthreads();
for (uint pert = 0; pert < 1; pert++) {
__syncthreads();
// create sorted bin sequence for every two parts
for (int pIter = 0; pIter < (_p / 2); pIter++) {
// read the sorted assignment
if (threadIdx.x < _kMax) {
val[threadIdx.x] =
_assignVal[(iter * 1 + pert) * _k1
* _p * _Arows + (2 * pIter) * _k1 * _Arows
+ threadIdx.x];
idx[threadIdx.x] =
_assignIdx[(iter * 1 + pert) * _k1
* _p * _Arows + (2 * pIter) * _k1 * _Arows
+ threadIdx.x];
val[threadIdx.x + _kMax] = _assignVal[(iter
* 1 + pert) * _k1 * _p * _Arows
+ (2 * pIter + 1) * _k1 * _Arows + threadIdx.x];
idx[threadIdx.x + _kMax] = _assignIdx[(iter
* 1 + pert) * _k1 * _p * _Arows
+ (2 * pIter + 1) * _k1 * _Arows + threadIdx.x];
}
__syncthreads();
// generate first sequence
computeSlopeIdx(slopeIdx, val, val + _kMax, _nOutBin);
generate2DBins(dist, outIdx, _nOutBin, val, val + _kMax, idx,
idx + _kMax, _distSeq + *slopeIdx * NUM_DISTSEQ,
_distCluster, _nClusters1 * _nClusters2, _kMax);
if ((blockIdx.x == 0) && (threadIdx.x == 0)) {
printf("2Parts: binIdx: %d %d -> %d \n", idx[0], idx[_kMax],
outIdx[0]);
printf("scale: %d \n", _nClusters1 * _nClusters2);
}
bitonic3(dist, outIdx, _nOutBin);
// store intermediate result
if (threadIdx.x < _nOutBin) {
uint pos = (iter * 1 + pert) * _maxOutBin
+ pIter * _nOutBin + threadIdx.x;
_dists[pos] = dist[threadIdx.x];
_assign[pos] = outIdx[threadIdx.x];
}
__syncthreads();
}
}
}
}
/** Produces a sequence of non-empty (almost sorted) candidate bins.
 * It assumes that the sorted merged lists of parts 0,1 and 2,3 are stored in _assign and _dists.
 */
__global__ void selectBinKernel2DFinal(uint* _assign, float* _dists,
uint* _nBins, const uint* _nElemPerBin, uint _nInBin, uint _Brows,
uint _nClusters1, uint _nClusters2, uint _maxTrials, uint _k,
uint _maxOutBin, const uint *_distSeq, uint _distCluster,
uint _nBinsPerDB) {
// prepare two-times 2D sorted lists using anisotropic heuristics and sorting
extern __shared__ float shm[];
float* shmIter = shm;
float* val = shmIter;
shmIter += 2 * _nInBin;
uint* idx = (uint*) shmIter;
shmIter += 2 * _nInBin;
uint* outIdx = (uint*) shmIter;
shmIter += blockDim.x;
float* dist = (float*) shmIter;
shmIter += blockDim.x;
uint* nElem = (uint*) shmIter;
shmIter += blockDim.x;
uint* slopeIdx = (uint*) shmIter;
shmIter += 1;
uint& nOutBins = *(uint*) shmIter;
shmIter += 1;
uint& nElements = *(uint*) shmIter;
shmIter += 1;
uint& nIter = *(uint*) shmIter;
shmIter += 1;
// loop over all corresponding vectors in B
for (int iter = blockIdx.x; iter < _Brows; iter += gridDim.x) {
// if (threadIdx.x == 0)
// printf("iter %d \n", iter);
__syncthreads();
for (uint pert = 0; pert < 1; pert++) {
__syncthreads();
// read the sorted output from the previous 2D merges.
if (threadIdx.x < _nInBin) {
uint pos = (iter * 1 + pert) * _maxOutBin
+ threadIdx.x;
// read first vector
val[threadIdx.x] = _dists[pos];
idx[threadIdx.x] = _assign[pos];
// read second vector
val[threadIdx.x + _nInBin] = _dists[pos + _nInBin];
idx[threadIdx.x + _nInBin] = _assign[pos + _nInBin];
}
__syncthreads();
#if 1
// determine Slope for the entire sequence (same slope for all iterations)
computeSlopeIdx(slopeIdx, val, val + _nInBin, 1024);
// loop multiple times to include sufficiently many bins at the end
if (threadIdx.x == 0) {
nOutBins = 0;
nElements = 0;
nIter = 0;
if (blockIdx.x == 0)
printf("slope %d \n ", *slopeIdx);
}
__syncthreads();
while ((nElements < _k) && (nIter < _maxTrials)
&& (nOutBins < _maxOutBin)) {
__syncthreads();
generate2DBins(dist, outIdx, blockDim.x, val, val + _nInBin,
idx, idx + _nInBin,
_distSeq + *slopeIdx * NUM_DISTSEQ + nIter * blockDim.x,
_distCluster,
_nClusters1 * _nClusters2 * _nClusters1 * _nClusters2,
_nInBin);
outIdx[threadIdx.x] = outIdx[threadIdx.x] % HASH_SIZE;
bitonic3(dist, outIdx, blockDim.x);
nElem[threadIdx.x] = _nElemPerBin[outIdx[threadIdx.x]];
// if (nElem[threadIdx.x] >= 32)
// nElem[threadIdx.x] = 0;
uint maxVecPB = 2;
nElem[threadIdx.x] =
(nElem[threadIdx.x] < maxVecPB) ?
nElem[threadIdx.x] : maxVecPB;
// // TODO
// nElem[threadIdx.x] = 1;
uint nElReg = nElem[threadIdx.x];
__syncthreads();
scan_block2(nElem, true);
if ((threadIdx.x > 0)
&& (nElem[threadIdx.x - 1] + nElements) >= _k) {
nElReg = 0;
}
__syncthreads();
if (threadIdx.x == 0)
nElements += nElem[blockDim.x - 1];
__syncthreads();
if (nElReg)
nElem[threadIdx.x] = 1;
else
nElem[threadIdx.x] = 0;
__syncthreads();
scan_block2(nElem, false);
if (nElReg) {
uint pos = nElem[threadIdx.x] + nOutBins;
if (pos < _maxOutBin) {
_assign[(iter * 1 + pert) * _maxOutBin
+ pos] = outIdx[threadIdx.x];
_dists[(iter * 1 + pert) * _maxOutBin
+ pos] = dist[threadIdx.x];
// if (iter == 3) {
// printf("outputBin %d at pos %d %d \n", outIdx[threadIdx.x], pos, ((iter * 1 + pert) * _maxOutBin
// + pos) );
// }
}
}
__syncthreads();
if (threadIdx.x == 0) {
nOutBins += nElem[blockDim.x - 1];
nIter++;
// printf("iter %d, nOutBins %d, nEelm: %d \n", nIter,
// nOutBins, nElements);
}
__syncthreads();
}
#endif
__syncthreads();
if (threadIdx.x == 0) {
_nBins[iter * 1 + pert] =
(nOutBins > _maxOutBin) ? _maxOutBin : nOutBins;
// printf("iter %d, nBins %d, nElements: %d \n ", nIter, nOutBins,
// nElements);
}
}
}
}
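/* Illustrative sketch (disabled): serial reference of the compaction step in
 * selectBinKernel2DFinal. Candidate bins that contain at least one vector are
 * appended to the output until roughly _k vectors have been collected or the
 * output is full; on the GPU the same effect is obtained with the two
 * scan_block2 passes above. refCompactBins is an illustrative name. */
#if 0
static uint refCompactBins(const uint* candBin, const float* candDist,
		const uint* elemPerBin, uint nCand, uint maxVecPB,
		uint k, uint maxOut, uint* outBin, float* outDist) {
	uint nOut = 0;
	uint nElements = 0;
	for (uint i = 0; i < nCand && nElements < k && nOut < maxOut; i++) {
		uint n = elemPerBin[candBin[i]];
		n = (n < maxVecPB) ? n : maxVecPB; // cap the contribution of one bin
		if (n > 0) {
			outBin[nOut] = candBin[i];
			outDist[nOut] = candDist[i];
			nOut++;
			nElements += n;
		}
	}
	return nOut;
}
#endif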
__global__ void selectBinKernelFast(uint* _assign, uint* _nBins,
const float *_assignVal, const uint* _assignIdx,
const uint* _nElemPerBin, uint _Arows, uint _Brows, uint _p, uint _vl,
uint _nClusters1, uint _nClusters2, uint _k1, uint _k, uint _maxTrials,
uint _maxOutBin, uint _c1scale, const uint *_distSeq, uint _numDistSeq,
uint _distCluster, uint _nBinsPerDB) {
// instead of the Dijkstra do the brute-force thing
extern __shared__ float shm[];
float* shmIter = shm;
float* val = shmIter;
shmIter += _p * _k1 * _Arows;
uint* idx = (uint*) shmIter;
shmIter += _p * _k1 * _Arows;
uint* numbers = (uint*) shmIter;
shmIter += _p;
uint* denom = (uint*) shmIter;
shmIter += _p;
uint* outIdx = (uint*) shmIter;
shmIter += blockDim.x;
float* dist = (float*) shmIter;
shmIter += blockDim.x;
uint* nElem = (uint*) shmIter;
shmIter += blockDim.x;
uint& nOutBins = *(uint*) shmIter;
shmIter += 1;
uint& nElements = *(uint*) shmIter;
shmIter += 1;
uint& nIter = *(uint*) shmIter;
shmIter += 1;
if (threadIdx.x < _p) {
numbers[threadIdx.x] = _distCluster;
}
__syncthreads();
if (threadIdx.x == 0) {
denom[0] = 1;
for (int i = 1; i < _p; i++) {
denom[i] = denom[i - 1] * numbers[i - 1];
}
}
__syncthreads();
// loop over all corresponding vectors in B
for (int iter = blockIdx.x; iter < _Brows; iter += gridDim.x) {
// if (threadIdx.x == 0)
// printf("iter %d \n", iter);
//
// __syncthreads();
for (uint pert = 0; pert < 1; pert++) {
__syncthreads();
// read the sorted assignment
for (int p = 0; p < _p; p++) {
if (threadIdx.x < _k1 * _Arows) {
val[threadIdx.x + p * _k1 * _Arows] = _assignVal[(iter
* 1 + pert) * _k1 * _p * _Arows
+ p * _k1 * _Arows + threadIdx.x];
idx[threadIdx.x + p * _k1 * _Arows] = _assignIdx[(iter
* 1 + pert) * _k1 * _p * _Arows
+ p * _k1 * _Arows + threadIdx.x];
}
}
// TODO loop multiple times to include sufficiently many bins at the end
if (threadIdx.x == 0) {
nOutBins = 0;
nElements = 0;
nIter = 0;
}
__syncthreads();
while ((nElements < _k) && (nIter < _maxTrials)
&& (nOutBins < _maxOutBin)) {
// generate all possible bins within the bounds given by numbers[]
// calc the corresponding binIdx in the DB and the distance to the cluster center
// dist[threadIdx.x] = 0.;
// TODO fix 4
// uint bin[4]; // maximum number for p
float ddd = 0.;
uint oIdx = 0;
for (int p = 0; p < _p; p++) {
uint bp = (_distSeq[nIter * blockDim.x + threadIdx.x]
/ denom[p]) % numbers[p];
ddd += val[bp + p * _k1 * _Arows];
oIdx = (oIdx * _nClusters1 * _nClusters2)
+ idx[bp + p * _k1 * _Arows];
}
dist[threadIdx.x] = ddd;
outIdx[threadIdx.x] = oIdx % HASH_SIZE;
__syncthreads();
// sort all cluster centers based on the distance
// bitonic3(dist, outIdx, blockDim.x);
// if (outIdx[threadIdx.x] < )
// nElem[threadIdx.x] = _nElemPerBin[outIdx[threadIdx.x]];
nElem[threadIdx.x] = 1;
#if 0
uint nElReg = nElem[threadIdx.x];
__syncthreads();
scan_block2(nElem, true);
if ((nElem[threadIdx.x] + nElements) >= _k) {
nElReg = 0;
}
__syncthreads();
if (threadIdx.x == 0)
nElements += nElem[blockDim.x - 1];
__syncthreads();
if (nElReg)
nElem[threadIdx.x] = 1;
else
nElem[threadIdx.x] = 0;
__syncthreads();
scan_block2(nElem, true);
if (nElReg) {
uint pos = nElem[threadIdx.x] + nOutBins;
if (pos < _maxOutBin)
_assign[(iter * 1 + pert) * _maxOutBin
+ pos] = outIdx[threadIdx.x];
}
__syncthreads();
if (threadIdx.x == 0) {
nOutBins += nElem[blockDim.x - 1];
nIter++;
}
#else
uint pos = threadIdx.x + nOutBins;
if (pos < _maxOutBin)
_assign[(iter * 1 + pert) * _maxOutBin + pos] =
outIdx[threadIdx.x];
if (threadIdx.x == 0) {
nOutBins += blockDim.x;
nIter++;
}
#endif
__syncthreads();
}
__syncthreads();
if (threadIdx.x == 0) {
_nBins[iter * 1 + pert] =
(nOutBins > _maxOutBin) ? _maxOutBin : nOutBins;
// printf("nBins %d, nElements: %d \n ", nOutBins, nElements);
}
}
}
}
__global__ void selectBinKernelFast2(uint* _assign, uint* _nBins,
const float *_assignVal, const uint* _assignIdx,
const uint* _nElemPerBin, uint _Arows, uint _Brows, uint _p, uint _vl,
uint _nClusters1, uint _nClusters2, uint _k1, uint _k, uint _maxTrials,
uint _maxOutBin, uint _c1scale, const uint *_distSeq, uint _numDistSeq,
uint _distCluster, uint _nBinsPerDB) {
// instead of the Dijkstra do the brute-force thing
extern __shared__ float shm[];
float* shmIter = shm;
float* val = shmIter;
shmIter += _p * _k1 * _Arows;
uint* idx = (uint*) shmIter;
shmIter += _p * _k1 * _Arows;
uint* numbers = (uint*) shmIter;
shmIter += _p;
uint* denom = (uint*) shmIter;
shmIter += _p;
uint* outIdx = (uint*) shmIter;
shmIter += blockDim.x;
float* dist = (float*) shmIter;
shmIter += blockDim.x;
uint* nElem = (uint*) shmIter;
shmIter += blockDim.x;
uint& nOutBins = *(uint*) shmIter;
shmIter += 1;
uint& nElements = *(uint*) shmIter;
shmIter += 1;
uint& nIter = *(uint*) shmIter;
shmIter += 1;
if (threadIdx.x < _p) {
numbers[threadIdx.x] = _distCluster;
}
__syncthreads();
if (threadIdx.x == 0) {
denom[0] = 1;
for (int i = 1; i < _p; i++) {
denom[i] = denom[i - 1] * numbers[i - 1];
}
}
__syncthreads();
// loop over all corresponding vectors in B
for (int iter = blockIdx.x; iter < _Brows; iter += gridDim.x) {
// if (threadIdx.x == 0)
// printf("iter %d \n", iter);
//
// __syncthreads();
for (uint pert = 0; pert < 1; pert++) {
__syncthreads();
// read the sorted assignment
for (int p = 0; p < _p; p++) {
if (threadIdx.x < _k1 * _Arows) {
val[threadIdx.x + p * _k1 * _Arows] = _assignVal[(iter
* 1 + pert) * _k1 * _p * _Arows
+ p * _k1 * _Arows + threadIdx.x];
idx[threadIdx.x + p * _k1 * _Arows] = _assignIdx[(iter
* 1 + pert) * _k1 * _p * _Arows
+ p * _k1 * _Arows + threadIdx.x];
}
}
// TODO loop multiple times to include sufficiently many bins at the end
if (threadIdx.x == 0) {
nOutBins = 0;
nElements = 0;
nIter = 0;
}
__syncthreads();
while ((nElements < _k) && (nIter < _maxTrials)
&& (nOutBins < _maxOutBin)) {
// generate all possible bins within the bounds given by numbers[]
// calc the corresponding binIdx in the DB and the distance to the cluster center
// dist[threadIdx.x] = 0.;
// TODO fix 4
// uint bin[4]; // maximum number for p
float ddd = 0.;
uint oIdx = 0;
for (int p = 0; p < _p; p++) {
uint bp = (_distSeq[nIter * blockDim.x + threadIdx.x]
/ denom[p]) % numbers[p];
ddd += val[bp + p * _k1 * _Arows];
oIdx = (oIdx * _nClusters1 * _nClusters2)
+ idx[bp + p * _k1 * _Arows];
}
dist[threadIdx.x] = ddd;
outIdx[threadIdx.x] = oIdx % HASH_SIZE;
__syncthreads();
// sort all cluster centers based on the distance
// bitonic3(dist, outIdx, blockDim.x);
// if (outIdx[threadIdx.x] < )
// nElem[threadIdx.x] = _nElemPerBin[outIdx[threadIdx.x]];
// nElem[threadIdx.x] = 1;
#if 1
uint nElReg = _nElemPerBin[oIdx % HASH_SIZE];
if (nElReg)
nElem[threadIdx.x] = 1;
else
nElem[threadIdx.x] = 0;
__syncthreads();
scan_block2(nElem, true);
if (nElReg) {
uint pos = nElem[threadIdx.x] + nOutBins;
if (pos < _maxOutBin)
_assign[(iter * 1 + pert) * _maxOutBin
+ pos] = outIdx[threadIdx.x];
}
__syncthreads();
if (threadIdx.x == 0) {
nOutBins += nElem[blockDim.x - 1];
// nOutBins += blockDim.x;
nIter++;
}
#else
uint pos = threadIdx.x + nOutBins;
if (pos < _maxOutBin)
_assign[(iter * 1 + pert) * _maxOutBin + pos] =
outIdx[threadIdx.x];
__syncthreads();
if (threadIdx.x == 0) {
nOutBins += blockDim.x;
nIter++;
}
#endif
__syncthreads();
}
__syncthreads();
if (threadIdx.x == 0) {
_nBins[iter * 1 + pert] =
(nOutBins > _maxOutBin) ? _maxOutBin : nOutBins;
// printf("nBins %d, nElements: %d \n ", nOutBins, nElements);
}
}
}
}
void PerturbationProTree::getBins(uint *_bins, uint *_nBins,
const float *_assignVal, const uint *_assignIdx, uint _N, uint _k1,
uint _k2, uint _maxBins) {
// uint nThreads = 64; // 32;
uint nThreads = 1024;
dim3 block(nThreads, 1, 1);
dim3 grid((_N < 1024) ? _N : 1024, 1, 1);
cudaMemset(_bins, 0, 1 * _N * _maxBins * sizeof(uint));
cudaMemset(_nBins, 0, 1 * _N * sizeof(uint));
// uint maxTrials = 20 * idiv(_maxBins, nThreads);
uint maxTrials = 5 * idiv(_maxBins, nThreads);
maxTrials = 16; // TODO
uint shm = (2 * d_p * _k1 * d_nClusters2 + 2 * d_p + 3 * nThreads + nThreads
+ 3 + 4 * nThreads) * sizeof(float);
uint c1scale = d_nClusters2;
std::cout << "getBins shmSize " << shm << std::endl;
// selectBinKernel<<<grid, block, shm>>>(_bins, _nBins, _assignVal, _assignIdx,
// d_binCounts, d_nClusters2, _N, d_p, d_vl, d_nClusters, d_nClusters2,
// _k1, _k2, maxTrials, _maxBins, c1scale, d_distSeq, d_numDistSeq,
// d_distCluster, d_nDBs, d_nBins);
shm = (2 * d_p * _k1 * d_nClusters2 + 2 * d_p + 3 * nThreads + 3)
* sizeof(float);
selectBinKernelFast2<<<grid, block, shm>>>(_bins, _nBins, _assignVal,
_assignIdx, d_binCounts, d_nClusters2, _N, d_p, d_vl, d_nClusters,
d_nClusters2, _k1, _k2, maxTrials, _maxBins, c1scale, d_distSeq,
d_numDistSeq, d_distCluster, d_nBins);
checkCudaErrors(cudaDeviceSynchronize());
}
void PerturbationProTree::getBIGBins(uint *_bins, uint *_nBins,
const float *_assignVal, const uint *_assignIdx, uint _N, uint _k1,
uint _k2, uint _maxBins) {
// uint nThreads = 64; // 32;
uint nThreads = 1024;
// uint nThreads = 512;
dim3 block(nThreads, 1, 1);
dim3 grid((_N < 1024) ? _N : 1024, 1, 1);
cudaMemset(_bins, 0, 1 * _N * _maxBins * sizeof(uint));
cudaMemset(_nBins, 0, 1 * _N * sizeof(uint));
// uint maxTrials = 20 * idiv(_maxBins, nThreads);
uint maxTrials = 5 * idiv(_maxBins, nThreads);
maxTrials = 16;
maxTrials = 64;
maxTrials = 256;
uint shm = (2 * d_p * _k1 * d_nClusters2 + 2 * d_p + 3 * nThreads + nThreads
+ 3 + 4 * nThreads) * sizeof(float);
uint c1scale = d_nClusters2;
std::cout << "getBins shmSize " << shm << std::endl;
selectBinKernel<<<grid, block, shm>>>(_bins, _nBins, _assignVal, _assignIdx,
d_binCounts, d_nClusters2, _N, d_p, d_vl, d_nClusters, d_nClusters2,
_k1, _k2, maxTrials, _maxBins, c1scale, d_distSeq, d_numDistSeq,
d_distCluster, d_nBins);
// shm = (2 * d_p * _k1 * d_nClusters2 + 2 * d_p + 3 * nThreads + 3)
// * sizeof(float);
// selectBIGBinKernelFast2<<<grid, block, shm>>>(_bins, _nBins, _assignVal,
// _assignIdx, d_binCounts, d_nClusters2, _N, d_p, d_vl, d_nClusters,
// d_nClusters2, _k1, _k2, maxTrials, _maxBins, c1scale, d_distSeq,
// d_numDistSeq, d_distCluster, d_nDBs, d_nBins);
checkCudaErrors(cudaDeviceSynchronize());
// outputVecUint("origBin: ", _bins, 200);
countZeros("origBins: ", _bins, _N * _maxBins);
}
void PerturbationProTree::getBIGBinsSorted(uint *_bins, uint *_nBins,
const float *_assignVal, const uint *_assignIdx, uint _N, uint _k1,
uint _k2, uint _maxBins) {
// uint nThreads = 64; // 32;
uint nThreads = 1024;
// uint nThreads = 512;
dim3 block(nThreads, 1, 1);
dim3 grid((_N < 1024) ? _N : 1024, 1, 1);
float* dists;
cudaMalloc(&dists, 1 * _N * _maxBins * sizeof(float));
cudaMemset(_bins, 0, 1 * _N * _maxBins * sizeof(uint));
cudaMemset(_nBins, 0, 1 * _N * sizeof(uint));
// uint maxTrials = 20 * idiv(_maxBins, nThreads);
uint maxTrials = 5 * idiv(_maxBins, nThreads);
maxTrials = 16;
maxTrials = 64;
maxTrials = 2560;
uint kMax = 32;
uint shm = (2 * d_p * kMax + 2 * d_p + 3 * nThreads + nThreads + 3
+ 4 * nThreads) * sizeof(float);
uint c1scale = d_nClusters2;
std::cout << "getBins shmSize " << shm << std::endl;
selectBinKernelUnsorted<<<grid, block, shm>>>(_bins, dists, _nBins,
_assignVal, _assignIdx, d_binCounts, d_nClusters2, _N, d_p, d_vl,
d_nClusters, d_nClusters2, _k1, kMax, _k2, maxTrials, _maxBins,
c1scale, d_distSeq, d_numDistSeq, d_distCluster, d_nBins);
checkCudaErrors(cudaDeviceSynchronize());
std::cout << "now sorting " << std::endl;
block = dim3(1024, 1, 1);
shm = (2 * 4096) * sizeof(float);
binSortKernel<<<grid, block, shm>>>(_bins, dists, _nBins, _maxBins, _N);
checkCudaErrors(cudaDeviceSynchronize());
outputVec("origDist: ", dists, 20);
outputVecUint("origBin: ", _bins, 200);
countZeros("origBins: ", _bins, _N * _maxBins);
cudaFree(dists);
}
void PerturbationProTree::getBIGBins2D(uint *_bins, uint *_nBins,
const float *_assignVal, const uint *_assignIdx, uint _N, uint _k1,
uint _k2, uint _maxBins) {
// uint nThreads = 64; // 32;
uint nThreads = 1024;
// uint nThreads = 512;
dim3 block(nThreads, 1, 1);
dim3 grid((_N < 1024) ? _N : 1024, 1, 1);
float* dists;
cudaMalloc(&dists, 1 * _N * _maxBins * sizeof(float));
cudaMemset(_bins, 0, 1 * _N * _maxBins * sizeof(uint));
cudaMemset(_nBins, 0, 1 * _N * sizeof(uint));
// uint maxTrials = 20 * idiv(_maxBins, nThreads);
uint maxTrials = 5 * idiv(_maxBins, nThreads);
maxTrials = 16;
maxTrials = 64;
maxTrials = 2560;
uint kMax = 64;
uint shm = (4 * kMax + 2 * nThreads + 2) * sizeof(float);
// uint c1scale = d_nClusters2;
uint nIntermediateBin = 256;
// uint nIntermediateBin = 1024;
std::cout << "getBins2D shmSize " << shm << std::endl;
selectBinKernel2D2Parts<<<grid, block, shm>>>(_bins, dists, _maxBins,
nIntermediateBin, _assignVal, _assignIdx, d_nClusters2, _N, d_p,
d_nClusters, d_nClusters2, _k1, kMax, d_distSeq, d_distCluster);
checkCudaErrors(cudaDeviceSynchronize());
// outputVecUint("origBin: ", _bins, 20);
// outputVec("dists2D ", dists, nIntermediateBin);
// outputVec("dists2D -1 ", dists + nIntermediateBin, nIntermediateBin);
nThreads = 1024;
block = dim3(nThreads, 1, 1);
shm = (4 * nIntermediateBin + 3 * nThreads + 5) * sizeof(float);
std::cout << "selectBinKernel2DFinal shm: " << shm << std::endl;
// _k2 = 4096;
std::cout << "k2: " << _k2 << " maxBins: " << _maxBins << std::endl;
selectBinKernel2DFinal<<<grid, block, shm>>>(_bins, dists, _nBins,
d_binCounts, nIntermediateBin, _N, d_nClusters, d_nClusters2,
maxTrials, _k2, _maxBins, d_distSeq, d_distCluster,
d_nBins);
checkCudaErrors(cudaDeviceSynchronize());
outputVecUint("finalBin: ", _bins, 20);
// outputVec("dists2D - final", dists, (_maxBins > 500) ? 500 : _maxBins);
// outputVec("dists2D - final", dists, 2000);
// outputVecUint("origBin: ", _bins, 200);
countZeros("origBins: ", _bins, _N * _maxBins);
cudaFree(dists);
}
__global__ void getKVectorIDsKernel(uint* _bestIdx, uint* _nVec,
const uint* _dbIdx, const uint* _binPrefix, const uint* _binCounts,
uint _nDBBins, const float* _Q, const uint* _assignedBins,
const uint* _assignedNBins, uint _QN, uint _dim, uint _maxBins, uint _k,
uint _maxVecConsider, uint _maxVecOut,
uint _maxNVecPerBin) {
extern __shared__ float shm[];
float* shmIter = shm;
uint* nBins = (uint*) shmIter;
shmIter += 1;
uint &currentBin(*(uint*) shmIter);
shmIter++;
uint &nVec(*(uint*) shmIter);
shmIter++;
uint* idx = (uint*) shmIter;
shmIter += _maxVecConsider;
uint* val = (uint*) shmIter;
shmIter += _maxVecConsider;
uint* hash = val; // reuse the val array in shared memory as a hash table
uint count;
// loop over all corresponding vectors in B
for (int iter = blockIdx.x; iter < _QN; iter += gridDim.x) {
__syncthreads();
if (threadIdx.x < 1) {
nBins[threadIdx.x] = _assignedNBins[iter * 1
+ threadIdx.x];
nBins[threadIdx.x] =
(nBins[threadIdx.x] < _maxBins) ?
nBins[threadIdx.x] : _maxBins;
}
val[threadIdx.x] = _maxVecConsider;
__syncthreads();
// loop over the best assigned bins
// do round robin over the different permutations
uint _maxVecConsiderPerPert = _maxVecConsider / 1;
uint offs = 0;
for (int pert = 0; pert < 1; pert++) {
// for (int pert = 1; pert < 2; pert++) {
// for (int pert = 0; pert < 1; pert++) {
count = 0;
for (int bin = 0;
// (bin < _maxBins) && (count < _maxVecConsiderPerPert);
(bin < nBins[pert]) && (count < _maxVecConsiderPerPert);
bin++) {
//
// if (count >= _maxVecConsiderPerPert)
// break;
// if (bin >= nBins[pert])
// continue;
if (threadIdx.x == 0) {
currentBin = _assignedBins[(iter * 1 + pert)
* _maxBins + bin];
nVec = _binCounts[currentBin];
nVec = (nVec > _maxNVecPerBin) ? _maxNVecPerBin : nVec;
if ((count + nVec) >= _maxVecConsiderPerPert)
nVec = _maxVecConsiderPerPert - count - 1;
}
__syncthreads();
// fetch all the vector indices for the selected bin
for (uint v = threadIdx.x; v < nVec; v += blockDim.x) {
idx[offs + count + v] = _dbIdx[_binPrefix[currentBin] + v];
val[offs + count + v] = (count + v) * 1
+ pert;
}
count += nVec;
__syncthreads();
}
offs += count;
}
count = offs;
__syncthreads();
// if (threadIdx.x == 0) {
// for (int i = 0; i < _maxVecConsider; i++) {
// printf("prev: %d %d %d \n", i, idx[i], val[i]);
// }
// }
// resort the array
uint myIdx = idx[threadIdx.x];
uint myVal = val[threadIdx.x];
__syncthreads();
val[threadIdx.x] = 0;
__syncthreads();
// write in correct order
if (myVal < _maxVecConsider) {
idx[myVal] = myIdx;
val[myVal] = 2;
}
__syncthreads();
// if (threadIdx.x == 0) {
// for (int i = 0; i < _maxVecConsider; i++) {
// printf( "num: %d %d %d \n", i, idx[i], val[i]);
// }
// }
myIdx = idx[threadIdx.x];
myVal = val[threadIdx.x];
uint maxHash = 2048;
uint myHash = myIdx % maxHash;
// remove duplicates using hash
do {
__syncthreads();
currentBin = 0;
// everyone tries to write to the hash at the same time
if (myVal == 2) {
hash[myHash] = threadIdx.x;
}
// make sure the smallest index is placed in the hash map
for (int j = 0; j < 1 - 1; j++) {
__syncthreads();
if (myVal == 2) {
if (idx[hash[myHash]] == myIdx) {
if (hash[myHash] > threadIdx.x)
hash[myHash] = threadIdx.x;
}
}
__syncthreads();
}
if (myVal == 2) {
// if threadIdx.x was the smallest, keep the value
if (hash[myHash] == threadIdx.x) {
myVal = 1;
} else if ((hash[myHash] < threadIdx.x)
&& (idx[hash[myHash]] == myIdx))
myVal = 0; // otherwise it's a duplicate
}
// check if there is still something to be done
uint vote = __any(myVal == 2);
if (vote && (threadIdx.x % 32 == 0))
currentBin = 1;
__syncthreads();
} while (currentBin);
// if (myVal == 2) {
// printf( "not yet done %d \n", threadIdx.x);
// }
__syncthreads();
val[threadIdx.x] = myVal;
// __syncthreads();
//
// if (threadIdx.x == 0) {
// for (int i = 0; i < _maxVecConsider; i++) {
// printf("after: %d %d %d \n", i, idx[i], val[i]);
// }
//
// }
__syncthreads();
scan_block2(val, false);
if ((myVal == 1) && (val[threadIdx.x] < _maxVecOut)) {
_bestIdx[iter * _maxVecOut + val[threadIdx.x]] = myIdx;
}
if (threadIdx.x == 0) {
count = (val[blockDim.x - 1] < _maxVecOut) ?
val[blockDim.x - 1] : _maxVecOut;
_nVec[iter] = count;
}
#if 0
// if ((threadIdx.x == 0) && (blockIdx.x == 0)) {
// for (int i = 0; i < count; i++)
// printf("presort: %d %d \n", i, idx[i]);
// }
// sort the results
if ((threadIdx.x >= count) && (threadIdx.x < _maxVecConsider))
val[threadIdx.x] = 999999999.;
__syncthreads();
// sort all vectors by vector ID
bitonic3(val, idx, _maxVecConsider);
// each vector should only appear at maximum _nPerturbation times
// count occurences
if (threadIdx.x < count) {
val[threadIdx.x] = 1.;
} else if (threadIdx.x < _maxVecConsider)
val[threadIdx.x] = 0.;
for (int db = 1; db < 1; db++) {
if ((threadIdx.x + db) < count) {
if (idx[threadIdx.x] == idx[threadIdx.x + db]) {
val[threadIdx.x]++;
}
}
}
__syncthreads();
// make sure to consider only the first occurence of each vectorID
// (remove duplicates)
if ((threadIdx.x + 1) < count) {
if (idx[threadIdx.x] == idx[threadIdx.x + 1]) {
val[threadIdx.x + 1] = 0.;
}
}
// sort all vectorIDs descending by occurence
bitonic3Descending(val, idx, _maxVecConsider);
if (threadIdx.x == 0) {
count = (count < _maxVecOut) ? count : _maxVecOut;
}
if ((threadIdx.x == 0) && (blockIdx.x == 0)) {
for (int i = 0; i < count; i++)
printf("i: %d %d %d \n", i, idx[i], val[i]);
}
__syncthreads();
if (threadIdx.x < count) {
if (val[threadIdx.x] > 0.) {
_bestIdx[iter * _maxVecOut + threadIdx.x] = idx[threadIdx.x];
val[threadIdx.x] = 1.;
}
} else if (threadIdx.x < _maxVecConsider)
val[threadIdx.x] = 0.;
// count number of valid vectors (reduction)
for (uint stride = _maxVecConsider >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if (threadIdx.x < stride)
val[threadIdx.x] += val[threadIdx.x + stride];
}
__syncthreads();
if (threadIdx.x == 0) {
_nVec[iter] = (uint) val[0];
}
#endif
}
}
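// getKVectorIDsKernelLarge: same candidate gathering and hash-based duplicate
// removal as getKVectorIDsKernel, but each thread handles nIter = _k / blockDim.x
// candidate slots kept in fixed-size register arrays (capacity 4, so nIter is
// expected to stay <= 4); the compaction uses scan_blockLarge so the candidate
// count may exceed the thread-block size.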
__global__ void getKVectorIDsKernelLarge(uint* _bestIdx, uint* _nVec,
const uint* _dbIdx, const uint* _binPrefix, const uint* _binCounts,
uint _nDBBins, const float* _Q, const uint* _assignedBins,
const uint* _assignedNBins, uint _QN, uint _dim, uint _maxBins, uint _k,
uint _maxVecConsider, uint _maxVecOut,
uint _maxNVecPerBin) {
extern __shared__ float shm[];
float* shmIter = shm;
uint* nBins = (uint*) shmIter;
shmIter += 1;
uint &currentBin(*(uint*) shmIter);
shmIter++;
uint &nVec(*(uint*) shmIter);
shmIter++;
uint* idx = (uint*) shmIter;
shmIter += _maxVecConsider;
uint* val = (uint*) shmIter;
shmIter += _maxVecConsider;
uint* hash = val;
// in shm;
uint count;
uint nIter = _k / blockDim.x;
// loop over all corresponding vectors in B
for (int iter = blockIdx.x; iter < _QN; iter += gridDim.x) {
__syncthreads();
if (threadIdx.x < 1) {
nBins[threadIdx.x] = _assignedNBins[iter * 1
+ threadIdx.x];
nBins[threadIdx.x] =
(nBins[threadIdx.x] < _maxBins) ?
nBins[threadIdx.x] : _maxBins;
}
for (int i = threadIdx.x; i < _maxVecConsider; i += blockDim.x)
val[i] = _maxVecConsider;
__syncthreads();
// loop over the best assigned bins
// do round robin over the different permutations
uint _maxVecConsiderPerPert = _maxVecConsider / 1;
uint offs = 0;
for (int pert = 0; pert < 1; pert++) {
count = 0;
for (int bin = 0;
(bin < nBins[pert]) && (count < _maxVecConsiderPerPert);
bin++) {
if (threadIdx.x == 0) {
currentBin = _assignedBins[(iter * 1 + pert)
* _maxBins + bin];
nVec = _binCounts[currentBin];
nVec = (nVec > _maxNVecPerBin) ? _maxNVecPerBin : nVec;
if ((count + nVec) >= _maxVecConsiderPerPert)
nVec = _maxVecConsiderPerPert - count - 1;
}
__syncthreads();
// fetch all the vector indices for the selected bin
for (uint v = threadIdx.x; v < nVec; v += blockDim.x) {
idx[offs + count + v] = _dbIdx[_binPrefix[currentBin] + v];
val[offs + count + v] = (count + v) * 1
+ pert;
}
count += nVec;
__syncthreads();
}
offs += count;
}
count = offs;
__syncthreads();
// resort the array
uint myIdx[4];
uint myVal[4];
for (int i = 0; i < nIter; i++) {
myIdx[i] = idx[threadIdx.x + i * blockDim.x];
myVal[i] = val[threadIdx.x + i * blockDim.x];
val[threadIdx.x + i * blockDim.x] = 0;
}
__syncthreads();
// write in correct order
for (int i = 0; i < nIter; i++) {
if (myVal[i] < _maxVecConsider) {
idx[myVal[i]] = myIdx[i];
val[myVal[i]] = 2;
}
}
__syncthreads();
// if (threadIdx.x == 0) {
// for (int i = 0; i < _maxVecConsider; i++) {
// printf("num: %d %d %d \n", i, idx[i], val[i]);
// }
// }
for (int i = 0; i < nIter; i++) {
myIdx[i] = idx[threadIdx.x + i * blockDim.x];
myVal[i] = val[threadIdx.x + i * blockDim.x];
}
uint maxHash = 2048;
uint myHash[4];
for (int i = 0; i < nIter; i++) {
myHash[i] = myIdx[i] % maxHash;
}
#if 1
// remove duplicates using hash
do {
__syncthreads();
currentBin = 0;
// everyone tries to write to the hash at the same time
for (int i = 0; i < nIter; i++) {
if (myVal[i] == 2) {
hash[myHash[i]] = threadIdx.x + i * blockDim.x;
}
}
// make sure the smallest index is placed in the hash map
for (int j = 0; j < 1 - 1; j++) {
__syncthreads();
for (int i = 0; i < nIter; i++) {
if (myVal[i] == 2) {
if (idx[hash[myHash[i]]] == myIdx[i]) {
if (hash[myHash[i]]
> (threadIdx.x + i * blockDim.x))
hash[myHash[i]] =
(threadIdx.x + i * blockDim.x);
}
}
}
__syncthreads();
}
for (int i = 0; i < nIter; i++) {
if (myVal[i] == 2) {
// if threadIdx.x was the smallest, keep the value
if (hash[myHash[i]] == (threadIdx.x + i * blockDim.x)) {
myVal[i] = 1;
} else if ((hash[myHash[i]] < (threadIdx.x + i * blockDim.x))
&& (idx[hash[myHash[i]]] == myIdx[i]))
myVal[i] = 0; // otherwise it's a duplicate
}
}
// check if there is still something to be done
uint vote = __any(myVal[0] == 2);
for (int i = 1; i < nIter; i++) {
vote = vote || __any(myVal[i] == 2);
}
if (vote && (threadIdx.x % 32 == 0))
currentBin = 1;
__syncthreads();
} while (currentBin);
#endif
// if (myVal == 2) {
// printf( "not yet done %d \n", threadIdx.x);
// }
__syncthreads();
for (int i = 0; i < nIter; i++)
val[threadIdx.x + i * blockDim.x] = myVal[i];
__syncthreads();
scan_blockLarge(val, false, _maxVecConsider);
// if (threadIdx.x == 0) {
// for (int i = 0; i < _maxVecConsider; i++) {
// printf("after: %d %d %d \n", i, idx[i], val[i]);
// }
//
// }
//
// __syncthreads();
for (int i = 0; i < nIter; i++)
if ((myVal[i] == 1)
&& (val[threadIdx.x + i * blockDim.x] < _maxVecOut)) {
_bestIdx[iter * _maxVecOut + val[threadIdx.x + i * blockDim.x]] =
myIdx[i];
}
if (threadIdx.x == 0) {
count = (val[_maxVecConsider - 1] < _maxVecOut) ?
val[_maxVecConsider - 1] : _maxVecOut;
_nVec[iter] = count;
}
}
}
#if 1
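// getKVectorIDsKernelFast: simplified gathering without duplicate removal.
// Processes the assigned bins blockDim.x at a time, clamps each bin's vector
// count to _maxNVecPerBin, turns the per-bin counts into output offsets with a
// block-wide prefix scan (scan_block2) and copies the vector IDs straight into
// _bestIdx until _maxVec entries have been written for the query.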
__global__ void getKVectorIDsKernelFast(uint* _bestIdx, uint* _nVec,
const uint* _dbIdx, const uint* _binPrefix, const uint* _binCounts,
uint _nDBBins, const float* _Q, const uint* _assignedBins,
const uint* _assignedNBins, uint _QN, uint _dim, uint _maxBins, uint _k,
uint _maxVecConsider, uint _maxVec,
uint _maxNVecPerBin) {
extern __shared__ float shm[];
float* shmIter = shm;
uint *currentBin = (uint*) shmIter;
shmIter += blockDim.x;
uint *nVec = (uint*) shmIter;
shmIter += blockDim.x;
uint *pos = (uint*) shmIter;
shmIter += blockDim.x;
uint &nBins(*(uint*) shmIter);
shmIter++;
uint &binsLeft(*(uint*) shmIter);
shmIter++;
uint &binIter(*(uint*) shmIter);
shmIter++;
uint &offset(*(uint*) shmIter);
shmIter++;
// loop over all corresponding vectors in B
for (int iter = blockIdx.x; iter < _QN; iter += gridDim.x) {
__syncthreads();
if (threadIdx.x == 0) {
nBins = _assignedNBins[iter];
nBins = (nBins < _maxBins) ? nBins : _maxBins;
binIter = nBins / blockDim.x + 1;
offset = 0;
}
__syncthreads();
for (uint bIter = 0; bIter < binIter; bIter++) {
uint bin = bIter * blockDim.x + threadIdx.x;
uint nV = 0;
if (bin < nBins) {
currentBin[threadIdx.x] = _assignedBins[iter * _maxBins + bin];
nV = (_binCounts[currentBin[threadIdx.x]] < _maxNVecPerBin) ?
_binCounts[currentBin[threadIdx.x]] : _maxNVecPerBin;
}
pos[threadIdx.x] = nV;
if (threadIdx.x == 0) {
binsLeft =
(nBins <= (blockDim.x + bin)) ?
(nBins - bin) : blockDim.x;
// if (blockIdx.x == 2) {
// printf("currentBin: %d counts: %d limited: %d \n", currentBin[threadIdx.x], _binCounts[currentBin[threadIdx.x]], nV );
// }
// printf("maxVec: %d offset: %d pos: %d \n", _maxVec, offset,
// pos[0]);
}
__syncthreads();
scan_block2(pos, false);
pos[threadIdx.x] += offset;
if ((pos[threadIdx.x] + nV) > _maxVec) {
nV = (pos[threadIdx.x] >= _maxVec) ?
0 : (_maxVec - pos[threadIdx.x]);
}
nVec[threadIdx.x] = nV;
__syncthreads();
// if (threadIdx.x == 0) {
// for (int i = 0; i < ((nBins < blockDim.x) ? nBins : blockDim.x); i++)
// printf(" %5d ", nVec[i]);
// printf("\n");
// }
if (bin < nBins)
for (int v = 0; v < nVec[threadIdx.x]; v++) {
_bestIdx[iter * _maxVec + pos[threadIdx.x] + v] =
_dbIdx[_binPrefix[currentBin[threadIdx.x]] + v];
}
__syncthreads();
if (threadIdx.x == 0) {
offset = pos[blockDim.x - 1] + nVec[blockDim.x - 1];
}
}
if (threadIdx.x == 0) {
_nVec[iter] = (offset > _maxVec) ? _maxVec : offset;
}
}
}
#else
__global__ void getKVectorIDsKernelFast(uint* _bestIdx, uint* _nVec,
const uint* _dbIdx, const uint* _binPrefix, const uint* _binCounts,
uint _nDBBins, const float* _Q, const uint* _assignedBins,
const uint* _assignedNBins, uint _QN, uint _dim, uint _maxBins, uint _k,
uint _maxVecConsider, uint _maxVec,
uint _maxNVecPerBin) {
extern __shared__ float shm[];
const uint laneSize = 32;
float* shmIter = shm;
uint *currentBin = (uint*) shmIter;
shmIter += blockDim.x;
uint *nVec = (uint*) shmIter;
shmIter += blockDim.x;
uint *pos = (uint*) shmIter;
shmIter += blockDim.x;
volatile uint *binProc = (uint*) shmIter;
shmIter += blockDim.x / laneSize;
uint &nBins(*(uint*) shmIter);
shmIter++;
uint &binsLeft(*(uint*) shmIter);
shmIter++;
uint &binIter(*(uint*) shmIter);
shmIter++;
uint &offset(*(uint*) shmIter);
shmIter++;
uint* nProcessed = (uint*) shmIter;
shmIter++;
// loop over all corresponding vectors in B
for (int iter = blockIdx.x; iter < _QN; iter += gridDim.x) {
__syncthreads();
if (threadIdx.x == 0) {
nBins = _assignedNBins[iter];
nBins = (nBins < _maxBins) ? nBins : _maxBins;
binIter = nBins / blockDim.x + 1;
offset = 0;
}
__syncthreads();
uint laneIdx = threadIdx.x % laneSize;
uint lane = threadIdx.x / laneSize;
for (uint bIter = 0; bIter < binIter; bIter++) {
uint bin = bIter * blockDim.x + threadIdx.x;
uint nV = 0;
if (bin < nBins) {
currentBin[threadIdx.x] = _assignedBins[iter * _maxBins + bin];
nV = (_binCounts[currentBin[threadIdx.x]] < _maxNVecPerBin) ? _binCounts[currentBin[threadIdx.x]] : _maxNVecPerBin;
}
pos[threadIdx.x] = nV;
if (threadIdx.x == 0) {
*nProcessed = 0;
binsLeft =
(nBins <= (blockDim.x + bin)) ?
(nBins - bin) : blockDim.x;
}
__syncthreads();
scan_block2(pos, false);
pos[threadIdx.x] += offset;
if ((pos[threadIdx.x] + nV) > _maxVec) {
nV = (pos[threadIdx.x] >= _maxVec) ?
0 : (_maxVec - pos[threadIdx.x]);
}
nVec[threadIdx.x] = nV;
__syncthreads();
#if 1
if (bin < nBins)
for (int v = 0; v < nVec[threadIdx.x]; v++) {
_bestIdx[iter * _maxVec + pos[threadIdx.x] + v] =
_dbIdx[_binPrefix[currentBin[threadIdx.x]] + v];
}
#else
// each lane is now responsible to copy the vectorIDs for one bin
do {
// fetch next bin to be processed
if (laneIdx == 0) {
binProc[lane] = atomicInc(nProcessed, 100000);
// if ((iter ==2 ) && (binProc[lane] < binsLeft)) {
// printf("%d %d %d \n", binProc[lane], currentBin[binProc[lane]], nVec[binProc[lane]]);
// }
}
if (binProc[lane] < binsLeft) {
for (int v = laneIdx; v < nVec[binProc[lane]]; v +=
laneSize)
_bestIdx[iter * _maxVec + pos[binProc[lane]] + v] =
_dbIdx[_binPrefix[currentBin[binProc[lane]]] + v];
}
}while (binProc[lane] < binsLeft);
#endif
__syncthreads();
if (threadIdx.x == 0) {
offset = pos[blockDim.x - 1] + nVec[blockDim.x - 1];
}
}
if (threadIdx.x == 0) {
_nVec[iter] = (offset > _maxVec) ? _maxVec : offset;
}
}
}
#endif
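// getKBinVectorIDsKernelFast: like the fast gathering kernel, but the candidate
// bins are not read from a precomputed list. Each bin ID is reconstructed on the
// fly from the per-part assignment indices (_assignIdx) and the traversal order
// _distSeq (mixed-radix decoding via numbers[]/denom[]), hashed with
// "% HASH_SIZE", and its vector IDs are then appended to _bestIdx as above.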
__global__ void getKBinVectorIDsKernelFast(uint* _bestIdx, uint* _nVec,
const float *_assignVal, const uint* _assignIdx, uint _p, uint _k1,
uint _nClusters1, uint _nClusters2, const uint* _dbIdx,
const uint* _binPrefix, const uint* _binCounts, uint _nDBBins,
const uint *_distSeq, uint _numDistSeq, uint _distCluster, uint _QN,
uint _maxBins, uint _maxVec, uint _maxNVecPerBin) {
extern __shared__ float shm[];
float* shmIter = shm;
// float* val = shmIter;
// shmIter += _p * _k1 * _Arows;
uint* idx = (uint*) shmIter;
shmIter += _p * _k1 * _nClusters2;
uint* numbers = (uint*) shmIter;
shmIter += _p;
uint* denom = (uint*) shmIter;
shmIter += _p;
uint *nVec = (uint*) shmIter;
shmIter += blockDim.x;
uint *pos = (uint*) shmIter;
shmIter += blockDim.x;
// uint &nBins(*(uint*) shmIter);
// shmIter++;
// uint &binsLeft(*(uint*) shmIter);
// shmIter++;
// uint &binIter(*(uint*) shmIter);
// shmIter++;
uint &offset(*(uint*) shmIter);
shmIter++;
if (threadIdx.x < _p) {
numbers[threadIdx.x] = _distCluster;
}
__syncthreads();
if (threadIdx.x == 0) {
denom[0] = 1;
for (int i = 1; i < _p; i++) {
denom[i] = denom[i - 1] * numbers[i - 1];
}
}
__syncthreads();
// loop over all corresponding vectors in B
for (int iter = blockIdx.x; iter < _QN; iter += gridDim.x) {
__syncthreads();
for (int p = 0; p < _p; p++) {
// load the best indices
if (threadIdx.x < _k1 * _nClusters2) {
// val[threadIdx.x + p * _k1 * _Arows] = _assignVal[(iter) * _k1
// * _p * _Arows + p * _k1 * _Arows + threadIdx.x];
idx[threadIdx.x + p * _k1 * _nClusters2] = _assignIdx[(iter)
* _k1 * _p * _nClusters2 + p * _k1 * _nClusters2
+ threadIdx.x];
}
}
if (threadIdx.x == 0) {
offset = 0;
}
__syncthreads();
for (uint bIter = 0; bIter < _maxBins; bIter += blockDim.x) {
if (offset > _maxVec)
break;
__syncthreads();
uint bin = bIter + threadIdx.x;
uint nV = 0;
uint currentBin;
// if (bin < nBins)
{
// float ddd = 0.;
uint oIdx = 0;
for (int p = 0; p < _p; p++) {
uint bp = (_distSeq[bin] / denom[p]) % numbers[p];
// ddd += val[bp + p * _k1 * _Arows];
oIdx = (oIdx * _nClusters1 * _nClusters2)
+ idx[bp + p * _k1 * _nClusters2];
}
// dist[threadIdx.x] = ddd;
currentBin = oIdx % HASH_SIZE;
nV = (_binCounts[currentBin] < _maxNVecPerBin) ?
_binCounts[currentBin] : _maxNVecPerBin;
// if (iter == 0)
// printf( "%d %d %d %d \n", bIter, offset, nV, oIdx);
}
pos[threadIdx.x] = nV;
__syncthreads();
scan_block2(pos, false);
pos[threadIdx.x] += offset;
if ((pos[threadIdx.x] + nV) > _maxVec) {
nV = (pos[threadIdx.x] >= _maxVec) ?
0 : (_maxVec - pos[threadIdx.x]);
}
nVec[threadIdx.x] = nV;
// if (bin < nBins)
for (int v = 0; v < nVec[threadIdx.x]; v++) {
_bestIdx[iter * _maxVec + pos[threadIdx.x] + v] =
_dbIdx[_binPrefix[currentBin] + v];
}
__syncthreads();
if (threadIdx.x == 0) {
offset = pos[blockDim.x - 1] + nVec[blockDim.x - 1];
}
}
__syncthreads();
if (threadIdx.x == 0) {
_nVec[iter] = (offset > _maxVec) ? _maxVec : offset;
}
}
}
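// getPerturbationKBestVectorsKernel: exact reranking of the gathered candidates.
// One block per query loads the query vector (one component per thread, so
// blockDim.x is expected to cover _dim), computes the squared L2 distance to
// each candidate with a shared-memory tree reduction, sorts the candidates with
// bitonic3 (which appears to assume _maxVec <= blockDim.x) and writes the _k
// best distance/index pairs.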
__global__ void getPerturbationKBestVectorsKernel(float *_bestDist,
uint* _bestIdx, const uint* _inIdx, const uint* _nVec, uint _maxVecIn,
const float* _dbVec, const float* _Q, uint _QN, uint _dim,
uint _maxBins, uint _k, uint _maxVec) {
extern __shared__ float shm[];
float* shmIter = shm + _dim;
float* val = shmIter;
shmIter += _maxVec;
uint* idx = (uint*) shmIter;
shmIter += _maxVec;
// in shm;
uint &nVec(*(uint*) shmIter);
shmIter++;
// loop over all corresponding vectors in B
for (int iter = blockIdx.x; iter < _QN; iter += gridDim.x) {
__syncthreads();
if (threadIdx.x == 0) {
nVec = _nVec[iter];
nVec = (nVec < _maxVec) ? nVec : _maxVec;
if (iter % 1000 == 0) {
printf("nVec: %d \n", nVec);
}
}
__syncthreads();
// load query vector
float b;
if (threadIdx.x < _dim)
b = _Q[iter * _dim + threadIdx.x];
// load all indices
for (int a = threadIdx.x; a < nVec; a += blockDim.x) {
idx[a] = _inIdx[iter * _maxVecIn + a];
if (idx[a] >= 1000000) {
printf("panic: %d %d %d %d \n ", idx[a], iter, a, nVec);
}
}
__syncthreads();
// if (threadIdx.x == 0) {
// for (int a = 0; a < nVec; a++) {
// printf("idx: %d %d \n", a, idx[a]);
// }
// }
// loop over all selected vectors
for (int a = 0; a < nVec; a++) {
// compute the distance to the vector
// if (threadIdx.x < _dim) {
// uint loc = idx[a] * _dim + threadIdx.x;
// float v = _dbVec[loc];
//
//// if ((blockIdx.x == 90) && (a == 110))
//// printf( "got: %d %d %f \n", loc, idx[a], v);
//
// shm[threadIdx.x] = sqr( b - v );
// }
if (threadIdx.x < _dim) {
// if (idx[a] < 1000000)
shm[threadIdx.x] = sqr(b - _dbVec[idx[a] * _dim + threadIdx.x]);
}
for (uint stride = _dim >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if (threadIdx.x < stride)
shm[threadIdx.x] += shm[threadIdx.x + stride];
}
__syncthreads();
// store the result
if (threadIdx.x == 0) {
val[a] = shm[0];
// printf("idx: %d dist: %f \n", idx[a], val[a]);
}
__syncthreads();
}
// sort the results
if ((threadIdx.x >= nVec) && (threadIdx.x < _maxVec))
val[threadIdx.x] = 10000000.;
__syncthreads();
bitonic3(val, idx, _maxVec);
if ((threadIdx.x >= nVec) && (threadIdx.x < _maxVec))
val[threadIdx.x] = 0.;
if (threadIdx.x < _k) {
_bestDist[iter * _k + threadIdx.x] = val[threadIdx.x];
_bestIdx[iter * _k + threadIdx.x] = idx[threadIdx.x];
}
__syncthreads();
}
}
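// getPerturbationKBestVectorsKernelLarge: same exact reranking as above, but
// the index/value buffers are filled with strided loops and sorted with
// bitonicLarge, so _maxVec may exceed the thread-block size.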
__global__ void getPerturbationKBestVectorsKernelLarge(float *_bestDist,
uint* _bestIdx, const uint* _inIdx, const uint* _nVec, uint _maxVecIn,
const float* _dbVec, const float* _Q, uint _QN, uint _dim,
uint _maxBins, uint _k, uint _maxVec) {
extern __shared__ float shm[];
float* shmIter = shm + _dim;
float* val = shmIter;
shmIter += _maxVec;
uint* idx = (uint*) shmIter;
shmIter += _maxVec;
// in shm;
uint &nVec(*(uint*) shmIter);
shmIter++;
// loop over all corresponding vectors in B
for (int iter = blockIdx.x; iter < _QN; iter += gridDim.x) {
__syncthreads();
if (threadIdx.x == 0) {
nVec = _nVec[iter];
nVec = (nVec < _maxVec) ? nVec : _maxVec;
if (iter % 1000 == 0) {
printf("nVec: %d \n", nVec);
}
}
__syncthreads();
// load query vector
float b;
if (threadIdx.x < _dim)
b = _Q[iter * _dim + threadIdx.x];
// load all indices
for (int a = threadIdx.x; a < nVec; a += blockDim.x) {
idx[a] = _inIdx[iter * _maxVecIn + a];
if (idx[a] >= 1000000) {
printf("panic: %d %d %d %d \n ", idx[a], iter, a, nVec);
}
}
__syncthreads();
// if (threadIdx.x == 0) {
// for (int a = 0; a < nVec; a++) {
// printf("idx: %d %d \n", a, idx[a]);
// }
// }
// loop over all selected vectors
for (int a = 0; a < nVec; a++) {
// compute the distance to the vector
// if (threadIdx.x < _dim) {
// uint loc = idx[a] * _dim + threadIdx.x;
// float v = _dbVec[loc];
//
//// if ((blockIdx.x == 90) && (a == 110))
//// printf( "got: %d %d %f \n", loc, idx[a], v);
//
// shm[threadIdx.x] = sqr( b - v );
// }
if (threadIdx.x < _dim) {
// if (idx[a] < 1000000)
shm[threadIdx.x] = sqr(b - _dbVec[idx[a] * _dim + threadIdx.x]);
}
for (uint stride = _dim >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if (threadIdx.x < stride)
shm[threadIdx.x] += shm[threadIdx.x + stride];
}
__syncthreads();
// store the result
if (threadIdx.x == 0) {
val[a] = shm[0];
// printf("idx: %d dist: %f \n", idx[a], val[a]);
}
__syncthreads();
}
// sort the results
for (int i = threadIdx.x; i < _maxVec; i += blockDim.x)
if (i >= nVec)
val[i] = 10000000.;
__syncthreads();
bitonicLarge(val, idx, _maxVec);
// for (int i = threadIdx.x; i < _maxVec; i += blockDim)
// if (threadIdx.x >= nVec)
// val[i] = 0.;
for (uint i = threadIdx.x; i < _k; i += blockDim.x)
if (i < _k) {
_bestDist[iter * _k + i] = val[i];
_bestIdx[iter * _k + i] = idx[i];
}
__syncthreads();
}
}
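// Host-side two-stage query: stage 1 (getKVectorIDsKernel) collects up to
// maxVecOut candidate IDs per query from the assigned bins, stage 2
// (getPerturbationKBestVectorsKernel) computes exact distances to those
// candidates and keeps the _k best. selectIdx / nVec are temporary device
// buffers holding the stage-1 output.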
void PerturbationProTree::getKBestVectors(float *_bestDist, uint *_bestIdx,
const uint *_bins, const uint *_nBins, uint _maxBins, const float* _Q,
uint _QN, uint _k) {
uint nnn = log2(_k);
// nnn = 1024;
std::cout << "nnn: " << nnn << std::endl;
uint maxVecConsider = nnn;
uint maxVecOut = nnn;
uint nThreads = (maxVecConsider < 1024) ? maxVecConsider : 1024;
dim3 block(nThreads, 1, 1);
dim3 grid((_QN < 1024) ? _QN : 1024, 1, 1);
uint hash = 2048;
uint shmSize = (maxVecConsider
+ ((hash > maxVecConsider) ? hash : maxVecConsider) + 1 + 10)
* sizeof(float);
uint *selectIdx; // array for storing nn vector IDs (size maxVecOut * _QN)
uint *nVec;
cudaMalloc(&selectIdx, _QN * _k * sizeof(uint));
cudaMalloc(&nVec, _QN * sizeof(uint));
if ((!selectIdx) || (!nVec)) {
std::cout << "getKBestVectors: did not get memory !" << std::endl;
exit(1);
}
cudaMemset(selectIdx, 0, _QN * _k * sizeof(uint));
getKVectorIDsKernel<<<grid, block, shmSize>>>(selectIdx, nVec, d_dbIdx,
d_binPrefix, d_binCounts, d_nBins, _Q, _bins, _nBins, _QN, d_dim,
_maxBins, _k, maxVecConsider, maxVecOut, 80);
checkCudaErrors(cudaDeviceSynchronize());
// std::cout << "multi Vector IDs done" << std::endl;
//// _QN = 1;
//
// outputVecUint("selectIdx", selectIdx, 100);
// uint maxVec = 2 * log2(_k);
uint maxVec = log2(_k);
nThreads = (maxVec > d_dim) ? maxVec : d_dim;
nThreads = (nThreads < 1024) ? nThreads : 1024;
block = dim3(nThreads, 1, 1);
shmSize = (d_dim + 2 * maxVec + 10) * sizeof(float);
std::cout << "maxVec: " << maxVec << " shm: " << shmSize << std::endl;
getPerturbationKBestVectorsKernel<<<grid, block, shmSize>>>(_bestDist,
_bestIdx, selectIdx, nVec, maxVecOut, d_dbVec, _Q, _QN, d_dim,
_maxBins, _k, maxVec);
checkCudaErrors(cudaDeviceSynchronize());
std::cout << "multiKBestVectors done " << std::endl;
cudaFree(nVec);
cudaFree(selectIdx);
}
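// Same pipeline as getKBestVectors, but uses the *Large kernel variants so the
// candidate count per query may exceed the thread-block size.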
void PerturbationProTree::getKBestVectorsLarge(float *_bestDist, uint *_bestIdx,
const uint *_bins, const uint *_nBins, uint _maxBins, const float* _Q,
uint _QN, uint _k) {
uint nnn = log2(_k);
std::cout << "large: nnn: " << nnn << " k: " << _k << std::endl;
uint maxVecConsider = nnn;
uint maxVecOut = nnn;
uint nThreads = (maxVecConsider < 1024) ? maxVecConsider : 1024;
dim3 block(nThreads, 1, 1);
dim3 grid((_QN < 1024) ? _QN : 1024, 1, 1);
uint hash = 2048;
uint shmSize = (maxVecConsider
+ ((hash > maxVecConsider) ? hash : maxVecConsider) + 1 + 10)
* sizeof(float);
uint *selectIdx; // array for storing nn vector IDs (size maxVecOut * _QN)
uint *nVec;
cudaMalloc(&selectIdx, _QN * _k * sizeof(uint));
cudaMalloc(&nVec, _QN * sizeof(uint));
if ((!selectIdx) || (!nVec)) {
std::cout << "getKBestVectors: did not get memory !" << std::endl;
exit(1);
}
cudaMemset(selectIdx, 0, _QN * _k * sizeof(uint));
// uint* binStart;
// cudaMalloc(&binStart, d_nDBs * sizeof(uint));
// cudaMemset(binStart, 0, d_nDBs * sizeof(uint));
getKVectorIDsKernelLarge<<<grid, block, shmSize>>>(selectIdx, nVec, d_dbIdx,
d_binPrefix, d_binCounts, d_nBins, _Q, _bins, _nBins, _QN, d_dim,
_maxBins, _k, maxVecConsider, maxVecOut, 80);
checkCudaErrors(cudaDeviceSynchronize());
// std::cout << "multi Vector IDs done" << std::endl;
//// _QN = 1;
//
// outputVecUint("selectIdx", selectIdx, 1751);
// uint maxVec = 2 * log2(_k);
uint maxVec = log2(_k);
nThreads = (maxVec > d_dim) ? maxVec : d_dim;
nThreads = (nThreads < 1024) ? nThreads : 1024;
block = dim3(nThreads, 1, 1);
shmSize = (d_dim + 2 * maxVec + 10) * sizeof(float);
std::cout << "maxVec: " << maxVec << " shm: " << shmSize << std::endl;
getPerturbationKBestVectorsKernelLarge<<<grid, block, shmSize>>>(_bestDist,
_bestIdx, selectIdx, nVec, maxVecOut, d_dbVec, _Q, _QN, d_dim,
_maxBins, _k, maxVec);
checkCudaErrors(cudaDeviceSynchronize());
std::cout << "multiKBestVectors done " << std::endl;
cudaFree(nVec);
cudaFree(selectIdx);
}
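// rerankKernel: approximate reranking. For every candidate the distance is
// estimated per line part from the precomputed query-to-centroid distances
// (_queryL1), the centroid-to-centroid distance (_cbDist) and the stored line
// parameters (_lineP1/_lineP2/_lineLambda); the parts are summed, triangle
// inequality violations are reported via printf, and the candidates are sorted
// with bitonic3 / bitonicLarge before the _k best are written out.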
__global__ void rerankKernel(float* _bestDist, uint* _bestIdx,
const uint* _inIdx, const uint* _nVec, uint _maxVecIn,
const float* _cbDist, uint _nClusters, uint _lineParts,
const float* _lineLambda, const uint* _lineP1, const uint* _lineP2,
const float* _queryL1, uint _QN, uint _dim, uint _maxBins, uint _k,
uint _maxVec) {
extern __shared__ float shm[];
float* shmIter = shm;
float* queryDist = shmIter;
shmIter += _lineParts * _nClusters;
uint* idx = (uint*) shmIter;
shmIter += _maxVec;
float* val = shmIter;
shmIter += _maxVec;
uint &nVec(*(uint*) shmIter);
shmIter++;
for (uint iter = blockIdx.x; iter < _QN; iter += gridDim.x) {
__syncthreads();
// load queryDistance
for (int p = 0; p < _lineParts; p++) {
if (threadIdx.x < _nClusters)
queryDist[p * _nClusters + threadIdx.x] =
_queryL1[iter * _lineParts * _nClusters + p * _nClusters
+ threadIdx.x];
//
// if (threadIdx.x == 0) {
// for (int c = 0; c < _nClusters; c++)
// printf("\t %f", queryDist[p * _nClusters + c]);
// printf("\n");
// }
}
if (threadIdx.x == 0) {
nVec = _nVec[iter];
nVec = (nVec < _maxVec) ? nVec : _maxVec;
}
__syncthreads();
// compute the distances to all line approximations
for (int a = threadIdx.x; a < nVec; a += blockDim.x) {
idx[a] = _inIdx[iter * _maxVecIn + a];
float totalDist = 0.;
for (uint p = 0; p < _lineParts; p++) {
uint l1 = _lineP1[idx[a] * _lineParts + p];
uint l2 = _lineP2[idx[a] * _lineParts + p];
float lambda = _lineLambda[idx[a] * _lineParts + p];
float c2 = _cbDist[l2 * _nClusters * _lineParts
+ l1 * _lineParts + p];
float d = dist(queryDist[p * _nClusters + l1],
queryDist[p * _nClusters + l2], c2, lambda);
totalDist += d;
if (!isTriangle(queryDist[p * _nClusters + l1],
queryDist[p * _nClusters + l2], c2))
printf("non-triangle: l1/l2 %d %d === %f %f %f = %f %f \n",
l1, l2, queryDist[p * _nClusters + l1],
queryDist[p * _nClusters + l2], c2, d, lambda);
}
val[a] = totalDist;
}
// sort the results
for (int i = threadIdx.x; i < _maxVec; i += blockDim.x)
if (i >= nVec)
val[i] = 10000000.;
__syncthreads();
if (_maxVec <= blockDim.x)
bitonic3(val, idx, _maxVec);
else
bitonicLarge(val, idx, _maxVec);
for (uint i = threadIdx.x; i < _k; i += blockDim.x)
if (i < _k) {
_bestDist[iter * _k + i] = val[i];
_bestIdx[iter * _k + i] = idx[i];
}
__syncthreads();
}
}
__device__ void myAdd(volatile float* _a, volatile float* _b) {
*_a += *_b;
}
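// warpReduceSum: sums _val over a group of _p consecutive lanes using the
// legacy (pre-*_sync) __shfl_down intrinsic; the lowest lane of each group ends
// up with the group sum. _p is expected to be a power of two no larger than the
// warp size.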
__inline__ __device__ float warpReduceSum(float _val, int _p) {
for (int stride = _p >> 1; stride > 0; stride >>= 1)
_val += __shfl_down(_val, stride);
return _val;
}
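// rerankKernelFast: lane-parallel version of the line-based reranking. Groups
// of _lineParts threads each take one candidate: every thread in the group
// evaluates one part and the partial distances are combined with warpReduceSum,
// so the group leader (p == 0) holds the total. Candidates are handed out
// dynamically through atomicInc on the shared counter nProcessed, and the
// per-part line parameters are packed into a lineDescr bit field stored in
// _lineLambda. This presumably requires _lineParts to be a power of two that
// divides the warp size and relies on implicit warp-synchronous execution.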
__global__ void rerankKernelFast(float* _bestDist, uint* _bestIdx,
const uint* _inIdx, const uint* _nVec, uint _maxVecIn,
const float* _cbDist, uint _nClusters, uint _lineParts,
const float* _lineLambda, const float* _queryL1, uint _QN, uint _dim,
uint _maxBins, uint _k, uint _maxVec) {
extern __shared__ float shm[];
float* shmIter = shm;
float* queryDist = shmIter;
shmIter += _lineParts * _nClusters;
uint* idx = (uint*) shmIter;
shmIter += _maxVec;
float* val = shmIter;
shmIter += _maxVec;
uint* laneA = (uint*) shmIter;
shmIter += blockDim.x / _lineParts;
// volatile float* d = shmIter;
// shmIter += blockDim.x;
uint &nVec(*(uint*) shmIter);
shmIter++;
uint* nProcessed = (uint*) shmIter;
shmIter++;
for (uint iter = blockIdx.x; iter < _QN; iter += gridDim.x) {
__syncthreads();
// load queryDistance
for (int p = 0; p < _lineParts; p++) {
if (threadIdx.x < _nClusters)
queryDist[p * _nClusters + threadIdx.x] =
_queryL1[iter * _lineParts * _nClusters + p * _nClusters
+ threadIdx.x];
}
if (threadIdx.x == 0) {
nVec = _nVec[iter];
nVec = (nVec < _maxVec) ? nVec : _maxVec;
*nProcessed = 0;
}
__syncthreads();
#if 0
// compute the distances to all line approximations
for (int a = threadIdx.x; a < nVec; a += blockDim.x) {
idx[a] = _inIdx[iter * _maxVecIn + a];
float totalDist = 0.;
for (uint p = 0; p < _lineParts; p++) {
float l = _lineLambda[idx[a] * +_lineParts + p];
lineDescr& line( *( (lineDescr*)&l));
uint l1 = line.p1;
uint l2 = line.p2;
float lambda = toFloat(line.lambda);
float c2 = _cbDist[l2 * _nClusters * _lineParts
+ l1 * _lineParts + p];
float d = dist(queryDist[p * _nClusters + l1],
queryDist[p * _nClusters + l2], c2, lambda);
totalDist += d;
//
// if (!isTriangle(queryDist[p * _nClusters + l1],
// queryDist[p * _nClusters + l2], c2))
// printf("non-triangle: l1/l2 %d %d === %f %f %f = %f %f \n",
// l1, l2, queryDist[p * _nClusters + l1],
// queryDist[p * _nClusters + l2], c2, d, lambda);
}
// if (iter == 0)
// printf("%d %f \n", a, totalDist);
val[a] = totalDist;
}
#else
// compute the distance in parallel
// p threads work on one proposed vectorID
uint p = threadIdx.x % _lineParts;
uint lane = threadIdx.x / _lineParts;
// fetch next a;
if (p == 0) {
laneA[lane] = atomicInc(nProcessed, 100000);
if (laneA[lane] < nVec)
idx[laneA[lane]] = _inIdx[iter * _maxVecIn + laneA[lane]];
}
// __syncthreads();
float ddd = 0.;
if (laneA[lane] < nVec)
do {
float l = _lineLambda[idx[laneA[lane]] * _lineParts + p];
lineDescr& line(*((lineDescr*) &l));
uint l1 = line.p1;
uint l2 = line.p2;
float lambda = toFloat(line.lambda);
float c2 = _cbDist[l2 * _nClusters * _lineParts
+ l1 * _lineParts + p];
ddd = dist(queryDist[p * _nClusters + l1],
queryDist[p * _nClusters + l2], c2, lambda);
ddd = warpReduceSum(ddd, _lineParts);
// store result
if (p == 0) {
val[laneA[lane]] = ddd;
//
// if (iter == 0)
// printf("%d %f \n", laneA[lane], ddd );
laneA[lane] = atomicInc(nProcessed, 100000);
if (laneA[lane] < nVec)
idx[laneA[lane]] =
_inIdx[iter * _maxVecIn + laneA[lane]];
}
} while (laneA[lane] < nVec);
#endif
// __syncthreads();
// sort the results
for (int i = threadIdx.x; i < _maxVec; i += blockDim.x)
if (i >= nVec)
val[i] = 10000000.;
__syncthreads();
if (_maxVec <= blockDim.x)
bitonic3(val, idx, _maxVec);
else
bitonicLarge(val, idx, _maxVec);
for (uint i = threadIdx.x; i < _k; i += blockDim.x)
if (i < _k) {
_bestDist[iter * _k + i] = val[i];
_bestIdx[iter * _k + i] = idx[i];
}
__syncthreads();
}
}
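// rerankBIGKernelFast: identical to rerankKernelFast except that the
// _lineLambda offset is accumulated in size_t, avoiding 32-bit overflow for
// very large databases.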
__global__ void rerankBIGKernelFast(float* _bestDist, uint* _bestIdx,
const uint* _inIdx, const uint* _nVec, uint _maxVecIn,
const float* _cbDist, uint _nClusters, uint _lineParts,
const float* _lineLambda, const float* _queryL1, uint _QN, uint _dim,
uint _maxBins, uint _k, uint _maxVec) {
extern __shared__ float shm[];
float* shmIter = shm;
float* queryDist = shmIter;
shmIter += _lineParts * _nClusters;
uint* idx = (uint*) shmIter;
shmIter += _maxVec;
float* val = shmIter;
shmIter += _maxVec;
uint* laneA = (uint*) shmIter;
shmIter += blockDim.x / _lineParts;
// volatile float* d = shmIter;
// shmIter += blockDim.x;
uint &nVec(*(uint*) shmIter);
shmIter++;
uint* nProcessed = (uint*) shmIter;
shmIter++;
for (uint iter = blockIdx.x; iter < _QN; iter += gridDim.x) {
__syncthreads();
// load queryDistance
for (int p = 0; p < _lineParts; p++) {
if (threadIdx.x < _nClusters)
queryDist[p * _nClusters + threadIdx.x] =
_queryL1[iter * _lineParts * _nClusters + p * _nClusters
+ threadIdx.x];
}
if (threadIdx.x == 0) {
nVec = _nVec[iter];
nVec = (nVec < _maxVec) ? nVec : _maxVec;
*nProcessed = 0;
}
__syncthreads();
#if 0
// compute the distances to all line approximations
for (int a = threadIdx.x; a < nVec; a += blockDim.x) {
idx[a] = _inIdx[iter * _maxVecIn + a];
float totalDist = 0.;
for (uint p = 0; p < _lineParts; p++) {
float l = _lineLambda[idx[a] * +_lineParts + p];
lineDescr& line( *( (lineDescr*)&l));
uint l1 = line.p1;
uint l2 = line.p2;
float lambda = toFloat(line.lambda);
float c2 = _cbDist[l2 * _nClusters * _lineParts
+ l1 * _lineParts + p];
float d = dist(queryDist[p * _nClusters + l1],
queryDist[p * _nClusters + l2], c2, lambda);
totalDist += d;
//
// if (!isTriangle(queryDist[p * _nClusters + l1],
// queryDist[p * _nClusters + l2], c2))
// printf("non-triangle: l1/l2 %d %d === %f %f %f = %f %f \n",
// l1, l2, queryDist[p * _nClusters + l1],
// queryDist[p * _nClusters + l2], c2, d, lambda);
}
// if (iter == 0)
// printf("%d %f \n", a, totalDist);
val[a] = totalDist;
}
#else
// compute the distance in parallel
// p threads work on one proposed vectorID
uint p = threadIdx.x % _lineParts;
uint lane = threadIdx.x / _lineParts;
// fetch next a;
if (p == 0) {
laneA[lane] = atomicInc(nProcessed, 100000);
if (laneA[lane] < nVec)
idx[laneA[lane]] = _inIdx[iter * _maxVecIn + laneA[lane]];
}
// __syncthreads();
#if 1
float ddd = 0.;
if (laneA[lane] < nVec)
do {
size_t offset = idx[laneA[lane]];
offset *= _lineParts;
offset += p;
// float l = _lineLambda[idx[laneA[lane]] * _lineParts + p];
float l = _lineLambda[offset];
lineDescr& line(*((lineDescr*) &l));
uint l1 = line.p1;
uint l2 = line.p2;
float lambda = toFloat(line.lambda);
// if (threadIdx.x == 0) {
// if ((p ==0) && (iter == 0)) {
// printf("lam%f l1%d l2%d \n", lambda, l1, l2);
// }
float c2 = _cbDist[l2 * _nClusters * _lineParts
+ l1 * _lineParts + p];
ddd = dist(queryDist[p * _nClusters + l1],
queryDist[p * _nClusters + l2], c2, lambda);
ddd = warpReduceSum(ddd, _lineParts);
// store result
if (p == 0) {
val[laneA[lane]] = ddd;
// if (iter == 0)
// printf("%d %f \n", laneA[lane], ddd );
laneA[lane] = atomicInc(nProcessed, 100000);
if (laneA[lane] < nVec)
idx[laneA[lane]] =
_inIdx[iter * _maxVecIn + laneA[lane]];
}
} while (laneA[lane] < nVec);
#endif
#endif
// __syncthreads();
// sort the results
for (int i = threadIdx.x; i < _maxVec; i += blockDim.x)
if (i >= nVec)
val[i] = 10000000.;
__syncthreads();
if (_maxVec <= blockDim.x)
bitonic3(val, idx, _maxVec);
else
bitonicLarge(val, idx, _maxVec);
for (uint i = threadIdx.x; i < _k; i += blockDim.x)
if (i < _k) {
_bestDist[iter * _k + i] = val[i];
_bestIdx[iter * _k + i] = idx[i];
}
__syncthreads();
}
}
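// rerankBIGKernelPerfect: exact reranking that streams the raw (uint8_t)
// database vectors through _dbVec, loading the _dim bytes of each candidate as
// _dim / 4 floats into shared memory, and keeps only the single closest
// candidate per query (debug printfs included).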
/** assumes vectors in pinned memory, only outputs the best result */
__global__ void rerankBIGKernelPerfect(float* _bestDist, uint* _bestIdx,
const uint* _inIdx, const uint* _nVec, uint _maxVecIn, const float* _Q, uint _QN, const float* _dbVec,
uint _dim, uint _maxBins, uint _k, uint _maxVec) {
extern __shared__ float shm[];
float* shmIter = shm;
uint8_t* vec = (uint8_t*) shmIter;
float* vecf = (float*) vec;
shmIter += _dim / 4;
float* dist = shmIter;
shmIter += blockDim.x;
uint &nVec(*(uint*) shmIter);
shmIter++;
for (uint iter = blockIdx.x; iter < _QN; iter += gridDim.x) {
float minD = 99999999999.;
uint minIdx = 0;
__syncthreads();
// load query vector
float v = _Q[iter * _dim + threadIdx.x];
if (threadIdx.x == 0) {
nVec = _nVec[iter];
nVec = (nVec < _maxVec) ? nVec : _maxVec;
}
__syncthreads();
for (int i = 0; i < nVec; i++) {
__syncthreads();
// load from CPU to shm (assumes uint8_t[_dim])
if (threadIdx.x < _dim / 4)
vecf[threadIdx.x] = _dbVec[_inIdx[iter * _maxVecIn + i]
* _dim / 4 + threadIdx.x];
__syncthreads();
// compute distance
float d = v - (float) vec[threadIdx.x];
dist[threadIdx.x] = d * d;
// reduction
for (int stride = _dim >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if (threadIdx.x < stride)
dist[threadIdx.x] += dist[threadIdx.x + stride];
}
__syncthreads();
if (threadIdx.x == 0) {
if ((blockIdx.x == 0) && (iter == 0)) {
printf( "%d: processing %d ( %d %d %d ) = %f\n", i, _inIdx[iter* _maxVecIn +i], vec[0], vec[1], vec[2], dist[0]);
}
if (dist[0] < minD) {
minD = dist[0];
minIdx = _inIdx[iter * _maxVecIn + i];
}
}
}
__syncthreads();
// store single output
if (threadIdx.x == 0) {
printf("%d: %d %f \n", (iter+9000), minIdx, minD );
_bestDist[iter * _k] = minD;
_bestIdx[iter * _k] = minIdx;
}
__syncthreads();
}
}
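// rerankKernelDirectFast: debugging variant. The distance computation is
// compiled out (#if 0), so the kernel currently just copies the candidate IDs
// through to _bestIdx without ranking them; the distances written to _bestDist
// come from the uninitialized val[] buffer.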
__global__ void rerankKernelDirectFast(float* _bestDist, uint* _bestIdx,
const uint* _inIdx, const uint* _nVec, uint _maxVecIn,
const float* _cbDist, uint _nClusters, uint _lineParts,
const float* _lineLambda, const float* _queryL1, uint _QN, uint _dim,
uint _maxBins, uint _k, uint _maxVec) {
extern __shared__ float shm[];
float* shmIter = shm;
float* queryDist = shmIter;
shmIter += _lineParts * _nClusters;
uint* idx = (uint*) shmIter;
shmIter += _maxVec;
float* val = shmIter;
shmIter += _maxVec;
// uint* laneA = (uint*) shmIter;
shmIter += blockDim.x / _lineParts;
// volatile float* d = shmIter;
// shmIter += blockDim.x;
uint &nVec(*(uint*) shmIter);
shmIter++;
uint* nProcessed = (uint*) shmIter;
shmIter++;
for (uint iter = blockIdx.x; iter < _QN; iter += gridDim.x) {
__syncthreads();
// load queryDistance
for (int p = 0; p < _lineParts; p++) {
if (threadIdx.x < _nClusters)
queryDist[p * _nClusters + threadIdx.x] =
_queryL1[iter * _lineParts * _nClusters + p * _nClusters
+ threadIdx.x];
}
if (threadIdx.x == 0) {
nVec = _nVec[iter];
nVec = (nVec < _maxVec) ? nVec : _maxVec;
*nProcessed = 0;
}
__syncthreads();
// compute the distance in parallel
// p threads work on one proposed vectorID
uint p = threadIdx.x % _lineParts;
uint lane = threadIdx.x / _lineParts;
#if 0
// fetch next a;
if (p == 0) {
laneA[lane] = atomicInc(nProcessed, 100000);
if (laneA[lane] < nVec)
idx[laneA[lane]] = _inIdx[iter * _maxVecIn + laneA[lane]];
}
// __syncthreads();
float ddd = 0.;
if (laneA[lane] < nVec)
do {
// uint ii = iter * _maxVecIn + laneA[lane];
// float l = _lineLambda[ii * _lineParts + p];
// lineDescr& line(*((lineDescr*) &l));
//
// uint l1 = line.p1;
// uint l2 = line.p2;
// float lambda = toFloat(line.lambda);
//
// float c2 = _cbDist[l2 * _nClusters * _lineParts
// + l1 * _lineParts + p];
//
// ddd = dist(queryDist[p * _nClusters + l1],
// queryDist[p * _nClusters + l2], c2, lambda);
//
// ddd = warpReduceSum(ddd, _lineParts);
// store result
if (p == 0) {
if (laneA[lane] == 3494) {
printf("3494: %d \n", idx[laneA[lane]]);
}
val[laneA[lane]] = ddd;
//
// if (iter == 0)
// printf("%d %f \n", laneA[lane], ddd );
laneA[lane] = atomicInc(nProcessed, 100000);
if (laneA[lane] < nVec)
idx[laneA[lane]] =
_inIdx[iter * _maxVecIn + laneA[lane]];
}
}while (laneA[lane] < nVec);
#endif
__syncthreads();
uint la;
// float ddd = 0.;
for (la = lane; la < nVec; la += blockDim.x / _lineParts) {
if (p == 0) {
// printf("lane: %d %d %d \n", lane, la, nVec);
// if ((la == 3494)) {
// printf("3494 in: %d %d \n", iter, _inIdx[ iter * _maxVecIn + la]);
// }
// val[la] = ddd;
//
// if (iter == 0)
// printf("%d %f \n", laneA[lane], ddd );
idx[la] = _inIdx[iter * _maxVecIn + la];
}
}
__syncthreads();
// sort the results
// for (int i = threadIdx.x; i < _maxVec; i += blockDim.x)
// if (i >= nVec)
// val[i] = 10000000.;
__syncthreads();
// if (_maxVec <= blockDim.x)
// bitonic3(val, idx, _maxVec);
// else
// bitonicLarge(val, idx, _maxVec);
for (uint i = threadIdx.x; i < nVec; i += blockDim.x) {
_bestDist[iter * _k + i] = val[i];
_bestIdx[iter * _k + i] = idx[i];
if ((iter * _k + i) == 3494) {
printf("3494 out: %d \n", idx[i]);
}
}
__syncthreads();
}
}
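// rerankKernelFastLoop: processes the candidates in chunks of 4096 so the
// shared-memory buffers stay bounded. Each chunk is scored with the same
// lane-parallel line-approximation scheme as rerankKernelFast and written back
// unsorted (the in-kernel bitonic sort is disabled); the caller sorts each
// query's result range afterwards, e.g. with a per-query thrust::sort_by_key.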
__global__ void rerankKernelFastLoop(float* _bestDist, uint* _bestIdx,
const uint* _inIdx, const uint* _nVec, uint _maxVecIn,
const float* _cbDist, uint _nClusters, uint _lineParts,
const float* _lineLambda, const float* _queryL1, uint _QN, uint _dim,
uint _maxBins, uint _k, uint _maxVec) {
extern __shared__ float shm[];
float* shmIter = shm;
float* queryDist = shmIter;
shmIter += _lineParts * _nClusters;
uint* idx = (uint*) shmIter;
shmIter += 4096;
float* val = shmIter;
shmIter += 4096;
uint* laneA = (uint*) shmIter;
shmIter += blockDim.x / _lineParts;
// volatile float* d = shmIter;
// shmIter += blockDim.x;
uint &nVec(*(uint*) shmIter);
shmIter++;
uint* nProcessed = (uint*) shmIter;
shmIter++;
for (uint iter = blockIdx.x; iter < _QN; iter += gridDim.x) {
__syncthreads();
// load queryDistance
for (int p = 0; p < _lineParts; p++) {
if (threadIdx.x < _nClusters)
queryDist[p * _nClusters + threadIdx.x] =
_queryL1[iter * _lineParts * _nClusters + p * _nClusters
+ threadIdx.x];
}
for (uint vIter = 0; vIter < _maxVec / 4096; vIter++) {
__syncthreads();
uint vOffs = vIter * 4096;
if (threadIdx.x == 0) {
nVec = _nVec[iter] - vOffs;
nVec = (nVec < _maxVec) ? nVec : _maxVec;
nVec = (nVec < 4096) ? nVec : 4096;
*nProcessed = 0;
}
__syncthreads();
#if 0
// compute the distances to all line approximations
for (int a = threadIdx.x; a < nVec; a += blockDim.x) {
idx[a] = _inIdx[iter * _maxVecIn + a + vOffs];
float totalDist = 0.;
for (uint p = 0; p < _lineParts; p++) {
float l = _lineLambda[idx[a] * +_lineParts + p];
lineDescr& line( *( (lineDescr*)&l));
uint l1 = line.p1;
uint l2 = line.p2;
float lambda = toFloat(line.lambda);
float c2 = _cbDist[l2 * _nClusters * _lineParts
+ l1 * _lineParts + p];
float d = dist(queryDist[p * _nClusters + l1],
queryDist[p * _nClusters + l2], c2, lambda);
totalDist += d;
//
// if (!isTriangle(queryDist[p * _nClusters + l1],
// queryDist[p * _nClusters + l2], c2))
// printf("non-triangle: l1/l2 %d %d === %f %f %f = %f %f \n",
// l1, l2, queryDist[p * _nClusters + l1],
// queryDist[p * _nClusters + l2], c2, d, lambda);
}
// if (iter == 0)
// printf("%d %f \n", a, totalDist);
val[a] = totalDist;
}
#else
// compute the distance in parallel
// p threads work on one proposed vectorID
uint p = threadIdx.x % _lineParts;
uint lane = threadIdx.x / _lineParts;
// fetch next a;
if (p == 0) {
laneA[lane] = atomicInc(nProcessed, 100000);
if (laneA[lane] < nVec)
idx[laneA[lane]] = _inIdx[iter * _maxVecIn + laneA[lane]
+ vOffs];
}
// __syncthreads();
float ddd = 0.;
if (laneA[lane] < nVec)
do {
float l = _lineLambda[idx[laneA[lane]] * _lineParts + p];
lineDescr& line(*((lineDescr*) &l));
uint l1 = line.p1;
uint l2 = line.p2;
float lambda = toFloat(line.lambda);
float c2 = _cbDist[l2 * _nClusters * _lineParts
+ l1 * _lineParts + p];
ddd = dist(queryDist[p * _nClusters + l1],
queryDist[p * _nClusters + l2], c2, lambda);
ddd = warpReduceSum(ddd, _lineParts);
// store result
if (p == 0) {
val[laneA[lane]] = ddd;
//
// if (iter == 0)
// printf("%d %f \n", laneA[lane], ddd );
laneA[lane] = atomicInc(nProcessed, 100000);
if (laneA[lane] < nVec)
idx[laneA[lane]] = _inIdx[iter * _maxVecIn
+ laneA[lane] + vOffs];
}
} while (laneA[lane] < nVec);
#endif
// __syncthreads();
// sort the results
for (int i = threadIdx.x; i < 4096; i += blockDim.x)
if (i >= nVec)
val[i] = 10000000.;
__syncthreads();
// bitonicLarge(val, idx, 4096);
for (uint i = threadIdx.x; i < 4096; i += blockDim.x) {
_bestDist[iter * _k + i + vOffs] = val[i];
_bestIdx[iter * _k + i + vOffs] = idx[i];
}
__syncthreads();
}
}
}
#if 1
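// rerankPlusVecKernelFast: fused gathering and reranking. The assigned bins are
// walked directly, their vector IDs copied into shared memory, and the
// line-approximation distances computed in the same kernel (the sequential
// per-thread path is enabled), so no intermediate candidate buffer is needed;
// the best _k results are written after a bitonic sort.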
__global__ void rerankPlusVecKernelFast(float* _bestDist, uint* _bestIdx,
const uint* _dbIdx, const uint* _binPrefix, const uint* _binCounts,
const uint* _assignedBins, const uint* _assignedNBins, uint _maxBins,
uint _maxNVecPerBin,
const float* _cbDist, uint _nClusters, uint _lineParts,
const float* _lineLambda, const float* _queryL1, uint _QN, uint _dim,
uint _k, uint _maxVec) {
extern __shared__ float shm[];
float* shmIter = shm;
float* queryDist = shmIter;
shmIter += _lineParts * _nClusters;
uint* idx = (uint*) shmIter;
shmIter += _maxVec;
float* val = shmIter;
shmIter += _maxVec;
// uint* laneA = (uint*) shmIter;
shmIter += blockDim.x / _lineParts;
// volatile float* d = shmIter;
// shmIter += blockDim.x;
uint &nVec(*(uint*) shmIter);
shmIter++;
uint &nBins(*(uint*) shmIter);
shmIter++;
uint &currentBin(*(uint*) shmIter);
shmIter++;
uint &totalVec(*(uint*) shmIter);
shmIter++;
uint* nProcessed = (uint*) shmIter;
shmIter++;
for (uint iter = blockIdx.x; iter < _QN; iter += gridDim.x) {
__syncthreads();
// load queryDistance
for (int p = 0; p < _lineParts; p++) {
if (threadIdx.x < _nClusters)
queryDist[p * _nClusters + threadIdx.x] =
_queryL1[iter * _lineParts * _nClusters + p * _nClusters
+ threadIdx.x];
}
if (threadIdx.x == 0) {
nBins = _assignedNBins[iter];
totalVec = 0;
}
__syncthreads();
// loop over the best assigned bins
for (int bin = 0; (bin < nBins) && (totalVec < _maxVec); bin++) {
if (threadIdx.x == 0) {
currentBin = _assignedBins[iter * _maxBins + bin];
nVec = _binCounts[currentBin];
if ((totalVec + nVec) >= _maxVec)
nVec = _maxVec - totalVec;
*nProcessed = 0;
}
__syncthreads();
if (nVec == 0)
continue;
uint* inIdx = idx + totalVec;
for (int v = threadIdx.x; v < nVec; v += blockDim.x)
inIdx[v] = _dbIdx[_binPrefix[currentBin] + v];
__syncthreads();
#if 1
// compute the distances to all line approximations
for (int a = threadIdx.x; a < nVec; a += blockDim.x) {
float totalDist = 0.;
for (uint p = 0; p < _lineParts; p++) {
float l = _lineLambda[inIdx[a] * _lineParts + p];
lineDescr& line(*((lineDescr*) &l));
uint l1 = line.p1;
uint l2 = line.p2;
float lambda = toFloat(line.lambda);
float c2 = _cbDist[l2 * _nClusters * _lineParts
+ l1 * _lineParts + p];
float d = dist(queryDist[p * _nClusters + l1],
queryDist[p * _nClusters + l2], c2, lambda);
totalDist += d;
}
val[totalVec + a] = totalDist;
}
#else
// compute the distance in parallel
// p threads work on one proposed vectorID
uint p = threadIdx.x % _lineParts;
uint lane = threadIdx.x / _lineParts;
// fetch next a;
if (p == 0) {
laneA[lane] = atomicInc(nProcessed, 100000);
}
// __syncthreads();
float ddd = 0.;
if (laneA[lane] < nVec)
do {
float l = _lineLambda[inIdx[laneA[lane]] * _lineParts + p];
lineDescr& line(*((lineDescr*) &l));
uint l1 = line.p1;
uint l2 = line.p2;
float lambda = toFloat(line.lambda);
float c2 = _cbDist[l2 * _nClusters * _lineParts
+ l1 * _lineParts + p];
ddd = dist(queryDist[p * _nClusters + l1],
queryDist[p * _nClusters + l2], c2, lambda);
ddd = warpReduceSum(ddd, _lineParts);
// store result
if (p == 0) {
val[totalVec + laneA[lane]] = ddd;
//
// if (iter == 0)
// printf("%d %f \n", laneA[lane], ddd );
laneA[lane] = atomicInc(nProcessed, 100000);
if (laneA[lane] < nVec)
idx[laneA[lane]] =
inIdx[laneA[lane]];
}
}while (laneA[lane] < nVec);
#endif
__syncthreads();
if (threadIdx.x == 0)
totalVec += nVec;
}
if (threadIdx.x == 0)
nVec = totalVec;
__syncthreads();
// sort the results
for (int i = threadIdx.x; i < _maxVec; i += blockDim.x)
if (i >= nVec)
val[i] = 10000000.;
__syncthreads();
if (_maxVec <= blockDim.x)
bitonic3(val, idx, _maxVec);
else
bitonicLarge(val, idx, _maxVec);
for (uint i = threadIdx.x; i < _k; i += blockDim.x)
if (i < _k) {
_bestDist[iter * _k + i] = val[i];
_bestIdx[iter * _k + i] = idx[i];
}
__syncthreads();
}
}
#endif
#if 1
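// rerankKBestVectors (enabled variant): gathers candidates with
// getKVectorIDsKernelFast, then reranks them with rerankKernelFast when
// maxVec <= 4096; for larger candidate sets it runs rerankKernelFastLoop and
// sorts each query's results with a per-query thrust::sort_by_key over the
// device pointers.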
void PerturbationProTree::rerankKBestVectors(float *_bestDist, uint *_bestIdx,
const float* _queryL1, const uint *_bins, const uint *_nBins,
uint _maxBins, const float* _Q, uint _QN, uint _k) {
uint nnn = log2(_k);
// nnn = 1024;
std::cout << "nnn: " << nnn << std::endl;
uint maxVecConsider = nnn;
uint maxVecOut = nnn;
uint nThreads = (maxVecConsider < 1024) ? maxVecConsider : 1024;
dim3 block(nThreads, 1, 1);
dim3 grid((_QN < 1024) ? _QN : 1024, 1, 1);
uint hash = 2048;
uint shmSize = (maxVecConsider
+ ((hash > maxVecConsider) ? hash : maxVecConsider) + 1 + 10)
* sizeof(float);
uint *selectIdx;// array for storing nn vector IDs (size maxVecOut * _QN)
uint *nVec;
cudaMalloc(&selectIdx, _QN * _k * sizeof(uint));
cudaMalloc(&nVec, _QN * sizeof(uint));
if ((!selectIdx) || (!nVec)) {
std::cout << "getKBestVectors: did not get memory !" << std::endl;
exit(1);
}
cudaMemset(selectIdx, 0, _QN * _k * sizeof(uint));
#if 0
if (_k < 1024)
getKVectorIDsKernel<<<grid, block, shmSize>>>(selectIdx, nVec, d_dbIdx,
d_binPrefix, d_binCounts, d_nBins, _Q, _bins, _nBins, _QN,
d_dim, _maxBins, _k, maxVecConsider, maxVecOut, d_nDBs, 280);
else
getKVectorIDsKernelLarge<<<grid, block, shmSize>>>(selectIdx, nVec,
d_dbIdx, d_binPrefix, d_binCounts, d_nBins, _Q, _bins, _nBins,
_QN, d_dim, _maxBins, _k, maxVecConsider, maxVecOut, d_nDBs,
280);
#else
// shmSize = (3* nThreads + nThreads / 32 + 5) * sizeof(float);
shmSize = (3 * nThreads + 4) * sizeof(float);
getKVectorIDsKernelFast<<<grid, block, shmSize>>>(selectIdx, nVec, d_dbIdx,
d_binPrefix, d_binCounts, d_nBins, _Q, _bins, _nBins, _QN, d_dim,
_maxBins, _k, maxVecConsider, maxVecOut, 2800);
#endif
checkCudaErrors(cudaDeviceSynchronize());
// outputVecUint("selectIdx", selectIdx, 1024);
outputVecUint("nVec", nVec, 100);
std::cout << "multi Vector IDs done" << std::endl;
//// _QN = 1;
//
// outputVecUint("selectIdx", selectIdx, 100);
// uint maxVec = 2 * log2(_k);
uint maxVec = log2(_k);
nThreads = (maxVec > d_dim) ? maxVec : d_dim;
nThreads = (nThreads < 1024) ? nThreads : 1024;
block = dim3(nThreads, 1, 1);
// grid = dim3(1, 1, 1);
// shmSize = (d_dim + d_nClusters * d_lineParts + 2 * maxVec + 2)
// * sizeof(float);
// rerankKernel<<<grid, block, shmSize>>>(_bestDist, _bestIdx, selectIdx, nVec,
// maxVecOut, d_codeBookDistL1L2, d_nClusters, d_lineParts,
// d_lineLambda, d_lineP1, d_lineP2, _queryL1, _QN, d_dim, _maxBins,
// _k, maxVec);
shmSize = (d_dim + d_nClusters * d_lineParts + 2 * maxVec + nThreads
+ nThreads / d_lineParts + 2) * sizeof(float);
if (maxVec <= 4096) {
shmSize = (d_nClusters * d_lineParts + 2 * maxVec
+ nThreads / d_lineParts + 2) * sizeof(float);
std::cout << "rerank: maxVec: " << maxVec << " shm: " << shmSize << std::endl;
rerankKernelFast<<<grid, block, shmSize>>>(_bestDist, _bestIdx,
selectIdx, nVec, maxVecOut, d_codeBookDistL1L2, d_nClusters,
d_lineParts, d_lineLambda, _queryL1, _QN, d_dim, _maxBins, _k,
maxVec);
} else {
shmSize = (d_nClusters * d_lineParts + 2 * 4096 + nThreads / d_lineParts
+ 2) * sizeof(float);
std::cout << "rerank: maxVec: " << maxVec << " k: " << _k << " shm: "
<< shmSize << std::endl;
rerankKernelFastLoop<<<grid, block, shmSize>>>(_bestDist, _bestIdx,
selectIdx, nVec, maxVecOut, d_codeBookDistL1L2, d_nClusters,
d_lineParts, d_lineLambda, _queryL1, _QN, d_dim, _maxBins, _k,
maxVec);
//
// checkCudaErrors(cudaDeviceSynchronize());
thrust::device_ptr<float> bdt(_bestDist);
thrust::device_ptr<uint> bit(_bestIdx);
for (int i = 0; i < _QN; i++)
thrust::sort_by_key(bdt + i * _k, bdt + i * _k + _k, bit + i * _k);
std::cout << "done sort by key" << std::endl;
// shmSize = (6 * 1024 + 2) * sizeof(float);
//
// std::cout << "mergeKernel: shm: " << shmSize << std::endl;
//
// mergeKernel<<<grid, block, shmSize>>>(_bestDist, _bestIdx, _QN, _k);
}
checkCudaErrors(cudaDeviceSynchronize());
std::cout << "rerankKBestVectors done " << std::endl;
outputVec("BestDist", _bestDist, 1000);
cudaFree(nVec);
cudaFree(selectIdx);
}
#else
void PerturbationProTree::rerankKBestVectors(float *_bestDist, uint *_bestIdx,
const float* _queryL1, const uint *_bins, const uint *_nBins,
uint _maxBins, const float* _Q, uint _QN, uint _k) {
uint maxVec = log2(_k);
uint nThreads = (maxVec > d_dim) ? maxVec : d_dim;
nThreads = (nThreads < 1024) ? nThreads : 1024;
dim3 block = dim3(nThreads, 1, 1);
dim3 grid((_QN < 1024) ? _QN : 1024, 1, 1);
uint shmSize = (d_dim + d_nClusters * d_lineParts + 2 * maxVec
+ nThreads / d_lineParts + 10) * sizeof(float);
std::cout << "maxVec: " << maxVec << " shm: " << shmSize << std::endl;
// getKVectorIDsKernel<<<grid, block, shmSize>>>(selectIdx, nVec, d_dbIdx,
// d_binPrefix, d_binCounts, d_nBins, _Q, _bins, _nBins, _QN, d_dim,
// _maxBins, _k, maxVecConsider, maxVecOut, d_nDBs, 280);
rerankPlusVecKernelFast<<<grid, block, shmSize>>>(_bestDist, _bestIdx,
d_dbIdx, d_binPrefix, d_binCounts, _bins, _nBins, _maxBins, 280,
d_codeBookDistL1L2, d_nClusters, d_lineParts, d_lineLambda,
_queryL1, _QN, d_dim, _k, maxVec);
checkCudaErrors(cudaDeviceSynchronize());
std::cout << "rerankKBestVectors done " << std::endl;
outputVec("BestDist", _bestDist, 1000);
}
#endif
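// rerankBIGKBestVectors: gathers candidates with getKVectorIDsKernelFast and
// copies them straight into _bestIdx; the line-based reranking stage below is
// currently compiled out (#if 0), so the IDs are returned unranked.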
void PerturbationProTree::rerankBIGKBestVectors(float *_bestDist,
uint *_bestIdx, const float* _queryL1, const uint *_bins,
const uint *_nBins, uint _maxBins, const float* _Q, uint _QN, uint _k) {
uint nnn = log2(_k);
// nnn = 1024;
std::cout << "nnn: " << nnn << std::endl;
uint maxVecConsider = nnn;
uint maxVecOut = nnn;
uint nThreads = (maxVecConsider < 1024) ? maxVecConsider : 1024;
dim3 block(nThreads, 1, 1);
dim3 grid((_QN < 1024) ? _QN : 1024, 1, 1);
uint hash = 2048;
uint shmSize = (maxVecConsider
+ ((hash > maxVecConsider) ? hash : maxVecConsider) + 1 + 10)
* sizeof(float);
uint *selectIdx;// array for storing nn vector IDs (size maxVecOut * _QN)
uint *nVec;
cudaMalloc(&selectIdx, _QN * _k * sizeof(uint));
cudaMalloc(&nVec, _QN * sizeof(uint));
if ((!selectIdx) || (!nVec)) {
std::cout << "getKBestVectors: did not get memory !" << std::endl;
exit(1);
}
cudaMemset(selectIdx, 0, _QN * _k * sizeof(uint));
#if 0
if (_k < 1024)
getKVectorIDsKernel<<<grid, block, shmSize>>>(selectIdx, nVec, d_dbIdx,
d_binPrefix, d_binCounts, d_nBins, _Q, _bins, _nBins, _QN,
d_dim, _maxBins, _k, maxVecConsider, maxVecOut, d_nDBs, 280);
else
getKVectorIDsKernelLarge<<<grid, block, shmSize>>>(selectIdx, nVec,
d_dbIdx, d_binPrefix, d_binCounts, d_nBins, _Q, _bins, _nBins,
_QN, d_dim, _maxBins, _k, maxVecConsider, maxVecOut, d_nDBs,
280);
#else
// shmSize = (3* nThreads + nThreads / 32 + 5) * sizeof(float);
shmSize = (3 * nThreads + 4) * sizeof(float);
getKVectorIDsKernelFast<<<grid, block, shmSize>>>(selectIdx, nVec, d_dbIdx,
d_binPrefix, d_binCounts, d_nBins, _Q, _bins, _nBins, _QN, d_dim,
_maxBins, _k, maxVecConsider, maxVecOut, nnn);
// getKVectorIDsKernelFast<<<grid, block, shmSize>>>(selectIdx, nVec, d_dbIdx,
// d_binPrefix, d_binCounts, d_nBins, _Q, _bins, _nBins, _QN, d_dim,
// _maxBins, _k, maxVecConsider, maxVecOut, d_nDBs, nnn);
// getKVectorIDsKernelFast<<<grid, block, shmSize>>>(selectIdx, nVec, d_dbIdx,
// d_binPrefix, d_binCounts, d_nBins, _Q, _bins, _nBins, _QN, d_dim,
// _maxBins, _k, maxVecConsider, maxVecOut, d_nDBs, 4096);
// getKVectorIDsKernelFast<<<grid, block, shmSize>>>(selectIdx, nVec, d_dbIdx,
// d_binPrefix, d_binCounts, d_nBins, _Q, _bins, _nBins, _QN, d_dim,
// _maxBins, _k, maxVecConsider, maxVecOut, d_nDBs, 280);
// 280 for most results
#endif
checkCudaErrors(cudaDeviceSynchronize());
// outputVecUint("selectIdx", selectIdx, 1024);
//
// outputVecUint("nVec", nVec, 100);
std::cout << "multi Vector IDs done" << std::endl;
//// _QN = 1;
//
// outputVecUint("selectIdx", selectIdx, 100);
// uint maxVec = 2 * log2(_k);
uint maxVec = log2(_k);
nThreads = (maxVec > d_dim) ? maxVec : d_dim;
nThreads = (nThreads < 1024) ? nThreads : 1024;
block = dim3(nThreads, 1, 1);
// grid = dim3(1, 1, 1);
// shmSize = (d_dim + d_nClusters * d_lineParts + 2 * maxVec + 2)
// * sizeof(float);
// rerankKernel<<<grid, block, shmSize>>>(_bestDist, _bestIdx, selectIdx, nVec,
// maxVecOut, d_codeBookDistL1L2, d_nClusters, d_lineParts,
// d_lineLambda, d_lineP1, d_lineP2, _queryL1, _QN, d_dim, _maxBins,
// _k, maxVec);
cudaMemcpy(_bestIdx, selectIdx, maxVec * _QN * sizeof(uint),
cudaMemcpyDeviceToDevice);
shmSize = (d_dim + d_nClusters * d_lineParts + 2 * maxVec + nThreads
+ nThreads / d_lineParts + 2) * sizeof(float);
#if 0
if (maxVec <= 4096) {
shmSize = (d_nClusters * d_lineParts + 2 * maxVec
+ nThreads / d_lineParts + 2) * sizeof(float);
std::cout << "rerank: maxVec: " << maxVec << " shm: " << shmSize << std::endl;
rerankKernelFast<<<grid, block, shmSize>>>(_bestDist, _bestIdx,
selectIdx, nVec, maxVecOut, d_codeBookDistL1L2, d_nClusters,
d_lineParts, d_lineLambda, _queryL1, _QN, d_dim, _maxBins, _k,
maxVec);
} else {
shmSize = (d_nClusters * d_lineParts + 2 * 4096 + nThreads / d_lineParts
+ 2) * sizeof(float);
std::cout << "rerank: maxVec: " << maxVec << " k: " << _k << " shm: "
<< shmSize << std::endl;
rerankKernelFastLoop<<<grid, block, shmSize>>>(_bestDist, _bestIdx,
selectIdx, nVec, maxVecOut, d_codeBookDistL1L2, d_nClusters,
d_lineParts, d_lineLambda, _queryL1, _QN, d_dim, _maxBins, _k,
maxVec);
//
// checkCudaErrors(cudaDeviceSynchronize());
thrust::device_ptr<float> bdt( _bestDist );
thrust::device_ptr<uint> bit( _bestIdx );
for (int i =0; i < _QN; i++)
thrust::sort_by_key( bdt + i * _k , bdt + i* _k + _k, bit + i * _k);
std::cout << "done sort by key" << std::endl;
// shmSize = (6 * 1024 + 2) * sizeof(float);
//
// std::cout << "mergeKernel: shm: " << shmSize << std::endl;
//
// mergeKernel<<<grid, block, shmSize>>>(_bestDist, _bestIdx, _QN, _k);
}
checkCudaErrors(cudaDeviceSynchronize());
std::cout << "rerankKBestVectors done " << std::endl;
outputVec("BestDist", _bestDist, 1000);
#endif
cudaFree(nVec);
cudaFree(selectIdx);
}
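// rerankBIGKBestVectors2: same structure as rerankKBestVectors, but the line
// descriptors are taken from _hLines (expected in pinned host memory) and the
// reranking uses rerankBIGKernelFast with its 64-bit indexing for maxVec <= 4096.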
/** assume _hLines in pinned memory */
void PerturbationProTree::rerankBIGKBestVectors2(float *_bestDist,
uint *_bestIdx, const float* _queryL1, const uint *_bins,
const uint *_nBins, uint _maxBins, const float* _Q, uint _QN, uint _k,
const float* _hLines) {
uint nnn = log2(_k);
// nnn = 1024;
std::cout << "nnn: " << nnn << std::endl;
uint maxVecConsider = nnn;
uint maxVecOut = nnn;
uint nThreads = (maxVecConsider < 1024) ? maxVecConsider : 1024;
dim3 block(nThreads, 1, 1);
dim3 grid((_QN < 1024) ? _QN : 1024, 1, 1);
uint hash = 2048;
uint shmSize = (maxVecConsider
+ ((hash > maxVecConsider) ? hash : maxVecConsider) + 1 + 10)
* sizeof(float);
uint *selectIdx;// array for storing nn vector IDs (size maxVecOut * _QN)
uint *nVec;
cudaMalloc(&selectIdx, _QN * _k * sizeof(uint));
cudaMalloc(&nVec, _QN * sizeof(uint));
if ((!selectIdx) || (!nVec)) {
std::cout << "getKBestVectors: did not get memory !" << std::endl;
exit(1);
}
cudaMemset(selectIdx, 0, _QN * _k * sizeof(uint));
// shmSize = (3* nThreads + nThreads / 32 + 5) * sizeof(float);
shmSize = (3 * nThreads + 4) * sizeof(float);
getKVectorIDsKernelFast<<<grid, block, shmSize>>>(selectIdx, nVec, d_dbIdx,
d_binPrefix, d_binCounts, d_nBins, _Q, _bins, _nBins, _QN, d_dim,
_maxBins, _k, maxVecConsider, maxVecOut, nnn);
checkCudaErrors(cudaDeviceSynchronize());
std::cout << "multi Vector IDs done" << std::endl;
// rerankKernel<<<grid, block, shmSize>>>(_bestDist, _bestIdx, selectIdx, nVec,
// maxVecOut, d_codeBookDistL1L2, d_nClusters, d_lineParts,
// d_lineLambda, d_lineP1, d_lineP2, _queryL1, _QN, d_dim, _maxBins,
// _k, maxVec);
uint maxVec = log2(_k);
nThreads = (maxVec > d_dim) ? maxVec : d_dim;
nThreads = (nThreads < 1024) ? nThreads : 1024;
block = dim3(nThreads, 1, 1);
// cudaMemcpy(_bestIdx, selectIdx, maxVec * _QN * sizeof(uint),
// cudaMemcpyDeviceToDevice);
shmSize = (d_dim + d_nClusters * d_lineParts + 2 * maxVec + nThreads
+ nThreads / d_lineParts + 2) * sizeof(float);
#if 1
if (maxVec <= 4096) {
shmSize = (d_nClusters * d_lineParts + 2 * maxVec
+ nThreads / d_lineParts + 2) * sizeof(float);
std::cout << "rerank: maxVec: " << maxVec << " shm: " << shmSize << std::endl;
rerankBIGKernelFast<<<grid, block, shmSize>>>(_bestDist, _bestIdx,
selectIdx, nVec, maxVecOut, d_codeBookDistL1L2, d_nClusters,
d_lineParts, _hLines, _queryL1, _QN, d_dim, _maxBins, _k,
maxVec);
} else {
shmSize = (d_nClusters * d_lineParts + 2 * 4096 + nThreads / d_lineParts
+ 2) * sizeof(float);
std::cout << "rerank: maxVec: " << maxVec << " k: " << _k << " shm: "
<< shmSize << std::endl;
rerankKernelFastLoop<<<grid, block, shmSize>>>(_bestDist, _bestIdx,
selectIdx, nVec, maxVecOut, d_codeBookDistL1L2, d_nClusters,
d_lineParts, d_lineLambda, _queryL1, _QN, d_dim, _maxBins, _k,
maxVec);
//
// checkCudaErrors(cudaDeviceSynchronize());
thrust::device_ptr<float> bdt(_bestDist);
thrust::device_ptr<uint> bit(_bestIdx);
for (int i = 0; i < _QN; i++)
thrust::sort_by_key(bdt + i * _k, bdt + i * _k + _k, bit + i * _k);
std::cout << "done sort by key" << std::endl;
// shmSize = (6 * 1024 + 2) * sizeof(float);
//
// std::cout << "mergeKernel: shm: " << shmSize << std::endl;
//
// mergeKernel<<<grid, block, shmSize>>>(_bestDist, _bestIdx, _QN, _k);
}
checkCudaErrors(cudaDeviceSynchronize());
std::cout << "rerankKBestVectors done " << std::endl;
// outputVec("BestDist", _bestDist, 1000);
#endif
cudaFree(nVec);
cudaFree(selectIdx);
}
/** assumes _hLines is in pinned memory */
void PerturbationProTree::rerankBIGKBestVectorsPerfect(float *_bestDist,
uint *_bestIdx, const uint *_bins, const uint *_nBins, uint _maxBins,
const float* _Q, uint _QN, uint _k, const float* _hLines) {
uint nnn = log2(_k);
// nnn = 1024;
std::cout << "nnn: " << nnn << std::endl;
uint maxVecConsider = nnn;
uint maxVecOut = nnn;
uint nThreads = (maxVecConsider < 1024) ? maxVecConsider : 1024;
dim3 block(nThreads, 1, 1);
dim3 grid((_QN < 1024) ? _QN : 1024, 1, 1);
uint hash = 2048;
uint shmSize = (maxVecConsider
+ ((hash > maxVecConsider) ? hash : maxVecConsider) + 1 + 10)
* sizeof(float);
uint *selectIdx;// array for storing nn vector IDs (size maxVecOut * _QN)
uint *nVec;
cudaMalloc(&selectIdx, _QN * _k * sizeof(uint));
cudaMalloc(&nVec, _QN * sizeof(uint));
if ((!selectIdx) || (!nVec)) {
std::cout << "getKBestVectors: did not get memory !" << std::endl;
exit(1);
}
cudaMemset(selectIdx, 0, _QN * _k * sizeof(uint));
// shmSize = (3* nThreads + nThreads / 32 + 5) * sizeof(float);
shmSize = (3 * nThreads + 4) * sizeof(float);
getKVectorIDsKernelFast<<<grid, block, shmSize>>>(selectIdx, nVec, d_dbIdx,
d_binPrefix, d_binCounts, d_nBins, _Q, _bins, _nBins, _QN, d_dim,
_maxBins, _k, maxVecConsider, maxVecOut, nnn);
checkCudaErrors(cudaDeviceSynchronize());
cudaMemcpy(_bestIdx, selectIdx, _k * _QN * sizeof(uint),
cudaMemcpyDeviceToDevice);
std::cout << "multi Vector IDs done" << std::endl;
uint maxVec = log2(_k);
nThreads = d_dim;
block = dim3(nThreads, 1, 1);
shmSize = (d_dim / 4 + d_dim + 1) * sizeof(float);
std::cout << "rerankPerfect: maxVec: " << maxVec << " shm: " << shmSize << std::endl;
rerankBIGKernelPerfect<<<grid, block, shmSize>>>(_bestDist, _bestIdx,
selectIdx, nVec, maxVecOut, _Q, _QN, _hLines, d_dim, _maxBins, _k,
maxVec);
checkCudaErrors(cudaDeviceSynchronize());
std::cout << "rerankKBestVectors done " << std::endl;
// outputVec("BestDist", _bestDist, 1000);
cudaFree(nVec);
cudaFree(selectIdx);
}
/** with line reranking given the CPU-resident _hLines */
void PerturbationProTree::rerankBIGKBestVectors(vector<uint>& _resIdx,
float *_bestDist, uint *_bestIdx, const float* _queryL1,
const uint *_bins, const uint *_nBins, uint _maxBins, const float* _Q,
uint _QN, uint _k, const float *_hLines) {
uint nnn = log2(_k);
// nnn = 1024;
std::cout << "nnn: " << nnn << std::endl;
uint maxVecConsider = nnn;
uint maxVecOut = nnn;
uint nThreads = (maxVecConsider < 1024) ? maxVecConsider : 1024;
dim3 block(nThreads, 1, 1);
dim3 grid((_QN < 1024) ? _QN : 1024, 1, 1);
uint hash = 2048;
uint shmSize = (maxVecConsider
+ ((hash > maxVecConsider) ? hash : maxVecConsider) + 1 + 10)
* sizeof(float);
uint *selectIdx;// array for storing nn vector IDs (size maxVecOut * _QN)
uint *nVec;
cudaMalloc(&selectIdx, _QN * _k * sizeof(uint));
cudaMalloc(&nVec, _QN * sizeof(uint));
if ((!selectIdx) || (!nVec)) {
std::cout << "getKBestVectors: did not get memory !" << std::endl;
exit(1);
}
cudaMemset(selectIdx, 0, _QN * _k * sizeof(uint));
#if 0
if (_k < 1024)
getKVectorIDsKernel<<<grid, block, shmSize>>>(selectIdx, nVec, d_dbIdx,
d_binPrefix, d_binCounts, d_nBins, _Q, _bins, _nBins, _QN,
d_dim, _maxBins, _k, maxVecConsider, maxVecOut, d_nDBs, 280);
else
getKVectorIDsKernelLarge<<<grid, block, shmSize>>>(selectIdx, nVec,
d_dbIdx, d_binPrefix, d_binCounts, d_nBins, _Q, _bins, _nBins,
_QN, d_dim, _maxBins, _k, maxVecConsider, maxVecOut, d_nDBs,
280);
#else
// shmSize = (3* nThreads + nThreads / 32 + 5) * sizeof(float);
shmSize = (3 * nThreads + 4) * sizeof(float);
getKVectorIDsKernelFast<<<grid, block, shmSize>>>(selectIdx, nVec, d_dbIdx,
d_binPrefix, d_binCounts, d_nBins, _Q, _bins, _nBins, _QN, d_dim,
_maxBins, _k, maxVecConsider, maxVecOut, 128);
// 280 for most results
#endif
checkCudaErrors(cudaDeviceSynchronize());
// outputVecUint("selectIdx", selectIdx, 1024);
//
// outputVecUint("nVec", nVec, 100);
std::cout << "multi Vector IDs done" << std::endl;
//// _QN = 1;
//
// outputVecUint("selectIdx", selectIdx, 100);
// uint maxVec = 2 * log2(_k);
uint maxVec = log2(_k);
nThreads = (maxVec > d_dim) ? maxVec : d_dim;
nThreads = (nThreads < 1024) ? nThreads : 1024;
block = dim3(nThreads, 1, 1);
cudaMemset(selectIdx, 0, maxVec * _QN * sizeof(uint));
// cudaMemcpy(_bestIdx, selectIdx, maxVec * _QN * sizeof(uint),
// cudaMemcpyDeviceToDevice);
//
// cudaMemcpy(&_resIdx[0], selectIdx, _QN * maxVec * sizeof(uint),
// cudaMemcpyDeviceToHost);
// checkCudaErrors(cudaDeviceSynchronize());
std::cout << "copy down done " << std::endl;
// now assemble lines on CPU:
float *cLines = new float[_QN * maxVec * d_lineParts];
if (!cLines) {
cerr << "did not get cLines memory" << std::endl;
exit(1);
}
for (int c = 0, i = 0; i < _QN * maxVec; i++) {
size_t idx = _resIdx[i];
idx *= 16;
// idx = idx % 1000000000;
for (int k = 0; k < d_lineParts; k++) {
cLines[c++] = _hLines[idx + k];
}
}
//
// for (int k = 0; k < 1600; k++) {
// std::cout << " " << cLines[k] << std::endl;
// }
std::cout << "CPU assembled" << std::endl;
cudaMemcpy(d_lineLambda, cLines, _QN * maxVec * d_lineParts * sizeof(float),
cudaMemcpyHostToDevice);
std::cout << "done line assembly" << std::endl;
outputVecUint("before", selectIdx, 200);
#if 1
if (maxVec <= 4096) {
std::cout << "output maxVec: " << maxVec << std::endl;
std::cout << "nThreads: " << nThreads << std::endl;
std::cout << "d_nClusters: " << d_nClusters << std::endl;
std::cout << "d_lineParts: " << d_lineParts << std::endl;
shmSize = (d_nClusters * d_lineParts + 2 * maxVec
+ nThreads / d_lineParts + 2) * sizeof(float);
std::cout << "shmSize rerank: " << shmSize << std::endl;
std::cout << "maxVec " << maxVec << " k: " << _k << std::endl;
cudaMemset(_bestIdx, 0, maxVec * _QN * sizeof(uint));
rerankKernelDirectFast<<<grid, block, shmSize>>>(_bestDist, _bestIdx,
selectIdx, nVec, maxVecOut, d_codeBookDistL1L2, d_nClusters,
d_lineParts, d_lineLambda, _queryL1, _QN, d_dim, _maxBins, _k,
maxVec);
// outputVecUint("after", _bestIdx, 3496);
uint* sel = new uint[maxVec * _QN];
uint* best = new uint[maxVec * _QN];
cudaMemcpy(sel, selectIdx, maxVec * _QN * sizeof(uint),
cudaMemcpyDeviceToHost);
// cudaMemcpy(_bestIdx, selectIdx, maxVec * _QN * sizeof(uint), cudaMemcpyDeviceToDevice);
cudaMemcpy(best, _bestIdx, maxVec * _QN * sizeof(uint),
cudaMemcpyDeviceToHost);
bool same = true;
for (int i = 0; i < maxVec * _QN; i++) {
if (sel[i] != best[i]) {
same = false;
std::cout << i << ": " << sel[i] << " " << best[i] << std::endl;
}
}
std::cout << "comoparison: " << ((same) ? " same" : "different") << std::endl;
delete[] best;
delete[] sel;
} else {
// shmSize = (d_nClusters * d_lineParts + 2 * 4096 + nThreads / d_lineParts
// + 2) * sizeof(float);
//
// std::cout << "rerank: maxVec: " << maxVec << " k: " << _k << " shm: "
// << shmSize << std::endl;
//
// rerankKernelFastLoop<<<grid, block, shmSize>>>(_bestDist, _bestIdx,
// selectIdx, nVec, maxVecOut, d_codeBookDistL1L2, d_nClusters,
// d_lineParts, d_lineLambda, _queryL1, _QN, d_dim, _maxBins, _k,
// maxVec);
////
//// checkCudaErrors(cudaDeviceSynchronize());
//
// thrust::device_ptr<float> bdt( _bestDist );
// thrust::device_ptr<uint> bit( _bestIdx );
//
// for (int i =0; i < _QN; i++)
// thrust::sort_by_key( bdt + i * _k , bdt + i* _k + _k, bit + i * _k);
//
// std::cout << "done sort by key" << std::endl;
// shmSize = (6 * 1024 + 2) * sizeof(float);
//
// std::cout << "mergeKernel: shm: " << shmSize << std::endl;
//
// mergeKernel<<<grid, block, shmSize>>>(_bestDist, _bestIdx, _QN, _k);
}
#endif
checkCudaErrors(cudaDeviceSynchronize());
std::cout << "rerankKBestVectors done " << std::endl;
// outputVec("BestDist", _bestDist, 1000);
delete[] cLines;
cudaFree(nVec);
cudaFree(selectIdx);
}
void PerturbationProTree::rerankKBestBinVectors(float *_bestDist,
uint *_bestIdx, const float* _queryL1, const float* _assignVal,
const uint* _assignIdx, uint _maxBins, uint _k1, uint k2,
const float* _Q, uint _QN, uint _k) {
uint nnn = log2(_k);
// nnn = 1024;
std::cout << "nnn: " << nnn << std::endl;
uint maxVecConsider = nnn;
uint maxVecOut = nnn;
uint nThreads = (maxVecConsider < 1024) ? maxVecConsider : 1024;
dim3 block(nThreads, 1, 1);
dim3 grid((_QN < 1024) ? _QN : 1024, 1, 1);
uint *selectIdx;// array for storing nn vector IDs (size maxVecOut * _QN)
uint *nVec;
cudaMalloc(&selectIdx, _QN * _k * sizeof(uint));
cudaMalloc(&nVec, _QN * sizeof(uint));
if ((!selectIdx) || (!nVec)) {
std::cout << "getKBestVectors: did not get memory !" << std::endl;
exit(1);
}
cudaMemset(selectIdx, 0, _QN * _k * sizeof(uint));
// shmSize = (3* nThreads + nThreads / 32 + 5) * sizeof(float);
uint shmSize = (d_p * _k1 * d_nClusters2 + 2 * d_p + 2 * nThreads + 4)
* sizeof(float);
getKBinVectorIDsKernelFast<<<grid, block, shmSize>>>(selectIdx, nVec,
_assignVal, _assignIdx, d_p, _k1, d_nClusters, d_nClusters2,
d_dbIdx, d_binPrefix, d_binCounts, d_nBins, d_distSeq, d_numDistSeq,
d_distCluster, _QN, _maxBins, maxVecOut, 280);
checkCudaErrors(cudaDeviceSynchronize());
// outputVecUint("selectIdx", selectIdx, 1024);
outputVecUint("nVec", nVec, 100);
// std::cout << "multi Vector IDs done" << std::endl;
//// _QN = 1;
//
// outputVecUint("selectIdx", selectIdx, 100);
// uint maxVec = 2 * log2(_k);
uint maxVec = log2(_k);
nThreads = (maxVec > d_dim) ? maxVec : d_dim;
nThreads = (nThreads < 1024) ? nThreads : 1024;
block = dim3(nThreads, 1, 1);
// grid = dim3(1, 1, 1);
// shmSize = (d_dim + d_nClusters * d_lineParts + 2 * maxVec + 2)
// * sizeof(float);
std::cout << "maxVec: " << maxVec << " shm: " << shmSize << std::endl;
// rerankKernel<<<grid, block, shmSize>>>(_bestDist, _bestIdx, selectIdx, nVec,
// maxVecOut, d_codeBookDistL1L2, d_nClusters, d_lineParts,
// d_lineLambda, d_lineP1, d_lineP2, _queryL1, _QN, d_dim, _maxBins,
// _k, maxVec);
shmSize = (d_dim + d_nClusters * d_lineParts + 2 * maxVec + nThreads
+ nThreads / d_lineParts + 2) * sizeof(float);
rerankKernelFast<<<grid, block, shmSize>>>(_bestDist, _bestIdx, selectIdx,
nVec, maxVecOut, d_codeBookDistL1L2, d_nClusters, d_lineParts,
d_lineLambda, _queryL1, _QN, d_dim, _maxBins, _k, maxVec);
checkCudaErrors(cudaDeviceSynchronize());
std::cout << "rerankKBestVectors done " << std::endl;
outputVec("BestDist", _bestDist, 1000);
cudaFree(nVec);
cudaFree(selectIdx);
}
#if 0
__global__ void getBestLineVectorsKernel(float *_bestDist, uint* _bestIdx,
const uint* _inIdx, const uint* _nVec, uint _maxVecIn,
const float* _lambda, const uint* _l1Idx, const uint* _l2Idx,
const float *_Ql1dist, const float *_Ql2dist, uint _QN, uint d_p,
const float* _codeBookDistL1L2, uint _maxBins, uint _k, uint _maxVec) {
extern __shared__ float shm[];
float* shmIter = shm + _dim;
float* val = shmIter;
shmIter += _maxVec;
uint* idx = (uint*) shmIter;
shmIter += _maxVec;
// in shm;
uint &nVec(*(uint*) shmIter);
shmIter++;
// loop over all corresponding vectors in the query
for (int iter = blockIdx.x; iter < _QN; iter += gridDim.x) {
__syncthreads();
if (threadIdx.x == 0) {
nVec = _nVec[iter];
nVec = (nVec < _maxVec) ? nVec : _maxVec;
if (iter % 1000 == 0) {
printf("nVec: %d \n", nVec);
}
}
__syncthreads();
// load query vector
float b;
if (threadIdx.x < _dim)
b = _Q[iter * _dim + threadIdx.x];
// load all indices
for (int a = threadIdx.x; a < nVec; a += blockDim.x) {
idx[a] = _inIdx[iter * _maxVecIn + a];
if (idx[a] >= 1000000) {
printf("panic: %d %d %d %d \n ", idx[a], iter, a, nVec);
}
}
__syncthreads();
// if (threadIdx.x == 0) {
// for (int a = 0; a < nVec; a++) {
// printf("idx: %d %d \n", a, idx[a]);
// }
// }
// loop over all selected vectors
for (int a = 0; a < nVec; a++) {
// compute the distance to the vector
// if (threadIdx.x < _dim) {
// uint loc = idx[a] * _dim + threadIdx.x;
// float v = _dbVec[loc];
//
//// if ((blockIdx.x == 90) && (a == 110))
//// printf( "got: %d %d %f \n", loc, idx[a], v);
//
// shm[threadIdx.x] = sqr( b - v );
// }
if (threadIdx.x < _dim) {
// if (idx[a] < 1000000)
shm[threadIdx.x] = sqr(b - _dbVec[idx[a] * _dim + threadIdx.x]);
}
for (uint stride = _dim >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if (threadIdx.x < stride)
shm[threadIdx.x] += shm[threadIdx.x + stride];
}
__syncthreads();
// store the result
if (threadIdx.x == 0) {
val[a] = shm[0];
// printf("idx: %d dist: %f \n", idx[a], val[a]);
}
__syncthreads();
}
// sort the results
if ((threadIdx.x >= nVec) && (threadIdx.x < _maxVec))
val[threadIdx.x] = 10000000.;
__syncthreads();
bitonic3(val, idx, _maxVec);
if ((threadIdx.x >= nVec) && (threadIdx.x < _maxVec))
val[threadIdx.x] = 0.;
if (threadIdx.x < _k) {
_bestDist[iter * _k + threadIdx.x] = val[threadIdx.x];
_bestIdx[iter * _k + threadIdx.x] = idx[threadIdx.x];
}
__syncthreads();
}
}
#endif
// each block is responsible for one vector, blockDim.x should be _dim
// requires _dim * float shared memory
// looping multiple times to process all B vectors
// _vl is the length of the _p vector segments (should be 2^n)
// k0 k1 k2 k3 k4
// output p0,p1,.. p0,p1,.. ..
__global__ void assignPerturbationKBestClusterKernel(uint *_assign,
const float* _A, const float* _B, uint _Arows, uint _Brows, uint _dim,
uint _p, uint _vl, uint _k, uint _NP2, uint _dimBits) {
extern __shared__ float shmb[];
float* shm = shmb + _dim;
float* shmIter = shm;
shmIter += _NP2;
uint* shmIdx = (uint*) shmIter;
shmIter += _NP2;
uint offs = (2 * _NP2 > _dim) ? 2 * _NP2 : _dim;
shmIter = shm + offs;
float* val = shmIter;
shmIter += _p * _Arows;
uint* idx = (uint*) (shmIter);
// loop over all corresponding vectors in B
for (int iter = blockIdx.x; iter < _Brows; iter += gridDim.x) {
__syncthreads();
if (threadIdx.x < _dim)
shmb[threadIdx.x] = _B[iter * _dim + threadIdx.x];
for (uint pert = 0; pert < 1; pert++) {
__syncthreads();
uint pIdx = pertIdx(threadIdx.x, _dimBits, pert);
// load perturbed vector
float b = shmb[pIdx];
const float* A = _A + pert * _Arows * _dim;
// loop over all vectors of A
for (int a = 0; a < _Arows; a++) {
if (threadIdx.x < _dim)
shm[threadIdx.x] = sqr(b - A[a * _dim + threadIdx.x]);
// compute the sum of differences for the vector segments
// i.e. all _p segments in parallel
for (uint stride = _vl >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if ((threadIdx.x) < _p * stride) {
uint p = threadIdx.x / stride * _vl;
uint bias = threadIdx.x % stride;
shm[p + bias] += shm[p + bias + stride];
}
}
__syncthreads();
// store the result
if (threadIdx.x < _p) {
val[a + threadIdx.x * _Arows] = shm[threadIdx.x * _vl];
idx[a + threadIdx.x * _Arows] = a;
// if ((threadIdx.x == 0)) // && (a < 10))
// printf("idx %d %f \n", idx[a], val[a]);
}
__syncthreads();
}
// sort the results;
__syncthreads();
for (int i = 0; i < _p; i++) {
if (threadIdx.x < _NP2)
shm[threadIdx.x] = 10000000.;
// copy to original shm
if (threadIdx.x < _Arows) {
shm[threadIdx.x] = val[threadIdx.x + i * _Arows];
shmIdx[threadIdx.x] = idx[threadIdx.x + i * _Arows];
}
__syncthreads();
bitonic3(shm, shmIdx, _NP2);
if (threadIdx.x < _Arows) {
val[threadIdx.x + i * _Arows] = shm[threadIdx.x];
idx[threadIdx.x + i * _Arows] = shmIdx[threadIdx.x];
}
__syncthreads();
}
// write out decision
for (int k = 0; k < _k; k++) {
if (threadIdx.x < _p) {
_assign[(iter * 1 + pert) * _k * _p + k * _p
+ threadIdx.x] = idx[threadIdx.x * _Arows + k];
}
}
}
} // iter
}
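// --- hedged reference sketch (illustration only; never called) -------------
// Host-side restatement of what the kernel above produces per vector of B,
// ignoring the perturbation indexing (the pert loop runs exactly once here):
// for each of the _p segments of length _vl, the squared distance to every
// row of A is computed, the _k closest rows per segment are selected, and the
// result is written in the kernel's k-major / p-minor layout
//   _assign[iter * _k * _p + k * _p + p].
// The helper name and the simple O(_k * _Arows) selection are assumptions made
// for clarity; they do not mirror the bitonic sort used on the GPU.
// Assumes _k <= _Arows.
static void assignKBestClusterReferenceCPU(uint *_assign, const float *_A,
		const float *_B, uint _Arows, uint _Brows, uint _dim, uint _p,
		uint _vl, uint _k) {
	float *d = new float[_Arows];
	for (uint iter = 0; iter < _Brows; iter++) {
		for (uint p = 0; p < _p; p++) {
			// squared distance of segment p of B[iter] to segment p of every A row
			for (uint a = 0; a < _Arows; a++) {
				float s = 0.f;
				for (uint i = 0; i < _vl; i++) {
					float diff = _B[iter * _dim + p * _vl + i]
							- _A[a * _dim + p * _vl + i];
					s += diff * diff;
				}
				d[a] = s;
			}
			// select the _k closest centers for this segment
			for (uint k = 0; k < _k; k++) {
				uint best = 0;
				for (uint a = 1; a < _Arows; a++)
					if (d[a] < d[best])
						best = a;
				_assign[iter * _k * _p + k * _p + p] = best;
				d[best] = 3.402823466e+38f; // exclude from the next rounds
			}
		}
	}
	delete[] d;
}
// ---------------------------------------------------------------------------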
// each block is responsible for one vector, blockDim.x should be _dim
// requires _dim * float shared memory
// looping multiple times to process all B vectors
// _vl is the length of the _p vector segments (should be 2^n)
// k0 k1 k2 k3 k4
// output p0,p1,.. p0,p1,.. ..
__global__ void assignPerturbationKBestLineClusterKernel(uint *_assign,
float* _l1Dist, const float* _A, const float* _B, uint _Arows,
uint _Brows, uint _dim, uint _p, uint _vl, uint _k, uint _NP2,
uint _dimBits) {
extern __shared__ float shmb[];
float* shm = shmb + _dim;
float* shmIter = shm;
shmIter += _NP2;
uint* shmIdx = (uint*) shmIter;
shmIter += _NP2;
uint offs = (2 * _NP2 > _dim) ? 2 * _NP2 : _dim;
shmIter = shm + offs;
float* val = shmIter;
shmIter += _p * _Arows;
uint* idx = (uint*) (shmIter);
// loop over all corresponding vectors in B
for (int iter = blockIdx.x; iter < _Brows; iter += gridDim.x) {
__syncthreads();
if (threadIdx.x < _dim)
shmb[threadIdx.x] = _B[iter * _dim + threadIdx.x];
for (uint pert = 0; pert < 1; pert++) {
__syncthreads();
uint pIdx = pertIdx(threadIdx.x, _dimBits, pert);
// load perturbed vector
float b = shmb[pIdx];
const float* A = _A + pert * _Arows * _dim;
// loop over all vectors of A
for (int a = 0; a < _Arows; a++) {
if (threadIdx.x < _dim)
shm[threadIdx.x] = sqr(b - A[a * _dim + threadIdx.x]);
// compute the sum of differences for the vector segments
// i.e. all _p segments in parallel
for (uint stride = _vl >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if ((threadIdx.x) < _p * stride) {
uint p = threadIdx.x / stride * _vl;
uint bias = threadIdx.x % stride;
shm[p + bias] += shm[p + bias + stride];
}
}
__syncthreads();
// store the result
if (threadIdx.x < _p) {
val[a + threadIdx.x * _Arows] = shm[threadIdx.x * _vl];
idx[a + threadIdx.x * _Arows] = a;
// if ((threadIdx.x == 0)) // && (a < 10))
// printf("idx %d %f \n", idx[a], val[a]);
}
__syncthreads();
}
// sort the results;
__syncthreads();
for (int i = 0; i < _p; i++) {
if (threadIdx.x < _NP2)
shm[threadIdx.x] = 10000000.;
// copy to original shm
if (threadIdx.x < _Arows) {
shm[threadIdx.x] = val[threadIdx.x + i * _Arows];
shmIdx[threadIdx.x] = idx[threadIdx.x + i * _Arows];
}
__syncthreads();
bitonic3(shm, shmIdx, _NP2);
if (threadIdx.x < _Arows) {
val[threadIdx.x + i * _Arows] = shm[threadIdx.x];
idx[threadIdx.x + i * _Arows] = shmIdx[threadIdx.x];
}
__syncthreads();
}
// write out l1 distance
if ((threadIdx.x < _p) && (pert == 0))
_l1Dist[(iter * _p) + threadIdx.x] = val[threadIdx.x * _Arows];
// write out decision
for (int k = 0; k < _k; k++) {
if (threadIdx.x < _p) {
_assign[(iter * 1 + pert) * _k * _p + k * _p
+ threadIdx.x] = idx[threadIdx.x * _Arows + k];
}
}
}
} // iter
}
__global__ void lineClusterKernel(float *_lineLambda, uint *_lineP1,
uint *_lineP2, float* _lineDist, const float* _cbDist, const float* _cb,
uint _nClusters, const float* _B, uint _Brows, uint _dim, uint _p,
uint _vl) {
extern __shared__ float shm[];
float* shmIter = shm;
shmIter += _dim;
float* val = shmIter;
shmIter += _p * _nClusters;
float* minD = shmIter;
shmIter += _p;
uint* minIdx = (uint*) shmIter;
shmIter += _p;
float* lambda = shmIter;
shmIter += _p * _nClusters;
float* dist = shmIter;
shmIter += _p * _nClusters;
float* l1Idx = shmIter;
shmIter += _p * _nClusters;
float* l2Idx = shmIter;
shmIter += _p * _nClusters;
// loop over all corresponding vectors in B
for (int iter = blockIdx.x; iter < _Brows; iter += gridDim.x) {
__syncthreads();
float b;
// load vector
if (threadIdx.x < _dim)
b = _B[iter * _dim + threadIdx.x];
const float* A = _cb;
// loop over all vectors of A
for (int a = 0; a < _nClusters; a++) {
if (threadIdx.x < _dim)
shm[threadIdx.x] = sqr(b - A[a * _dim + threadIdx.x]);
// compute the sum of differences for the vector segments
// i.e. all _p segments in parallel
for (uint stride = _vl >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if ((threadIdx.x) < _p * stride) {
uint p = threadIdx.x / stride * _vl;
uint bias = threadIdx.x % stride;
shm[p + bias] += shm[p + bias + stride];
}
}
__syncthreads();
// store the result
if (threadIdx.x < _p) {
float v = shm[threadIdx.x * _vl];
val[a + threadIdx.x * _nClusters] = v;
// determine closest center
if ((a == 0) || (minD[threadIdx.x] > v)) {
minD[threadIdx.x] = v;
minIdx[threadIdx.x] = a;
}
}
__syncthreads();
}
__syncthreads();
// if ((threadIdx.x == 0)) {
//
// printf("yea yea yea \n");
//
// for (int p = 0; p < _p; p++) {
// for (int a = 0; a < _nClusters; a++)
// printf(" %.3f ", val[a + p * _nClusters]);
//
// printf( " min: %d %.3f", minIdx[p], minD[p]);
// printf("\n");
// }
//
//
// }
//
// __syncthreads();
uint p = threadIdx.x / _nClusters;
uint cIdx = threadIdx.x % _nClusters;
#if 0
if (p < _p) {
float c2 = _cbDist[minIdx[p] * _nClusters * _p + cIdx * _p + p];
lambda[threadIdx.x] = project(val[threadIdx.x], minD[p], c2,
dist[threadIdx.x]);
if (cIdx == minIdx[p] )
dist[threadIdx.x] = 999999999999.;
if (!isTriangle(val[threadIdx.x], minD[p], c2))
printf(
"non-triangle: p %d pIdx %d: minIdx: %d === %f %f %f = %f %f \n",
p, cIdx, minIdx[p], val[threadIdx.x], minD[p], c2,
dist[threadIdx.x], lambda[threadIdx.x]);
lIdx[threadIdx.x] = cIdx;
}
#endif
uint bestIdx1;
uint bestIdx2;
for (int minId = 0; minId < _nClusters; minId++) {
if (p < _p) {
float c2 = _cbDist[minId * _nClusters * _p + cIdx * _p + p];
float d;
float l;
l = project(val[threadIdx.x], val[p * _nClusters + minId], c2,
d);
if (cIdx == minId)
d = 999999999999.;
if ((minId == 0) || (d < dist[threadIdx.x])) {
dist[threadIdx.x] = d;
lambda[threadIdx.x] = l;
bestIdx1 = cIdx;
bestIdx2 = minId;
}
if (!isTriangle(val[threadIdx.x], val[p * _nClusters + minId],
c2))
printf(
"non-triangle: p %d pIdx %d: minIdx: %d === %f %f %f = %f %f \n",
p, cIdx, minIdx[p], val[threadIdx.x], minD[p], c2,
dist[threadIdx.x], lambda[threadIdx.x]);
}
}
if (p < _p) {
l1Idx[threadIdx.x] = bestIdx1;
l2Idx[threadIdx.x] = bestIdx2;
}
__syncthreads();
#if 1
// reduction to find best axis
for (int stride = _nClusters >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if ((p < _p) && (cIdx < stride)) {
if (dist[threadIdx.x] > dist[threadIdx.x + stride]) {
dist[threadIdx.x] = dist[threadIdx.x + stride];
l1Idx[threadIdx.x] = l1Idx[threadIdx.x + stride];
l2Idx[threadIdx.x] = l2Idx[threadIdx.x + stride];
lambda[threadIdx.x] = lambda[threadIdx.x + stride];
}
}
}
__syncthreads();
#endif
// write results
if (threadIdx.x < _p) {
_lineP1[iter * _p + threadIdx.x] = l1Idx[threadIdx.x * _nClusters];
_lineP2[iter * _p + threadIdx.x] = l2Idx[threadIdx.x * _nClusters];
_lineLambda[iter * _p + threadIdx.x] = lambda[threadIdx.x
* _nClusters];
}
if (_lineDist) {
if (threadIdx.x == 0) {
float d = 0.;
for (int i = 0; i < _p; i++)
d += dist[i * _nClusters];
_lineDist[iter] = d;
}
}
} // iter
}
__global__ void lineClusterKernelFast(float *_lineLambda, float* _lineDist,
const float* _cbDist, const float* _cb, uint _nClusters,
const float* _B, uint _Brows, uint _dim, uint _p, uint _vl) {
extern __shared__ float shm[];
float* shmIter = shm;
shmIter += _dim;
float* val = shmIter;
shmIter += _p * _nClusters;
// float* minD = shmIter;
// shmIter += _p;
// uint* minIdx = (uint*) shmIter;
// shmIter += _p;
float* lambda = shmIter;
shmIter += _p * _nClusters;
float* dist = shmIter;
shmIter += _p * _nClusters;
// float* l1Idx = shmIter;
// shmIter += _p * _nClusters;
// float* l2Idx = shmIter;
// shmIter += _p * _nClusters;
// loop over all corresponding vectors in B
for (int iter = blockIdx.x; iter < _Brows; iter += gridDim.x) {
__syncthreads();
float b;
// load vector
if (threadIdx.x < _dim)
b = _B[iter * _dim + threadIdx.x];
const float* A = _cb;
// loop over all vectors of A
for (int a = 0; a < _nClusters; a++) {
if (threadIdx.x < _dim)
shm[threadIdx.x] = sqr(b - A[a * _dim + threadIdx.x]);
// compute the sum of differences for the vector segments
// i.e. all _p segments in parallel
for (uint stride = _vl >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if ((threadIdx.x) < _p * stride) {
uint p = threadIdx.x / stride * _vl;
uint bias = threadIdx.x % stride;
shm[p + bias] += shm[p + bias + stride];
}
}
__syncthreads();
// store the result
if (threadIdx.x < _p) {
float v = shm[threadIdx.x * _vl];
val[a + threadIdx.x * _nClusters] = v;
// // determine closest center
// if ((a == 0) || (minD[threadIdx.x] > v)) {
// minD[threadIdx.x] = v;
// minIdx[threadIdx.x] = a;
// }
}
__syncthreads();
}
__syncthreads();
uint p = threadIdx.x / _nClusters;
uint cIdx = threadIdx.x % _nClusters;
for (int minId = 0; minId < _nClusters; minId++) {
if (p < _p) {
float c2 = _cbDist[minId * _nClusters * _p + cIdx * _p + p];
float d;
float l;
l = project(val[threadIdx.x], val[p * _nClusters + minId], c2,
d);
if (cIdx == minId)
d = 999999999999.;
if ((minId == 0) || (d < dist[threadIdx.x])) {
dist[threadIdx.x] = d;
lineDescr& line(*((lineDescr*) (lambda + threadIdx.x)));
line.p1 = cIdx;
line.p2 = minId;
line.lambda = toUShort(l);
}
}
}
__syncthreads();
// reduction to find best axis
for (int stride = _nClusters >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if ((p < _p) && (cIdx < stride)) {
if (dist[threadIdx.x] > dist[threadIdx.x + stride]) {
dist[threadIdx.x] = dist[threadIdx.x + stride];
lambda[threadIdx.x] = lambda[threadIdx.x + stride];
}
}
}
__syncthreads();
// write results
if (threadIdx.x < _p) {
_lineLambda[iter * _p + threadIdx.x] = lambda[threadIdx.x
* _nClusters];
}
if (_lineDist) {
if (threadIdx.x == 0) {
float d = 0.;
for (int i = 0; i < _p; i++)
d += dist[i * _nClusters];
_lineDist[iter] = d;
}
}
} // iter
}
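// --- hedged sketch of the line projection used above (assumption) ----------
// project() itself is not part of this excerpt. One plausible implementation,
// assuming all three inputs are *squared* distances (d0: point to endpoint P1,
// d1: point to endpoint P2, c2: P1 to P2, c2 > 0), is the law-of-cosines
// projection onto the line through P1 and P2:
//   lambda   = (d0 + c2 - d1) / (2 * c2)   position along P1 -> P2
//   residual = d0 - lambda * lambda * c2   squared distance to the line
// The argument order, the squared-distance convention, and the absence of any
// clamping of lambda to [0, 1] are assumptions; this sketch only documents the
// geometry and is not the actual project() used by the kernels.
static __host__ __device__ inline float projectSketch(float d0, float d1,
		float c2, float &residual) {
	float lambda = (d0 + c2 - d1) / (2.f * c2);
	residual = d0 - lambda * lambda * c2;
	return lambda;
}
// ---------------------------------------------------------------------------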
void PerturbationProTree::lineDist(const float* _DB, uint _N) {
d_lineParts = 16;
//TODO
// d_lineParts = 32;
if (!d_codeBookDistL1L2)
computeCBL1L1Dist(d_lineParts);
d_lineP1 = NULL;
d_lineP2 = NULL;
// _N = 1000;
float* dist = NULL;
if (!d_lineLambda)
cudaMalloc(&d_lineLambda, d_lineParts * _N * sizeof(float));
//
// if (!d_lineP1)
// cudaMalloc(&d_lineP1, d_lineParts * _N * sizeof(uint));
// if (!d_lineP2)
// cudaMalloc(&d_lineP2, d_lineParts * _N * sizeof(uint));
if (!d_lineLambda) {
std::cout << "line Dist: did not get memory " << std::endl;
}
cudaMalloc(&dist, _N * sizeof(float));
uint nLines = d_lineParts * d_nClusters;
uint nThreads = (d_dim > nLines) ? d_dim : nLines;
dim3 block(nThreads, 1, 1);
dim3 grid((_N > 1024) ? 1024 : _N, 1, 1);
// dim3 grid(1,1,1);
uint shmSize = (d_lineParts * d_nClusters + 2 * d_lineParts + d_dim
+ 4 * nLines) * sizeof(float);
std::cout << "shmSize: " << shmSize << std::endl;
// lineClusterKernel<<<grid, block, shmSize>>>(d_lineLambda, d_lineP1,
// d_lineP2, dist, d_codeBookDistL1L2, d_multiCodeBook, d_nClusters,
// _DB, _N, d_dim, d_lineParts, d_dim / d_lineParts);
lineClusterKernelFast<<<grid, block, shmSize>>>(d_lineLambda, dist,
d_codeBookDistL1L2, d_multiCodeBook, d_nClusters, _DB, _N, d_dim,
d_lineParts, d_dim / d_lineParts);
checkCudaErrors(cudaDeviceSynchronize());
// outputVec("Dist ", dist, 1000);
//
// outputVec("Lambda", d_lineLambda, 1000);
float minD, maxD, avgD;
float* ldist = new float[_N];
cudaMemcpy(ldist, dist, _N * sizeof(float), cudaMemcpyDeviceToHost);
minD = ldist[0];
maxD = ldist[0];
avgD = 0;
for (int i = 0; i < _N; i++) {
if (ldist[i] < minD)
minD = ldist[i];
if (ldist[i] > maxD)
maxD = ldist[i];
avgD += ldist[i];
}
std::cout << "line dist (min, max, avg) " << minD << " " << maxD << " "
<< (avgD / _N) << std::endl;
cudaFree(dist);
}
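// --- hedged sketch: device-side statistics (illustration only; never called)
// The min / max / avg loop in lineDist() above copies all _N distances to the
// host first. Since thrust is already used in this file, the same statistics
// could plausibly be computed on the device as sketched below. This assumes
// <thrust/extrema.h> and <thrust/reduce.h> are included at the top of the file
// next to the thrust headers the existing code already relies on; it is an
// assumed alternative, not the method lineDist() actually uses.
static void lineDistStatsSketch(const float *_dist, uint _N, float &_minD,
		float &_maxD, float &_avgD) {
	thrust::device_ptr<const float> dp(_dist);
	thrust::device_ptr<const float> minIt = thrust::min_element(dp, dp + _N);
	thrust::device_ptr<const float> maxIt = thrust::max_element(dp, dp + _N);
	_minD = *minIt; // dereferencing a device_ptr copies one value to the host
	_maxD = *maxIt;
	_avgD = thrust::reduce(dp, dp + _N, 0.f) / float(_N);
}
// ---------------------------------------------------------------------------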
__global__ void lineAssignmentKernel(float *_queryDist, const float* _cb,
uint _nClusters, const float* _B, uint _Brows, uint _dim, uint _p,
uint _vl) {
extern __shared__ float shm[];
float* shmIter = shm;
shmIter += _dim;
float* val = shmIter;
shmIter += _p * _nClusters;
// loop over all corresponding vectors in B
for (int iter = blockIdx.x; iter < _Brows; iter += gridDim.x) {
__syncthreads();
float b;
// load vector
if (threadIdx.x < _dim)
b = _B[iter * _dim + threadIdx.x];
const float* A = _cb;
// loop over all vectors of A
for (int a = 0; a < _nClusters; a++) {
if (threadIdx.x < _dim)
shm[threadIdx.x] = sqr(b - A[a * _dim + threadIdx.x]);
// compute the sum of differences for the vector segments
// i.e. all _p segments in parallel
for (uint stride = _vl >> 1; stride > 0; stride >>= 1) {
__syncthreads();
if ((threadIdx.x) < _p * stride) {
uint p = threadIdx.x / stride * _vl;
uint bias = threadIdx.x % stride;
shm[p + bias] += shm[p + bias + stride];
}
}
__syncthreads();
// store the result
if (threadIdx.x < _p) {
float v = shm[threadIdx.x * _vl];
val[a + threadIdx.x * _nClusters] = v;
}
__syncthreads();
}
__syncthreads();
if (threadIdx.x < _nClusters) {
for (int p = 0; p < _p; p++)
_queryDist[iter * _nClusters * _p + p * _nClusters + threadIdx.x] =
val[p * _nClusters + threadIdx.x];
}
}
}
void PerturbationProTree::getLineAssignment(float* _queryL1Dist,
const float* _Q, uint _QN) {
uint nLines = d_lineParts * d_nClusters;
uint nThreads = (d_dim > nLines) ? d_dim : nLines;
dim3 block(nThreads, 1, 1);
dim3 grid((_QN > 1024) ? 1024 : _QN, 1, 1);
uint shmSize = (d_dim + d_lineParts * d_nClusters) * sizeof(float);
std::cout << "shmSize: " << shmSize << std::endl;
lineAssignmentKernel<<<grid, block, shmSize>>>(_queryL1Dist,
d_multiCodeBook, d_nClusters, _Q, _QN, d_dim, d_lineParts,
d_dim / d_lineParts);
checkCudaErrors(cudaDeviceSynchronize());
// outputVec("qL1", _queryL1Dist, d_nClusters * d_lineParts);
}
__global__ void assignPerturbationKBestClusterKernelSingleP(uint *_assign,
const float* _A, const float* _B, uint _Arows, uint _Brows, uint _dim,
uint _p, uint _vl, uint _k, uint _NP2, uint _dimBits) {
extern __shared__ float shmb[];
float* shm = shmb + _dim;
uint* shmIdx = (uint*) (shm + _NP2);
float val[16];
// loop over all corresponding vectors in B
for (int iter = blockIdx.x; iter < _Brows; iter += gridDim.x) {
__syncthreads();
if (threadIdx.x < _dim)
shmb[threadIdx.x] = _B[iter * _dim + threadIdx.x];
for (uint pert = 0; pert < 1; pert++) {
__syncthreads();
uint pIdx = pertIdx(threadIdx.x, _dimBits, pert);
// load perturbed vector
float b = shmb[pIdx];
const float* A = _A + pert * _Arows * _dim;
// loop over all vectors of A
for (int a = 0; a < _Arows; a++) {
if (threadIdx.x < _dim)
shm[threadIdx.x] = sqr(b - A[a * _dim + threadIdx.x]);
__syncthreads();
// store the result
if (threadIdx.x == a) {
for (int p = 0; p < _p; p++) {
val[p] = 0;
for (int i = 0; i < _vl; i++) {
val[p] += shm[p * _vl + i];
}
}
}
__syncthreads();
}
// sort the results;
for (int i = 0; i < _p; i++) {
if (threadIdx.x < _NP2)
shm[threadIdx.x] = 10000000.;
// copy to original shm
if (threadIdx.x < _Arows) {
shm[threadIdx.x] = val[i];
shmIdx[threadIdx.x] = threadIdx.x;
}
__syncthreads();
bitonic3(shm, shmIdx, _NP2);
val[i] = shmIdx[threadIdx.x];
__syncthreads();
}
// TODO optimize
// write out decision
for (int k = 0; k < _k; k++) {
if (threadIdx.x == k) {
for (int i = 0; i < _p; i++)
shmIdx[i] = val[i];
}
__syncthreads();
if (threadIdx.x < _p) {
_assign[(iter * 1 + pert) * _k * _p + k * _p
+ threadIdx.x] = shmIdx[threadIdx.x];
}
__syncthreads();
}
}
} // iter
}
void PerturbationProTree::getKBestAssignment(uint *_assign, const float* _A,
const float* _B, uint _Arows, uint _Brows, uint _k) const {
if (_Arows <= 1024) {
uint NP2 = log2(_Arows);
uint nThreads = (NP2 > d_dim) ? NP2 : d_dim;
dim3 block(nThreads, 1, 1);
dim3 grid((_Brows < 1024) ? _Brows : 1024, 1, 1);
uint shm = (2 * nThreads + 2 * _Arows * d_p) * sizeof(float);
if (shm > 32000) {
shm = (nThreads + 2 * _Arows) * sizeof(float);
// std::cout << "kbest single p : shm " << shm << std::endl;
assignPerturbationKBestClusterKernelSingleP<<<grid, block, shm>>>(
_assign, _A, _B, _Arows, _Brows, d_dim, d_p, d_vl, _k, NP2,
d_dimBits);
} else {
// std::cout << "kbest: shm " << shm << std::endl;
assignPerturbationKBestClusterKernel<<<grid, block, shm>>>(_assign,
_A, _B, _Arows, _Brows, d_dim, d_p, d_vl, _k, NP2,
d_dimBits);
}
} else {
if (d_p > 1) {
std::cout << "not implemented";
return;
}
float* resd;
uint* idxD;
cudaMalloc(&resd, _Arows * _Brows * sizeof(float));
cudaMalloc(&idxD, _Arows * _Brows * sizeof(uint));
calcDist(resd, _A, _B, _Arows, _Brows);
// initialize the key array to the trivial list 0,1, ... _Arows, 0, 1, ....
uint* idx = new uint[_Arows * _Brows];
uint h = 0;
for (int j = 0; j < _Brows; j++)
for (int i = 0; i < _Arows; i++, h++)
idx[h] = i;
cudaMemcpy(idxD, idx, _Arows * _Brows * sizeof(uint),
cudaMemcpyHostToDevice);
bitonicSort(resd, _assign, resd, idxD, _Brows, _Arows, 1);
delete[] idx;
cudaFree(idxD);
cudaFree(resd);
}
checkCudaErrors(cudaDeviceSynchronize());
}
void PerturbationProTree::getKBestLineAssignment(uint *_assign, float* _l1Dist,
const float* _A, const float* _B, uint _Arows, uint _Brows,
uint _k) const {
uint NP2 = log2(_Arows);
uint nThreads = (NP2 > d_dim) ? NP2 : d_dim;
dim3 block(nThreads, 1, 1);
dim3 grid((_Brows < 1024) ? _Brows : 1024, 1, 1);
uint shm = (2 * nThreads + 2 * _Arows * d_p) * sizeof(float);
if (shm > 32000) {
shm = (nThreads + 2 * _Arows) * sizeof(float);
// std::cout << "kbest single p : shm " << shm << std::endl;
assignPerturbationKBestClusterKernelSingleP<<<grid, block, shm>>>(
_assign, _A, _B, _Arows, _Brows, d_dim, d_p, d_vl, _k, NP2,
d_dimBits);
std::cout << "!!!!!!!!! not supported !!!!!!!!!!" << std::endl;
} else {
// std::cout << "kbest: shm " << shm << std::endl;
// assignPerturbationKBestClusterKernel<<<grid, block, shm>>>(_assign, _A,
// _B, _Arows, _Brows, d_dim, d_p, d_vl, _k, NP2, d_dimBits,
// d_nDBs);
assignPerturbationKBestLineClusterKernel<<<grid, block, shm>>>(_assign,
_l1Dist, _A, _B, _Arows, _Brows, d_dim, d_p, d_vl, _k, NP2,
d_dimBits);
}
checkCudaErrors(cudaDeviceSynchronize());
}
void PerturbationProTree::testKNN(const float* _Q, uint _QN) {
// outputVecUint("prefix", d_binPrefix + 816791, 20);
// outputVecUint("prefix", d_binCounts + 816791, 20);
// outputVecUint("dbidx", d_dbIdx + 789975, 9);
uint k1 = 4;
prepareDistSequence(d_nClusters2 * k1, d_p);
uint* assignd;
uint* assignd2;
cudaMalloc(&assignd, 1 * k1 * d_p * _QN * sizeof(uint));
cudaMalloc(&assignd2, 1 * k1 * d_p * _QN * sizeof(uint));
outputVec("mcodebook", d_multiCodeBook, 256);
outputVec("mcodebook2", d_multiCodeBook2, 256);
outputVec("mcodebook2 -p2",
d_multiCodeBook2 + d_nClusters * d_nClusters2 * d_dim, 256);
getKBestAssignment(assignd, d_multiCodeBook, _Q, d_nClusters, _QN, k1);
outputVecUint("assign1 pert0: ", assignd, k1 * d_p);
outputVecUint("assign1 pert1: ", assignd + k1 * d_p, k1 * d_p);
float *assignVal;
uint *assignIdx;
cudaMalloc(&assignVal,
_QN * 1 * d_p * k1 * d_nClusters2 * sizeof(float));
cudaMalloc(&assignIdx,
_QN * 1 * d_p * k1 * d_nClusters2 * sizeof(uint));
getKBestAssignment2(assignVal, assignIdx, d_multiCodeBook2, _Q,
d_nClusters2, _QN, assignd, d_nClusters, k1);
outputVecUint("assignIdx 1 - p0", assignIdx, k1 * d_nClusters2);
outputVecUint("assignIdx 1 - p1", assignIdx + k1 * d_nClusters2 * d_p,
k1 * d_nClusters2);
uint *idx = new uint[d_p * k1 * d_nClusters2];
float *val = new float[d_p * k1 * d_nClusters2];
cudaMemcpy(val, assignVal, d_p * k1 * d_nClusters2 * sizeof(float),
cudaMemcpyDeviceToHost);
for (int p = 0; p < d_p; p++) {
for (int i = 0; i < k1 * d_nClusters2; i++)
std::cout << val[p * k1 * d_nClusters2 + i] << " ";
std::cout << std::endl << std::endl;
}
uint k2 = 40;
k2 = 40960;
// uint maxBins = 40;
uint maxBins = 13000;
uint* nBins;
uint* bins;
cudaMalloc(&nBins, 1 * _QN * sizeof(uint));
cudaMalloc(&bins, 1 * _QN * maxBins * sizeof(uint));
// getBins(bins, nBins, assignVal, assignIdx, _QN, k1, k2, maxBins);
getBins(bins, nBins, assignVal, assignIdx, 1, k1, k2, maxBins);
outputVecUint("received nBins: ", nBins, 1);
std::cout << "done with bins!!!!!!" << std::endl;
k2 = 256;
// k2 = 512;
// k2 = 64;
// k2 = 4096;
uint maxVec = k2;
float* bestDist;
uint* bestIdx;
cudaMalloc(&bestDist, _QN * maxVec * sizeof(float));
cudaMalloc(&bestIdx, _QN * maxVec * sizeof(uint));
// getKBestVectors(bestDist, bestIdx, bins, nBins, maxBins, _Q, _QN, k2);
if (k2 <= 1024)
getKBestVectors(bestDist, bestIdx, bins, nBins, maxBins, _Q, 1, k2);
else
getKBestVectorsLarge(bestDist, bestIdx, bins, nBins, maxBins, _Q, 1,
k2);
uint* bestIdxh = new uint[maxVec];
float* bestDisth = new float[maxVec];
cudaMemcpy(bestIdxh, bestIdx, maxVec * sizeof(uint),
cudaMemcpyDeviceToHost);
cudaMemcpy(bestDisth, bestDist, maxVec * sizeof(float),
cudaMemcpyDeviceToHost);
for (int i = 0; i < maxVec; i++) {
std::cout << i << " " << bestIdxh[i] << " " << bestDisth[i] << std::endl;
}
std::cout << std::endl;
float* resd;
cudaMalloc(&resd, d_p * d_NdbVec * sizeof(float));
calcDist(resd, d_dbVec, _Q, d_NdbVec, 1);
//outputVec("Res:", resd, 20);
float* resh = new float[d_p * d_NdbVec];
cudaMemcpy(resh, resd, d_p * d_NdbVec * sizeof(float),
cudaMemcpyDeviceToHost);
vector<pair<float, uint> > ddd;
ddd.resize(d_NdbVec);
for (int i = 0; i < d_NdbVec; i++) {
float val = 0.;
for (int p = 0; p < d_p; p++)
val += resh[i * d_p + p];
ddd[i] = pair<float, uint>(val, i);
}
sort(ddd.begin(), ddd.end());
getAssignment(assignd, d_multiCodeBook, _Q, d_nClusters, 1);
outputVecUint("assign: ", assignd, 4);
getKBestAssignment(assignd, d_multiCodeBook, _Q, d_nClusters, 1, k1);
outputVecUint("assign: ", assignd, k1 * d_p);
outputVecUint("assignIdx2-1: ", assignIdx, k1);
outputVecUint("assignIdx2-2: ", assignIdx + k1 * d_nClusters2, k1);
std::cout << "distance by brute-force search: " << std::endl;
for (int i = 0; i < 20; i++) {
std::cout << i << " " << ddd[i].first << " " << ddd[i].second << std::endl;
getKBestAssignment(assignd, d_multiCodeBook,
d_dbVec + ddd[i].second * d_dim, d_nClusters, 1, k1);
// outputVecUint("assign: ", assignd, k1 * d_p);
getAssignment2(assignd2, d_multiCodeBook2,
d_dbVec + ddd[i].second * d_dim, d_nClusters2, 1, assignd,
d_nClusters);
// outputVecUint("assign2: ", assignd2, d_p);
getKBestAssignment2(assignVal, assignIdx, d_multiCodeBook2,
d_dbVec + ddd[i].second * d_dim, d_nClusters2, 1, assignd,
d_nClusters, k1);
outputVecUint("assignIdx2-1: ", assignIdx, k1);
outputVecUint("assignIdx2-2: ", assignIdx + k1 * d_nClusters2, k1);
// outputVecUint("", assignd + 4, 4);
// outputVecUint("", assignd + 8, 4);
}
cudaFree(bestIdx);
cudaFree(bestDist);
cudaFree(bins);
cudaFree(nBins);
cudaFree(assignIdx);
cudaFree(assignVal);
cudaFree(assignd2);
cudaFree(assignd);
delete[] val;
delete[] idx;
}
void PerturbationProTree::queryKNN(vector<uint>& _resIdx,
vector<float>& _resDist, const float* _Q, uint _QN, uint _kVec) {
_resIdx.resize(_QN * _kVec);
_resDist.resize(_QN * _kVec);
uint k1 = 1;
k1 = 8;
// k1 = 16;
prepareDistSequence(d_nClusters2 * k1, d_p);
// k1 = 16;
// prepareDistSequence(14, d_p);
uint* assignd;
float *assignVal;
uint *assignIdx;
cudaMalloc(&assignd, 1 * k1 * d_p * _QN * sizeof(uint));
float *queryL1;
cudaMalloc(&queryL1, d_nClusters * d_lineParts * _QN * sizeof(float));
cudaMalloc(&assignVal,
_QN * 1 * d_p * k1 * d_nClusters2 * sizeof(float));
cudaMalloc(&assignIdx,
_QN * 1 * d_p * k1 * d_nClusters2 * sizeof(uint));
uint k2 = 40;
k2 = 4096;
// k2 = 8192;
// k2 = _kVec;
uint maxBins = 4096;
// uint maxBins = 4* 8192;
// uint maxBins = 2048;
uint* nBins;
uint* bins;
cudaMalloc(&nBins, _QN * 1 * sizeof(uint));
cudaMalloc(&bins, _QN * maxBins * 1 * sizeof(uint));
if (!nBins || !bins) {
std::cout << "Bins: did not get memory" << std::endl;
exit(1);
}
uint maxVec = _kVec;
float* bestDist;
uint* bestIdx;
cudaMalloc(&bestDist, _QN * maxVec * sizeof(float));
cudaMalloc(&bestIdx, _QN * maxVec * sizeof(uint));
for (int i = 0; i < 1; i++) {
getKBestAssignment(assignd, d_multiCodeBook, _Q, d_nClusters, _QN, k1);
getLineAssignment(queryL1, _Q, _QN);
getKBestAssignment2(assignVal, assignIdx, d_multiCodeBook2, _Q,
d_nClusters2, _QN, assignd, d_nClusters, k1);
// outputVec("assignVal: ", assignVal, 200);
// outputVecUint("assignIdx: ", assignIdx, 200);
std::cout << "done assignements " << std::endl;
#if 1
getBins(bins, nBins, assignVal, assignIdx, _QN, k1, k2, maxBins);
// outputVecUint("Bins", bins, 2000);
std::cout << "done bins " << std::endl;
// if (maxVec <= 1024)
// getKBestVectors(bestDist, bestIdx, bins, nBins, maxBins, _Q, _QN,
// maxVec);
// else
// getKBestVectorsLarge(bestDist, bestIdx, bins, nBins, maxBins, _Q,
// _QN, maxVec);
rerankKBestVectors(bestDist, bestIdx, queryL1, bins, nBins, maxBins, _Q,
_QN, maxVec);
#else
rerankKBestBinVectors(bestDist, bestIdx, queryL1, assignVal, assignIdx,
maxBins, k1, k2, _Q, _QN, maxVec);
#endif
std::cout << "done vectors " << _QN << std::endl;
}
cudaMemcpy(&_resIdx[0], bestIdx, _QN * maxVec * sizeof(uint),
cudaMemcpyDeviceToHost);
cudaMemcpy(&_resDist[0], bestDist, _QN * maxVec * sizeof(float),
cudaMemcpyDeviceToHost);
#if 0
if (maxVec > 4096) {
// sort the results on host
vector< pair<float,uint> > svec;
svec.resize(_QN*maxVec);
std::cout << "dist before: ";
for (int i = 0; i < 100; i++) {
std::cout << "\t" << _resDist[i];
}
std::cout << std::endl;
for (int i = 0; i < _QN * maxVec; i++) {
svec[i] = pair<float,uint>(_resDist[i], _resIdx[i]);
}
for (int i = 0; i < _QN; i++) {
sort(svec.begin() + i * maxVec, svec.begin() + i * maxVec + maxVec);
}
for (int i = 0; i < _QN * maxVec; i++) {
_resIdx[i] = svec[i].second;
_resDist[i] = svec[i].first;
}
std::cout << "dist: ";
for (int i = 0; i < 100; i++) {
std::cout << "\t" << _resDist[i];
}
std::cout << std::endl;
}
#endif
cudaFree(bestIdx);
cudaFree(bestDist);
cudaFree(bins);
cudaFree(nBins);
cudaFree(assignIdx);
cudaFree(assignVal);
cudaFree(assignd);
}
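// --- hedged usage sketch (illustration only; never called) -----------------
// queryKNN() above expects _Q to be a device pointer holding _QN query vectors
// of d_dim floats each and fills _resIdx / _resDist with the _kVec best
// candidates per query. The helper below shows one assumed way to drive it
// from host data; the helper name is hypothetical and queryKNN is assumed to
// be publicly accessible.
static void queryKNNUsageSketch(PerturbationProTree &_tree,
		const vector<float> &_hostQueries, uint _QN, uint _dim, uint _kVec,
		vector<uint> &_resIdx, vector<float> &_resDist) {
	float *dQ = NULL;
	cudaMalloc(&dQ, _QN * _dim * sizeof(float));
	cudaMemcpy(dQ, &_hostQueries[0], _QN * _dim * sizeof(float),
			cudaMemcpyHostToDevice);
	_tree.queryKNN(_resIdx, _resDist, dQ, _QN, _kVec);
	// _resIdx[q * _kVec + j] / _resDist[q * _kVec + j] now hold the j-th best
	// candidate of query q.
	cudaFree(dQ);
}
// ---------------------------------------------------------------------------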
void PerturbationProTree::queryBIGKNN(vector<uint>& _resIdx,
vector<float>& _resDist, const float* _Q, uint _QN, uint _kVec,
const std::vector<uint>& _gtBins, uint _offset) {
_resIdx.resize(_QN * _kVec);
_resDist.resize(_QN * _kVec);
// uint k1 = 32;
uint k1 = 8;
k1 = 16;
// prepareDistSequence(d_nClusters2 * k1, d_p);
// prepareDistSequence(14, d_p);
uint* assignd;
float *assignVal;
uint *assignIdx;
cudaMalloc(&assignd, 1 * k1 * d_p * _QN * sizeof(uint));
float *queryL1;
cudaMalloc(&queryL1, d_nClusters * d_lineParts * _QN * sizeof(float));
cudaMalloc(&assignVal,
_QN * 1 * d_p * k1 * d_nClusters2 * sizeof(float));
cudaMalloc(&assignIdx,
_QN * 1 * d_p * k1 * d_nClusters2 * sizeof(uint));
// uint k2 = 40;
// k2 = 4096;
// k2 = 8192;
// k2 = _kVec;
// uint maxBins = 4096;
uint maxBins = 64 * 8192;
// uint maxBins = 2048;
// uint maxBins = 16;
uint* nBins;
uint* bins;
cudaMalloc(&nBins, _QN * 1 * sizeof(uint));
cudaMalloc(&bins, _QN * maxBins * 1 * sizeof(uint));
if (!nBins || !bins) {
std::cout << "Bins: did not get memory" << std::endl;
exit(1);
}
uint maxVec = _kVec;
float* bestDist;
uint* bestIdx;
cudaMalloc(&bestDist, _QN * maxVec * sizeof(float));
cudaMalloc(&bestIdx, _QN * maxVec * sizeof(uint));
getKBestAssignment(assignd, d_multiCodeBook, _Q, d_nClusters, _QN, k1);
// getLineAssignment(queryL1, _Q, _QN);
getKBestAssignment2(assignVal, assignIdx, d_multiCodeBook2, _Q,
d_nClusters2, _QN, assignd, d_nClusters, k1);
// outputVec("assignVal: ", assignVal, 200);
// outputVecUint("assignIdx: ", assignIdx, 200);
std::cout << "done assignements " << std::endl;
#if 1
// getBIGBins(bins, nBins, assignVal, assignIdx, _QN, k1, k2, maxBins);
// getBIGBins(bins, nBins, assignVal, assignIdx, _QN, k1, _kVec, maxBins);
// getBIGBins(bins, nBins, assignVal, assignIdx, _QN, k1, 4 * 8192, maxBins);
// getBIGBinsSorted(bins, nBins, assignVal, assignIdx, _QN, k1, _kVec,
// maxBins);
//
getBIGBins2D(bins, nBins, assignVal, assignIdx, _QN, k1, _kVec, maxBins);
// outputVecUint("final Bins", bins, _QN * maxBins);
// outputVecUint("nBins: ", nBins, 2);
// outputVecUint("nBins: ", nBins, 400);
countZeros("bins: ", bins, _QN * maxBins);
std::cout << "done bins " << std::endl;
////////////////////////////////////////////////////////////////////////////////////
// check Bins
uint* hBins = new uint[_QN * maxBins];
uint* hnBins = new uint[_QN];
cudaMemcpy(hBins, bins, _QN * maxBins * sizeof(uint),
cudaMemcpyDeviceToHost);
cudaMemcpy(hnBins, nBins, _QN * sizeof(uint), cudaMemcpyDeviceToHost);
uint found = 0;
uint avg = 0;
uint binMax = 0;
uint binMin = 100000;
for (int i = 0; i < _QN; i++) {
int b = 0;
for (; b < hnBins[i]; b++) {
if (hBins[i * maxBins + b] == _gtBins[i + _offset]) {
found++;
avg += b;
if (b < binMin)
binMin = b;
if (b > binMax)
binMax = b;
break;
}
}
}
std::cout << "found bins: " << found;
std::cout << " at avg location: " << (float(avg) / float(found));
std::cout << " min " << binMin << " max " << binMax << std::endl;
delete[] hnBins;
delete[] hBins;
// check Bins - End
///////////////////////////////////////////////////////////////////////////////////
// if (maxVec <= 1024)
// getKBestVectors(bestDist, bestIdx, bins, nBins, maxBins, _Q, _QN,
// maxVec);
// else
// getKBestVectorsLarge(bestDist, bestIdx, bins, nBins, maxBins, _Q,
// _QN, maxVec);
rerankBIGKBestVectors(bestDist, bestIdx, queryL1, bins, nBins, maxBins, _Q,
_QN, maxVec);
#endif
std::cout << "done vectors " << _QN << std::endl;
// outputVecUint("BestIdx: ", bestIdx, _QN * maxVec);
countZeros("bestIdx: ", bestIdx, _QN * maxVec);
cudaMemcpy(&_resIdx[0], bestIdx, _QN * maxVec * sizeof(uint),
cudaMemcpyDeviceToHost);
cudaMemcpy(&_resDist[0], bestDist, _QN * maxVec * sizeof(float),
cudaMemcpyDeviceToHost);
cudaFree(bestIdx);
cudaFree(bestDist);
cudaFree(bins);
cudaFree(nBins);
cudaFree(assignIdx);
cudaFree(assignVal);
cudaFree(assignd);
}
void PerturbationProTree::queryBIGKNNRerank(vector<uint>& _resIdx,
vector<float>& _resDist, const float* _Q, uint _QN, uint _kVec,
const float* _hLines) {
_resIdx.resize(_QN * _kVec);
_resDist.resize(_QN * _kVec);
d_lineParts = 16; // TODO !!!!
if (!d_codeBookDistL1L2)
computeCBL1L1Dist(d_lineParts);
uint k1 = 16;
// prepareDistSequence(d_nClusters2 * k1, d_p);
// prepareDistSequence(14, d_p);
uint* assignd;
float *assignVal;
uint *assignIdx;
cudaMalloc(&assignd, 1 * k1 * d_p * _QN * sizeof(uint));
float *queryL1;
cudaMalloc(&queryL1, d_nClusters * d_lineParts * _QN * sizeof(float));
cudaMalloc(&assignVal,
_QN * 1 * d_p * k1 * d_nClusters2 * sizeof(float));
cudaMalloc(&assignIdx,
_QN * 1 * d_p * k1 * d_nClusters2 * sizeof(uint));
uint k2 = 40;
k2 = 4096;
// k2 = 8192;
// k2 = _kVec;
// uint maxBins = 4096;
uint maxBins = 4 * 8192;
// uint maxBins = 2048;
uint* nBins;
uint* bins;
cudaMalloc(&nBins, _QN * 1 * sizeof(uint));
cudaMalloc(&bins, _QN * maxBins * 1 * sizeof(uint));
if (!nBins || !bins) {
std::cout << "Bins: did not get memory" << std::endl;
exit(1);
}
uint maxVec = _kVec;
float* bestDist;
uint* bestIdx;
cudaMalloc(&bestDist, _QN * maxVec * sizeof(float));
cudaMalloc(&bestIdx, _QN * maxVec * sizeof(uint));
getKBestAssignment(assignd, d_multiCodeBook, _Q, d_nClusters, _QN, k1);
// getLineAssignment(queryL1, _Q, _QN);
getKBestAssignment2(assignVal, assignIdx, d_multiCodeBook2, _Q,
d_nClusters2, _QN, assignd, d_nClusters, k1);
// outputVec("assignVal: ", assignVal, 200);
// outputVecUint("assignIdx: ", assignIdx, 200);
std::cout << "done assignements " << std::endl;
#if 1
getBIGBins(bins, nBins, assignVal, assignIdx, _QN, k1, k2, maxBins);
// outputVecUint("Bins", bins, 2000);
std::cout << "done bins " << std::endl;
// if (maxVec <= 1024)
// getKBestVectors(bestDist, bestIdx, bins, nBins, maxBins, _Q, _QN,
// maxVec);
// else
// getKBestVectorsLarge(bestDist, bestIdx, bins, nBins, maxBins, _Q,
// _QN, maxVec);
rerankBIGKBestVectors(_resIdx, bestDist, bestIdx, queryL1, bins, nBins,
maxBins, _Q, _QN, maxVec, _hLines);
#endif
std::cout << "done vectors " << _QN << std::endl;
// outputVecUint("BestIdx: ", bestIdx, 1000);
cudaMemcpy(&_resIdx[0], bestIdx, _QN * maxVec * sizeof(uint),
cudaMemcpyDeviceToHost);
cudaMemcpy(&_resDist[0], bestDist, _QN * maxVec * sizeof(float),
cudaMemcpyDeviceToHost);
cudaFree(bestIdx);
cudaFree(bestDist);
cudaFree(bins);
cudaFree(nBins);
cudaFree(assignIdx);
cudaFree(assignVal);
cudaFree(assignd);
}
void PerturbationProTree::queryBIGKNNRerank2(vector<uint>& _resIdx,
vector<float>& _resDist, const float* _Q, uint _QN, uint _kVec,
const float* _hLines) {
_resIdx.resize(_QN * _kVec);
_resDist.resize(_QN * _kVec);
// uint k1 = 32;
uint k1 = 8;
k1 = 16;
// d_lineParts = 16; // TODO !!!!
if (!d_codeBookDistL1L2)
computeCBL1L1Dist(d_lineParts);
// prepareDistSequence(d_nClusters2 * k1, d_p);
// prepareDistSequence(14, d_p);
uint* assignd;
float *assignVal;
uint *assignIdx;
cudaMalloc(&assignd, 1 * k1 * d_p * _QN * sizeof(uint));
float *queryL1;
cudaMalloc(&queryL1, d_nClusters * d_lineParts * _QN * sizeof(float));
cudaMalloc(&assignVal,
_QN * 1 * d_p * k1 * d_nClusters2 * sizeof(float));
cudaMalloc(&assignIdx,
_QN * 1 * d_p * k1 * d_nClusters2 * sizeof(uint));
// uint k2 = 40;
// k2 = 4096;
// k2 = 8192;
// k2 = _kVec;
// uint maxBins = 4096;
uint maxBins = 64 * 8192;
// uint maxBins = 2048;
// uint maxBins = 16;
uint* nBins;
uint* bins;
cudaMalloc(&nBins, _QN * 1 * sizeof(uint));
cudaMalloc(&bins, _QN * maxBins * 1 * sizeof(uint));
if (!nBins || !bins) {
std::cout << "Bins: did not get memory" << std::endl;
exit(1);
}
uint maxVec = _kVec;
float* bestDist;
uint* bestIdx;
cudaMalloc(&bestDist, _QN * maxVec * sizeof(float));
cudaMalloc(&bestIdx, _QN * maxVec * sizeof(uint));
getKBestAssignment(assignd, d_multiCodeBook, _Q, d_nClusters, _QN, k1);
getLineAssignment(queryL1, _Q, _QN);
getKBestAssignment2(assignVal, assignIdx, d_multiCodeBook2, _Q,
d_nClusters2, _QN, assignd, d_nClusters, k1);
// outputVec("assignVal: ", assignVal, 200);
// outputVecUint("assignIdx: ", assignIdx, 200);
std::cout << "done assignements " << std::endl;
//
getBIGBins2D(bins, nBins, assignVal, assignIdx, _QN, k1, _kVec, maxBins);
rerankBIGKBestVectors2(bestDist, bestIdx, queryL1, bins, nBins, maxBins, _Q,
_QN, maxVec, _hLines);
outputVecUint( "bestIdx: ", bestIdx, 4096);
outputVec( "bestDist: ", bestDist, 4096 );
std::cout << "done vectors " << _QN << std::endl;
// outputVecUint("BestIdx: ", bestIdx, _QN * maxVec);
countZeros("bestIdx: ", bestIdx, _QN * maxVec);
cudaMemcpy(&_resIdx[0], bestIdx, _QN * maxVec * sizeof(uint),
cudaMemcpyDeviceToHost);
cudaMemcpy(&_resDist[0], bestDist, _QN * maxVec * sizeof(float),
cudaMemcpyDeviceToHost);
cudaFree(bestIdx);
cudaFree(bestDist);
cudaFree(bins);
cudaFree(nBins);
cudaFree(assignIdx);
cudaFree(assignVal);
cudaFree(assignd);
}
void PerturbationProTree::queryBIGKNNRerankPerfect(vector<uint>& _resIdx,
vector<float>& _resDist, const float* _Q, uint _QN, uint _kVec,
const float* _hLines) {
_resIdx.resize(_QN * _kVec);
_resDist.resize(_QN * _kVec);
// uint k1 = 32;
uint k1 = 8;
k1 = 16;
uint* assignd;
float *assignVal;
uint *assignIdx;
cudaMalloc(&assignd, 1 * k1 * d_p * _QN * sizeof(uint));
// float *queryL1;
// cudaMalloc(&queryL1, d_nClusters * d_lineParts * _QN * sizeof(uint));
cudaMalloc(&assignVal,
_QN * 1 * d_p * k1 * d_nClusters2 * sizeof(float));
cudaMalloc(&assignIdx,
_QN * 1 * d_p * k1 * d_nClusters2 * sizeof(uint));
// uint k2 = 40;
// k2 = 4096;
// k2 = 8192;
// k2 = _kVec;
// uint maxBins = 4096;
uint maxBins = 64 * 8192;
// uint maxBins = 2048;
// uint maxBins = 16;
uint* nBins;
uint* bins;
cudaMalloc(&nBins, _QN * 1 * sizeof(uint));
cudaMalloc(&bins, _QN * maxBins * 1 * sizeof(uint));
if (!nBins || !bins) {
std::cout << "Bins: did not get memory" << std::endl;
exit(1);
}
uint maxVec = _kVec;
float* bestDist;
uint* bestIdx;
cudaMalloc(&bestDist, _QN * maxVec * sizeof(float));
cudaMalloc(&bestIdx, _QN * maxVec * sizeof(uint));
getKBestAssignment(assignd, d_multiCodeBook, _Q, d_nClusters, _QN, k1);
// getLineAssignment(queryL1, _Q, _QN);
getKBestAssignment2(assignVal, assignIdx, d_multiCodeBook2, _Q,
d_nClusters2, _QN, assignd, d_nClusters, k1);
std::cout << "done assignements " << std::endl;
getBIGBins2D(bins, nBins, assignVal, assignIdx, _QN, k1, _kVec, maxBins);
// rerankBIGKBestVectors2(bestDist, bestIdx, queryL1, bins, nBins, maxBins, _Q,
// _QN, maxVec, _hLines);
rerankBIGKBestVectorsPerfect(bestDist, bestIdx, bins, nBins, maxBins, _Q,
_QN, maxVec, _hLines);
std::cout << "done vectors " << _QN << std::endl;
// outputVecUint("BestIdx: ", bestIdx, _QN * maxVec);
countZeros("bestIdx: ", bestIdx, _QN * maxVec);
cudaMemcpy(&_resIdx[0], bestIdx, _QN * maxVec * sizeof(uint),
cudaMemcpyDeviceToHost);
cudaMemcpy(&_resDist[0], bestDist, _QN * maxVec * sizeof(float),
cudaMemcpyDeviceToHost);
cudaFree(bestIdx);
cudaFree(bestDist);
cudaFree(bins);
cudaFree(nBins);
cudaFree(assignIdx);
cudaFree(assignVal);
cudaFree(assignd);
}
#if 0
__global__ void locateIDsKernel(uint _baseNum, const uint* _prefix,
const uint* _dbIdx, uint _N, const uint* _gtIdx, uint* _gtPos) {
extern __shared__ float shm[];
float* shmIter = shm;
uint* pref = (uint*) shmIter;
shmIter += blockDim.x;
uint* pId = (uint*) shmIter;
shmIter += blockDim.x;
// load corresponding queries;
for (uint qId = blockIdx.y * blockDim.x + threadIdx.x; qId < _N;
qId += gridDim.y * blockDim.x) {
uint q = _gtIdx[qId];
for (uint bId = blockIdx.x * blockDim.x + threadIdx.x; bId < _baseNum;
bId += gridDim.x * blockDim.x) {
__syncthreads();
pref[threadIdx.x] = _prefix[bId];
pId[threadIdx.x] = bId;
__syncthreads();
for (int i = 0; i < blockDim.x; i++) {
if (pref[i] == q)
_gtPos[qId] = pId[i];
}
}
}
}
void PerturbationProTree::locateIDs(uint _baseNum, const vector<uint>& _gt,
vector<uint>& _gtBins) {
uint N = _gt.size();
_gtBins.resize(N);
uint* gtIdx = NULL;
uint* gtBins = NULL;
cudaMalloc(&gtIdx, N * sizeof(uint));
cudaMalloc(&gtBins, N * sizeof(uint));
if ((gtIdx == NULL) || (gtBins == NULL)) {
std::cout << "locateIDs: did not get memory!" << std::endl;
return;
}
// copy the ground-truth IDs to the device so the kernel can search for them
cudaMemcpy(gtIdx, &(_gt[0]), N * sizeof(uint), cudaMemcpyHostToDevice);
uint nThreads = 1024;
dim3 block(nThreads, 1, 1);
dim3 grid(1024, N / nThreads + 1, 1);
uint shmSize = (2 * nThreads) * sizeof(float);
locateIDsKernel<<<grid, block, shmSize>>>(_baseNum, d_prefix, d_dbIdx, N,
gtIdx, gtBins);
checkCudaErrors(cudaDeviceSynchronize());
cudaMemcpy(&(_gtBins[0]), gtBins, N * sizeof(uint), cudaMemcpyDeviceToHost);
cudaFree(gtBins);
cudaFree(gtIdx);
}
#endif
}
/* namespace */
namespace MegAutoBA {
namespace geo {
namespace {
template <typename T>
__global__ void QuaternionToRotation(
const int nItem, const int N, const T *Qx, const T *Qy, const T *Qz,
const T *Qw, const T *dQx, const T *dQy, const T *dQz, const T *dQw, T *R00,
T *R01, T *R02, T *R10, T *R11, T *R12, T *R20, T *R21, T *R22, T *dR00,
T *dR01, T *dR02, T *dR10, T *dR11, T *dR12, T *dR20, T *dR21, T *dR22) {
unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= nItem) return;
const T qw = Qw[idx];
const T qx = Qx[idx];
const T qy = Qy[idx];
const T qz = Qz[idx];
R00[idx] = 1 - 2 * (qy * qy + qz * qz);
R01[idx] = 2 * (qx * qy - qw * qz);
R02[idx] = 2 * (qx * qz + qw * qy);
R10[idx] = 2 * (qx * qy + qw * qz);
R11[idx] = 1 - 2 * (qx * qx + qz * qz);
R12[idx] = 2 * (qy * qz - qw * qx);
R20[idx] = 2 * (qx * qz - qw * qy);
R21[idx] = 2 * (qy * qz + qw * qx);
R22[idx] = 1 - 2 * (qx * qx + qy * qy);
for (int i = 0; i < N; ++i) {
unsigned int index = idx + i * nItem;
const T dqw = dQw[index];
const T dqx = dQx[index];
const T dqy = dQy[index];
const T dqz = dQz[index];
dR00[index] = -4 * (qy * dqy + qz * dqz);
dR01[index] = 2 * (qx * dqy + qy * dqx - qw * dqz - qz * dqw);
dR02[index] = 2 * (qx * dqz + qz * dqx + qw * dqy + qy * dqw);
dR10[index] = 2 * (qx * dqy + qy * dqx + qw * dqz + qz * dqw);
dR11[index] = -4 * (qx * dqx + qz * dqz);
dR12[index] = 2 * (qy * dqz + qz * dqy - qw * dqx - qx * dqw);
dR20[index] = 2 * (qx * dqz + qz * dqx - qw * dqy - qy * dqw);
dR21[index] = 2 * (qy * dqz + qz * dqy + qw * dqx + qx * dqw);
dR22[index] = -4 * (qx * dqx + qy * dqy);
}
}
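// Note (illustrative): the dR?? entries above are the directional derivatives of the
// rotation entries along each of the N stored gradient directions. For example,
// R00 = 1 - 2*(qy*qy + qz*qz) gives dR00 = -4*(qy*dqy + qz*dqz), which is exactly the
// first line of the loop; the remaining entries follow from the product rule in the
// same way.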
template <typename T>
__device__ inline int Get_i_For_R2Q(const T r[3][3]) {
int i = 0;
if (r[1][1] > r[0][0]) i = 1;
if (r[2][2] > r[i][i]) i = 2;
return i;
}
template <typename T>
struct R2Q_Address_Wrapper {};
__constant__ const float *f_R[3][3];
__constant__ const float *f_dR[3][3];
__constant__ float *f_Q[4];
__constant__ float *f_dQ[4];
template <>
struct R2Q_Address_Wrapper<float> {
static __device__ __host__ const float *const (&get_R())[3][3] { return f_R; }
static __device__ __host__ const float *const (&get_dR())[3][3] {
return f_dR;
}
static __device__ __host__ float *const (&get_Q())[4] { return f_Q; }
static __device__ __host__ float *const (&get_dQ())[4] { return f_dQ; }
};
__constant__ const double *d_R[3][3];
__constant__ const double *d_dR[3][3];
// x, y, z, w
__constant__ double *d_Q[4];
__constant__ double *d_dQ[4];
template <>
struct R2Q_Address_Wrapper<double> {
static __device__ __host__ const double *const (&get_R())[3][3] {
return d_R;
}
static __device__ __host__ const double *const (&get_dR())[3][3] {
return d_dR;
}
static __device__ __host__ double *const (&get_Q())[4] { return d_Q; }
static __device__ __host__ double *const (&get_dQ())[4] { return d_dQ; }
};
template <typename T>
__global__ void RotationToQuaternion(const int nItem, const int N) {
/*
* 00: 0 01: 1 02: 2
* 10: 3 11: 4 12: 5
* 20: 6 21: 7 22: 8
*/
using W = R2Q_Address_Wrapper<T>;
unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= nItem) return;
const T r[3][3]{
{W::get_R()[0][0][idx], W::get_R()[0][1][idx], W::get_R()[0][2][idx]},
{W::get_R()[1][0][idx], W::get_R()[1][1][idx], W::get_R()[1][2][idx]},
{W::get_R()[2][0][idx], W::get_R()[2][1][idx], W::get_R()[2][2][idx]}};
// inv_qw initially holds the trace
T inv_qw = r[0][0] + r[1][1] + r[2][2];
if (inv_qw > 0) {
// inv_qw = 2 / sqrt(trace + 1) = 1 / qw
inv_qw = 2 * Wrapper::rsqrtG<T>::call(inv_qw + 1);
// qw_mul_4 temporarily holds 1 / (4 * qw)
T qw_mul_4 = 0.25 * inv_qw;
// vector part, read off the antisymmetric part of r
// (same convention as QuaternionToRotation above)
W::get_Q()[0][idx] = (r[2][1] - r[1][2]) * qw_mul_4;
W::get_Q()[1][idx] = (r[0][2] - r[2][0]) * qw_mul_4;
W::get_Q()[2][idx] = (r[1][0] - r[0][1]) * qw_mul_4;
// from here on qw_mul_4 holds 4 * qw
qw_mul_4 = 1 / qw_mul_4;
W::get_Q()[3][idx] = 0.25 * qw_mul_4;
const T inv_4q0_pow2 = inv_qw * inv_qw * 0.0625;
for (int i = 0; i < N; ++i) {
unsigned int index = idx + i * nItem;
const T dqw = 0.125 * inv_qw *
(W::get_dR()[0][0][index] + W::get_dR()[1][1][index] +
W::get_dR()[2][2][index]);
// w
W::get_dQ()[3][index] = dqw;
// x
W::get_dQ()[0][index] =
((W::get_dR()[2][1][index] - W::get_dR()[1][2][index]) * qw_mul_4 -
4 * dqw * (r[2][1] - r[1][2])) *
inv_4q0_pow2;
// y
W::get_dQ()[1][index] =
((W::get_dR()[0][2][index] - W::get_dR()[2][0][index]) * qw_mul_4 -
4 * dqw * (r[0][2] - r[2][0])) *
inv_4q0_pow2;
// z
W::get_dQ()[2][index] =
((W::get_dR()[1][0][index] - W::get_dR()[0][1][index]) * qw_mul_4 -
4 * dqw * (r[1][0] - r[0][1])) *
inv_4q0_pow2;
}
} else {
const int i = Get_i_For_R2Q(r);
const int j = (i + 1) % 3;
const int k = (j + 1) % 3;
// inv_qw = 2 / sqrt(r[i][i] - r[j][j] - r[k][k] + 1) = 1 / q_i
inv_qw = 2 * Wrapper::rsqrtG<T>::call(r[i][i] - r[j][j] - r[k][k] + 1);
// qw_mul_4 temporarily holds 1 / (4 * q_i)
T qw_mul_4 = 0.25 * inv_qw;
W::get_Q()[i][idx] = 1 / inv_qw;
// w
W::get_Q()[3][idx] = (r[k][j] - r[j][k]) * qw_mul_4;
W::get_Q()[j][idx] = (r[j][i] + r[i][j]) * qw_mul_4;
W::get_Q()[k][idx] = (r[k][i] + r[i][k]) * qw_mul_4;
// qw_mul_4 should be the inv of inv_4qw
qw_mul_4 = 1 / qw_mul_4;
const T inv_4q0_pow2 = inv_qw * inv_qw * 0.0625;
for (int n = 0; n < N; ++n) {
unsigned int index = idx + n * nItem;
const T dq0 = 0.125 * inv_qw *
(W::get_dR()[i][i][index] - W::get_dR()[j][j][index] -
W::get_dR()[k][k][index]);
W::get_dQ()[i][index] = dq0;
// w
W::get_dQ()[3][index] =
((W::get_dR()[k][j][index] - W::get_dR()[j][k][index]) * qw_mul_4 -
4 * dq0 * (r[k][j] - r[j][k])) *
inv_4q0_pow2;
W::get_dQ()[j][index] =
((W::get_dR()[j][i][index] + W::get_dR()[i][j][index]) * qw_mul_4 -
4 * dq0 * (r[j][i] + r[i][j])) *
inv_4q0_pow2;
W::get_dQ()[k][index] =
((W::get_dR()[k][i][index] + W::get_dR()[i][k][index]) * qw_mul_4 -
4 * dq0 * (r[k][i] + r[i][k])) *
inv_4q0_pow2;
}
}
}
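// Note (illustrative): this is the usual numerically stable rotation-to-quaternion
// recovery. When the trace is positive, qw = sqrt(trace + 1) / 2 is bounded away from
// zero and the remaining components are read off the antisymmetric part of r.
// Otherwise Get_i_For_R2Q picks the largest diagonal entry r[i][i], so that
// q_i = sqrt(1 + r[i][i] - r[j][j] - r[k][k]) / 2 is the component recovered first
// and the divisions by 4 * q_i stay well conditioned.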
template <typename T>
__global__ void Normalize_(const int nItem, const int N, T *Qx, T *Qy, T *Qz,
T *Qw, T *dQx, T *dQy, T *dQz, T *dQw) {
unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= nItem) return;
const T qw = Qw[idx];
const T qx = Qx[idx];
const T qy = Qy[idx];
const T qz = Qz[idx];
int sign = qw > 0 ? 1 : -1;
const T inv_l2 =
Wrapper::rsqrtG<T>::call(qw * qw + qx * qx + qy * qy + qz * qz) * sign;
Qw[idx] = qw * inv_l2;
Qx[idx] = qx * inv_l2;
Qy[idx] = qy * inv_l2;
Qz[idx] = qz * inv_l2;
for (int i = 0; i < N; ++i) {
unsigned int index = idx + i * nItem;
const T dqw = dQw[index];
const T dqx = dQx[index];
const T dqy = dQy[index];
const T dqz = dQz[index];
const T common_coeff =
inv_l2 * inv_l2 * (qw * dqw + qx * dqx + qy * dqy + qz * dqz);
dQw[index] = inv_l2 * (dqw - qw * common_coeff);
dQx[index] = inv_l2 * (dqx - qx * common_coeff);
dQy[index] = inv_l2 * (dqy - qy * common_coeff);
dQz[index] = inv_l2 * (dqz - qz * common_coeff);
}
}
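// Note (illustrative): the gradient update above is the Jacobian of quaternion
// normalization applied to each stored direction. For n(q) = q / ||q|| (with the sign
// flip making qw >= 0), the directional derivative along dq is
//   dn = (dq - q * (q . dq) / ||q||^2) / ||q||.
// With inv_l2 = sign(qw) / ||q||, inv_l2^2 = 1 / ||q||^2, so common_coeff equals
// (q . dq) / ||q||^2 and dQ = inv_l2 * (dq - q * common_coeff) matches this formula.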
} // namespace
template <typename T>
JM33<T> QuaternionToRotationMatrix(const JV4<T> &Q) {
JM33<T> R{};
const MegAutoBA::JetVector<T> &JV_Template = Q(0, 0);
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
R(i, j).initAs(JV_Template);
}
}
const auto nItem = JV_Template.getItemNum();
const auto N = JV_Template.getGradShape();
// 512 instead of 1024 for the limitation of registers
dim3 block_dim(std::min(decltype(nItem)(512), nItem));
dim3 grid_dim((nItem - 1) / block_dim.x + 1);
QuaternionToRotation<T><<<grid_dim, block_dim>>>(
nItem, N, Q.x().getCUDAResPtr(), Q.y().getCUDAResPtr(),
Q.z().getCUDAResPtr(), Q.w().getCUDAResPtr(), Q.x().getCUDAGradPtr(),
Q.y().getCUDAGradPtr(), Q.z().getCUDAGradPtr(), Q.w().getCUDAGradPtr(),
R(0, 0).getCUDAResPtr(), R(0, 1).getCUDAResPtr(), R(0, 2).getCUDAResPtr(),
R(1, 0).getCUDAResPtr(), R(1, 1).getCUDAResPtr(), R(1, 2).getCUDAResPtr(),
R(2, 0).getCUDAResPtr(), R(2, 1).getCUDAResPtr(), R(2, 2).getCUDAResPtr(),
R(0, 0).getCUDAGradPtr(), R(0, 1).getCUDAGradPtr(),
R(0, 2).getCUDAGradPtr(), R(1, 0).getCUDAGradPtr(),
R(1, 1).getCUDAGradPtr(), R(1, 2).getCUDAGradPtr(),
R(2, 0).getCUDAGradPtr(), R(2, 1).getCUDAGradPtr(),
R(2, 2).getCUDAGradPtr());
// TODO: use stream sync later
cudaDeviceSynchronize();
return R;
}
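// Illustrative usage sketch (only the functions and accessors defined in this file are
// real; the construction of q is hypothetical and elided):
//   JV4<double> q = ...;                              // unit quaternion block
//   JM33<double> R = QuaternionToRotationMatrix(q);   // q -> R, with gradients
//   JV4<double> q2 = RotationMatrixToQuaternion(R);   // R -> q
//   Normalize_(q2);                                   // q2 should match q up to sign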
template <typename T>
JV4<T> RotationMatrixToQuaternion(const JM33<T> &R) {
using W = R2Q_Address_Wrapper<T>;
JV4<T> Q{};
cudaStream_t stream;
cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
const T *const address_R[3][3]{
{R(0, 0).getCUDAResPtr(), R(0, 1).getCUDAResPtr(),
R(0, 2).getCUDAResPtr()},
{R(1, 0).getCUDAResPtr(), R(1, 1).getCUDAResPtr(),
R(1, 2).getCUDAResPtr()},
{R(2, 0).getCUDAResPtr(), R(2, 1).getCUDAResPtr(),
R(2, 2).getCUDAResPtr()}};
const T *const address_dR[3][3]{
{R(0, 0).getCUDAGradPtr(), R(0, 1).getCUDAGradPtr(),
R(0, 2).getCUDAGradPtr()},
{R(1, 0).getCUDAGradPtr(), R(1, 1).getCUDAGradPtr(),
R(1, 2).getCUDAGradPtr()},
{R(2, 0).getCUDAGradPtr(), R(2, 1).getCUDAGradPtr(),
R(2, 2).getCUDAGradPtr()}};
cudaMemcpyToSymbolAsync(W::get_R(), address_R, 9 * sizeof(T *), 0,
cudaMemcpyHostToDevice, stream);
cudaMemcpyToSymbolAsync(W::get_dR(), address_dR, 9 * sizeof(T *), 0,
cudaMemcpyHostToDevice, stream);
const MegAutoBA::JetVector<T> &JV_Template = R(0, 0);
for (int i = 0; i < 4; ++i) {
Q(i).initAs(JV_Template);
}
T *const address_Q[4]{Q.x().getCUDAResPtr(), Q.y().getCUDAResPtr(),
Q.z().getCUDAResPtr(), Q.w().getCUDAResPtr()};
T *const address_dQ[4]{Q.x().getCUDAGradPtr(), Q.y().getCUDAGradPtr(),
Q.z().getCUDAGradPtr(), Q.w().getCUDAGradPtr()};
cudaMemcpyToSymbolAsync(W::get_Q(), address_Q, 4 * sizeof(T *), 0,
cudaMemcpyHostToDevice, stream);
cudaMemcpyToSymbolAsync(W::get_dQ(), address_dQ, 4 * sizeof(T *), 0,
cudaMemcpyHostToDevice, stream);
const auto nItem = JV_Template.getItemNum();
const auto N = JV_Template.getGradShape();
// 512 instead of 1024 for the limitation of registers
dim3 block_dim(std::min(decltype(nItem)(512), nItem));
dim3 grid_dim((nItem - 1) / block_dim.x + 1);
RotationToQuaternion<T><<<grid_dim, block_dim, 0, stream>>>(nItem, N);
cudaStreamSynchronize(stream);
cudaStreamDestroy(stream);
return Q;
}
template <typename T>
JV4<T> &Normalize_(JV4<T> &Q) {
cudaStream_t stream;
cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
const auto nItem = Q(0).getItemNum();
const auto N = Q(0).getGradShape();
// 768 instead of 1024 for the limitation of registers
dim3 block_dim(std::min(decltype(nItem)(768), nItem));
dim3 grid_dim((nItem - 1) / block_dim.x + 1);
Normalize_<T><<<grid_dim, block_dim, 0, stream>>>(
nItem, N, Q.x().getCUDAResPtr(), Q.y().getCUDAResPtr(),
Q.z().getCUDAResPtr(), Q.w().getCUDAResPtr(), Q.x().getCUDAGradPtr(),
Q.y().getCUDAGradPtr(), Q.z().getCUDAGradPtr(), Q.w().getCUDAGradPtr());
cudaStreamSynchronize(stream);
cudaStreamDestroy(stream);
return Q;
}
template JM33<float> QuaternionToRotationMatrix(const JV4<float> &Q);
template JM33<double> QuaternionToRotationMatrix(const JV4<double> &Q);
template JV4<float> RotationMatrixToQuaternion(const JM33<float> &R);
template JV4<double> RotationMatrixToQuaternion(const JM33<double> &R);
template JV4<float> &Normalize_(JV4<float> &Q);
template JV4<double> &Normalize_(JV4<double> &Q);
} // namespace geo
} // namespace MegAutoBA
|
the_stack
|
/*
 * This sample demonstrates how 2D convolutions
* with very large kernel sizes
* can be efficiently implemented
* using FFT transformations.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cufft.h>
#include <cutil_inline.h>
typedef float2 Complex;
////////////////////////////////////////////////////////////////////////////////
// Helper functions
////////////////////////////////////////////////////////////////////////////////
//Round a / b to nearest higher integer value
int iDivUp(int a, int b){
return (a % b != 0) ? (a / b + 1) : (a / b);
}
//Align a to nearest higher multiple of b
int iAlignUp(int a, int b){
return (a % b != 0) ? (a - a % b + b) : a;
}
////////////////////////////////////////////////////////////////////////////////
// Reference straightforward CPU convolution
////////////////////////////////////////////////////////////////////////////////
extern "C" void convolutionCPU(
Complex *h_Result,
Complex *h_Data,
Complex *h_Kernel,
int dataW,
int dataH,
int kernelW,
int kernelH,
int kernelX,
int kernelY
);
////////////////////////////////////////////////////////////////////////////////
// Padding kernels
////////////////////////////////////////////////////////////////////////////////
#include "convolutionFFT2D_kernel.cuh"
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
int calculateFFTsize(int dataSize){
//Highest non-zero bit position of dataSize
int hiBit;
//Nearest lower and higher power-of-two values for dataSize
unsigned int lowPOT, hiPOT;
//Align data size to a multiple of half-warp
//in order to have each line starting at properly aligned addresses
//for coalesced global memory writes in padKernel() and padData()
dataSize = iAlignUp(dataSize, 16);
//Find highest non-zero bit
for(hiBit = 31; hiBit >= 0; hiBit--)
if(dataSize & (1U << hiBit)) break;
//No need to align, if already power of two
lowPOT = 1U << hiBit;
if(lowPOT == dataSize) return dataSize;
//Align to a nearest higher power of two, if the size is small enough,
//else align only to a nearest higher multiple of 512,
//in order to save computation and memory bandwidth
hiPOT = 1U << (hiBit + 1);
if(hiPOT <= 1024)
return hiPOT;
else
return iAlignUp(dataSize, 512);
}
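//Worked example (illustrative), using the constants defined below:
//DATA_W + PADDING_W = 200 + 6 = 206 -> iAlignUp(206, 16) = 208;
//the highest set bit of 208 is bit 7, so lowPOT = 128 != 208 and hiPOT = 256;
//256 <= 1024, so calculateFFTsize() returns 256 for both FFT_W and FFT_H.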
//Kernel dimensions
const int KERNEL_W = 7;
const int KERNEL_H = 7;
//Kernel center position
const int KERNEL_X = 1;
const int KERNEL_Y = 6;
//Width and height of padding for "clamp to border" addressing mode
const int PADDING_W = KERNEL_W - 1;
const int PADDING_H = KERNEL_H - 1;
//Input data dimension
#if 1
const int DATA_W = 200;
const int DATA_H = 200;
#else
const int DATA_W = 1000;
const int DATA_H = 1000;
#endif
//Derive FFT size from data and kernel dimensions
const int FFT_W = calculateFFTsize(DATA_W + PADDING_W);
const int FFT_H = calculateFFTsize(DATA_H + PADDING_H);
const int FFT_SIZE = FFT_W * FFT_H * sizeof(Complex);
const int KERNEL_SIZE = KERNEL_W * KERNEL_H * sizeof(Complex);
const int DATA_SIZE = DATA_W * DATA_H * sizeof(Complex);
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv){
Complex
*h_Kernel,
*h_Data,
*h_ResultCPU,
*h_ResultGPU;
cudaArray
*a_Kernel,
*a_Data;
cudaChannelFormatDesc float2tex
= cudaCreateChannelDesc<float2>();
Complex
*d_PaddedKernel,
*d_PaddedData;
cufftHandle FFTplan;
Complex
rCPU, rGPU;
double
max_delta_ref, delta, ref, sum_delta2, sum_ref2, L2norm;
int i, x, y;
unsigned int hTimer;
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if ( cutCheckCmdLineFlag(argc, (const char **)argv, "device"))
cutilDeviceInit(argc, argv);
else
cudaSetDevice( cutGetMaxGflopsDeviceId() );
cutilCheckError( cutCreateTimer(&hTimer) );
printf("Input data size : %i x %i\n", DATA_W, DATA_H );
printf("Convolution kernel size : %i x %i\n", KERNEL_W, KERNEL_H );
printf("Padded image size : %i x %i\n", DATA_W + PADDING_W, DATA_H + PADDING_H);
printf("Aligned padded image size : %i x %i\n", FFT_W, FFT_H );
printf("Allocating memory...\n");
h_Kernel = (Complex *)malloc(KERNEL_SIZE);
h_Data = (Complex *)malloc(DATA_SIZE);
h_ResultCPU = (Complex *)malloc(DATA_SIZE);
h_ResultGPU = (Complex *)malloc(FFT_SIZE);
cutilSafeCall( cudaMallocArray(&a_Kernel, &float2tex, KERNEL_W, KERNEL_H) );
cutilSafeCall( cudaMallocArray(&a_Data, &float2tex, DATA_W, DATA_H) );
cutilSafeCall( cudaMalloc((void **)&d_PaddedKernel, FFT_SIZE) );
cutilSafeCall( cudaMalloc((void **)&d_PaddedData, FFT_SIZE) );
printf("Generating random input data...\n");
srand(2007);
for(i = 0; i < (KERNEL_W * KERNEL_H); i++){
h_Kernel[i].x = (float)rand() / (float)RAND_MAX;
h_Kernel[i].y = 0;
}
for(i = 0; i < (DATA_W * DATA_H); i++){
h_Data[i].x = (float)rand() / (float)RAND_MAX;
h_Data[i].y = 0;
}
printf("Creating FFT plan for %i x %i...\n", FFT_W, FFT_H);
cufftSafeCall( cufftPlan2d(&FFTplan, FFT_H, FFT_W, CUFFT_C2C) );
printf("Uploading to GPU and padding convolution kernel and input data...\n");
printf("...initializing padded kernel and data storage with zeroes...\n");
cutilSafeCall( cudaMemset(d_PaddedKernel, 0, FFT_SIZE) );
cutilSafeCall( cudaMemset(d_PaddedData, 0, FFT_SIZE) );
printf("...copying input data and convolution kernel from host to CUDA arrays\n");
cutilSafeCall( cudaMemcpyToArray(a_Kernel, 0, 0, h_Kernel, KERNEL_SIZE, cudaMemcpyHostToDevice) );
cutilSafeCall( cudaMemcpyToArray(a_Data, 0, 0, h_Data, DATA_SIZE, cudaMemcpyHostToDevice) );
printf("...binding CUDA arrays to texture references\n");
cutilSafeCall( cudaBindTextureToArray(texKernel, a_Kernel) );
cutilSafeCall( cudaBindTextureToArray(texData, a_Data) );
//Block width should be a multiple of maximum coalesced write size
//for coalesced memory writes in padKernel() and padData()
dim3 threadBlock(16, 12);
dim3 kernelBlockGrid(iDivUp(KERNEL_W, threadBlock.x), iDivUp(KERNEL_H, threadBlock.y));
dim3 dataBlockGrid(iDivUp(FFT_W, threadBlock.x), iDivUp(FFT_H, threadBlock.y));
printf("...padding convolution kernel\n");
padKernel<<<kernelBlockGrid, threadBlock>>>(
d_PaddedKernel,
FFT_W,
FFT_H,
KERNEL_W,
KERNEL_H,
KERNEL_X,
KERNEL_Y
);
cutilCheckMsg("padKernel() execution failed\n");
printf("...padding input data array\n");
padData<<<dataBlockGrid, threadBlock>>>(
d_PaddedData,
FFT_W,
FFT_H,
DATA_W,
DATA_H,
KERNEL_W,
KERNEL_H,
KERNEL_X,
KERNEL_Y
);
cutilCheckMsg("padData() execution failed\n");
//Not including kernel transformation into time measurement,
//since convolution kernel is not changed very frequently
printf("Transforming convolution kernel...\n");
cufftSafeCall( cufftExecC2C(FFTplan, (cufftComplex *)d_PaddedKernel, (cufftComplex *)d_PaddedKernel, CUFFT_FORWARD) );
printf("Running GPU FFT convolution...\n");
cutilSafeCall( cudaThreadSynchronize() );
cutilCheckError( cutResetTimer(hTimer) );
cutilCheckError( cutStartTimer(hTimer) );
cufftSafeCall( cufftExecC2C(FFTplan, (cufftComplex *)d_PaddedData, (cufftComplex *)d_PaddedData, CUFFT_FORWARD) );
modulateAndNormalize<<<16, 128>>>(
d_PaddedData,
d_PaddedKernel,
FFT_W * FFT_H
);
cutilCheckMsg("modulateAndNormalize() execution failed\n");
cufftSafeCall( cufftExecC2C(FFTplan, (cufftComplex *)d_PaddedData, (cufftComplex *)d_PaddedData, CUFFT_INVERSE) );
cutilSafeCall( cudaThreadSynchronize() );
cutilCheckError( cutStopTimer(hTimer) );
double gpuTime = cutGetTimerValue(hTimer);
printf("GPU time: %f msecs. //%f MPix/s\n", gpuTime, DATA_W * DATA_H * 1e-6 / (gpuTime * 0.001) );
printf("Reading back GPU FFT results...\n");
cutilSafeCall( cudaMemcpy(h_ResultGPU, d_PaddedData, FFT_SIZE, cudaMemcpyDeviceToHost) );
printf("Checking GPU results...\n");
printf("...running reference CPU convolution\n");
convolutionCPU(
h_ResultCPU,
h_Data,
h_Kernel,
DATA_W,
DATA_H,
KERNEL_W,
KERNEL_H,
KERNEL_X,
KERNEL_Y
);
printf("...comparing the results\n");
sum_delta2 = 0;
sum_ref2 = 0;
max_delta_ref = 0;
for(y = 0; y < DATA_H; y++)
for(x = 0; x < DATA_W; x++){
rCPU = h_ResultCPU[y * DATA_W + x];
rGPU = h_ResultGPU[y * FFT_W + x];
delta = (rCPU.x - rGPU.x) * (rCPU.x - rGPU.x) + (rCPU.y - rGPU.y) * (rCPU.y - rGPU.y);
ref = rCPU.x * rCPU.x + rCPU.y * rCPU.y;
if((delta / ref) > max_delta_ref) max_delta_ref = delta / ref;
sum_delta2 += delta;
sum_ref2 += ref;
}
L2norm = sqrt(sum_delta2 / sum_ref2);
printf("Max delta / CPU value %E\n", sqrt(max_delta_ref));
printf("L2 norm: %E\n", L2norm);
printf((L2norm < 1e-6) ? "TEST PASSED\n" : "TEST FAILED\n");
printf("Shutting down...\n");
cutilSafeCall( cudaUnbindTexture(texData) );
cutilSafeCall( cudaUnbindTexture(texKernel) );
cufftSafeCall( cufftDestroy(FFTplan) );
cutilSafeCall( cudaFree(d_PaddedData) );
cutilSafeCall( cudaFree(d_PaddedKernel) );
cutilSafeCall( cudaFreeArray(a_Data) );
cutilSafeCall( cudaFreeArray(a_Kernel) );
free(h_ResultGPU);
free(h_ResultCPU);
free(h_Data);
free(h_Kernel);
cudaThreadExit();
cutilExit(argc, argv);
}
|
the_stack
|
#include "HoughCircle.h"
#include <iostream>
#include <fstream>
#include <cmath>
using namespace std;
#include "ErrorCode.h"
#include "CoordiSet.h"
//#define DEBUG
// Macro: HOUGH_INF_GREAT
// A sufficiently large positive integer that is treated as infinity.
#define HOUGH_INF_GREAT ((1 << 30) - 1)
// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Default 2D thread-block dimensions.
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 8
// Macro: DEF_BLOCK_1D
// Default 1D thread-block size.
#define DEF_BLOCK_1D 512
//---------------------------- Kernel declarations (10) -----------------------------
// Kernel: _houghcirImgKer (accumulate votes from the input image inimg)
// For every foreground pixel of the input image, enumerate the circles that pixel
// could lie on, compute the candidate centers and radii, and accumulate the votes.
static __global__ void // kernels return no value
_houghcirImgKer(
ImageCuda inimg, // input image
int bufHoughDev[], // vote accumulator
int radiusMin, // smallest circle radius to detect
int bufwidth, // width of the vote accumulator bufHough
int bufheight // height of the vote accumulator bufHough
);
// Kernel: _findpartmaxKer (find local maxima)
// Search the vote accumulator bufHough for local maxima: a cell whose vote count
// exceeds that of its neighbours and is above the circle threshold is considered
// a local maximum, i.e. a candidate circle.
static __global__ void
_findpartmaxKer(
int bufHoughDev[], // vote accumulator
int bufsortDev[], // local-maximum matrix
int sumdev[], // number of candidate circles
int threshold, // circle threshold
int bufwidth, // width of the vote accumulator bufHough
int bufheight, // height of the vote accumulator bufHough
int numperRDev[] // number of circles per radius
);
// Kernel: _countcirbyRKer (count circles per radius)
// For each radius, scan the local-maximum matrix and record, for every local
// maximum (candidate circle), its ordinal number among the candidates of that
// radius; the ordinal is written back into the bufsort array.
static __global__ void
_countcirbyRKer(
int bufsortDev[], // local-maximum matrix
int bufwidth, // matrix width
int bufheight // matrix height
);
// Kernel: _getcirinfKer (collect vote counts and indices of the candidate circles)
// Store the vote count and the accumulator index of every candidate circle in the
// corresponding output arrays.
static __global__ void
_getcirinfKer(
int bufHoughDev[], // vote accumulator
int bufsortDev[], // local-maximum matrix
int numperRDev[], // number of circles per radius
int cirvoteDev[], // circle vote counts
int cirindexDev[], // circle indices
int bufwidth, // matrix width
int bufheight // matrix height
);
// Kernel: _shearToPosKer (rearrange the data)
// Rearrange the sorted arrays (unfold the snake ordering left by the shear sort).
static __global__ void
_shearToPosKer(
int cirvoteDev[], // circle vote counts
int cirindexDev[], // circle indices
int lensec, // number of matrix rows
int judge // per-block shared-memory size
);
// Kernel: _shearSortRowDesKer (row sort, descending)
// Bitonic-sort every row of the matrix being sorted.
static __global__ void
_shearSortRowDesKer(
int cirvoteDev[], // circle vote counts
int cirindexDev[], // circle indices
int lensec, // number of matrix rows
int judge // per-block shared-memory size
);
// Kernel: _shearSortColDesKer (column sort, descending)
// Bitonic-sort every column of the matrix being sorted.
static __global__ void
_shearSortColDesKer(
int cirvoteDev[], // circle vote counts
int cirindexDev[], // circle indices
int length, // number of matrix columns
int lensec, // number of matrix rows
int judge // per-block shared-memory size
);
// Kernel: _calcirparamKer (compute the returned circle parameters)
// For the circles sorted by vote count, recover the parameters and store them in
// the returned circle-parameter structures.
static __global__ void
_calcirparamKer(
int cirvoteDev[], // circle vote counts
int cirindexDev[], // circle indices
CircleParam circleDev[], // returned circle parameters
int bufwidth, // matrix width
int bufheight, // matrix height
int radiusMin // smallest circle radius to detect
);
// Kernel: _houghoutKer (draw the detected circles)
// Using the returned circle-parameter structures, draw the finally detected
// circles into outimg.
static __global__ void
_houghoutKer(
ImageCuda outimg, // output image
CircleParam cirparamdev[], // circle parameter structures
int circlenum // number of circles
);
//---------------------------- Global function declarations (3) -------------------------------------
// Device static method: _findcirsumDev (total number of circles with radius below 'radius')
// Accumulate the per-radius circle counts computed in the previous step to obtain
// the total number of circles whose radius slice is below the given one.
static __device__ int
_findcirsumDev(
int radius, // circle radius
int numperRDev[] // number of circles per radius
);
// Host function: _recalCirParam (determine the finally detected circles and parameters)
// Based on the distances between candidate circles, determine the number of
// finally detected circles and restore their parameters.
static __host__ int
_recalCirParam(
CircleParam circle[], // candidate circle parameters
CircleParam *circleparam, // returned circle parameters
int *circleMax, // maximum number of circles to detect
int sum, // number of candidate circles
float distThres, // minimum distance between two distinct circles
int rThres // minimum radius difference between two distinct circles
);
// Host function: _houghcirByImg (Hough circle detection on an input image)
// Detect circles in the input image via the Hough transform.
static __host__ int
_houghcirByImg(
Image *inimg, // input image
int *circleMax, // maximum number of circles to detect
CircleParam *circleparam, // returned circle parameters
int radiusMin, // smallest circle radius to detect
int radiusMax, // largest circle radius to detect
int cirThreshold, // circle threshold
float distThres, // minimum distance between two distinct circles
int rThres // minimum radius difference between two distinct circles
);
//----------------------------- Kernel implementations -------------------------------------------
// Macro: VOTE(x,y,z)
// Cast one vote in the 3D voting space.
#define VOTE(x,y,z) \
if(x>=0 && x<inimgCud.imgMeta.width && y>=0 && y<inimgCud.imgMeta.height )\
{int index=(z) * (bufwidth + 2) * (bufheight + 2)+((y) + 1) * (bufwidth + 2) + (x) + 1;\
atomicAdd(&bufHoughDev[index], 1);}
// Kernel: _houghcirImgKer (accumulate votes from the input image)
// Votes are cast along Bresenham circles, which improves efficiency.
static __global__ void _houghcirImgKer(
ImageCuda inimgCud, int bufHoughDev[], int radiusMin,
int bufwidth, int bufheight)
{
// Compute the pixel handled by this thread: x0 and y0 are the column and row of
// the pixel, and the z component of the block index selects the radius slice.
// (x0, y0) corresponds to (inimg->roiX1 + x0, inimg->roiY1 + y0); the top-left
// corner of the ROI, (inimg->roiX1, inimg->roiY1), is used as the origin.
int x0 = blockIdx.x * blockDim.x + threadIdx.x;
int y0 = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z;
// Bounds check: out-of-range threads return immediately, which both saves work
// and prevents out-of-bounds accesses.
if (x0 >= inimgCud.imgMeta.width || y0 >= inimgCud.imgMeta.height)
return;
// Local variables.
unsigned char intemp;
int radius;
// Offset of the input pixel in the image data array.
int inidx = y0 * inimgCud.pitchBytes + x0;
// Read the pixel value at the input coordinate.
intemp = inimgCud.imgMeta.imgData[inidx];
// The radius handled by this thread follows from the z coordinate of the block:
// z is the radius-slice index, radius is the actual circle radius.
radius = z + radiusMin;
// If the current pixel (x0, y0) is a foreground pixel (value 255), draw a circle
// of radius 'radius' around it in the voting space, using Bresenham voting.
if (intemp == 255) {
int x, y,d;
x = 0;
y = radius;
d = 3-2*radius;
while(x < y){
// Note: (x, y) are offsets of a circle centered at (0, 0) and must be shifted
// into the (x0, y0) frame; z is the third coordinate of the voting space (the
// radius slice), not the circle radius, so do not confuse it with 'radius'.
VOTE(x0+x,y0+y,z);
VOTE(x0+x,y0-y,z);
VOTE(x0-x,y0+y,z);
VOTE(x0-x,y0-y,z);
VOTE(x0+y,y0+x,z);
VOTE(x0+y,y0-x,z);
VOTE(x0-y,y0+x,z);
VOTE(x0-y,y0-x,z);
if(d < y)
d += 4*x+6;
else{
d += 4*(x-y)+10;
y--;
}
x++;
}// while
}// if (intemp == 255)
}// end of kernel
#undef VOTE
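// Illustrative host-side sketch (not used by the kernels): the same midpoint /
// Bresenham iteration as in _houghcirImgKer, collecting the eight symmetric points
// of one octant. Useful for checking the voting pattern on the CPU.
//
//   #include <utility>
//   #include <vector>
//   static std::vector<std::pair<int, int>> bresenhamCircle(int x0, int y0, int radius)
//   {
//       std::vector<std::pair<int, int>> pts;
//       int x = 0, y = radius, d = 3 - 2 * radius;
//       while (x < y) {
//           int off[8][2] = { { x,  y}, { x, -y}, {-x,  y}, {-x, -y},
//                             { y,  x}, { y, -x}, {-y,  x}, {-y, -x} };
//           for (auto &o : off)
//               pts.emplace_back(x0 + o[0], y0 + o[1]);   // one vote per point
//           if (d < y) d += 4 * x + 6;
//           else { d += 4 * (x - y) + 10; --y; }
//           ++x;
//       }
//       return pts;
//   }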
/*
// Kernel 函数:_houghcirImgKer(根据输入图像计算得票数)
static __global__ void _houghcirImgKer(
ImageCuda inimg, int bufHoughDev[], int radiusMin,
int bufwidth, int bufheight)
{
// 计算线程对应的输出点的位置,其中 x 和 y 分别表示线程处理的像素点的坐标
// 的 x 和 y 分量(其中,x 表示 column;y 表示 row)。
// r 代表当前线程处理的圆的半径的大小。
int x0 = blockIdx.x * blockDim.x + threadIdx.x;
int y0 = blockIdx.y * blockDim.y + threadIdx.y;
int r = blockIdx.z;
// 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一
// 方面防止由于段错误导致的程序崩溃。
if (x0 >= inimg.imgMeta.width || y0 >= inimg.imgMeta.height)
return;
// 定义局部变量。
unsigned char intemp;
int bufidx;
int y, ymin, ymax;
float tempx;
int x, radius;
// 计算输入坐标点对应的图像数据数组下标。
int inidx = y0 * inimg.pitchBytes + x0;
// 读取第一个输入坐标点对应的像素值。
intemp = inimg.imgMeta.imgData[inidx];
// 根据当前 block 的 z 方向坐标计算需要计算的半径的值。
radius = r + radiusMin;
// 若当前像素点(x, y)是前景点(即像素值为 1),则经过该点的半径为 r 的圆
// 心纵坐标范围为(y - r, y + r)。
// 计算该线程需要处理的圆心纵坐标的范围。
ymin = max(0, (int)y0 - radius);
ymax = min(y0 + radius, (int)inimg.imgMeta.height);
// 如果当前像素值为 255,即有效像素值,则对该像素点进行圆检测。
if (intemp == 255) {
// 圆心纵坐标从 bmin 循环到 bmax,对于每一个可能的纵坐标值,
// 计算其对应的圆心横坐标 a。若 a 在图像范围内,进行投票。
//i是当前像素点高度坐标,a是当前点宽度坐标
for (y = ymin; y < ymax + 1; y++){
// 计算圆心横坐标 a的值。
tempx = sqrtf((float)(radius * radius - (y0 - y) * (y0 - y)));
// 左半圆投票
x = (int)(fabs(x0 - tempx) + 0.5f);
// 若 a 不在范围内,则跳出循环。
if (x <= 0 || x > inimg.imgMeta.width)
continue;
// 计算当前 (x, y, r) 在得票数矩阵中的索引值。
bufidx = r * (bufwidth + 2) * (bufheight + 2) +
(y + 1) * (bufwidth + 2) + x + 1;
// 使用原子操作进行投票。
atomicAdd(&bufHoughDev[bufidx], 1);
// 右半圆投票
x = (int)(fabs(x0 + tempx) + 0.5f);
// 若 a 不在范围内,则跳出循环。
if (x <= 0 || x > inimg.imgMeta.width)
continue;
// 计算当前 (x, y, r) 在得票数矩阵中的索引值。
bufidx = r * (bufwidth + 2) * (bufheight + 2) +
(y + 1) * (bufwidth + 2) + x + 1;
// 使用原子操作进行投票。
atomicAdd(&bufHoughDev[bufidx], 1);
}
}
}
*/
// Kernel 函数:_findpartmaxKer(计算局部最大值)
static __global__ void _findpartmaxKer(
int bufHoughDev[], int bufsortDev[], int sumdev[],
int threshold, int bufwidth, int bufheight, int numperRDev[])
{
// 计算线程对应的输出点的位置,其中 x 和 y 分别表示线程处理的像素点的坐标
// 的 x 和 y 分量(其中,x 表示 column;y 表示 row)。
// r 代表当前线程处理的圆的半径的大小。
int c = blockIdx.x * blockDim.x + threadIdx.x;
int r = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z;
// 计算该线程在块内的相对位置。
int inindex = threadIdx.y * blockDim.x + threadIdx.x;
// 申请共享内存,存该块内符合条件的局部最大值个数,即存在的圆的个数。
__shared__ int totalsum[1];
// 初始化所有块内的共享内存。
if (inindex == 0)
totalsum[0] = 0;
// 块内同步。
__syncthreads();
// 计算当前线程在 bufHough 矩阵中的对应索引值。
int index = z * (bufwidth + 2) * (bufheight + 2) +
(r + 1) * (bufwidth + 2) + c + 1;
int idx = z * bufwidth * bufheight + r * bufwidth + c;
// 当前线程的得票数大于圆的阈值,并且大于邻域中的值时,认为是局部最大值,
// 即可能是圆。
if (bufHoughDev[index] > threshold &&
bufHoughDev[index] > bufHoughDev[index - 1] &&
bufHoughDev[index] >= bufHoughDev[index + 1] &&
bufHoughDev[index] > bufHoughDev[index - bufwidth - 2] &&
bufHoughDev[index] >= bufHoughDev[index + bufwidth + 2]) {
bufsortDev[idx] = bufHoughDev[index];
// 使用原子操作对局部最大值进行统计。
atomicAdd(&numperRDev[z], 1);
atomicAdd(&totalsum[0], 1);
} else {
bufsortDev[idx] = 0;
}
// 块内同步。
__syncthreads();
// 将统计出的圆的个数统计到 sumdev 中。
if (inindex == 0 && totalsum[0] != 0) {
atomicAdd(&sumdev[0], totalsum[0]);
}
}
// Kernel 函数:_countcirbyRKer(按照不同的半径计算圆的个数)
static __global__ void _countcirbyRKer(
int bufsortDev[], int bufwidth, int bufheight)
{
// 计算线程的索引,即圆的半径。
int r = blockIdx.x * blockDim.x + threadIdx.x;
// 初始化圆的个数为 1。
int count = 1;
// 计算该线程对应的局部最大值矩阵 bufsort 中的索引值。
int idx = r * bufwidth * bufheight;
int index;
// 半径为 r,对矩阵 bufsort 进行统计,得到该半径的圆的个数。
for (int j = 0; j < bufheight; j++) {
index = idx;
for (int i = 0; i < bufwidth; i++) {
// 若矩阵 bufsort 当前位置的值不为 0,则为其赋值 count,表示该
// 局部最大值是半径为 r 的圆中的第 count 个。
if (bufsortDev[index] != 0) {
bufsortDev[index] = count;
count++;
}
index += 1;
}
idx += bufwidth;
}
}
// Kernel 函数:_getcirinfKer(获得圆的得票数和索引信息)
static __global__ void _getcirinfKer(
int bufHoughDev[], int bufsortDev[], int numperRDev[],
int cirvoteDev[], int cirindexDev[], int bufwidth, int bufheight)
{
// 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标
// 的 x 和 y 分量(其中,c 表示 column;r 表示 row)。
// z 代表当前线程处理的圆的半径的大小。
int c = blockIdx.x * blockDim.x + threadIdx.x;
int r = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z;
// 计算当前线程在 bufsort 矩阵中的对应索引值。
int index = z * bufwidth * bufheight + r * bufwidth + c;
int ciridx, idx;
// 如果 bufsort 矩阵当前位置的值不为 0,则说明是局部最大值。
if (bufsortDev[index] != 0) {
// 计算该局部最大值的输出位置。该位置等于半径小于 z 的所有圆数
// 加上该局部最大值在当前半径中的位置。
ciridx = bufsortDev[index] + _findcirsumDev(z, numperRDev) - 1;
// 将该局部最大值的索引信息赋值到 cirindex 数组中。
// 该索引是不加边框的bufSortDev中的索引。
cirindexDev[ciridx] = index;
// 计算在得票数矩阵 bufHough 中的索引值。
idx = z * (bufwidth + 2) * (bufheight + 2) +
(r + 1) * (bufwidth + 2) + c + 1;
// 将该局部最大值的得票数赋值到 cirvote 数组中。
cirvoteDev[ciridx] = bufHoughDev[idx];
}
}
// Kernel 函数: _shearToPosKer(转换数据形式)
static __global__ void _shearToPosKer(
int cirvoteDev[], int cirindexDev[], int lensec, int judge)
{
// 读取线程号和块号。
int cid = threadIdx.x;
int rid = blockIdx.x;
extern __shared__ int shared[];
// 通过偏移,获得存放得票数和索引值的两部分共享内存空间。
int *vote, *index;
vote = shared;
index = shared + judge;
// 为得票数和索引值赋初始值。
vote[cid] = cirvoteDev[rid * lensec + cid];
index[cid] = cirindexDev[rid * lensec + cid];
// 块内同步。
__syncthreads();
// 偶数行赋值。
if (rid % 2 == 0) {
cirvoteDev[rid * lensec + cid] = vote[cid];
cirindexDev[rid * lensec + cid] = index[cid];
} else {
// 奇数行赋值。
cirvoteDev[rid * lensec + cid] = vote[lensec - 1 - cid];
cirindexDev[rid * lensec + cid] = index[lensec - 1 - cid];
}
}
// Kernel 函数: _shearSortRowDesKer(行降序排序)
static __global__ void _shearSortRowDesKer(
int cirvoteDev[], int cirindexDev[], int lensec, int judge)
{
// 读取线程号和块号。
int cid = threadIdx.x;
int rid = blockIdx.x;
extern __shared__ int shared[];
// 通过偏移,获得存放得票数和索引值的两部分共享内存空间。
int *vote, *index;
vote = shared;
index = shared + judge;
// 为共享内存赋初始值。
if (cid < lensec) {
vote[cid] = cirvoteDev[rid * lensec + cid];
index[cid] = cirindexDev[rid * lensec + cid];
}
// 块内同步。
__syncthreads();
// 声明临时变量
int ixj, tempvote, tempindex;
// 偶数行降序排序。
if (rid % 2 == 0) {
for (int k = 2; k <= lensec; k <<= 1) {
// 双调合并。
for (int j = k >> 1; j > 0; j >>= 1) {
// ixj 是与当前位置 cid 进行比较交换的位置。
ixj = cid ^ j;
if (ixj > cid) {
// 如果 (cid & k) == 0,按照降序交换两项。
if ((cid & k) == 0 && (vote[cid] < vote[ixj])) {
// 交换得票数。
tempvote = vote[cid];
vote[cid] = vote[ixj];
vote[ixj] = tempvote;
// 交换索引值。
tempindex = index[cid];
index[cid] = index[ixj];
index[ixj] = tempindex;
// 如果 (cid & k) == 0,按照升序交换两项。
} else if ((cid & k) != 0 && vote[cid] > vote[ixj]) {
// 交换得票数。
tempvote = vote[cid];
vote[cid] = vote[ixj];
vote[ixj] = tempvote;
// 交换索引值。
tempindex = index[cid];
index[cid] = index[ixj];
index[ixj] = tempindex;
}
}
__syncthreads();
}
}
// 奇数行升序排序。
} else {
for (int k = 2; k <= lensec; k <<= 1) {
// 双调合并。
for (int j = k >> 1; j > 0; j >>= 1) {
// ixj 是与当前位置 cid 进行比较交换的位置。
ixj = cid ^ j;
if (ixj > cid) {
// 如果 (cid & k) == 0,按照降序交换两项。
if ((cid & k) == 0 && (vote[cid] > vote[ixj])) {
// 交换得票数。
tempvote = vote[cid];
vote[cid] = vote[ixj];
vote[ixj] = tempvote;
// 交换索引值。
tempindex = index[cid];
index[cid] = index[ixj];
index[ixj] = tempindex;
// 如果 (cid & k) == 0,按照升序交换两项。
} else if ((cid & k) != 0 && vote[cid] < vote[ixj]) {
// 交换得票数。
tempvote = vote[cid];
vote[cid] = vote[ixj];
vote[ixj] = tempvote;
// 交换索引值。
tempindex = index[cid];
index[cid] = index[ixj];
index[ixj] = tempindex;
}
}
__syncthreads();
}
}
}
// 将共享内存中的排序后的数组拷贝到全局内存中。
if (cid <lensec) {
cirvoteDev[rid * lensec + cid] = vote[cid];
cirindexDev[rid * lensec + cid] = index[cid];
}
}
// Kernel 函数: _shearSortColDesKer(列降序排序)
static __global__ void _shearSortColDesKer(
int cirvoteDev[], int cirindexDev[],
int length, int lensec, int judge)
{
// 读取线程号和块号。
int cid = threadIdx.x;
int rid = blockIdx.x;
// 判断是否越界。
if (rid >= lensec)
return;
extern __shared__ int shared[];
// 通过偏移,获得存放得票数和索引值的两部分共享内存空间。
int *vote, *index;
vote = shared;
index = shared + judge;
// 为共享内存赋初始值。
if (cid < length) {
vote[cid] = cirvoteDev[rid + cid * lensec];
index[cid] = cirindexDev[rid + cid * lensec];
}
// 块内同步。
__syncthreads();
// 声明临时变量。
int ixj, tempvote, tempindex;
// 并行双调排序,降序排序。
for (int k = 2; k <= length; k <<= 1) {
// 双调合并。
for (int j = k >> 1; j > 0; j >>= 1) {
// ixj 是与当前位置 cid 进行比较交换的位置。
ixj = cid ^ j;
if (ixj > cid) {
// 如果 (cid & k) == 0,按照降序交换两项。
if ((cid & k) == 0 && (vote[cid] < vote[ixj])) {
// 交换得票数。
tempvote = vote[cid];
vote[cid] = vote[ixj];
vote[ixj] = tempvote;
// 交换索引值。
tempindex = index[cid];
index[cid] = index[ixj];
index[ixj] = tempindex;
// 如果 (cid & k) == 0,按照升序交换两项。
} else if ((cid & k) != 0 && vote[cid] > vote[ixj]) {
// 交换得票数。
tempvote = vote[cid];
vote[cid] = vote[ixj];
vote[ixj] = tempvote;
// 交换索引值。
tempindex = index[cid];
index[cid] = index[ixj];
index[ixj] = tempindex;
}
}
__syncthreads();
}
}
// 将共享内存中的排序后的数组拷贝到全局内存中。
if (cid < length) {
cirvoteDev[rid + cid * lensec] = vote[cid];
cirindexDev[rid + cid * lensec] = index[cid];
}
}
// Kernel: _calcirparamKer (compute the returned circle parameters)
static __global__ void _calcirparamKer(
int cirvoteDev[], int cirindexDev[], CircleParam circleDev[],
int bufwidth, int bufheight, int radiusMin)
{
// Thread index, i.e. the rank of the circle in the sorted arrays.
int index = blockIdx.x * blockDim.x + threadIdx.x;
// Local variables.
int radius, x, y, temp;
// Index of the current circle in the cirindex matrix.
int idx = cirindexDev[index];
// Size of one radius slice of the (unpadded) matrix.
int size = (bufwidth) * (bufheight);
// Radius slice of the current circle.
radius = idx / size;
temp = idx - radius * size;
// Row (y) of the circle center.
y = temp / (bufwidth);
// Column (x) of the circle center.
x = temp % (bufwidth);
// Fill in the returned circle parameters.
circleDev[index].a = x;
circleDev[index].b = y;
circleDev[index].radius = radius + radiusMin;
circleDev[index].votes = cirvoteDev[index];
}
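// Worked example (illustrative): with bufwidth = 100 and bufheight = 80 one radius
// slice holds size = 8000 cells, so idx = 2 * 8000 + 17 * 100 + 42 = 17742 decodes
// to radius slice 2 (circle radius 2 + radiusMin), center row y = 17, column x = 42.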
// Kernel: _houghoutKer (draw the detected circles)
static __global__ void _houghoutKer(
ImageCuda outimg, CircleParam cirparamdev[], int circlenum)
{
// Pixel handled by this thread: c is the column and r is the row.
int c = blockIdx.x * blockDim.x + threadIdx.x;
int r = blockIdx.y * blockDim.y + threadIdx.y;
// Bounds check: out-of-range threads return immediately, which saves work and
// prevents out-of-bounds accesses.
if (c >= outimg.imgMeta.width || r >= outimg.imgMeta.height)
return;
// Offset of the current pixel in the image data array.
unsigned char *outptr;
outptr = outimg.imgMeta.imgData + c + r * outimg.pitchBytes;
*outptr = 0;
// Local variables.
int i, temp, radius, a, b;
// Loop over all detected circles and mark the pixels that lie on one of them.
for (i = 0; i < circlenum; i++) {
// Circle parameters: center (a, b) and radius 'radius'.
radius = cirparamdev[i].radius;
a = cirparamdev[i].a;
b = cirparamdev[i].b;
// Squared distance from the current pixel (c, r) to the center (a, b).
temp = (c - a) * (c - a) + (r - b) * (r - b);
// If the squared distance is within 50 of radius^2, treat the pixel as lying
// on the circle and set it to 255 in the output image.
if (abs(temp - radius * radius) < 50)
*outptr = 255;
}
}
//----------------------------- Global function implementations -------------------------------------------
// Function: _findMinMaxCoordinates (find the topmost, bottommost, leftmost and
// rightmost points of the input coordinate set, which determine the image width and height)
static __host__ int _findMinMaxCoordinates(CoordiSet *guidingset,
int *xmin, int *ymin,
int *xmax, int *ymax)
{
// 声明局部变量。
int errcode;
// 在 host 端申请一个新的 CoordiSet 变量。
CoordiSet *tmpcoordiset;
errcode = CoordiSetBasicOp::newCoordiSet(&tmpcoordiset);
if (errcode != NO_ERROR)
return errcode;
errcode = CoordiSetBasicOp::makeAtHost(tmpcoordiset, guidingset->count);
if (errcode != NO_ERROR)
return errcode;
// 将坐标集拷贝到 Host 端。
errcode = CoordiSetBasicOp::copyToHost(guidingset, tmpcoordiset);
if (errcode != NO_ERROR)
return errcode;
// 初始化 x 和 y 方向上的最小最大值。
xmin[0] = xmax[0] = tmpcoordiset->tplData[0];
ymin[0] = ymax[0] = tmpcoordiset->tplData[1];
// 循环寻找坐标集最左、最右、最上、最下的坐标。
for (int i = 1;i < tmpcoordiset->count;i++) {
// 寻找 x 方向上的最小值。
if (xmin[0] > tmpcoordiset->tplData[2 * i])
xmin[0] = tmpcoordiset->tplData[2 * i];
// 寻找 x 方向上的最大值
if (xmax[0] < tmpcoordiset->tplData[2 * i])
xmax[0] = tmpcoordiset->tplData[2 * i];
// 寻找 y 方向上的最小值。
if (ymin[0] > tmpcoordiset->tplData[2 * i + 1])
ymin[0] = tmpcoordiset->tplData[2 * i + 1];
// 寻找 y 方向上的最大值
if (ymax[0] < tmpcoordiset->tplData[2 * i + 1])
ymax[0] = tmpcoordiset->tplData[2 * i + 1];
}
// 释放临时坐标集变量。
CoordiSetBasicOp::deleteCoordiSet(tmpcoordiset);
return errcode;
}
// Device static method: _findcirsumDev (total number of circles with radius below 'radius')
static __device__ int _findcirsumDev(int radius, int numperRDev[])
{
int n = radius;
// Initialize the running total to 0.
int cirsum = 0;
// Accumulate the per-radius counts of all radius slices below the given one.
while (--n >= 0) {
cirsum += numperRDev[n];
}
// Return the accumulated total.
return cirsum;
}
// Host static method: _cirSortLoop (core routine of the shear sort)
static __host__ int _cirSortLoop(int cirvoteDev[], int cirindexDev[],
int length, int lensec)
{
// 检查数组是否为 NULL,如果为 NULL 直接报错返回。
if (cirvoteDev == NULL || cirindexDev == NULL)
return NULL_POINTER;
// 计算二维数组中长和宽的较大值。
int judge;
if (length > 0 && lensec > 0)
judge = (length > lensec) ? length : lensec;
// 若调用 CUDA 出错返回错误代码
if (cudaGetLastError() != cudaSuccess)
return CUDA_ERROR;
for (int i = length; i >= 1; i >>= 1) {
// 首先进行列排序。
_shearSortColDesKer<<<judge, judge, 2 * judge * sizeof (int)>>>
(cirvoteDev, cirindexDev, length, lensec, judge);
// 若调用 CUDA 出错返回错误代码
if (cudaGetLastError() != cudaSuccess)
return CUDA_ERROR;
// 然后进行行排序。
_shearSortRowDesKer<<<judge, judge, 2 * judge * sizeof (int)>>>
(cirvoteDev, cirindexDev, lensec, judge);
// 若调用 CUDA 出错返回错误代码
if (cudaGetLastError() != cudaSuccess)
return CUDA_ERROR;
}
// 整理排序后的数组。
_shearToPosKer<<<length, lensec, 2 * judge * sizeof (int)>>>
(cirvoteDev, cirindexDev, lensec, judge);
// 若调用 CUDA 出错返回错误代码。
if (cudaGetLastError() != cudaSuccess)
return CUDA_ERROR;
return NO_ERROR;
}
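// Note (illustrative): this is a shear sort. Each pass of the loop above sorts every
// column in descending order and then bitonic-sorts the rows in alternating
// directions (even rows descending, odd rows ascending); after the log2-many passes
// the matrix is ordered in a snake (boustrophedon) pattern, and _shearToPosKer then
// reverses the odd rows so the votes form a single descending sequence in memory.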
// Host function: _recalCirParam (determine the finally detected circles and parameters)
// Select the valid, non-duplicate circles from 'circle' and store them in 'circleparam'.
static __host__ int _recalCirParam(
CircleParam *circle, CircleParam *circleparam,
int *circleMax, int sum, float distThres, int rThres)
{
// Decide the final set of circles from the pairwise distances between candidates.
int a1, a2, b1, b2, diffr;
float distance;
// Number of finally detected circles.
int circlenum = 0;
for (int i = 0; i < sum; i++) {
// Skip candidates whose vote count or radius is already zero.
if (circle[i].votes == 0 || circle[i].radius == 0)
continue;
for (int j = i + 1; j < sum; j++) {
// Centers of the two circles.
a1 = circle[i].a;
b1 = circle[i].b;
a2 = circle[j].a;
b2 = circle[j].b;
// Radius difference of the two circles.
diffr = abs(circle[i].radius - circle[j].radius);
// Squared distance between the two centers (a1, b1) and (a2, b2).
distance = (float)(a1 - a2) * (a1 - a2) + (b1 - b2) * (b1 - b2);
// Circles with nearby centers and similar radii are treated as the same circle.
if (distance < distThres * distThres && diffr < rThres) {
// Merge into entry i: average the parameters and add up the votes.
circle[i].a = (circle[i].a+circle[j].a)/2;
circle[i].b = (circle[i].b+circle[j].b)/2;
circle[i].radius = (circle[i].radius+circle[j].radius)/2;
circle[i].votes = circle[i].votes+circle[j].votes;
// Invalidate the other circle, entry j.
circle[j].a = 0;
circle[j].b = 0;
circle[j].radius = 0;
circle[j].votes = 0;
}
}
// One more detected circle.
circlenum++;
}
// Final count: the smaller of circlenum and the requested maximum circleMax.
circleMax[0] = (circlenum < circleMax[0]) ? circlenum : circleMax[0];
// Copy the parameters of the finally detected circles into the output structures.
int k = 0;
for (int i = 0; i < sum; i++) {
// Stop once the last requested circle has been written.
if (k >= circleMax[0])
break;
// A non-zero vote count marks a detected circle: copy it to the output.
// A zero vote count means the circle was merged into another one, so skip it.
if (circle[i].votes != 0) {
circleparam[k].a = circle[i].a;
circleparam[k].b = circle[i].b;
circleparam[k].radius = circle[i].radius;
circleparam[k].votes = circle[i].votes;
// Advance the output position.
k++;
}
}
return NO_ERROR;
}
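// Worked example (illustrative): with distThres = 5 and rThres = 3, the candidates
// (a, b, radius, votes) = (100, 100, 40, 300) and (102, 98, 41, 250) satisfy
// (100-102)^2 + (100-98)^2 = 8 < 25 and |40 - 41| = 1 < 3, so they are merged into
// (101, 99, 40, 550) and the second entry is zeroed out.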
// Macro: FAIL_CIRCLE_IMG_FREE
// On error, release all memory allocated so far.
#define FAIL_CIRCLE_IMG_FREE do { \
if (alldataDev != NULL) \
cudaFree(alldataDev); \
if (cirdataDev != NULL) \
cudaFree(cirdataDev); \
if (circleDev != NULL) \
cudaFree(circleDev); \
if (circle != NULL) \
delete[] circle; \
} while (0)
// Host static method: _houghcirByImg (Hough circle detection on an input image)
// Detect circles in the input image inimg via the Hough transform.
static __host__ int _houghcirByImg(
Image *inimg, int *circleMax, CircleParam *circleparam,
int radiusMin, int radiusMax, int cirThreshold, float distThres,int rThres)
{
// 检查输入图像是否为 NULL,如果为 NULL 直接报错返回。
if (inimg == NULL)
return NULL_POINTER;
int errcode; // 局部变量,错误码
int bufwidth, bufheight;
int rangeR = radiusMax - radiusMin + 1;
// 输入图像不为空,则根据输入图像的ROI区域得到ROI区域的宽和高。
bufwidth = inimg->roiX2-inimg->roiX1;
bufheight = inimg->roiY2-inimg->roiY1;
// 定义设备端的输入输出数组指针,当输入输出指针在 Host 端时,在设备端申请对
// 应大小的数组。
int *alldataDev = NULL;
int *cirdataDev = NULL;
CircleParam *circleDev = NULL;
CircleParam *circle = NULL;
// 声明 Device 端需要的所有空间。
int *bufHoughDev = NULL, *bufsortDev = NULL;
int *sumdev = NULL, *numperRDev = NULL;
cudaError_t cudaerrcode;
// 一次性申请 Device 端需要的所有空间。
cudaerrcode = cudaMalloc((void **)&alldataDev,
(1 + rangeR + bufwidth * bufheight * rangeR +
(bufwidth + 2) * (bufheight + 2) * rangeR) *
sizeof (int));
if (cudaerrcode != cudaSuccess)
return CUDA_ERROR;
// 通过偏移得到各指针的地址。
sumdev = alldataDev;
numperRDev = alldataDev + 1;
bufsortDev = alldataDev + 1 + rangeR;
bufHoughDev = alldataDev + 1 +rangeR+ bufwidth * bufheight * rangeR;
// 初始化 Hough 变换累加器在 Device 上的内存空间。
cudaerrcode = cudaMemset(alldataDev, 0,
(1 + rangeR + bufwidth * bufheight * rangeR +
(bufwidth + 2) * (bufheight + 2) * rangeR) *
sizeof (int));
if (cudaerrcode != cudaSuccess) {
// 释放之前申请的内存。
FAIL_CIRCLE_IMG_FREE;
return CUDA_ERROR;
}
// 将输入图像拷贝入 Device 内存。
errcode = ImageBasicOp::copyToCurrentDevice(inimg);
if (errcode != NO_ERROR) {
// 释放之前申请的内存。
FAIL_CIRCLE_IMG_FREE;
return errcode;
}
// 提取输入图像的 ROI 子图像。
ImageCuda insubimgCud;
errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
if (errcode != NO_ERROR) {
// 释放之前申请的内存。
FAIL_CIRCLE_IMG_FREE;
return errcode;
}
// 调用 Kernel 函数的线程块的尺寸和线程块的数量。
dim3 blocksize, gridsize;
// 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
blocksize.z = 1;
gridsize.x = (insubimgCud.imgMeta.width + blocksize.x - 1) /
blocksize.x;
gridsize.y = (insubimgCud.imgMeta.height + blocksize.y - 1) /
blocksize.y;
gridsize.z = rangeR;
// 调用核函数,对输入图像计算 Hough 累加矩阵。
_houghcirImgKer<<<gridsize, blocksize>>>(
insubimgCud, bufHoughDev, radiusMin, bufwidth, bufheight);
if (cudaGetLastError() != cudaSuccess) {
// 释放之前申请的内存。
FAIL_CIRCLE_IMG_FREE;
return CUDA_ERROR;
}
// 重新计算调用 Kernel 函数的线程块的尺寸和线程块的数量。
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
blocksize.z = 1;
gridsize.x = (bufwidth + blocksize.x - 1) / blocksize.x;
gridsize.y = (bufheight + blocksize.y - 1) / blocksize.y;
gridsize.z = rangeR;
// 调用核函数,对 bufHough 矩阵寻找局部最大值。
_findpartmaxKer<<<gridsize, blocksize>>>(
bufHoughDev, bufsortDev, sumdev, cirThreshold,
bufwidth, bufheight, numperRDev);
if (cudaGetLastError() != cudaSuccess) {
// 释放之前申请的内存。
FAIL_CIRCLE_IMG_FREE;
return CUDA_ERROR;
}
// 可能存在的圆的数量。
int sum;
// 将计算得到的可能存在的圆的数量 sum 拷贝到 Host 端。
cudaerrcode = cudaMemcpy(&sum, sumdev, sizeof (int),
cudaMemcpyDeviceToHost);
if (cudaerrcode != cudaSuccess) {
// 释放之前申请的内存。
FAIL_CIRCLE_IMG_FREE;
return CUDA_ERROR;
}
if(sum<=0){
*circleMax=0;
return NO_ERROR;
}
// Recompute the block and grid dimensions for the next kernel launch.
blocksize.x = (rangeR > DEF_BLOCK_1D) ? DEF_BLOCK_1D : rangeR;
blocksize.y = 1;
blocksize.z = 1;
gridsize.x = (rangeR + blocksize.x - 1) / blocksize.x;
gridsize.y = 1;
gridsize.z = 1;
// Launch the kernel that counts the candidate circles per radius.
_countcirbyRKer<<<gridsize, blocksize>>>(bufsortDev, bufwidth, bufheight);
if (cudaGetLastError() != cudaSuccess) {
// 释放之前申请的内存。
FAIL_CIRCLE_IMG_FREE;
return CUDA_ERROR;
}
// 对统计出的可能存在的圆的总数 sum,
// 取大于或者等于它的最小的 2 的幂次方数。
int index = (int)ceil(log(sum*1.0) / log(2.0f));
if (index > sizeof (int) * 8 - 1)
return OP_OVERFLOW;
int sortlength = (1 << index);
// 声明 Device 端需要的所有空间。
int *cirvoteDev = NULL, *cirindexDev = NULL;
// 一次性申请 Device 端需要的所有空间。
cudaerrcode = cudaMalloc((void **)&cirdataDev,
(2 * sortlength) * sizeof (int));
if (cudaerrcode != cudaSuccess) {
// 释放之前申请的内存。
FAIL_CIRCLE_IMG_FREE;
return CUDA_ERROR;
}
// 初始化 Device 上的内存空间。
cudaerrcode = cudaMemset(cirdataDev, 0, (2 * sortlength) * sizeof (int));
if (cudaerrcode != cudaSuccess) {
// 释放之前申请的内存。
FAIL_CIRCLE_IMG_FREE;
return CUDA_ERROR;
}
// 通过偏移获得数组的地址。
cirindexDev = cirdataDev;
cirvoteDev = cirdataDev + sortlength;
// 重新计算调用 Kernel 函数的线程块的尺寸和线程块的数量。
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
blocksize.z = 1;
gridsize.x = (bufwidth + blocksize.x - 1) / blocksize.x;
gridsize.y = (bufheight + blocksize.y - 1) / blocksize.y;
gridsize.z = rangeR;
// 调用核函数,计算可能圆的索引以及得票数值。
_getcirinfKer<<<gridsize, blocksize>>>(
bufHoughDev, bufsortDev, numperRDev,
cirvoteDev, cirindexDev, bufwidth, bufheight);
if (cudaGetLastError() != cudaSuccess) {
// 释放之前申请的内存。
FAIL_CIRCLE_IMG_FREE;
return CUDA_ERROR;
}
// 需要使用并行的 Shear Sort 对可能圆按照得票数大小
// 排序,定义排序时需要的数组宽度和高度。
int sortwidth = (sortlength > 256) ? 256 : sortlength;
int sortheight = (sortlength + sortwidth - 1) / sortwidth;
// 调用并行 Shear Sort 算法,对可能圆按照得票数排序。
errcode = _cirSortLoop(cirvoteDev, cirindexDev, sortwidth, sortheight);
if (errcode != NO_ERROR) {
// 释放之前申请的内存。
FAIL_CIRCLE_IMG_FREE;
return errcode;
}
#ifdef DEBUG
int *cirIndex=(int*)malloc(sortlength*sizeof (int));
int *cirVote=(int*)malloc(sortlength*sizeof (int));
// Copy the sorted index and vote arrays back to the host for inspection.
cudaerrcode = cudaMemcpy(cirIndex, cirindexDev, sortlength*sizeof (int),
cudaMemcpyDeviceToHost);
cudaerrcode = cudaMemcpy(cirVote, cirvoteDev, sortlength*sizeof (int),
cudaMemcpyDeviceToHost);
int size = (bufwidth) * (bufheight);
for(int n=0;n<sum;n++){
// Radius slice of the current circle.
int radius = cirIndex[n] / size;
int temp = cirIndex[n] - radius * size;
// Row (y) of the circle center (the stored index is unpadded, stride bufwidth).
int b = temp / (bufwidth);
// Column (x) of the circle center.
int a = temp % (bufwidth);
printf("[%2d] index=%10d vote=%5d (%3d,%3d) r=%3d\n",n,cirIndex[n],cirVote[n],a,b,radius);
}
free(cirIndex);
free(cirVote);
#endif
// 申请 Device 端需要的存放圆的返回参数的空间。
cudaerrcode = cudaMalloc((void **)&circleDev,
sum * sizeof (CircleParam));
if (cudaerrcode != cudaSuccess) {
// 释放之前申请的内存。
FAIL_CIRCLE_IMG_FREE;
return CUDA_ERROR;
}
// 重新计算调用 Kernel 函数的线程块的尺寸和线程块的数量。
blocksize.x = (sum > DEF_BLOCK_1D) ? DEF_BLOCK_1D : sum;
blocksize.y = 1;
blocksize.z = 1;
gridsize.x = (sum + blocksize.x - 1) / blocksize.x;
gridsize.y = 1;
gridsize.z = 1;
// 调用核函数,计算圆的返回参数。
_calcirparamKer<<<gridsize, blocksize>>>(
cirvoteDev, cirindexDev, circleDev, bufwidth, bufheight,
radiusMin);
if (cudaGetLastError() != cudaSuccess) {
// 释放之前申请的内存。
FAIL_CIRCLE_IMG_FREE;
return CUDA_ERROR;
}
// 为圆的参数返回结构体分配空间。
circle = new CircleParam[sum];
// 将核函数计算出的圆的返回参数复制到 Host 端中。
cudaerrcode = cudaMemcpy(circle, circleDev,
sum * sizeof (CircleParam),
cudaMemcpyDeviceToHost);
if (cudaerrcode != cudaSuccess) {
// 释放之前申请的内存。
FAIL_CIRCLE_IMG_FREE;
return CUDA_ERROR;
}
// 调用函数 _recalCirParam 计算最终检测的圆的数量以及参数。
errcode = _recalCirParam(circle, circleparam, circleMax, sum, distThres,rThres);
if (errcode != NO_ERROR) {
// 释放之前申请的内存。
FAIL_CIRCLE_IMG_FREE;
return errcode;
}
// 释放之前申请的内存。
cudaFree(alldataDev);
cudaFree(cirdataDev);
cudaFree(circleDev);
delete[] circle;
// 处理完毕,退出。
return NO_ERROR;
}
// Undo the macro definition above.
#undef FAIL_CIRCLE_IMG_FREE
// Global method: _drawCircle (draw the circles in the parameter array onto an image)
__host__ int _drawCircle(Image *resultimg,
int *circleMax,
CircleParam *circleparam
){
int errcode; // 局部变量,错误码
cudaError_t cudaerrcode;
CircleParam *circleDev = NULL;
// 为 device 端圆返回参数数组申请空间。
cudaerrcode = cudaMalloc((void **)&circleDev,
circleMax[0] * sizeof (CircleParam));
if (cudaerrcode != cudaSuccess) {
cudaFree(circleDev);
return CUDA_ERROR;
}
// 将计算得到的参数从 Host 端拷贝到 Device 端。
cudaerrcode = cudaMemcpy(circleDev, circleparam,
circleMax[0] * sizeof (CircleParam),
cudaMemcpyHostToDevice);
if (cudaerrcode != cudaSuccess) {
// 释放之前申请的内存。
cudaFree(circleDev);
return CUDA_ERROR;
}
// 将结果图像拷贝入 Device 内存。
errcode = ImageBasicOp::copyToCurrentDevice(resultimg);
if (errcode != NO_ERROR)
return errcode;
// 提取结果图像的 ROI 子图像。
ImageCuda resultimgCud;
errcode = ImageBasicOp::roiSubImage(resultimg, &resultimgCud);
if (errcode != NO_ERROR)
return errcode;
dim3 blocksize, gridsize;
// 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
blocksize.z = 1;
gridsize.x = (resultimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x;
gridsize.y = (resultimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y;
gridsize.z = 1;
// 调用 kernel函数,得出最终输出图像。
_houghoutKer<<<gridsize, blocksize>>>(
resultimgCud, circleDev, circleMax[0]);
if (cudaGetLastError() != cudaSuccess)
return CUDA_ERROR;
return NO_ERROR;
}
//----------------------------- Member function implementations -------------------------------------------
// Host member method: houghcircle (circle detection via the Hough transform)
__host__ int HoughCircle::houghcircle(Image *inimg, CoordiSet *guidingset,
int *circleMax, CircleParam *circleparam,
bool writetofile)
{
// 检查输入图像是否为 NULL,如果为 NULL 直接报错返回。
if (inimg == NULL && guidingset == NULL)
return NULL_POINTER;
int errcode; // 局部变量,错误码
// 声明输出图像
Image *resultimg;
ImageBasicOp::newImage(&resultimg);
if (guidingset != NULL) {
// 若输入坐标集不为空,则将该点集拷贝入 Device 内存。
errcode = CoordiSetBasicOp::copyToCurrentDevice(guidingset);
if (errcode != NO_ERROR)
return errcode;
// 计算坐标集的最大坐标位置,为创建图像做准备
int minx,miny,maxx,maxy;
int errorcode=_findMinMaxCoordinates(guidingset,&minx,&miny,&maxx,&maxy);
if(errorcode!=NO_ERROR)
return INVALID_DATA;
// 根据输入 coordiset 创建输入图像 coorimg
Image *coorimg;
ImageBasicOp::newImage(&coorimg);
//给工作图像分配空间,宽度是最大坐标值+1,因为坐标从0开始计数,再+1,保证轮廓外连通
ImageBasicOp::makeAtHost(coorimg,maxx+2 ,maxy+2);
// coordiset 转成coorimg ,把坐标集绘制到图像上,前景255,背景0
ImgConvert imgcvt(255,0);
imgcvt.cstConvertToImg(guidingset,coorimg);
#ifdef DEBUG
// 把填充前的图像coorimg保存到文件
ImageBasicOp::copyToHost(coorimg);
ImageBasicOp::writeToFile("coorimg.bmp",coorimg);
#endif
// 根据输入图像 coorimg 进行 Hough 变换圆检测。
errcode = _houghcirByImg(coorimg, circleMax, circleparam, radiusMin,
radiusMax, cirThreshold, distThres,rThres);
if (errcode != NO_ERROR)
return errcode;
// 清除输入图像 coorimg
ImageBasicOp::deleteImage(coorimg);
// 如果需要输出图像,分配空间
if(writetofile)
ImageBasicOp::makeAtHost(resultimg,maxx+2 ,maxy+2);
} else {
// 输入图像不为空,则根据输入图像进行 Hough 变换圆检测。
errcode = _houghcirByImg(inimg, circleMax, circleparam, radiusMin,
radiusMax, cirThreshold, distThres,rThres);
if (errcode != NO_ERROR)
return errcode;
// 分片局部坐标转化成全局坐标
for(int i=0; i< *circleMax; i++) {
circleparam[i].a+=inimg->roiX1;
circleparam[i].b+=inimg->roiY1;
}
// 如果需要输出图像,分配空间
if(writetofile)
ImageBasicOp::makeAtHost(resultimg,
inimg->width,
inimg->height);
}
// If an output image was requested, draw the detected circles and write the
// result to "result.bmp".
if (writetofile) {
_drawCircle(resultimg, circleMax, circleparam);
// Write the detection result to a file.
ImageBasicOp::copyToHost(resultimg);
ImageBasicOp::writeToFile("result.bmp", resultimg);
}
// Release the output image.
ImageBasicOp::deleteImage(resultimg);
// Done; return.
return NO_ERROR;
}
// Host member method: pieceCircle (detect circles in inimg piece by piece and return them in an array)
__host__ int
HoughCircle::pieceCircle(
Image *inimg, // input image to be searched
int piecenum, // number of pieces per dimension
int *circleMax, // returns the number of detected circles
CircleParam *circleparam, // returns the parameters of the detected circles
bool writetofile // whether to write the detection result to a file
){
int pointer=0;
// 计算分片的大小
int cell_x=inimg->width/piecenum;
int cell_y=inimg->height/piecenum;
#ifdef DEBUG
printf("cell_x=%d cell_y=%d\n",cell_x,cell_y);
#endif
// 开始分块处理
for(int y=0;y<piecenum;y++)
for(int x=0;x<piecenum;x++)
{//.......................分块第一阶段..........................
#ifdef DEBUG
printf(" \n----------------- y=[%d] x=[%d]\n",y,x);
#endif
// 每个分片中圆上限
int piececirmax=10;
CircleParam *piececirparam= new CircleParam[piececirmax];
for(int i=0;i<piececirmax;i++){
piececirparam[i].a=-1;
piececirparam[i].b=-1;
piececirparam[i].radius=-1;
piececirparam[i].votes=-1;
}
inimg->roiX1=x*cell_x;
inimg->roiX2=x*cell_x+cell_x-1;
inimg->roiY1=y*cell_y;
inimg->roiY2=y*cell_y+cell_y-1;
#ifdef DEBUG
printf("x1=%d x2=%d y1=%d y2=%d \n",
inimg->roiX1,inimg->roiX2,
inimg->roiY1,inimg->roiY2);
#endif
// 下面函数运行后,piececirmax中放的是检测到的圆的个数。
// houghcircle()返回后得到的是全局坐标
houghcircle(inimg, NULL, &piececirmax, piececirparam,false);
// 分片圆结果放入全局数组
for(int i=0; i< piececirmax; i++) {
if(pointer>=*circleMax)break;
circleparam[pointer]=piececirparam[i];
pointer++;
}
// 循环内声明的局部动态内存,循环内回收
if(piececirparam!=NULL)
{delete[] piececirparam;piececirparam=NULL;}
//.........................分块第二阶段........................
if(x<piecenum-1 && y<piecenum-1){
}
}// end of for x,for y
// 回收资源
// 返回真实矩形的个数
*circleMax=pointer;
if(*circleMax<=0)
return NO_ERROR;
// 如果需要输出图像,调用 kernel 写入到“result.bmp”中
if(writetofile)
{
Image *resultimg;
ImageBasicOp::newImage(&resultimg);
ImageBasicOp::makeAtHost(resultimg,inimg->width,inimg->height);
_drawCircle(resultimg, circleMax, circleparam);
// 检测结果写入文件
ImageBasicOp::copyToHost(resultimg);
ImageBasicOp::writeToFile("result.bmp",resultimg);
ImageBasicOp::deleteImage(resultimg);
}
return NO_ERROR;
}
|
the_stack
|
typedef int int32_t;
#define CUCL_GLOBAL_KERNEL extern "C" __global__
#define GASQ
#define GLOB_ID_1D (blockDim.x * blockIdx.x + threadIdx.x)
#define LOC_ID_1D (threadIdx.x)
#define GRP_ID_1D (blockIdx.x)
#define LOC_SZ_1D (blockDim.x)
#define LOCSHAR_MEM __shared__
#define LSMASQ
#define BARRIER_SYNC __syncthreads()
CUCL_GLOBAL_KERNEL void bconv__out_chan_1000__in_chan_1024__y_1__x_1__img_1__chan_1000( GASQ float const * const filts, // CUCL IN out_chan:in_chan:y:x
GASQ float const * const out_grad_loss, // CUCL IN img:chan:y:x
GASQ float * const in_grad_loss ) // CUCL OUT img:chan:y:x
/* work */ // CUCL REF pels_blk:out_ix_blk:pels_tile:out_ix_tile:pels:out_ix
/* oix */ // CUCL REF in_chan:sy:sx
/* fioc */ // CUCL REF out_chan:ky:kx
{
// CUCL IX pel_ix out_grad_loss use_dims=img:y:x
// CUCL IX filt_elem_ix fioc
// CUCL IX out_ix oix
// CUCL IX GRP_ID_1D work use_dims=pels_blk:out_ix_blk
// CUCL IX LOC_ID_1D work use_dims=pels_tile:out_ix_tile
// note: <each thread handles> work use_dims=pels:out_out_ix; with pels_sz==out_chan_sz==t_tile_sz (currently); loops over in.chan==filts.in_chan
LOCSHAR_MEM float in_smem[40];
LOCSHAR_MEM float filts_smem[120];
float out_tile[8*8] = {0}; // tile of output for this thread to compute, stored in registers
// reg. buffers for one strip each from in and filts, for the same filts element
float filts_strip[8]; // across output chans
float in_strip[8]; // across pels (approx square block in x/y space, favoring x if sqrt() not integer)
int32_t const blk_out_ix = (GRP_ID_1D%9)*15*8;
int32_t const blk_pel_ix = (GRP_ID_1D/9)*5*8;
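// Note (illustrative): the 1D group id is split into 9 out_ix blocks of 15*8 = 120
// indices each and pel blocks of 5*8 = 40 pels each; within a block, LOC_ID_1D%15
// selects the out_ix tile and LOC_ID_1D/15 the pel tile, and each thread accumulates
// one 8x8 register tile (out_tile) over the filt_elem_ix loop below.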
for( int32_t filt_elem_ix = 0; filt_elem_ix != 1024; ++filt_elem_ix ) {
BARRIER_SYNC;
for( int32_t i = 0; i != 1; ++i ) {
if( (LOC_ID_1D+LOC_SZ_1D*i) < 40 ) {
int32_t const pel_ix = (blk_pel_ix+LOC_ID_1D+LOC_SZ_1D*i);
float v = 0;
int const smem_in_ix_y = ((pel_ix/6)%6)+(filt_elem_ix%1) - 0;
int const smem_in_ix_x = (pel_ix%6)+(filt_elem_ix%1) - 0;
if(smem_in_ix_y >= 0 && smem_in_ix_x >= 0 && (pel_ix/36) < 1 &&
smem_in_ix_x < 6 && smem_in_ix_y < 6 ) {
v = out_grad_loss[(pel_ix/36)*36000 +
filt_elem_ix*36 +
smem_in_ix_y*6 +
smem_in_ix_x*1];
}
in_smem[LOC_ID_1D+LOC_SZ_1D*i] = v;
}
}
for( int32_t i = 0; i != 2; ++i ) {
if( (LOC_ID_1D+LOC_SZ_1D*i) < 120 ) {
int32_t const out_ix = (blk_out_ix+LOC_ID_1D+LOC_SZ_1D*i);
float v = 0;
int const smem_filt_ix_y = (out_ix%1)+(filt_elem_ix%1)*1;
int const smem_filt_ix_x = (out_ix%1)+(filt_elem_ix%1)*1;
if( out_ix < 1024 && filt_elem_ix < 1000
&& smem_filt_ix_x < 1 && smem_filt_ix_y < 1 ) {
v = filts[filt_elem_ix*1024 +
out_ix*1 +
smem_filt_ix_y*1 +
smem_filt_ix_x*1];
}
filts_smem[LOC_ID_1D+LOC_SZ_1D*i] = v;
}
}
BARRIER_SYNC;
// begin loads
filts_strip[0] = filts_smem[(LOC_ID_1D%15)*8+0];
filts_strip[1] = filts_smem[(LOC_ID_1D%15)*8+1];
filts_strip[2] = filts_smem[(LOC_ID_1D%15)*8+2];
filts_strip[3] = filts_smem[(LOC_ID_1D%15)*8+3];
filts_strip[4] = filts_smem[(LOC_ID_1D%15)*8+4];
filts_strip[5] = filts_smem[(LOC_ID_1D%15)*8+5];
filts_strip[6] = filts_smem[(LOC_ID_1D%15)*8+6];
filts_strip[7] = filts_smem[(LOC_ID_1D%15)*8+7];
in_strip[0] = in_smem[(LOC_ID_1D/15)*8+0];
in_strip[1] = in_smem[(LOC_ID_1D/15)*8+1];
in_strip[2] = in_smem[(LOC_ID_1D/15)*8+2];
in_strip[3] = in_smem[(LOC_ID_1D/15)*8+3];
in_strip[4] = in_smem[(LOC_ID_1D/15)*8+4];
in_strip[5] = in_smem[(LOC_ID_1D/15)*8+5];
in_strip[6] = in_smem[(LOC_ID_1D/15)*8+6];
in_strip[7] = in_smem[(LOC_ID_1D/15)*8+7];
// end loads;
// begin fmas
out_tile[0] += filts_strip[0]*in_strip[0];
out_tile[1] += filts_strip[1]*in_strip[0];
out_tile[2] += filts_strip[2]*in_strip[0];
out_tile[3] += filts_strip[3]*in_strip[0];
out_tile[4] += filts_strip[4]*in_strip[0];
out_tile[5] += filts_strip[5]*in_strip[0];
out_tile[6] += filts_strip[6]*in_strip[0];
out_tile[7] += filts_strip[7]*in_strip[0];
out_tile[8] += filts_strip[0]*in_strip[1];
out_tile[9] += filts_strip[1]*in_strip[1];
out_tile[10] += filts_strip[2]*in_strip[1];
out_tile[11] += filts_strip[3]*in_strip[1];
out_tile[12] += filts_strip[4]*in_strip[1];
out_tile[13] += filts_strip[5]*in_strip[1];
out_tile[14] += filts_strip[6]*in_strip[1];
out_tile[15] += filts_strip[7]*in_strip[1];
out_tile[16] += filts_strip[0]*in_strip[2];
out_tile[17] += filts_strip[1]*in_strip[2];
out_tile[18] += filts_strip[2]*in_strip[2];
out_tile[19] += filts_strip[3]*in_strip[2];
out_tile[20] += filts_strip[4]*in_strip[2];
out_tile[21] += filts_strip[5]*in_strip[2];
out_tile[22] += filts_strip[6]*in_strip[2];
out_tile[23] += filts_strip[7]*in_strip[2];
out_tile[24] += filts_strip[0]*in_strip[3];
out_tile[25] += filts_strip[1]*in_strip[3];
out_tile[26] += filts_strip[2]*in_strip[3];
out_tile[27] += filts_strip[3]*in_strip[3];
out_tile[28] += filts_strip[4]*in_strip[3];
out_tile[29] += filts_strip[5]*in_strip[3];
out_tile[30] += filts_strip[6]*in_strip[3];
out_tile[31] += filts_strip[7]*in_strip[3];
out_tile[32] += filts_strip[0]*in_strip[4];
out_tile[33] += filts_strip[1]*in_strip[4];
out_tile[34] += filts_strip[2]*in_strip[4];
out_tile[35] += filts_strip[3]*in_strip[4];
out_tile[36] += filts_strip[4]*in_strip[4];
out_tile[37] += filts_strip[5]*in_strip[4];
out_tile[38] += filts_strip[6]*in_strip[4];
out_tile[39] += filts_strip[7]*in_strip[4];
out_tile[40] += filts_strip[0]*in_strip[5];
out_tile[41] += filts_strip[1]*in_strip[5];
out_tile[42] += filts_strip[2]*in_strip[5];
out_tile[43] += filts_strip[3]*in_strip[5];
out_tile[44] += filts_strip[4]*in_strip[5];
out_tile[45] += filts_strip[5]*in_strip[5];
out_tile[46] += filts_strip[6]*in_strip[5];
out_tile[47] += filts_strip[7]*in_strip[5];
out_tile[48] += filts_strip[0]*in_strip[6];
out_tile[49] += filts_strip[1]*in_strip[6];
out_tile[50] += filts_strip[2]*in_strip[6];
out_tile[51] += filts_strip[3]*in_strip[6];
out_tile[52] += filts_strip[4]*in_strip[6];
out_tile[53] += filts_strip[5]*in_strip[6];
out_tile[54] += filts_strip[6]*in_strip[6];
out_tile[55] += filts_strip[7]*in_strip[6];
out_tile[56] += filts_strip[0]*in_strip[7];
out_tile[57] += filts_strip[1]*in_strip[7];
out_tile[58] += filts_strip[2]*in_strip[7];
out_tile[59] += filts_strip[3]*in_strip[7];
out_tile[60] += filts_strip[4]*in_strip[7];
out_tile[61] += filts_strip[5]*in_strip[7];
out_tile[62] += filts_strip[6]*in_strip[7];
out_tile[63] += filts_strip[7]*in_strip[7];
// end fmas;
}
int32_t pel_ix = blk_pel_ix + (LOC_ID_1D/15)*8; // first pel_ix for this thread
int32_t igl_y, igl_x;
for( int32_t work_pel = 0; work_pel < 8; ++work_pel, ++pel_ix) {
int32_t out_ix = blk_out_ix + (LOC_ID_1D%15)*8; // first out_ix for this thread
// begin outs_to_filts_strip
    switch(work_pel) {
    case 0:
filts_strip[0] = out_tile[0];
filts_strip[1] = out_tile[1];
filts_strip[2] = out_tile[2];
filts_strip[3] = out_tile[3];
filts_strip[4] = out_tile[4];
filts_strip[5] = out_tile[5];
filts_strip[6] = out_tile[6];
filts_strip[7] = out_tile[7];
break;
    case 1:
      filts_strip[0] = out_tile[8];
filts_strip[1] = out_tile[9];
filts_strip[2] = out_tile[10];
filts_strip[3] = out_tile[11];
filts_strip[4] = out_tile[12];
filts_strip[5] = out_tile[13];
filts_strip[6] = out_tile[14];
filts_strip[7] = out_tile[15];
break;
    case 2:
      filts_strip[0] = out_tile[16];
filts_strip[1] = out_tile[17];
filts_strip[2] = out_tile[18];
filts_strip[3] = out_tile[19];
filts_strip[4] = out_tile[20];
filts_strip[5] = out_tile[21];
filts_strip[6] = out_tile[22];
filts_strip[7] = out_tile[23];
break;
    case 3:
      filts_strip[0] = out_tile[24];
filts_strip[1] = out_tile[25];
filts_strip[2] = out_tile[26];
filts_strip[3] = out_tile[27];
filts_strip[4] = out_tile[28];
filts_strip[5] = out_tile[29];
filts_strip[6] = out_tile[30];
filts_strip[7] = out_tile[31];
break;
    case 4:
      filts_strip[0] = out_tile[32];
filts_strip[1] = out_tile[33];
filts_strip[2] = out_tile[34];
filts_strip[3] = out_tile[35];
filts_strip[4] = out_tile[36];
filts_strip[5] = out_tile[37];
filts_strip[6] = out_tile[38];
filts_strip[7] = out_tile[39];
break;
    case 5:
      filts_strip[0] = out_tile[40];
filts_strip[1] = out_tile[41];
filts_strip[2] = out_tile[42];
filts_strip[3] = out_tile[43];
filts_strip[4] = out_tile[44];
filts_strip[5] = out_tile[45];
filts_strip[6] = out_tile[46];
filts_strip[7] = out_tile[47];
break;
    case 6:
      filts_strip[0] = out_tile[48];
filts_strip[1] = out_tile[49];
filts_strip[2] = out_tile[50];
filts_strip[3] = out_tile[51];
filts_strip[4] = out_tile[52];
filts_strip[5] = out_tile[53];
filts_strip[6] = out_tile[54];
filts_strip[7] = out_tile[55];
break;
    case 7:
      filts_strip[0] = out_tile[56];
filts_strip[1] = out_tile[57];
filts_strip[2] = out_tile[58];
filts_strip[3] = out_tile[59];
filts_strip[4] = out_tile[60];
filts_strip[5] = out_tile[61];
filts_strip[6] = out_tile[62];
filts_strip[7] = out_tile[63];
break;
}
// end outs_to_filts_strip;
// begin stores
igl_y = (((pel_ix/6)%6)-0)*1+(out_ix%1);
igl_x = ((pel_ix%6)-0)*1+(out_ix%1);
if( igl_x >= 0 && igl_y >= 0 && igl_y < 6 && igl_x < 6 &&
out_ix < 1024 && (pel_ix/36) < 1 ) {
in_grad_loss[ (pel_ix/36)*36864 + out_ix*36 +
igl_y*6 + igl_x*1] = filts_strip[0];
};
++out_ix;
igl_y = (((pel_ix/6)%6)-0)*1+(out_ix%1);
igl_x = ((pel_ix%6)-0)*1+(out_ix%1);
if( igl_x >= 0 && igl_y >= 0 && igl_y < 6 && igl_x < 6 &&
out_ix < 1024 && (pel_ix/36) < 1 ) {
in_grad_loss[ (pel_ix/36)*36864 + out_ix*36 +
igl_y*6 + igl_x*1] = filts_strip[1];
};
++out_ix;
igl_y = (((pel_ix/6)%6)-0)*1+(out_ix%1);
igl_x = ((pel_ix%6)-0)*1+(out_ix%1);
if( igl_x >= 0 && igl_y >= 0 && igl_y < 6 && igl_x < 6 &&
out_ix < 1024 && (pel_ix/36) < 1 ) {
in_grad_loss[ (pel_ix/36)*36864 + out_ix*36 +
igl_y*6 + igl_x*1] = filts_strip[2];
};
++out_ix;
igl_y = (((pel_ix/6)%6)-0)*1+(out_ix%1);
igl_x = ((pel_ix%6)-0)*1+(out_ix%1);
if( igl_x >= 0 && igl_y >= 0 && igl_y < 6 && igl_x < 6 &&
out_ix < 1024 && (pel_ix/36) < 1 ) {
in_grad_loss[ (pel_ix/36)*36864 + out_ix*36 +
igl_y*6 + igl_x*1] = filts_strip[3];
};
++out_ix;
igl_y = (((pel_ix/6)%6)-0)*1+(out_ix%1);
igl_x = ((pel_ix%6)-0)*1+(out_ix%1);
if( igl_x >= 0 && igl_y >= 0 && igl_y < 6 && igl_x < 6 &&
out_ix < 1024 && (pel_ix/36) < 1 ) {
in_grad_loss[ (pel_ix/36)*36864 + out_ix*36 +
igl_y*6 + igl_x*1] = filts_strip[4];
};
++out_ix;
igl_y = (((pel_ix/6)%6)-0)*1+(out_ix%1);
igl_x = ((pel_ix%6)-0)*1+(out_ix%1);
if( igl_x >= 0 && igl_y >= 0 && igl_y < 6 && igl_x < 6 &&
out_ix < 1024 && (pel_ix/36) < 1 ) {
in_grad_loss[ (pel_ix/36)*36864 + out_ix*36 +
igl_y*6 + igl_x*1] = filts_strip[5];
};
++out_ix;
igl_y = (((pel_ix/6)%6)-0)*1+(out_ix%1);
igl_x = ((pel_ix%6)-0)*1+(out_ix%1);
if( igl_x >= 0 && igl_y >= 0 && igl_y < 6 && igl_x < 6 &&
out_ix < 1024 && (pel_ix/36) < 1 ) {
in_grad_loss[ (pel_ix/36)*36864 + out_ix*36 +
igl_y*6 + igl_x*1] = filts_strip[6];
};
++out_ix;
igl_y = (((pel_ix/6)%6)-0)*1+(out_ix%1);
igl_x = ((pel_ix%6)-0)*1+(out_ix%1);
if( igl_x >= 0 && igl_y >= 0 && igl_y < 6 && igl_x < 6 &&
out_ix < 1024 && (pel_ix/36) < 1 ) {
in_grad_loss[ (pel_ix/36)*36864 + out_ix*36 +
igl_y*6 + igl_x*1] = filts_strip[7];
};
++out_ix;
// end stores;
}
}
|
the_stack
|
namespace NKernel
{
struct TNonNegativeSegmentedSum
{
template <typename T>
__host__ __device__ __forceinline__ T operator()(const T& left, const T& right) const
{
const bool leftFlag = ExtractSignBit(left);
const bool rightFlag = ExtractSignBit(right);
const bool newFlag = leftFlag | rightFlag;
const T resultValue = rightFlag ? abs(right) : abs(right) + abs(left);
return OrSignBit(resultValue, newFlag);
}
};
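    // Worked example (added note, not from the original source): running an
    // inclusive scan with this operator over {+1, +2, -3, +4}, where a negative
    // sign marks a segment start, yields {+1, +3, -3, -7}; taking abs() of each
    // output gives the per-segment prefix sums {1, 3, 3, 7} for the segments
    // {1, 2} and {3, 4}, while the sign keeps propagating the "segment started" flag.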
struct TSegmentedSum
{
template <typename T>
__host__ __device__ __forceinline__ TPair<ui32, T> operator()(const TPair<ui32, T>& left, const TPair<ui32, T>& right) const {
const bool leftFlag = left.First;
const bool rightFlag = right.First;
const bool newFlag = leftFlag || rightFlag;
const T resultValue = rightFlag ? right.Second : left.Second + right.Second;
return {newFlag, resultValue};
}
};
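    // Worked example (added note): scanning the flagged pairs
    // {(1,1), (0,2), (1,3), (0,4)} inclusively with this operator produces
    // Second values {1, 3, 3, 7}: the sum restarts wherever a pair carries
    // First == 1 (a segment head), matching the sign-bit variant above.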
//output iterator for segmented scan + scatter with mask routine
//class based on cache-modified-output iterator from cub
template <cub::CacheStoreModifier MODIFIER,
typename TValueType,
typename TOffsetType = ptrdiff_t,
bool Inclusive = false>
class TNonNegativeSegmentedScanOutputIterator
{
private:
// Proxy object
struct TReference
{
TValueType* Ptr;
const ui32* __restrict Index;
const ui32* __restrict End;
/// Constructor
__host__ __device__ __forceinline__ TReference(TValueType* ptr,
const ui32* __restrict index,
const ui32* __restrict end)
: Ptr(ptr)
, Index(index)
, End(end) {
}
/// Assignment
__device__ __forceinline__ TValueType operator=(TValueType val) {
if (Inclusive) {
TIndexWrapper indexWrapper(Index[0]);
TValueType outputValue = abs(val);
Ptr[indexWrapper.Index()] = outputValue;
} else {
if ((Index + 1) != End)
{
TIndexWrapper indexWrapper(Index[1]);
TValueType outputValue = indexWrapper.IsSegmentStart() ? 0 : abs(val);
Ptr[indexWrapper.Index()] = outputValue;
}
}
return val;
}
};
private:
TValueType* Ptr;
const ui32* __restrict Index;
const ui32* __restrict End;
public:
// Required iterator traits
typedef TNonNegativeSegmentedScanOutputIterator self_type; ///< My own type
typedef TOffsetType difference_type; ///< Type to express the result of subtracting one iterator from another
typedef TValueType value_type; ///< The type of the element the iterator can point to
typedef TValueType* pointer; ///< The type of a pointer to an element the iterator can point to
typedef TReference reference; ///< The type of a reference to an element the iterator can point to
typedef std::random_access_iterator_tag iterator_category;
public:
/// Constructor
template <class TQualifiedValueType>
__host__ __device__ __forceinline__ TNonNegativeSegmentedScanOutputIterator(TQualifiedValueType* ptr,
const ui32* __restrict index,
const ui32* __restrict end
)
: Ptr(const_cast<typename cub::RemoveQualifiers<TQualifiedValueType>::Type*>(ptr))
, Index(index)
, End(end) {
}
/// Postfix increment
__host__ __device__ __forceinline__ self_type operator++(int) {
self_type retval = *this;
Index++;
return retval;
}
/// Prefix increment
__host__ __device__ __forceinline__ self_type operator++() {
Index++;
return *this;
}
/// Indirection
__host__ __device__ __forceinline__ reference operator*() const
{
return TReference(Ptr, Index, End);
}
/// Addition
template <typename TDistance>
__host__ __device__ __forceinline__ self_type operator+(TDistance n) const
{
self_type retval(Ptr, Index + n, End);
return retval;
}
/// Addition assignment
template <typename TDistance>
__host__ __device__ __forceinline__ self_type& operator+=(TDistance n) {
Index += n;
return *this;
}
/// Subtraction
template <typename TDistance>
__host__ __device__ __forceinline__ self_type operator-(TDistance n) const {
self_type retval(Ptr, Index - n, End);
return retval;
}
/// Subtraction assignment
template <typename TDistance>
__host__ __device__ __forceinline__ self_type& operator-=(TDistance n) {
Index -= n;
return *this;
}
/// TDistance
__host__ __device__
__forceinline__ difference_type operator-(self_type other) const {
return Index - other.Index;
}
/// Array subscript
template <typename TDistance>
__host__ __device__ __forceinline__ reference operator[](TDistance n) const {
return TReference(Ptr, Index + n, End);
}
/// Equal to
__host__ __device__ __forceinline__ bool operator==(const self_type& rhs) {
return (Index == rhs.Index) && (Ptr == rhs.Ptr);
}
/// Not equal to
__host__ __device__ __forceinline__ bool operator!=(const self_type& rhs) {
return !TNonNegativeSegmentedScanOutputIterator::operator==(rhs);
}
};
//output iterator for segmented scan + scatter with mask routine
//class based on cache-modified-output iterator from cub
template <typename T,
bool Inclusive = false,
typename TOffsetType = ptrdiff_t>
class TSegmentedScanOutputIterator
{
private:
using TValueType = TPair<ui32, T>;
// Proxy object
struct TReference
{
T* Ptr;
T* End;
/// Constructor
__host__ __device__ __forceinline__ TReference(T* ptr,
T* end)
: Ptr(ptr)
, End(end) {
}
/// Assignment
__device__ __forceinline__ TValueType operator=(TValueType val) {
if (Inclusive) {
Ptr[0] = val.Second;
} else {
if ((Ptr + 1) != End)
{
Ptr[1] = val.Second;
}
}
return val;
}
};
private:
T* Ptr;
T* End;
public:
// Required iterator traits
typedef TSegmentedScanOutputIterator self_type; ///< My own type
typedef TOffsetType difference_type; ///< Type to express the result of subtracting one iterator from another
typedef TValueType value_type; ///< The type of the element the iterator can point to
typedef TValueType* pointer; ///< The type of a pointer to an element the iterator can point to
typedef TReference reference; ///< The type of a reference to an element the iterator can point to
typedef std::random_access_iterator_tag iterator_category;
public:
/// Constructor
template <class TQualifiedValueType>
__host__ __device__ __forceinline__ TSegmentedScanOutputIterator(TQualifiedValueType* ptr,
TQualifiedValueType* end)
: Ptr(const_cast<typename cub::RemoveQualifiers<TQualifiedValueType>::Type*>(ptr))
, End(const_cast<typename cub::RemoveQualifiers<TQualifiedValueType>::Type*>(end)) {
}
/// Postfix increment
__host__ __device__ __forceinline__ self_type operator++(int) {
self_type retval = *this;
Ptr++;
return retval;
}
/// Prefix increment
__host__ __device__ __forceinline__ self_type operator++() {
Ptr++;
return *this;
}
/// Indirection
__host__ __device__ __forceinline__ reference operator*() const
{
return TReference(Ptr, End);
}
/// Addition
template <typename TDistance>
__host__ __device__ __forceinline__ self_type operator+(TDistance n) const
{
self_type retval(Ptr + n, End);
return retval;
}
/// Addition assignment
template <typename TDistance>
__host__ __device__ __forceinline__ self_type& operator+=(TDistance n) {
Ptr += n;
return *this;
}
/// Subtraction
template <typename TDistance>
__host__ __device__ __forceinline__ self_type operator-(TDistance n) const {
self_type retval(Ptr - n, End);
return retval;
}
/// Subtraction assignment
template <typename TDistance>
__host__ __device__ __forceinline__ self_type& operator-=(TDistance n) {
Ptr -= n;
return *this;
}
/// TDistance
__host__ __device__
__forceinline__ difference_type operator-(self_type other) const {
return Ptr - other.Ptr;
}
/// Array subscript
template <typename TDistance>
__host__ __device__ __forceinline__ reference operator[](TDistance n) const {
return TReference(Ptr + n, End);
}
/// Equal to
__host__ __device__ __forceinline__ bool operator==(const self_type& rhs) {
return (Ptr == rhs.Ptr) && (End == rhs.End);
}
/// Not equal to
__host__ __device__ __forceinline__ bool operator!=(const self_type& rhs) {
return !TSegmentedScanOutputIterator::operator==(rhs);
}
};
template <typename T,
typename TOffsetT = ptrdiff_t>
class TSegmentedScanInputIterator
{
public:
using TValueType = TPair<ui32, T>;
// Required iterator traits
typedef TSegmentedScanInputIterator self_type; ///< My own type
typedef TOffsetT difference_type; ///< Type to express the result of subtracting one iterator from another
typedef TValueType value_type; ///< The type of the element the iterator can point to
typedef TValueType reference; ///< The type of a reference to an element the iterator can point to
typedef std::random_access_iterator_tag iterator_category; ///< The iterator category
typedef T* pointer;
private:
const T* Src;
const ui32* Flags;
ui32 FlagMask;
public:
/// Constructor
__host__ __device__ __forceinline__ TSegmentedScanInputIterator(const T* src,
const ui32* flags,
ui32 flagMask)
: Src(src)
, Flags(flags)
, FlagMask(flagMask) {
}
/// Postfix increment
__host__ __device__ __forceinline__ self_type operator++(int)
{
self_type retval = *this;
Src++;
Flags++;
return retval;
}
/// Prefix increment
__host__ __device__ __forceinline__ self_type operator++()
{
Src++;
Flags++;
return *this;
}
/// Indirection
__host__ __device__ __forceinline__ reference operator*() const
{
bool flag = Flags[0] & FlagMask;
return {flag, Src[0]};
}
/// Addition
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator+(Distance n) const
{
self_type retval(Src + n, Flags + n, FlagMask);
return retval;
}
/// Addition assignment
template <typename Distance>
__host__ __device__ __forceinline__ self_type& operator+=(Distance n)
{
Src += n;
Flags += n;
return *this;
}
/// Subtraction
template <typename Distance>
__host__ __device__ __forceinline__ self_type operator-(Distance n) const
{
self_type retval(Src - n, Flags - n, FlagMask);
return retval;
}
/// Subtraction assignment
template <typename Distance>
__host__ __device__ __forceinline__ self_type& operator-=(Distance n)
{
Src -= n;
Flags -= n;
return *this;
}
/// Distance
__host__ __device__ __forceinline__ difference_type operator-(self_type other) const
{
return Src - other.Src;
}
/// Array subscript
template <typename Distance>
__host__ __device__ __forceinline__ reference operator[](Distance n) const
{
bool flag = Flags[n] & FlagMask;
return {flag, Src[n]};
}
/// Structure dereference
__host__ __device__ __forceinline__ pointer operator->()
{
return Src;
}
/// Equal to
__host__ __device__ __forceinline__ bool operator==(const self_type& rhs)
{
return (Src == rhs.Src && Flags == rhs.Flags && FlagMask == rhs.FlagMask);
}
/// Not equal to
__host__ __device__ __forceinline__ bool operator!=(const self_type& rhs)
{
return !TSegmentedScanInputIterator::operator==(rhs);
}
};
}
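// Hedged usage sketch (added, not part of the original source): these iterators are
// shaped to plug into cub::DeviceScan. Assuming device buffers `values`, `flags` and
// `result` of length `size`, with the segment-start bit stored in the lowest flag bit,
// an inclusive segmented scan could be issued roughly like this:
//
//   NKernel::TSegmentedScanInputIterator<float> in(values, flags, /*flagMask*/ 1u);
//   NKernel::TSegmentedScanOutputIterator<float, /*Inclusive*/ true> out(result, result + size);
//   size_t tempBytes = 0;
//   cub::DeviceScan::InclusiveScan(nullptr, tempBytes, in, out, NKernel::TSegmentedSum(), size);
//   // ...allocate tempBytes bytes of device scratch as `temp`, then:
//   cub::DeviceScan::InclusiveScan(temp, tempBytes, in, out, NKernel::TSegmentedSum(), size);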
|
the_stack
|
#include "bm3d.hpp"
#define cimg_display 0
#include "CImg.h"
// Repeat the execution of kernels 100 times
#define REPEAT 100
// Adjust the size of the total shared local memory for different GPUs
// e.g. 48KB on P100
#define TOTAL_SLM 48*1024
// Adjust the thread block size of the block matching kernel for different GPUs.
// The maximum thread block size is 32 * MAX_NUM_WARPS
#define MAX_NUM_WARPS 16u
using namespace cimg_library;
int main(int argc, char** argv)
{
if( argc < 4 )
{
std::cerr << "Usage: " << argv[0]
<< " NosiyImage DenoisedImage sigma [color] [ReferenceImage]\n"
<< " color - color image denoising (experimental only)\n"
<< " ReferenceImage - if provided, computes and prints PSNR "
<< "between the reference image and denoised image\n";
return 1;
}
//Store a noisy image
CImg<unsigned char> image(argv[1]);
float sigma = strtof(argv[3], NULL);
unsigned int channels = 1;
if (argc >= 5 && strcmp(argv[4],"color") == 0) channels = 3;
std::cout << "Sigma = " << sigma << std::endl;
if (channels > 1)
std::cout << "Color denoising: yes" << std::endl;
else
std::cout << "Color denoising: no" << std::endl;
std::vector<unsigned int> sigma2(channels);
sigma2[0] = 25 * 25;
//Convert color image to YCbCr color space
if (channels == 3)
{
image = image.get_channels(0, 2).RGBtoYCbCr();
//Convert the sigma^2 variance to the YCbCr color space
long s = sigma * sigma;
sigma2[0] = ((66l*66l*s + 129l*129l*s + 25l*25l*s) / (256l*256l));
sigma2[1] = ((38l*38l*s + 74l*74l*s + 112l*112l*s) / (256l*256l));
sigma2[2] = ((112l*112l*s + 94l*94l*s + 18l*18l*s) / (256l*256l));
}
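  // Added note: the integer weights above are the fixed-point BT.601 RGB->YCbCr
  // matrix rows scaled by 256. For independent, equal-variance RGB noise, the
  // variance of a channel a*R + b*G + c*B is (a^2 + b^2 + c^2) * sigma^2, which
  // is what each sigma2[] entry computes (divided by 256^2 to undo the scaling).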
std::cout << "Noise variance for individual channels (YCrCb if color): ";
for (unsigned int k = 0; k < sigma2.size(); k++)
std::cout << sigma2[k] << " ";
std::cout << std::endl;
// Check for invalid input
if(! image.data() )
{
std::cerr << "Could not open or find the image" << std::endl;
return 1;
}
std::cout << "Image width: " << image.width() << " height: " << image.height() << std::endl;
//Store a denoised image
CImg<unsigned char> dst_image(image.width(), image.height(), 1, channels, 0);
// Vector of image channels
std::vector<uchar*> d_noisy_image;
std::vector<uchar*> d_denoised_image;
//Numerator and denominator used for aggregation
std::vector<float*> d_numerator;
std::vector<float*> d_denominator;
ushort* d_stacks; //Addresses of similar patches to each reference patch of a batch
uint* d_num_patches_in_stack; //Number of similar patches for each reference patch of a batch that are stored in d_stacks
float* d_gathered_stacks; //3D groups of a batch
float* d_w_P; //Weights for aggregation
float* d_kaiser_window; //Kaiser window used for aggregation
uint2 h_batch_size = make_uint2(256, 128); //h_batch_size.x has to be divisible by properties.warpSize
//Denoising parameters and their shorthands
Params h_hard_params(19, 8, 16, 2500, 3, 2.7f);
const uint k = h_hard_params.k;
const uint N = h_hard_params.N;
const uint p = h_hard_params.p;
//Reserved sizes
const int width = image.width();
const int height = image.height();
size_t image_size = width * height;
d_noisy_image.resize(channels);
d_denoised_image.resize(channels);
d_numerator.resize(channels);
d_denominator.resize(channels);
for(auto & it : d_noisy_image)
cuda_error_check( cudaMalloc((void**)&it, sizeof(uchar) * image_size) );
for(auto & it : d_denoised_image)
cuda_error_check( cudaMalloc((void**)&it, sizeof(uchar) * image_size) );
for(auto & it : d_numerator)
cuda_error_check( cudaMalloc((void**)&it, sizeof(float) * image_size) );
for(auto & it : d_denominator)
cuda_error_check( cudaMalloc((void**)&it, sizeof(float) * image_size) );
cuda_error_check( cudaMalloc((void**)&d_stacks,
sizeof(ushort) * h_batch_size.x * h_batch_size.y * N) );
cuda_error_check( cudaMalloc((void**)&d_num_patches_in_stack,
sizeof(uint) * h_batch_size.x * h_batch_size.y ) );
cuda_error_check( cudaMalloc((void**)&d_gathered_stacks,
sizeof(float) * (N+1) * k * k * h_batch_size.x * h_batch_size.y) );
cuda_error_check( cudaMalloc((void**)&d_w_P,
sizeof(float) * h_batch_size.x * h_batch_size.y) );
cuda_error_check( cudaMalloc((void**)&d_kaiser_window,
sizeof(float) * k * k) );
//image dimensions
const uint2 image_dim = make_uint2(width, height);
//dimensions limiting addresses of reference patches
const uint2 stacks_dim = make_uint2(width - (k - 1), height - (k - 1));
int paramN1 = N + 1; //maximal size of a stack with a reference patch
const uint p_block_width = (warpSize-1) * p + k;
const uint s_image_p_size = p_block_width * k * sizeof(uchar);
const uint shared_mem_available = TOTAL_SLM - s_image_p_size;
//Block-matching shared memory sizes per warp
const uint s_diff_size = p_block_width * sizeof(uint);
const uint s_patches_in_stack_size = warpSize * sizeof(uchar);
const uint s_patch_stacks_size = N * warpSize * sizeof(uint);
const uint num_warps = std::min(shared_mem_available /
(s_diff_size + s_patches_in_stack_size + s_patch_stacks_size), MAX_NUM_WARPS);
uint lmem_size_bm = ((s_diff_size + s_patches_in_stack_size + s_patch_stacks_size) * num_warps) +
s_image_p_size;
//Determine launch parameters for the block match kernel
dim3 num_threads_bm = dim3(warpSize*num_warps, 1);
dim3 num_blocks_bm = dim3(h_batch_size.x / warpSize, h_batch_size.y);
//Determine launch parameters for the get and aggregate kernels
const dim3 num_threads(k, k);
const dim3 num_blocks(h_batch_size.x, h_batch_size.y);
//Determine launch parameters for the DCT kernel
const uint trans_size = k*k*paramN1*h_batch_size.x*h_batch_size.y;
const dim3 num_blocks_tr((trans_size + (KER2_BLOCK_WIDTH*k) - 1) / (KER2_BLOCK_WIDTH*k), 1, 1);
const dim3 num_threads_tr(k, KER2_BLOCK_WIDTH/k, 1);
const uint s_size_t = k*k*(paramN1+1)*sizeof(float); //+1 for avoiding bank conflicts
//Determine launch parameters for final division kernel
const dim3 num_threads_f(64, 4);
const dim3 num_blocks_f((width + 63)/64, (height + 3)/4);
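// Added worked example (assuming the parameters set above: k = 8, N = 16, p = 3,
// warpSize = 32 and TOTAL_SLM = 48KB): p_block_width = 31*3 + 8 = 101, so
// s_image_p_size = 101*8 = 808 B and shared_mem_available = 49152 - 808 = 48344 B.
// Per warp: s_diff = 404 B, s_patches_in_stack = 32 B, s_patch_stacks = 2048 B,
// i.e. 2484 B per warp, so 48344/2484 = 19 warps would fit but MAX_NUM_WARPS caps
// this at 16 (512 threads per block) and lmem_size_bm = 2484*16 + 808 = 40552 B.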
//Create a Kaiser window (only for k = 8, alpha = 2.0) and copy it to the device.
std::vector<float> kaiserWindow(k*k);
if (k == 8) {
// First quarter of the matrix
kaiserWindow[0 + k * 0] = 0.1924f;
kaiserWindow[0 + k * 1] = 0.2989f;
kaiserWindow[0 + k * 2] = 0.3846f;
kaiserWindow[0 + k * 3] = 0.4325f;
kaiserWindow[1 + k * 0] = 0.2989f;
kaiserWindow[1 + k * 1] = 0.4642f;
kaiserWindow[1 + k * 2] = 0.5974f;
kaiserWindow[1 + k * 3] = 0.6717f;
kaiserWindow[2 + k * 0] = 0.3846f;
kaiserWindow[2 + k * 1] = 0.5974f;
kaiserWindow[2 + k * 2] = 0.7688f;
kaiserWindow[2 + k * 3] = 0.8644f;
kaiserWindow[3 + k * 0] = 0.4325f;
kaiserWindow[3 + k * 1] = 0.6717f;
kaiserWindow[3 + k * 2] = 0.8644f;
kaiserWindow[3 + k * 3] = 0.9718f;
// Fill the rest of the matrix by symmetry
for(unsigned i = 0; i < k / 2; i++)
for (unsigned j = k / 2; j < k; j++)
kaiserWindow[i + k * j] = kaiserWindow[i + k * (k - j - 1)];
for (unsigned i = k / 2; i < k; i++)
for (unsigned j = 0; j < k; j++)
kaiserWindow[i + k * j] = kaiserWindow[k - i - 1 + k * j];
}
else
for (unsigned i = 0; i < k * k; i++)
kaiserWindow[i] = 1.0f;
// Copy images to device
for(uint i = 0; i < channels; ++i)
cuda_error_check( cudaMemcpy(d_noisy_image[i],
image.data()+i*image_size,image_size*sizeof(uchar), cudaMemcpyHostToDevice));
cuda_error_check( cudaMemcpy(d_kaiser_window, &kaiserWindow[0],
k*k*sizeof(float), cudaMemcpyHostToDevice));
// start measuring the total time
auto start = std::chrono::high_resolution_clock::now();
// repeat the execution of kernels
for (int n = 0; n < REPEAT; n++) {
for(auto & it : d_numerator)
cuda_error_check( cudaMemset(it, 0, image_size * sizeof(float)) );
for(auto & it : d_denominator)
cuda_error_check( cudaMemset(it, 0, image_size * sizeof(float)) );
//Batch processing: in each iteration only the batch_size reference patches are processed.
uint2 start_point;
for(start_point.y = 0; start_point.y < stacks_dim.y + p - 1;
start_point.y += (h_batch_size.y*p))
{
for(start_point.x = 0; start_point.x < stacks_dim.x + p - 1;
start_point.x += (h_batch_size.x*p))
{
//Finds similar patches for each reference patch of a batch and stores them in d_stacks array
run_block_matching(
d_noisy_image[0], // IN: Image
d_stacks, // OUT: Array of addresses of similar patches
d_num_patches_in_stack,// OUT: Array containing numbers of these addresses
image_dim, // IN: Image dimensions
stacks_dim, // IN: Dimensions limiting addresses of reference patches
h_hard_params, // IN: Denoising parameters
start_point, // IN: Address of the top-left reference patch of a batch
num_threads_bm, // Threads in block
num_blocks_bm, // Blocks in grid
lmem_size_bm // Shared memory size
);
//cuda_error_check( cudaGetLastError() );
//cuda_error_check( cudaDeviceSynchronize() );
for (uint channel = 0; channel < channels; ++channel)
{
//Assembles 3D groups of a batch according to the d_stacks array
run_get_block(
start_point, // IN: First reference patch of a batch
d_noisy_image[channel], // IN: Image
d_stacks, // IN: Array of addresses of similar patches
d_num_patches_in_stack, // IN: Numbers of patches in 3D groups
d_gathered_stacks, // OUT: Assembled 3D groups
image_dim, // IN: Image dimensions
stacks_dim, // IN: Dimensions limiting addresses of reference patches
h_hard_params, // IN: Denoising parameters
num_threads, // Threads in block
num_blocks // Blocks in grid
);
//cuda_error_check( cudaGetLastError() );
//cuda_error_check( cudaDeviceSynchronize() );
//Apply the 2D DCT transform to each layer of 3D group
run_DCT2D8x8(d_gathered_stacks, d_gathered_stacks, trans_size, num_threads_tr, num_blocks_tr);
cuda_error_check( cudaGetLastError() );
cuda_error_check( cudaDeviceSynchronize() );
// 1) 1D Walsh-Hadamard transform of proper size on the 3rd dimension of each
// 3D group of a batch to complete the 3D transform.
// 2) Hard thresholding
// 3) Inverse 1D Walsh-Hadamard transform.
// 4) Compute the weight of each 3D group
run_hard_treshold_block(
start_point, // IN: First reference patch of a batch
d_gathered_stacks, // IN/OUT: 3D groups with transformed patches
d_w_P, // OUT: Weight of each 3D group
d_num_patches_in_stack,// IN: Numbers of patches in 3D groups
stacks_dim, // IN: Dimensions limiting addresses of reference patches
h_hard_params, // IN: Denoising parameters
sigma2[channel], // IN: sigma
num_threads, // Threads in block
num_blocks, // Blocks in grid
s_size_t // Shared memory size
);
//cuda_error_check( cudaGetLastError() );
//cuda_error_check( cudaDeviceSynchronize() );
//Apply inverse 2D DCT transform to each layer of 3D group
run_IDCT2D8x8(d_gathered_stacks, d_gathered_stacks, trans_size, num_threads_tr, num_blocks_tr);
//cuda_error_check( cudaGetLastError() );
//cuda_error_check( cudaDeviceSynchronize() );
//Aggregates filtered patches of all 3D groups of a batch into numerator and denominator buffers
run_aggregate_block(
start_point, // IN: First reference patch of a batch
d_gathered_stacks, // IN: 3D groups with transformed patches
d_w_P, // IN: Numbers of non-zero coefficients after 3D thresholding
d_stacks, // IN: Array of addresses of similar patches
d_kaiser_window, // IN: Kaiser window
d_numerator[channel], // IN/OUT: Numerator aggregation buffer
d_denominator[channel],// IN/OUT: Denominator aggregation buffer
d_num_patches_in_stack,// IN: Numbers of patches in 3D groups
image_dim, // IN: Image dimensions
stacks_dim, // IN: Dimensions limiting addresses of reference patches
h_hard_params, // IN: Denoising parameters
num_threads, // Threads in block
num_blocks // Blocks in grid
);
//cuda_error_check( cudaGetLastError() );
//cuda_error_check( cudaDeviceSynchronize() );
}
}
}
//Divide numerator by denominator and save the result in output image
for (uint channel = 0; channel < channels; ++channel)
{
run_aggregate_final(
d_numerator[channel], // IN: Numerator aggregation buffer
d_denominator[channel], // IN: Denominator aggregation buffer
image_dim, // IN: Image dimensions
d_denoised_image[channel], // OUT: Image estimate
num_threads_f, // Threads in block
num_blocks_f // Blocks in grid
);
//cuda_error_check( cudaGetLastError() );
//cuda_error_check( cudaDeviceSynchronize() );
cuda_error_check( cudaMemcpy(
dst_image.data()+channel*image_size,
d_denoised_image[channel],
image_size*sizeof(uchar),
cudaMemcpyDeviceToHost) );
}
} // REPEAT
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed_seconds = end - start;
double gpuTime = (double)elapsed_seconds.count();
std::cout << "Total time (s):" << gpuTime << std::endl;
if (channels == 3)
dst_image = dst_image.get_channels(0,2).YCbCrtoRGB();
else
dst_image = dst_image.get_channel(0);
//Save denoised image
dst_image.save( argv[2] );
if (argc >= 6) {
CImg<unsigned char> reference_image(argv[5]);
std::cout << "PSNR:" << reference_image.PSNR(dst_image) << std::endl;
}
cuda_error_check( cudaFree(d_stacks) );
cuda_error_check( cudaFree(d_num_patches_in_stack) );
cuda_error_check( cudaFree(d_gathered_stacks) );
cuda_error_check( cudaFree(d_w_P) );
cuda_error_check( cudaFree(d_kaiser_window) );
for (auto & it : d_noisy_image)
cuda_error_check( cudaFree(it) );
d_noisy_image.clear();
for (auto & it : d_denoised_image)
cuda_error_check( cudaFree(it) );
d_denoised_image.clear();
for(auto & it : d_numerator)
cuda_error_check( cudaFree(it) );
d_numerator.clear();
for(auto & it : d_denominator)
cuda_error_check( cudaFree(it) );
d_denominator.clear();
return 0;
}
|
the_stack
|
#include "luaT.h"
#include "THC.h"
#include <lua.h>
#include "THCGeneral.h"
#define CAFFE_CUDA_NUM_THREADS 1024
// CUDA: various checks for different function calls.
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if(error != cudaSuccess) { printf("CUDA ERROR. %s\n", cudaGetErrorString(error)); }; \
} while (0)
// CUDA: number of blocks for threads.
inline int CAFFE_GET_BLOCKS(const int N) {
return (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
}
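// Example (added): for N = 100000 elements this yields (100000 + 1023) / 1024 = 98
// blocks of 1024 threads, enough to cover N together with the grid-stride loop below.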
// CUDA: grid stride looping
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// CUDA: check for error after kernel execution and exit loudly if there is one.
#define CUDA_POST_KERNEL_CHECK CUDA_CHECK(cudaPeekAtLastError())
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
// For each ROI R = [batch_index, x_outer_1, y_outer_1, x_outer_2, y_outer_2, x_inner_1, y_inner_1, x_inner_2, y_inner_2]:
// where R_outer = [x_outer_1, y_outer_1, x_outer_2, y_outer_2] is the outer rectangle of the region and
// R_inner = [x_inner_1, y_inner_1, x_inner_2, y_inner_2] is the inner rectangle of the region
// max pool over R while ignoring (treating as zero) the activations that lie inside the inner rectangle R_inner
bottom_rois += n * 9;
int roi_batch_ind = bottom_rois[0];
// outer rectangle of the region
int roi_start_w = int(bottom_rois[1] );//* spatial_scale);
int roi_start_h = int(bottom_rois[2] );//* spatial_scale);
int roi_end_w = int(bottom_rois[3] );//* spatial_scale);
int roi_end_h = int(bottom_rois[4] );//* spatial_scale);
// inner rectangle of the region
int roi_start_w_in = int(bottom_rois[5]);//* spatial_scale);
int roi_start_h_in = int(bottom_rois[6]);//* spatial_scale);
int roi_end_w_in = int(bottom_rois[7]);//* spatial_scale);
int roi_end_h_in = int(bottom_rois[8]);//* spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width);
const int hstart = min(height, max(0, static_cast<int>(floor(static_cast<Dtype>(ph) * bin_size_h)) + roi_start_h));
const int hend = min(height, max(0, static_cast<int>(ceil( static_cast<Dtype>(ph+1) * bin_size_h)) + roi_start_h));
const int wstart = min(width, max(0, static_cast<int>(floor(static_cast<Dtype>(pw) * bin_size_w)) + roi_start_w));
const int wend = min(width, max(0, static_cast<int>(ceil( static_cast<Dtype>(pw+1) * bin_size_w)) + roi_start_w));
Dtype maxval = 0;
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (!(w > roi_start_w_in && w < roi_end_w_in && h > roi_start_h_in && h < roi_end_h_in)) {
// if it is not inside the inner rectangle of the region
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data, const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 9;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
// outer rectangle of the region
int roi_start_w = int(offset_bottom_rois[1]);// * spatial_scale);
int roi_start_h = int(offset_bottom_rois[2]);// * spatial_scale);
int roi_end_w = int(offset_bottom_rois[3]);// * spatial_scale);
int roi_end_h = int(offset_bottom_rois[4]);// * spatial_scale);
// inner rectangle of the region
int roi_start_w_in= int(offset_bottom_rois[5]);// * spatial_scale);
int roi_start_h_in= int(offset_bottom_rois[6]);// * spatial_scale);
int roi_end_w_in = int(offset_bottom_rois[7]);// * spatial_scale);
int roi_end_h_in = int(offset_bottom_rois[8]);// * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h) &&
!(w > roi_start_w_in && w < roi_end_w_in &&
h > roi_start_h_in && h < roi_end_h_in);
if (!in_roi) {
continue;
}
int top_offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + top_offset;
const int* offset_argmax_data = argmax_data + top_offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
THCState* getCutorchState(lua_State* L)
{
lua_getglobal(L, "cutorch");
lua_getfield(L, -1, "getState");
lua_call(L, 0, 1);
THCState *state = (THCState*) lua_touserdata(L, -1);
lua_pop(L, 2);
return state;
}
static int updateOutput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *rois = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
THCudaIntTensor *argmax = (THCudaIntTensor *)luaT_getfieldcheckudata(L, 1, "argmax", "torch.CudaIntTensor");
int pooled_height_ = luaT_getfieldcheckint(L, 1, "pooled_height");
int pooled_width_ = luaT_getfieldcheckint(L, 1, "pooled_width");
THCudaTensor_resize5d(state, output, THCudaTensor_size(state, rois, 0), THCudaTensor_size(state, rois, 1), THCudaTensor_size(state, input, 1), pooled_height_, pooled_width_);
THCudaIntTensor_resize5d(state, argmax, THCudaTensor_size(state, rois, 0), THCudaTensor_size(state, rois, 1), THCudaTensor_size(state, input, 1), pooled_height_, pooled_width_);
const float* bottom_data = THCudaTensor_data(state, input);
const float* bottom_rois = THCudaTensor_data(state, rois);
float* top_data = THCudaTensor_data(state, output);
int* argmax_data = THCudaIntTensor_data(state, argmax); // int -> float
// TODO: BATCH
// BDHW 1DHW
int count = THCudaTensor_nElement(state, output); // top[0]->count();
int channels_ = THCudaTensor_size(state, input, 1);
int height_ = THCudaTensor_size(state, input, 2);
int width_ = THCudaTensor_size(state, input, 3);
float spatial_scale_ = luaT_getfieldchecknumber(L, 1, "spatial_scale");
CUDA_POST_KERNEL_CHECK;
// NOLINT_NEXT_LINE(whitespace/operators)
ROIPoolForward<float><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_,
pooled_width_, bottom_rois, top_data, argmax_data);
CUDA_POST_KERNEL_CHECK;
return 1;
}
static int updateGradInput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *rois = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaIntTensor *argmax = (THCudaIntTensor *)luaT_getfieldcheckudata(L, 1, "argmax", "torch.CudaIntTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 4, "torch.CudaTensor");
THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_zero(state, gradInput);
const float* bottom_rois = THCudaTensor_data(state, rois);
const float* top_diff = THCudaTensor_data(state, gradOutput);
float* bottom_diff = THCudaTensor_data(state, gradInput);
int* argmax_data = THCudaIntTensor_data(state, argmax);
const int count = THCudaTensor_nElement(state, gradInput);
int channels_ = THCudaTensor_size(state, input, 1);
int height_ = THCudaTensor_size(state, input, 2);
int width_ = THCudaTensor_size(state, input, 3);
int pooled_height_ = luaT_getfieldcheckint(L, 1, "pooled_height");
int pooled_width_ = luaT_getfieldcheckint(L, 1, "pooled_width");
float spatial_scale_ = luaT_getfieldchecknumber(L, 1, "spatial_scale");
int num_rois = THCudaTensor_size(state, rois, 0) * THCudaTensor_size(state, rois, 1); // batchSize x numRoisPerImage
// NOLINT_NEXT_LINE(whitespace/operators)
CUDA_POST_KERNEL_CHECK;
ROIPoolBackward<float><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, argmax_data, num_rois, spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
return 1;
}
static const struct luaL_Reg lua_registrations [] = {
{"updateOutput", updateOutput},
{"updateGradInput", updateGradInput},
{NULL, NULL}
};
LUA_EXTERNC DLL_EXPORT int luaopen_libcucontextlocnet(lua_State *L)
{
lua_newtable(L);
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, lua_registrations, "contextlocnet");
lua_pop(L,1);
return 1;
}
|
the_stack
|
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC cxx11_tensor_device
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_GPU
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
using Eigen::Tensor;
using Eigen::RowMajor;
// Context for evaluation on cpu
struct CPUContext {
CPUContext(const Eigen::Tensor<float, 3>& in1, Eigen::Tensor<float, 3>& in2, Eigen::Tensor<float, 3>& out) : in1_(in1), in2_(in2), out_(out), kernel_1d_(2), kernel_2d_(2,2), kernel_3d_(2,2,2) {
kernel_1d_(0) = 3.14f;
kernel_1d_(1) = 2.7f;
kernel_2d_(0,0) = 3.14f;
kernel_2d_(1,0) = 2.7f;
kernel_2d_(0,1) = 0.2f;
kernel_2d_(1,1) = 7.0f;
kernel_3d_(0,0,0) = 3.14f;
kernel_3d_(0,1,0) = 2.7f;
kernel_3d_(0,0,1) = 0.2f;
kernel_3d_(0,1,1) = 7.0f;
kernel_3d_(1,0,0) = -1.0f;
kernel_3d_(1,1,0) = -0.3f;
kernel_3d_(1,0,1) = -0.7f;
kernel_3d_(1,1,1) = -0.5f;
}
const Eigen::DefaultDevice& device() const { return cpu_device_; }
const Eigen::Tensor<float, 3>& in1() const { return in1_; }
const Eigen::Tensor<float, 3>& in2() const { return in2_; }
Eigen::Tensor<float, 3>& out() { return out_; }
const Eigen::Tensor<float, 1>& kernel1d() const { return kernel_1d_; }
const Eigen::Tensor<float, 2>& kernel2d() const { return kernel_2d_; }
const Eigen::Tensor<float, 3>& kernel3d() const { return kernel_3d_; }
private:
const Eigen::Tensor<float, 3>& in1_;
const Eigen::Tensor<float, 3>& in2_;
Eigen::Tensor<float, 3>& out_;
Eigen::Tensor<float, 1> kernel_1d_;
Eigen::Tensor<float, 2> kernel_2d_;
Eigen::Tensor<float, 3> kernel_3d_;
Eigen::DefaultDevice cpu_device_;
};
// Context for evaluation on GPU
struct GPUContext {
GPUContext(const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in1, Eigen::TensorMap<Eigen::Tensor<float, 3> >& in2, Eigen::TensorMap<Eigen::Tensor<float, 3> >& out) : in1_(in1), in2_(in2), out_(out), gpu_device_(&stream_) {
assert(cudaMalloc((void**)(&kernel_1d_), 2*sizeof(float)) == cudaSuccess);
float kernel_1d_val[] = {3.14f, 2.7f};
assert(cudaMemcpy(kernel_1d_, kernel_1d_val, 2*sizeof(float), cudaMemcpyHostToDevice) == cudaSuccess);
assert(cudaMalloc((void**)(&kernel_2d_), 4*sizeof(float)) == cudaSuccess);
float kernel_2d_val[] = {3.14f, 2.7f, 0.2f, 7.0f};
assert(cudaMemcpy(kernel_2d_, kernel_2d_val, 4*sizeof(float), cudaMemcpyHostToDevice) == cudaSuccess);
assert(cudaMalloc((void**)(&kernel_3d_), 8*sizeof(float)) == cudaSuccess);
float kernel_3d_val[] = {3.14f, -1.0f, 2.7f, -0.3f, 0.2f, -0.7f, 7.0f, -0.5f};
assert(cudaMemcpy(kernel_3d_, kernel_3d_val, 8*sizeof(float), cudaMemcpyHostToDevice) == cudaSuccess);
}
~GPUContext() {
assert(cudaFree(kernel_1d_) == cudaSuccess);
assert(cudaFree(kernel_2d_) == cudaSuccess);
assert(cudaFree(kernel_3d_) == cudaSuccess);
}
const Eigen::GpuDevice& device() const { return gpu_device_; }
const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in1() const { return in1_; }
const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in2() const { return in2_; }
Eigen::TensorMap<Eigen::Tensor<float, 3> >& out() { return out_; }
Eigen::TensorMap<Eigen::Tensor<float, 1> > kernel1d() const { return Eigen::TensorMap<Eigen::Tensor<float, 1> >(kernel_1d_, 2); }
Eigen::TensorMap<Eigen::Tensor<float, 2> > kernel2d() const { return Eigen::TensorMap<Eigen::Tensor<float, 2> >(kernel_2d_, 2, 2); }
Eigen::TensorMap<Eigen::Tensor<float, 3> > kernel3d() const { return Eigen::TensorMap<Eigen::Tensor<float, 3> >(kernel_3d_, 2, 2, 2); }
private:
const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in1_;
const Eigen::TensorMap<Eigen::Tensor<float, 3> >& in2_;
Eigen::TensorMap<Eigen::Tensor<float, 3> >& out_;
float* kernel_1d_;
float* kernel_2d_;
float* kernel_3d_;
Eigen::CudaStreamDevice stream_;
Eigen::GpuDevice gpu_device_;
};
// The actual expression to evaluate
template <typename Context>
void test_contextual_eval(Context* context)
{
context->out().device(context->device()) = context->in1() + context->in2() * 3.14f + context->in1().constant(2.718f);
}
template <typename Context>
void test_forced_contextual_eval(Context* context)
{
context->out().device(context->device()) = (context->in1() + context->in2()).eval() * 3.14f + context->in1().constant(2.718f);
}
template <typename Context>
void test_compound_assignment(Context* context)
{
context->out().device(context->device()) = context->in1().constant(2.718f);
context->out().device(context->device()) += context->in1() + context->in2() * 3.14f;
}
template <typename Context>
void test_contraction(Context* context)
{
Eigen::array<std::pair<int, int>, 2> dims;
dims[0] = std::make_pair(1, 1);
dims[1] = std::make_pair(2, 2);
Eigen::array<int, 2> shape(40, 50*70);
Eigen::DSizes<int, 2> indices(0,0);
Eigen::DSizes<int, 2> sizes(40,40);
context->out().reshape(shape).slice(indices, sizes).device(context->device()) = context->in1().contract(context->in2(), dims);
}
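// Added note: dims {(1,1),(2,2)} contracts dimensions 1 and 2 of in1 with the same
// dimensions of in2, producing a 40x40 result; it is written into the top-left
// 40x40 slice of out viewed as a 40x3500 matrix, which the CPU/GPU checks below
// read back via out(i,j,0).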
template <typename Context>
void test_1d_convolution(Context* context)
{
Eigen::DSizes<int, 3> indices(0,0,0);
Eigen::DSizes<int, 3> sizes(40,49,70);
Eigen::array<int, 1> dims(1);
context->out().slice(indices, sizes).device(context->device()) = context->in1().convolve(context->kernel1d(), dims);
}
template <typename Context>
void test_2d_convolution(Context* context)
{
Eigen::DSizes<int, 3> indices(0,0,0);
Eigen::DSizes<int, 3> sizes(40,49,69);
Eigen::array<int, 2> dims(1,2);
context->out().slice(indices, sizes).device(context->device()) = context->in1().convolve(context->kernel2d(), dims);
}
template <typename Context>
void test_3d_convolution(Context* context)
{
Eigen::DSizes<int, 3> indices(0,0,0);
Eigen::DSizes<int, 3> sizes(39,49,69);
Eigen::array<int, 3> dims(0,1,2);
context->out().slice(indices, sizes).device(context->device()) = context->in1().convolve(context->kernel3d(), dims);
}
void test_cpu() {
Eigen::Tensor<float, 3> in1(40,50,70);
Eigen::Tensor<float, 3> in2(40,50,70);
Eigen::Tensor<float, 3> out(40,50,70);
in1 = in1.random() + in1.constant(10.0f);
in2 = in2.random() + in2.constant(10.0f);
CPUContext context(in1, in2, out);
test_contextual_eval(&context);
for (int i = 0; i < 40; ++i) {
for (int j = 0; j < 50; ++j) {
for (int k = 0; k < 70; ++k) {
VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f + 2.718f);
}
}
}
test_forced_contextual_eval(&context);
for (int i = 0; i < 40; ++i) {
for (int j = 0; j < 50; ++j) {
for (int k = 0; k < 70; ++k) {
VERIFY_IS_APPROX(out(i,j,k), (in1(i,j,k) + in2(i,j,k)) * 3.14f + 2.718f);
}
}
}
test_compound_assignment(&context);
for (int i = 0; i < 40; ++i) {
for (int j = 0; j < 50; ++j) {
for (int k = 0; k < 70; ++k) {
VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f + 2.718f);
}
}
}
test_contraction(&context);
for (int i = 0; i < 40; ++i) {
for (int j = 0; j < 40; ++j) {
const float result = out(i,j,0);
float expected = 0;
for (int k = 0; k < 50; ++k) {
for (int l = 0; l < 70; ++l) {
expected += in1(i, k, l) * in2(j, k, l);
}
}
VERIFY_IS_APPROX(expected, result);
}
}
test_1d_convolution(&context);
for (int i = 0; i < 40; ++i) {
for (int j = 0; j < 49; ++j) {
for (int k = 0; k < 70; ++k) {
VERIFY_IS_APPROX(out(i,j,k), (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f));
}
}
}
test_2d_convolution(&context);
for (int i = 0; i < 40; ++i) {
for (int j = 0; j < 49; ++j) {
for (int k = 0; k < 69; ++k) {
const float result = out(i,j,k);
const float expected = (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f) +
(in1(i,j,k+1) * 0.2f + in1(i,j+1,k+1) * 7.0f);
if (fabs(expected) < 1e-4f && fabs(result) < 1e-4f) {
continue;
}
VERIFY_IS_APPROX(expected, result);
}
}
}
test_3d_convolution(&context);
for (int i = 0; i < 39; ++i) {
for (int j = 0; j < 49; ++j) {
for (int k = 0; k < 69; ++k) {
const float result = out(i,j,k);
const float expected = (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f +
in1(i,j,k+1) * 0.2f + in1(i,j+1,k+1) * 7.0f) +
(in1(i+1,j,k) * -1.0f + in1(i+1,j+1,k) * -0.3f +
in1(i+1,j,k+1) * -0.7f + in1(i+1,j+1,k+1) * -0.5f);
if (fabs(expected) < 1e-4f && fabs(result) < 1e-4f) {
continue;
}
VERIFY_IS_APPROX(expected, result);
}
}
}
}
void test_gpu() {
Eigen::Tensor<float, 3> in1(40,50,70);
Eigen::Tensor<float, 3> in2(40,50,70);
Eigen::Tensor<float, 3> out(40,50,70);
in1 = in1.random() + in1.constant(10.0f);
in2 = in2.random() + in2.constant(10.0f);
std::size_t in1_bytes = in1.size() * sizeof(float);
std::size_t in2_bytes = in2.size() * sizeof(float);
std::size_t out_bytes = out.size() * sizeof(float);
float* d_in1;
float* d_in2;
float* d_out;
cudaMalloc((void**)(&d_in1), in1_bytes);
cudaMalloc((void**)(&d_in2), in2_bytes);
cudaMalloc((void**)(&d_out), out_bytes);
cudaMemcpy(d_in1, in1.data(), in1_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_in2, in2.data(), in2_bytes, cudaMemcpyHostToDevice);
Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in1(d_in1, 40,50,70);
Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in2(d_in2, 40,50,70);
Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_out(d_out, 40,50,70);
GPUContext context(gpu_in1, gpu_in2, gpu_out);
test_contextual_eval(&context);
assert(cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost) == cudaSuccess);
for (int i = 0; i < 40; ++i) {
for (int j = 0; j < 50; ++j) {
for (int k = 0; k < 70; ++k) {
VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f + 2.718f);
}
}
}
test_forced_contextual_eval(&context);
assert(cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost) == cudaSuccess);
for (int i = 0; i < 40; ++i) {
for (int j = 0; j < 50; ++j) {
for (int k = 0; k < 70; ++k) {
VERIFY_IS_APPROX(out(i,j,k), (in1(i,j,k) + in2(i,j,k)) * 3.14f + 2.718f);
}
}
}
test_compound_assignment(&context);
assert(cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost) == cudaSuccess);
for (int i = 0; i < 40; ++i) {
for (int j = 0; j < 50; ++j) {
for (int k = 0; k < 70; ++k) {
VERIFY_IS_APPROX(out(i,j,k), in1(i,j,k) + in2(i,j,k) * 3.14f + 2.718f);
}
}
}
test_contraction(&context);
assert(cudaMemcpy(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost) == cudaSuccess);
for (int i = 0; i < 40; ++i) {
for (int j = 0; j < 40; ++j) {
const float result = out(i,j,0);
float expected = 0;
for (int k = 0; k < 50; ++k) {
for (int l = 0; l < 70; ++l) {
expected += in1(i, k, l) * in2(j, k, l);
}
}
VERIFY_IS_APPROX(expected, result);
}
}
test_1d_convolution(&context);
assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, context.device().stream()) == cudaSuccess);
assert(cudaStreamSynchronize(context.device().stream()) == cudaSuccess);
for (int i = 0; i < 40; ++i) {
for (int j = 0; j < 49; ++j) {
for (int k = 0; k < 70; ++k) {
VERIFY_IS_APPROX(out(i,j,k), (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f));
}
}
}
test_2d_convolution(&context);
assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, context.device().stream()) == cudaSuccess);
assert(cudaStreamSynchronize(context.device().stream()) == cudaSuccess);
for (int i = 0; i < 40; ++i) {
for (int j = 0; j < 49; ++j) {
for (int k = 0; k < 69; ++k) {
const float result = out(i,j,k);
const float expected = (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f +
in1(i,j,k+1) * 0.2f + in1(i,j+1,k+1) * 7.0f);
VERIFY_IS_APPROX(expected, result);
}
}
}
test_3d_convolution(&context);
assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, context.device().stream()) == cudaSuccess);
assert(cudaStreamSynchronize(context.device().stream()) == cudaSuccess);
for (int i = 0; i < 39; ++i) {
for (int j = 0; j < 49; ++j) {
for (int k = 0; k < 69; ++k) {
const float result = out(i,j,k);
const float expected = (in1(i,j,k) * 3.14f + in1(i,j+1,k) * 2.7f +
in1(i,j,k+1) * 0.2f + in1(i,j+1,k+1) * 7.0f +
in1(i+1,j,k) * -1.0f + in1(i+1,j+1,k) * -0.3f +
in1(i+1,j,k+1) * -0.7f + in1(i+1,j+1,k+1) * -0.5f);
VERIFY_IS_APPROX(expected, result);
}
}
}
}
void test_cxx11_tensor_device()
{
CALL_SUBTEST_1(test_cpu());
CALL_SUBTEST_2(test_gpu());
}
|
the_stack
|
__device__
float4 firstEigenVector( float* matrix )
{
// 8 iterations seems to be more than enough.
float4 v = make_float4(1.0f, 1.0f, 1.0f, 0.0f);
#pragma unroll
for(int i = 0; i < 8; i++) {
float x = v.x * matrix[0] + v.y * matrix[1] + v.z * matrix[2];
float y = v.x * matrix[1] + v.y * matrix[3] + v.z * matrix[4];
float z = v.x * matrix[2] + v.y * matrix[4] + v.z * matrix[5];
float m = fmaxf(fmaxf(x, y), z);
float iv = 1.0f / m;
v.x = x * iv;
v.y = y * iv;
v.z = z * iv;
}
return v;
}
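// Added note: `matrix` holds the upper triangle of a symmetric 3x3 covariance
// matrix as {xx, xy, xz, yy, yz, zz}; the loop above is plain power iteration,
// renormalizing by the largest component each step, so it converges to (a scale
// of) the dominant eigenvector -- the best-fit line direction of the 16 colors.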
__device__
void colorSums( const float4 * colors, float4 * sums)
{
const int idx = threadIdx.x;
sums[idx] = colors[idx];
sums[idx] += sums[idx^8];
sums[idx] += sums[idx^4];
sums[idx] += sums[idx^2];
sums[idx] += sums[idx^1];
}
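// Added note: the four XOR-butterfly steps above form an in-warp reduction; after
// them every one of the 16 lanes holds the sum of all 16 colors. This relies on
// the 16 threads executing in lock-step within a single warp (no explicit sync).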
__device__
float4 bestFitLine( const float4 * colors, float4 color_sum, float* covariance)
{
// Compute covariance matrix of the given colors.
const int idx = threadIdx.x;
float4 diff = colors[idx] - color_sum * make_float4(0.0625f, 0.0625f, 0.0625f, 0.0625f); // * 1.0f / 16.0f
covariance[6 * idx + 0] = diff.x * diff.x; // 0, 6, 12, 2, 8, 14, 4, 10, 0
covariance[6 * idx + 1] = diff.x * diff.y;
covariance[6 * idx + 2] = diff.x * diff.z;
covariance[6 * idx + 3] = diff.y * diff.y;
covariance[6 * idx + 4] = diff.y * diff.z;
covariance[6 * idx + 5] = diff.z * diff.z;
#pragma unroll
for(int d = 8; d > 0; d >>= 1)
{
if (idx < d)
{
covariance[6 * idx + 0] += covariance[6 * (idx+d) + 0];
covariance[6 * idx + 1] += covariance[6 * (idx+d) + 1];
covariance[6 * idx + 2] += covariance[6 * (idx+d) + 2];
covariance[6 * idx + 3] += covariance[6 * (idx+d) + 3];
covariance[6 * idx + 4] += covariance[6 * (idx+d) + 4];
covariance[6 * idx + 5] += covariance[6 * (idx+d) + 5];
}
}
// Compute first eigen vector.
return firstEigenVector(covariance);
}
// ////////////////////////////////////////////////////////////////////////////////
// // Sort colors
// ////////////////////////////////////////////////////////////////////////////////
__device__
void sortColors( const float * values, int * ranks)
{
const int tid = threadIdx.x;
int rank = 0;
#pragma unroll
for (int i = 0; i < 16; i++)
{
rank += (values[i] < values[tid]);
}
ranks[tid] = rank;
// Resolve elements with the same index.
#pragma unroll
for (int i = 0; i < 15; i++)
{
if (tid > i && ranks[tid] == ranks[i]) ++ranks[tid];
}
}
////////////////////////////////////////////////////////////////////////////////
// Load color block to shared mem
////////////////////////////////////////////////////////////////////////////////
__device__
void loadColorBlock( const uint * image, float4 * colors, float4 * sums, int * xrefs, float* temp, int groupOffset)
{
const int bid = blockIdx.x + groupOffset;
const int idx = threadIdx.x;
float4 tmp;
if (idx < 16)
{
// Read color and copy to shared mem.
uint c = image[(bid) * 16 + idx];
colors[idx].x = ((c >> 0) & 0xFF) * 0.003921568627f; // * (1.0f / 255.0f);
colors[idx].y = ((c >> 8) & 0xFF) * 0.003921568627f; // * (1.0f / 255.0f);
colors[idx].z = ((c >> 16) & 0xFF) * 0.003921568627f; //* (1.0f / 255.0f);
// No need to synchronize, 16 < warp size.
// Sort colors along the best fit line.
colorSums(colors, sums);
float4 axis = bestFitLine(colors, sums[idx], temp);
temp[idx] = colors[idx].x * axis.x + colors[idx].y * axis.y + colors[idx].z * axis.z;
sortColors(temp, xrefs);
tmp = colors[idx];
colors[xrefs[idx]] = tmp;
}
}
// ////////////////////////////////////////////////////////////////////////////////
// // Round color to RGB565 and expand
// ////////////////////////////////////////////////////////////////////////////////
__device__
float4 roundAndExpand(float4 v, ushort * w)
{
ushort x = rint(__saturatef(v.x) * 31.0f);
ushort y = rint(__saturatef(v.y) * 63.0f);
ushort z = rint(__saturatef(v.z) * 31.0f);
*w = ((x << 11) | (y << 5) | z);
v.x = x * 0.03227752766457f; // approximate integer bit expansion.
v.y = y * 0.01583151765563f;
v.z = z * 0.03227752766457f;
return v;
}
////////////////////////////////////////////////////////////////////////////////
// Evaluate permutations
////////////////////////////////////////////////////////////////////////////////
__device__
float evalPermutation( const float4* colors, uint permutation, ushort* start, ushort* end, float4 color_sum,
float* alphaTable4, int* prods4, float weight)
{
float4 alphax_sum = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
int akku = 0;
// Compute alpha & beta for this permutation.
#pragma unroll
for (int i = 0; i < 16; i++)
{
const uint bits = permutation >> (2*i);
alphax_sum += alphaTable4[bits & 3] * colors[i];
akku += prods4[bits & 3];
}
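    // Note: prods4 (and prods3 in the 3-color variant below) pack the per-weight
    // products alpha^2, beta^2 and alpha*beta into separate bit fields
    // (bits 16+, 15..8, 7..0) of a single int, so the one integer addition above
    // accumulates all three sums at once; they are unpacked with the shifts and
    // masks that follow.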
float alpha2_sum = (akku >> 16);
float beta2_sum = ((akku >> 8) & 0xff);
float alphabeta_sum = ((akku >> 0) & 0xff);
float4 betax_sum = weight * color_sum - alphax_sum;
//// Compute endpoints using least squares.
// alpha2, beta2, alphabeta and factor could be precomputed for each permutation, but it's faster to recompute them.
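    // For reference: a and b below are the closed-form least-squares solution of
    // the 2x2 normal equations
    //   [ alpha2_sum     alphabeta_sum ] [a]   [ alphax_sum ]
    //   [ alphabeta_sum  beta2_sum     ] [b] = [ betax_sum  ]
    // obtained via Cramer's rule, with factor = 1 / det.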
const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum);
float4 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor;
float4 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor;
// Round a, b to the closest 5-6-5 color and expand...
a = roundAndExpand(a, start);
b = roundAndExpand(b, end);
// compute the error
float4 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum);
return (1.0f/weight) * (e.x + e.y + e.z);
}
__device__
float evalPermutation3(const float4 * colors, uint permutation, ushort * start, ushort * end, float4 color_sum,
float* alphaTable3, int* prods3)
{
float4 alphax_sum = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
int akku = 0;
// Compute alpha & beta for this permutation.
#pragma unroll
for (int i = 0; i < 16; i++)
{
const uint bits = permutation >> (2*i);
alphax_sum += alphaTable3[bits & 3] * colors[i];
akku += prods3[bits & 3];
}
float alpha2_sum = (akku >> 16);
float beta2_sum = ((akku >> 8) & 0xff);
float alphabeta_sum = ((akku >> 0) & 0xff);
float4 betax_sum = 4.0f * color_sum - alphax_sum;
const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum);
float4 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor;
float4 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor;
// Round a, b to the closest 5-6-5 color and expand...
a = roundAndExpand(a, start);
b = roundAndExpand(b, end);
// compute the error
float4 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum);
return (0.25f) * (e.x + e.y + e.z);
}
__device__
uint4 evalAllPermutations(const float4 * colors, const unsigned int * permutations,
float *errors, float4 color_sum, uint * s_permutations,
float* alphaTable4, int* prods4,
float* alphaTable3, int* prods3)
{
const int idx = threadIdx.x;
uint bestStart;
uint bestEnd;
uint bestPermutation;
uint temp;
float bestError = FLT_MAX;
#pragma unroll
for(int i = 0; i < 16; i++)
{
int pidx = idx + NUM_THREADS * i;
if (pidx >= 992) break;
ushort start, end;
uint permutation = permutations[pidx];
if (pidx < 160) s_permutations[pidx] = permutation;
float error = evalPermutation(colors, permutation, &start, &end, color_sum, alphaTable4, prods4, 9.0f);
if (error < bestError)
{
bestError = error;
bestPermutation = permutation;
bestStart = start;
bestEnd = end;
}
}
if (bestStart < bestEnd)
{
temp = bestEnd;
bestEnd = bestStart;
bestStart = temp;
bestPermutation ^= 0x55555555; // Flip indices.
}
#pragma unroll
for(int i = 0; i < 3; i++)
{
int pidx = idx + NUM_THREADS * i;
if (pidx >= 160) break;
ushort start, end;
uint permutation = s_permutations[pidx];
float error = evalPermutation(colors, permutation, &start, &end, color_sum, alphaTable3, prods3, 4.0f);
if (error < bestError)
{
bestError = error;
bestPermutation = permutation;
bestStart = start;
bestEnd = end;
if (bestStart > bestEnd)
{
temp = bestEnd;
bestEnd = bestStart;
bestStart = temp;
bestPermutation ^= (~bestPermutation >> 1) & 0x55555555; // Flip indices.
}
}
}
errors[idx] = bestError;
uint4 result = make_uint4(bestStart, bestEnd, bestPermutation, 0);
return result;
}
////////////////////////////////////////////////////////////////////////////////
// Find index with minimum error
////////////////////////////////////////////////////////////////////////////////
__device__
int findMinError( float * errors, int * indices)
{
const int idx = threadIdx.x;
indices[idx] = idx;
#pragma unroll
for(int d = NUM_THREADS/2; d > 32; d >>= 1)
{
__syncthreads();
if (idx < d)
{
float err0 = errors[idx];
float err1 = errors[idx + d];
if (err1 < err0) {
errors[idx] = err1;
indices[idx] = indices[idx + d];
}
}
}
__syncthreads();
// unroll last 6 iterations
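    // Caution: this warp-synchronous unrolling relies on the 32 threads of a warp
    // executing in lockstep. On architectures with independent thread scheduling
    // (Volta and newer) the shared-memory accesses below would additionally need
    // volatile qualifiers or explicit __syncwarp() calls to remain correct.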
if (idx < 32)
{
if (errors[idx + 32] < errors[idx]) {
errors[idx] = errors[idx + 32];
indices[idx] = indices[idx + 32];
}
if (errors[idx + 16] < errors[idx]) {
errors[idx] = errors[idx + 16];
indices[idx] = indices[idx + 16];
}
if (errors[idx + 8] < errors[idx]) {
errors[idx] = errors[idx + 8];
indices[idx] = indices[idx + 8];
}
if (errors[idx + 4] < errors[idx]) {
errors[idx] = errors[idx + 4];
indices[idx] = indices[idx + 4];
}
if (errors[idx + 2] < errors[idx]) {
errors[idx] = errors[idx + 2];
indices[idx] = indices[idx + 2];
}
if (errors[idx + 1] < errors[idx]) {
errors[idx] = errors[idx + 1];
indices[idx] = indices[idx + 1];
}
}
__syncthreads();
return indices[0];
}
////////////////////////////////////////////////////////////////////////////////
// Save DXT block
////////////////////////////////////////////////////////////////////////////////
__device__
void saveBlockDXT1(uint start, uint end, uint permutation, int* xrefs, uint2 * result, int groupOffset)
{
const int bid = blockIdx.x + groupOffset;
if (start == end)
{
permutation = 0;
}
// Reorder permutation.
uint indices = 0;
#pragma unroll
for(int i = 0; i < 16; i++)
{
int ref = xrefs[i];
indices |= ((permutation >> (2 * ref)) & 3) << (2 * i);
}
// Write endpoints.
result[bid].x = (end << 16) | start;
// Write palette indices.
result[bid].y = indices;
}
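// For reference, each output uint2 above is one 8-byte DXT1/BC1 block:
//   result[bid].x : the two RGB565 endpoints, 'end' in the high 16 bits and
//                   'start' in the low 16 bits;
//   result[bid].y : sixteen 2-bit palette indices, one per texel, reordered back
//                   to the original texel positions via xrefs.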
#include "caffe/layers/domain_transform_layer.hpp"
#include "caffe/layer.hpp"
#include "caffe/common.cuh"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void kernel_horizontal_filter_left_to_right_forward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, Dtype* intermediate_res, Dtype* output) {
// One thread per row.
CUDA_KERNEL_LOOP(ind, channels * input_height) {
int h = ind % input_height;
int c = ind / input_height;
for (int w = 1; w < input_width; ++w) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
intermediate_res[ind_out] = output[ind_out - 1] - output[ind_out];
output[ind_out] += weight[ind_wei] * intermediate_res[ind_out];
}
}
}
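// Illustrative host-side sketch (not used by the layer): the recurrence applied by
// the kernel above to one row, stripped of the blob indexing. Each sample blends
// the previously filtered sample with the current one using the per-pixel weight:
//   y[w] = y[w] + a[w] * (y[w-1] - y[w]) = (1 - a[w]) * y[w] + a[w] * y[w-1]
// The difference (y[w-1] - y[w]) is kept in intermediate_res so the backward pass
// can reuse it when accumulating the weight gradient.
template <typename Dtype>
void domain_transform_row_reference(const Dtype* weight, Dtype* row, int width) {
  for (int w = 1; w < width; ++w) {
    Dtype diff = row[w - 1] - row[w];  // same quantity as intermediate_res
    row[w] += weight[w] * diff;        // y[w] <- (1 - a) * y[w] + a * y[w-1]
  }
}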
template <typename Dtype>
__global__ void kernel_horizontal_filter_left_to_right_backward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, const Dtype* intermediate_res,
Dtype* output, Dtype* weight_diff) {
// One thread per row.
CUDA_KERNEL_LOOP(ind, channels * input_height) {
int h = ind % input_height;
int c = ind / input_height;
for (int w = input_width - 1; w >= 1; --w) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
atomicAdd(&weight_diff[ind_wei],
output[ind_out] * intermediate_res[ind_out]);
output[ind_out - 1] += weight[ind_wei] * output[ind_out];
output[ind_out] *= 1 - weight[ind_wei];
}
}
}
template <typename Dtype>
__global__ void kernel_horizontal_filter_right_to_left_forward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, Dtype* intermediate_res, Dtype* output) {
// One thread per row.
CUDA_KERNEL_LOOP(ind, channels * input_height) {
int h = ind % input_height;
int c = ind / input_height;
for (int w = input_width - 2; w >= 0; --w) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
intermediate_res[ind_out] = output[ind_out + 1] - output[ind_out];
output[ind_out] += weight[ind_wei + 1] * intermediate_res[ind_out];
}
}
}
template <typename Dtype>
__global__ void kernel_horizontal_filter_right_to_left_backward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, const Dtype* intermediate_res,
Dtype* output, Dtype* weight_diff) {
// One thread per row.
CUDA_KERNEL_LOOP(ind, channels * input_height) {
int h = ind % input_height;
int c = ind / input_height;
for (int w = 0; w < input_width - 1; ++w) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
atomicAdd(&weight_diff[ind_wei + 1],
output[ind_out] * intermediate_res[ind_out]);
output[ind_out + 1] += weight[ind_wei + 1] * output[ind_out];
output[ind_out] *= 1 - weight[ind_wei + 1];
}
}
}
template <typename Dtype>
__global__ void kernel_vertical_filter_top_to_bottom_forward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, Dtype* intermediate_res, Dtype* output) {
// One thread per column.
CUDA_KERNEL_LOOP(ind, channels * input_width) {
int w = ind % input_width;
int c = ind / input_width;
for (int h = 1; h < input_height; ++h) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
intermediate_res[ind_out] = output[ind_out - width] - output[ind_out];
output[ind_out] += weight[ind_wei] * intermediate_res[ind_out];
}
}
}
template <typename Dtype>
__global__ void kernel_vertical_filter_top_to_bottom_backward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, const Dtype* intermediate_res,
Dtype* output, Dtype* weight_diff) {
// One thread per column.
CUDA_KERNEL_LOOP(ind, channels * input_width) {
int w = ind % input_width;
int c = ind / input_width;
for (int h = input_height - 1; h >= 1; --h) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
atomicAdd(&weight_diff[ind_wei],
output[ind_out] * intermediate_res[ind_out]);
output[ind_out - width] += weight[ind_wei] * output[ind_out];
output[ind_out] = (1 - weight[ind_wei]) * output[ind_out];
}
}
}
template <typename Dtype>
__global__ void kernel_vertical_filter_bottom_to_top_forward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, Dtype* intermediate_res, Dtype* output) {
// One thread per column.
CUDA_KERNEL_LOOP(ind, channels * input_width) {
int w = ind % input_width;
int c = ind / input_width;
for (int h = input_height - 2; h >= 0; --h) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
intermediate_res[ind_out] = output[ind_out + width] - output[ind_out];
output[ind_out] += weight[ind_wei + width] * intermediate_res[ind_out];
}
}
}
template <typename Dtype>
__global__ void kernel_vertical_filter_bottom_to_top_backward(
const int channels, const int height, const int width,
const int input_height, const int input_width,
const Dtype* weight, const Dtype* intermediate_res,
Dtype* output, Dtype* weight_diff) {
// One thread per column.
CUDA_KERNEL_LOOP(ind, channels * input_width) {
int w = ind % input_width;
int c = ind / input_width;
for (int h = 0; h < input_height - 1; ++h) {
int ind_out = (c * height + h) * width + w;
int ind_wei = h * width + w;
atomicAdd(&weight_diff[ind_wei + width],
output[ind_out] * intermediate_res[ind_out]);
output[ind_out + width] += weight[ind_wei + width] * output[ind_out];
output[ind_out] *= 1 - weight[ind_wei + width];
}
}
}
template <typename Dtype>
__global__ void kernel_setup_weight_image(
const int count, const int input_width, const int width,
const Dtype sigma_i, const Dtype spatial_sigma, const Dtype range_sigma,
const Dtype min_weight, const Dtype* data, Dtype* weight) {
// Division by zero has been checked in LayerSetUp.
Dtype mult1 = -sqrt(2.) / sigma_i;
Dtype mult2 = spatial_sigma / range_sigma;
CUDA_KERNEL_LOOP(index, count) {
int h = index / input_width;
int w = index % input_width;
int pos = h * width + w;
// weight must be [min_weight_, 1]
weight[pos] = min(max(exp(mult1 * (1 + data[pos] * mult2)), min_weight), Dtype(1));
}
}
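// For reference, the kernel above computes the recursive-filter feedback weight of
// the domain transform,
//   w = exp(-sqrt(2) / sigma_i * (1 + |grad| * spatial_sigma / range_sigma)),
// clamped to [min_weight, 1], where |grad| is the reference gradient magnitude
// passed in through `data`.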
template <typename Dtype>
__global__ void kernel_compute_ref_grad_diff(
const int count, const int input_width, const int width,
const Dtype sigma_i, const Dtype spatial_sigma, const Dtype range_sigma,
const Dtype* weight, const Dtype* weight_diff, Dtype* ref_grad_diff) {
// Division by zero has been checked in LayerSetUp.
Dtype mult1 = -sqrt(2.) / sigma_i;
Dtype mult2 = spatial_sigma / range_sigma;
CUDA_KERNEL_LOOP(index, count) {
int h = index / input_width;
int w = index % input_width;
int pos = h * width + w;
ref_grad_diff[pos] += (mult1 * mult2 * weight_diff[pos] * weight[pos]);
}
}
template <typename Dtype>
void DomainTransformLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int spatial_dim = height_ * width_;
const int sample_dim = channels_ * spatial_dim;
Dtype* weight = weight_image_.mutable_gpu_data();
for (int n = 0; n < num_; ++n) {
const Dtype* feat_data = bottom[0]->gpu_data_at(n);
Dtype* top_data = top[0]->mutable_gpu_data_at(n);
caffe_copy<Dtype>(sample_dim, feat_data, top_data);
const Dtype* ref_grad_data = bottom[1]->gpu_data_at(n);
const int input_height = static_cast<int>(bottom[2]->cpu_data_at(n)[0]);
const int input_width = static_cast<int>(bottom[2]->cpu_data_at(n)[1]);
const int input_spatial_dim = input_height * input_width;
CHECK_LE(input_height, height_) <<
"input_height should be less than or equal to height.";
CHECK_LE(input_width, width_) <<
"input_width should be less than or equal to width.";
for (int iter = 0; iter < num_iter_; ++iter) {
Dtype sigma_i = ComputeSigma(iter);
kernel_setup_weight_image<Dtype><<<CAFFE_GET_BLOCKS(
input_spatial_dim), CAFFE_CUDA_NUM_THREADS>>>(
input_spatial_dim, input_width, width_,
sigma_i, spatial_sigma_, range_sigma_, min_weight_,
ref_grad_data, weight);
/* TODO(gpapan): This CUDA implementation is inefficient, because there
* are dependencies within each row or col, so you can only use height
* or width threads. You can improve this by doing all channels in
* parallel and also being more careful with your <<< . >>> arguments.
* You can further significantly improve speed by using BLAS *axpby()
* routines. Right now caffe_gpu_axpby is not sufficient because it
* assumes strides = 1, but you need to use the full BLAS interface
* that allows strides > 1.
* Overload caffe_gpu_axpby(), also supplying a version that accepts
* a stride parameter. Use this to significantly improve speed. Also
* adding this functionality to caffe_cpu_axpby() would further allow
* you to have almost identical cpu / gpu implementations.
*/
// Filter the input four times in the following (forward) orders:
// (0) left->right (1) right->left (2) top->bottom (3) bottom->top.
for (int pass = 0; pass < num_passes_; ++pass) {
int ind = iter * num_passes_ + pass;
Dtype* intermediate_res =
intermediate_results_[ind]->mutable_gpu_data_at(n);
switch (pass) {
case 0:
kernel_horizontal_filter_left_to_right_forward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_height),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, top_data);
break;
case 1:
kernel_horizontal_filter_right_to_left_forward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_height),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, top_data);
break;
case 2:
kernel_vertical_filter_top_to_bottom_forward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_width),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, top_data);
break;
case 3:
kernel_vertical_filter_bottom_to_top_forward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_width),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, top_data);
break;
}
}
}
}
}
template <typename Dtype>
void DomainTransformLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[2]) {
LOG(FATAL) << this->type()
<< " Layer cannot back-propagate to image dimension.";
}
if (propagate_down[0] || propagate_down[1]) {
const int spatial_dim = height_ * width_;
const int sample_dim = channels_ * spatial_dim;
// weight_diff is a temporary buffer shared for all samples.
Dtype* weight_diff = blob_weight_diff_.mutable_gpu_diff();
Dtype* weight = weight_image_.mutable_gpu_data();
for (int n = 0; n < num_; ++n) {
const Dtype* top_diff = top[0]->gpu_diff_at(n);
Dtype* bottom_input_diff = bottom[0]->mutable_gpu_diff_at(n);
Dtype* bottom_ref_grad_diff = bottom[1]->mutable_gpu_diff_at(n);
caffe_copy<Dtype>(sample_dim, top_diff, bottom_input_diff);
caffe_gpu_set<Dtype>(spatial_dim, Dtype(0), bottom_ref_grad_diff);
const Dtype* ref_grad_data = bottom[1]->gpu_data_at(n);
const int input_height = static_cast<int>(bottom[2]->cpu_data_at(n)[0]);
const int input_width = static_cast<int>(bottom[2]->cpu_data_at(n)[1]);
CHECK_LE(input_height, height_) <<
"input_height should be less than or equal to height.";
CHECK_LE(input_width, width_) <<
"input_width should be less than or equal to width.";
const int input_spatial_dim = input_height * input_width;
for (int iter = num_iter_ - 1; iter >= 0; --iter) {
Dtype sigma_i = ComputeSigma(iter);
kernel_setup_weight_image<Dtype><<<CAFFE_GET_BLOCKS(
input_spatial_dim), CAFFE_CUDA_NUM_THREADS>>>(
input_spatial_dim, input_width, width_,
sigma_i, spatial_sigma_, range_sigma_, min_weight_,
ref_grad_data, weight);
caffe_gpu_set<Dtype>(spatial_dim, Dtype(0), weight_diff);
/* TODO(gpapan): This CUDA implementation is inefficient, because there
* are dependencies within each row or col, so you can only use height
* or width threads. You can improve this by doing all channels in
* parallel and also being more careful with your <<< . >>> arguments.
* You can further significantly improve speed by using BLAS *axpby()
* routines. Right now caffe_gpu_axpby is not sufficient because it
* assumes strides = 1, but you need to use the full BLAS interface
* that allows strides > 1.
* Overload caffe_gpu_axpby(), also supplying a version that accepts
* a stride parameter. Use this to significantly improve speed. Also
* adding this functionality to caffe_cpu_axpby() would further allow
* you to have almost identical cpu / gpu implementations.
*/
// Filter the input four times in the following (backward) orders:
// (3) bottom->top (2) top->bottom (1) right->left (0) left->right.
for (int pass = num_passes_ - 1; pass >= 0; --pass) {
int ind = iter * num_passes_ + pass;
Dtype* intermediate_res =
intermediate_results_[ind]->mutable_gpu_data_at(n);
switch (pass) {
case 0:
kernel_horizontal_filter_left_to_right_backward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_height),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, bottom_input_diff, weight_diff);
break;
case 1:
kernel_horizontal_filter_right_to_left_backward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_height),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, bottom_input_diff, weight_diff);
break;
case 2:
kernel_vertical_filter_top_to_bottom_backward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_width),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, bottom_input_diff, weight_diff);
break;
case 3:
kernel_vertical_filter_bottom_to_top_backward<Dtype><<<
CAFFE_GET_BLOCKS(channels_ * input_width),
CAFFE_CUDA_NUM_THREADS>>>(
channels_, height_, width_,
input_height, input_width,
weight, intermediate_res, bottom_input_diff, weight_diff);
break;
}
}
kernel_compute_ref_grad_diff<Dtype><<<
CAFFE_GET_BLOCKS(input_spatial_dim), CAFFE_CUDA_NUM_THREADS>>>(
input_spatial_dim, input_width, width_,
sigma_i, spatial_sigma_, range_sigma_,
weight, weight_diff, bottom_ref_grad_diff);
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DomainTransformLayer);
} // namespace caffe
/*!
 * Copyright (c) 2017 by Contributors
* \file quantized_conv.cu
* \brief
* \author Ziheng Jiang, Jun Wu
*/
#include "../nn/convolution-inl.h"
#include "./quantization_utils.h"
#include "../tensor/matrix_op-inl.h"
namespace mxnet {
namespace op {
// value + bias_value * (range1 / limit_range1) * (limit_range2 / range2)
struct QuantizedBiasAddKernel {
MSHADOW_XINLINE static void Map(int i, size_t bias_size, int32_t *out,
const int8_t *bias, const float *min_out,
const float *max_out, const float *min_bias,
const float *max_bias, const size_t spatial_size) {
using mshadow::red::limits::MinValue;
using mshadow::red::limits::MaxValue;
float float_for_one_out_quant =
MaxAbs(*min_out, *max_out) / static_cast<double>(MaxValue<int32_t>());
float float_for_one_bias_quant =
MaxAbs(*min_bias, *max_bias) / static_cast<double>(MaxValue<int8_t>());
const size_t channel_id = (i / spatial_size) % bias_size;
out[i] = (out[i] * float_for_one_out_quant +
bias[channel_id] * float_for_one_bias_quant) /
float_for_one_out_quant;
}
};
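// Illustrative scalar view of the bias add above (not a separate kernel): the int32
// convolution output and the int8 bias are first mapped to real values via their
// per-tensor scales, and the sum is then mapped back to the int32 output scale:
//   scale_out  = MaxAbs(min_out,  max_out)  / MaxValue<int32_t>()
//   scale_bias = MaxAbs(min_bias, max_bias) / MaxValue<int8_t>()
//   out[i]     = (out[i] * scale_out + bias[c] * scale_bias) / scale_out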
#if MXNET_USE_CUDNN == 1 && CUDA_VERSION >= 8000
STATIC_ASSERT_CUDNN_VERSION_GE(6000);
template<typename SrcType, typename DstType, typename CmpType>
class QuantizedCuDNNConvOp {
public:
QuantizedCuDNNConvOp() {
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&data_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc_));
CUDNN_CALL(cudnnCreateFilterDescriptor(&filter_desc_));
}
void Init(const ConvolutionParam& param,
const OpContext& ctx,
const mxnet::ShapeVector& in_shape,
const mxnet::ShapeVector& out_shape) {
param_ = param;
CHECK_EQ(param_.kernel.ndim(), 2U)
<< "QuantizedCuDNNConvOp only supports 2D convolution for now";
if (param_.layout.has_value()) {
CHECK_EQ(param_.layout.value(), mshadow::kNCHW)
<< "QuantizedConvOp only supports NCHW for now";
}
if (param_.stride.ndim() == 0U) param_.stride = mshadow::Shape2(1, 1);
if (param_.dilate.ndim() == 0U) param_.dilate = mshadow::Shape2(1, 1);
if (param_.pad.ndim() == 0U) param_.pad = mshadow::Shape2(0, 0);
N = 0, H = 2, W = 3, C = 1;
src_type_ = mshadow::DataType<SrcType>::kCudnnFlag;
dst_type_ = mshadow::DataType<DstType>::kCudnnFlag;
cmp_type_ = mshadow::DataType<CmpType>::kCudnnFlag;
algo_ = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
format_ = CUDNN_TENSOR_NHWC;
InitDescriptors(in_shape, out_shape);
GetTempSize(ctx);
}
~QuantizedCuDNNConvOp() {
CUDNN_CALL(cudnnDestroyFilterDescriptor(filter_desc_));
CUDNN_CALL(cudnnDestroyTensorDescriptor(data_desc_));
CUDNN_CALL(cudnnDestroyTensorDescriptor(out_desc_));
CUDNN_CALL(cudnnDestroyConvolutionDescriptor(conv_desc_));
}
void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data) {
CHECK_EQ(param_.kernel.ndim(), 2U)
<< "QuantizedCuDNNConvOp only supports 2D convolution for now";
using namespace mshadow;
CHECK_EQ(in_data.size(), param_.no_bias? 6U : 9U);
CHECK_EQ(out_data.size(), 3U);
Stream<gpu> *s = ctx.get_stream<gpu>();
CHECK_EQ(s->dnn_handle_ownership_, Stream<gpu>::OwnHandle);
const TBlob& data = in_data[0];
const TBlob& filter = in_data[1];
const TBlob& out = out_data[0];
const mxnet::TShape& dshape = data.shape_;
const mxnet::TShape& fshape = filter.shape_;
const mxnet::TShape& oshape = out.shape_;
// allocate workspace
const int dev_id = ctx.run_ctx.ctx.dev_id;
const int dev_mask = gpu::kDevMask;
if (!param_.layout.has_value() || param_.layout.value() == mshadow::kNCHW) {
const size_t data_size = dshape.Size();
const size_t weight_size = fshape.Size();
const size_t output_size = oshape.Size();
size_t total_temp_bytes = (workspace_ + data_size + weight_size) * sizeof(SrcType)
+ output_size * (sizeof(DstType) + sizeof(int32_t));
Tensor<gpu, 1, char> temp_space =
ctx.requested[0].get_space_typed<gpu, 1, char>(mshadow::Shape1(total_temp_bytes), s);
char* temp_dptr = temp_space.dptr_;
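      // temp_dptr walks through temp_space in the order it is carved below:
      // [NHWC data | NHWC filter | NHWC output (DstType) | NHWC output cast to
      // int32], with the remaining workspace_byte_ bytes at the end handed to
      // cudnnConvolutionForward as its scratch workspace.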
TBlob data_(reinterpret_cast<SrcType*>(temp_dptr),
mxnet::TShape({dshape[N], dshape[H], dshape[W], dshape[C]}),
dev_mask, DataType<SrcType>::kFlag, dev_id);
temp_dptr += data_size * sizeof(SrcType);
TBlob filter_(reinterpret_cast<SrcType*>(temp_dptr),
mxnet::TShape({fshape[N], fshape[H], fshape[W], fshape[C]}),
dev_mask, DataType<SrcType>::kFlag, dev_id);
temp_dptr += weight_size * sizeof(SrcType);
// input: [NCHW] => [NHWC](batch, in_height, in_width, in_channels)
// filter: [NCHW] => [NHWC](out_channels, filter_height, filter_width, in_channels)
TransposeImpl<gpu>(ctx.run_ctx, data, data_, mxnet::TShape({N, H, W, C}));
TransposeImpl<gpu>(ctx.run_ctx, filter, filter_, mxnet::TShape({N, H, W, C}));
TBlob out_(reinterpret_cast<DstType*>(temp_dptr),
mxnet::TShape({oshape[N], oshape[H], oshape[W], oshape[C]}),
dev_mask, DataType<DstType>::kFlag, dev_id);
temp_dptr += output_size * sizeof(DstType);
TBlob out_tcast(reinterpret_cast<int32_t*>(temp_dptr),
mxnet::TShape({oshape[N], oshape[H], oshape[W], oshape[C]}),
dev_mask, DataType<int32_t>::kFlag, dev_id);
temp_dptr += output_size * sizeof(int32_t);
// input: [NHWC](batch, in_height, in_width, in_channels)
// filter: [HWNC](out_channels, filter_height, filter_width, in_channels)
// output: [NHWC](batch, out_height, out_width, out_channels)
CUDNN_CALL(cudnnConvolutionForward(s->dnn_handle_,
&alpha_,
data_desc_,
data_.dptr_,
filter_desc_,
filter_.dptr_,
conv_desc_,
algo_,
temp_dptr,
workspace_byte_,
&beta_,
out_desc_,
out_.dptr_));
Tensor<gpu, 1, DstType> out_tensor = out_.FlatTo1D<gpu, DstType>(s);
Tensor<gpu, 1, int32_t> out_tcast_tensor = out_tcast.FlatTo1D<gpu, int32_t>(s);
Assign(out_tcast_tensor, kWriteTo, mshadow::expr::tcast<int32_t>(out_tensor));
// output: [NHWC](batch, out_height, out_width, out_channels) => [NCHW]
TransposeImpl<gpu>(ctx.run_ctx, out_tcast, out, mxnet::TShape({0, 3, 1, 2}));
} else {
LOG(FATAL) << "quantized_conv only supports NCHW for now";
}
// calculate the min/max range for out_data as it's a multiplication
// of in_data[0] and in_data[1]. Need to rescale the min/max range of out_data
// based on the min/max ranges of in_data[0] and in_data[1].
const size_t num_inputs = param_.no_bias ? 2 : 3;
mxnet_op::Kernel<QuantizationRangeForS8S8MultiplicationStruct, gpu>::Launch(s, 1,
out_data[1].dptr<float>(), out_data[2].dptr<float>(),
in_data[num_inputs].dptr<float>(), in_data[num_inputs+1].dptr<float>(),
in_data[num_inputs+2].dptr<float>(), in_data[num_inputs+3].dptr<float>());
if (!param_.no_bias) {
if (param_.layout.has_value()) {
CHECK_EQ(param_.layout.value(), mshadow::kNCHW)
<< "quantized_conv only supports NCHW when there is a bias";
}
const TBlob& bias = in_data[2];
mxnet_op::Kernel<QuantizedBiasAddKernel, gpu>::Launch(s, out.Size(),
bias.Size(), out.dptr<int32_t>(), bias.dptr<int8_t>(),
out_data[1].dptr<float>(), out_data[2].dptr<float>(),
in_data[7].dptr<float>(), in_data[8].dptr<float>(),
oshape[2] * oshape[3]);
}
}
void InitDescriptors(const mxnet::ShapeVector& in_shape,
const mxnet::ShapeVector& out_shape) {
const mxnet::TShape& dshape = in_shape[0];
const mxnet::TShape& kshape = in_shape[1];
const mxnet::TShape& oshape = out_shape[0];
CUDNN_CALL(cudnnSetConvolution2dDescriptor(conv_desc_,
param_.pad[0],
param_.pad[1],
param_.stride[0],
param_.stride[1],
1,
1,
CUDNN_CROSS_CORRELATION,
cmp_type_));
CUDNN_CALL(cudnnSetTensor4dDescriptor(data_desc_,
format_,
src_type_,
dshape[N],
dshape[C],
dshape[H],
dshape[W]));
CUDNN_CALL(cudnnSetTensor4dDescriptor(out_desc_,
format_,
dst_type_,
oshape[N],
oshape[C],
oshape[H],
oshape[W]));
CUDNN_CALL(cudnnSetFilter4dDescriptor(filter_desc_,
src_type_,
format_,
kshape[N],
kshape[C],
kshape[H],
kshape[W]));
}
void GetTempSize(const OpContext& ctx) {
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(s->dnn_handle_,
data_desc_,
filter_desc_,
conv_desc_,
out_desc_,
algo_,
&workspace_byte_));
workspace_ = workspace_byte_ / sizeof(SrcType) + 1;
}
private:
ConvolutionParam param_;
size_t workspace_;
size_t workspace_byte_;
cudnnDataType_t src_type_;
cudnnDataType_t dst_type_;
cudnnDataType_t cmp_type_;
cudnnTensorFormat_t format_;
cudnnConvolutionDescriptor_t conv_desc_;
cudnnTensorDescriptor_t data_desc_;
cudnnFilterDescriptor_t filter_desc_;
cudnnTensorDescriptor_t out_desc_;
cudnnConvolutionFwdAlgo_t algo_;
uint32_t N, H, W, C;
float alpha_ = 1.0f;
float beta_ = 0.0f;
}; // class QuantizedCuDNNConvOp
#endif // MXNET_USE_CUDNN == 1 && CUDA_VERSION >= 8000
void QuantizedConvForwardGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const ConvolutionParam& param = nnvm::get<ConvolutionParam>(attrs.parsed);
CHECK_EQ(param.kernel.ndim(), 2U)
<< "QuantizedConvForward<gpu> only supports 2D convolution for now";
#if MXNET_USE_CUDNN == 1 && CUDA_VERSION >= 8000
typedef QuantizedCuDNNConvOp<int8_t, float, int32_t> QuantizedConvOpInt8;
#if DMLC_CXX11_THREAD_LOCAL
static thread_local QuantizedConvOpInt8 op;
#else
static MX_THREAD_LOCAL QuantizedConvOpInt8 op;
#endif // DMLC_CXX11_THREAD_LOCAL
op.Init(param, ctx, {inputs[0].shape_, inputs[1].shape_}, {outputs[0].shape_});
op.Forward(ctx, inputs, req, outputs);
#else
LOG(FATAL) << "QuantizedConvForward<gpu> only supports cudnnConvolutionForward "
"with CUDNN >= 6.0 and CUDA >= 8.0";
#endif // MXNET_USE_CUDNN == 1 && CUDA_VERSION >= 8000
}
NNVM_REGISTER_OP(_contrib_quantized_conv)
.set_attr<FCompute>("FCompute<gpu>", QuantizedConvForwardGPU);
} // namespace op
} // namespace mxnet
//#include "util.h"
//#include "time.h"
#include <matrix_io.h>
#include <readers.h>
#include <matrix_coloring/min_max.h>
#include <matrix_coloring/round_robin.h>
#include <matrix_coloring/multi_hash.h>
#include <cycles/v_cycle.h>
#include <cycles/w_cycle.h>
#include <cycles/f_cycle.h>
#include <cycles/cg_cycle.h>
#include <cycles/cg_flex_cycle.h>
#include <solvers/algebraic_multigrid_solver.h>
#include <solvers/pcgf_solver.h>
#include <solvers/cg_solver.h>
#include <solvers/pcg_solver.h>
#include <solvers/pbicgstab_solver.h>
#include <solvers/bicgstab_solver.h>
#include <solvers/fgmres_solver.h>
#include <solvers/gauss_seidel_solver.h>
//#include <solvers/jacobi_solver.h>
#include <solvers/jacobi_l1_solver.h>
//#include <solvers/jacobi_nocusp_solver.h>
#include <solvers/kpz_polynomial_solver.h>
#include <solvers/polynomial_solver.h>
#include <solvers/block_jacobi_solver.h>
#include <solvers/multicolor_gauss_seidel_solver.h>
#include <solvers/multicolor_dilu_solver.h>
#include <convergence/absolute.h>
#include <convergence/relative_max.h>
#include <convergence/relative_ini.h>
#include <amg_level.h>
#include <classical/classical_amg_level.h>
#include <aggregation/aggregation_amg_level.h>
#include <classical/interpolators/distance1.h>
#include <classical/interpolators/distance2.h>
#include <classical/selectors/pmis.h>
#include <classical/selectors/cr.h>
#include <energymin/energymin_amg_level.h>
#include <energymin/interpolators/em.h>
#include <classical/strength/ahat.h>
#include <classical/strength/all.h>
#include <aggregation/selectors/dummy.h>
#include <aggregation/selectors/size2_selector.h>
#include <aggregation/selectors/size4_selector.h>
#include <aggregation/selectors/size8_selector.h>
#include <aggregation/coarseAgenerators/hybrid_coarse_A_generator.h>
#include <aggregation/coarseAgenerators/low_deg_coarse_A_generator.h>
#include <aggregation/coarseAgenerators/thrust_coarse_A_generator.h>
namespace amgx
{
// parameter is used as test name
DECLARE_UNITTEST_BEGIN(FactoriesTest);
/*inline void registerParameters() {
AMG_Config::registerParameter<int>("dummyPar1","dummy parameter of type int",100);
AMG_Config::registerParameter<double>("dummyPar2","dummy parameter of type double",1e-6);
AMG_Config::registerParameter<NormType>("dummyPar3","dummy parameter of type NormType",L2);
AMG_Config::registerParameter<std::string>("dummyPar4","dummy parameter of type std::string","DUMMY");
AMG_Config::registerParameter<AlgorithmType>("dummyPar5","dummy parameter of type AlgorithmType",CLASSICAL);
}*/
inline void registerClasses()
{
// Register AMG Level Factories
AMG_LevelFactory<TConfig>::registerFactory(CLASSICAL, new Classical_AMG_LevelFactory<TConfig>);
AMG_LevelFactory<TConfig>::registerFactory(AGGREGATION, new Aggregation_AMG_LevelFactory<TConfig>);
AMG_LevelFactory<TConfig>::registerFactory(ENERGYMIN, new Energymin_AMG_LevelFactory<TConfig>);
// Register MatrixColoring schemes
MatrixColoringFactory<TConfig>::registerFactory("MIN_MAX", new MinMaxMatrixColoringFactory<TConfig>);
MatrixColoringFactory<TConfig>::registerFactory("ROUND_ROBIN", new RoundRobinMatrixColoringFactory<TConfig>);
MatrixColoringFactory<TConfig>::registerFactory("MULTI_HASH", new MultiHashMatrixColoringFactory<TConfig>);
//Register Cycles
CycleFactory<TConfig>::registerFactory("V", new V_CycleFactory<TConfig>);
CycleFactory<TConfig>::registerFactory("F", new F_CycleFactory<TConfig>);
CycleFactory<TConfig>::registerFactory("W", new W_CycleFactory<TConfig>);
CycleFactory<TConfig>::registerFactory("CG", new CG_CycleFactory<TConfig>);
CycleFactory<TConfig>::registerFactory("CGF", new CG_Flex_CycleFactory<TConfig>);
//Register Solvers
SolverFactory<TConfig>::registerFactory("AMG", new AlgebraicMultigrid_SolverFactory<TConfig>);
SolverFactory<TConfig>::registerFactory("PCGF", new PCGF_SolverFactory<TConfig>);
SolverFactory<TConfig>::registerFactory("CG", new CG_SolverFactory<TConfig>);
SolverFactory<TConfig>::registerFactory("PCG", new PCG_SolverFactory<TConfig>);
SolverFactory<TConfig>::registerFactory("PBICGSTAB", new PBiCGStab_SolverFactory<TConfig>);
SolverFactory<TConfig>::registerFactory("BICGSTAB", new BiCGStab_SolverFactory<TConfig>);
SolverFactory<TConfig>::registerFactory("FGMRES", new FGMRES_SolverFactory<TConfig>);
SolverFactory<TConfig>::registerFactory("JACOBI_L1", new JacobiL1SolverFactory<TConfig>);
SolverFactory<TConfig>::registerFactory("GS", new GaussSeidelSolverFactory<TConfig>);
SolverFactory<TConfig>::registerFactory("POLYNOMIAL", new polynomial_solver::PolynomialSolverFactory<TConfig>);
SolverFactory<TConfig>::registerFactory("KPZ_POLYNOMIAL", new KPZPolynomialSolverFactory<TConfig>);
SolverFactory<TConfig>::registerFactory("BLOCK_JACOBI", new block_jacobi_solver::BlockJacobiSolverFactory<TConfig>);
SolverFactory<TConfig>::registerFactory("MULTICOLOR_GS", new multicolor_gauss_seidel_solver::MulticolorGaussSeidelSolverFactory<TConfig>);
SolverFactory<TConfig>::registerFactory("MULTICOLOR_DILU", new multicolor_dilu_solver::MulticolorDILUSolverFactory<TConfig>);
//Register Aggregation Selectors
aggregation::SelectorFactory<TConfig>::registerFactory("SIZE_2", new aggregation::size2_selector::Size2SelectorFactory<TConfig>);
aggregation::SelectorFactory<TConfig>::registerFactory("SIZE_4", new aggregation::size4_selector::Size4SelectorFactory<TConfig>);
aggregation::SelectorFactory<TConfig>::registerFactory("SIZE_8", new aggregation::size8_selector::Size8SelectorFactory<TConfig>);
aggregation::SelectorFactory<TConfig>::registerFactory("DUMMY", new aggregation::DUMMY_SelectorFactory<TConfig>);
//Register Energymin Selectors
classical::SelectorFactory<TConfig>::registerFactory("CR", new classical::CR_SelectorFactory<TConfig>);
//Register Aggregation Coarse Generators
aggregation::CoarseAGeneratorFactory<TConfig>::registerFactory("LOW_DEG", new aggregation::LowDegCoarseAGeneratorFactory<TConfig>);
aggregation::CoarseAGeneratorFactory<TConfig>::registerFactory("HYBRID", new aggregation::HybridCoarseAGeneratorFactory<TConfig>);
aggregation::CoarseAGeneratorFactory<TConfig>::registerFactory("THRUST", new aggregation::ThrustCoarseAGeneratorFactory<TConfig>);
//Register Energymin Interpolators
energymin::InterpolatorFactory<TConfig>::registerFactory("EM", new energymin::EM_InterpolatorFactory<TConfig>);
ConvergenceFactory<TConfig>::registerFactory("ABSOLUTE", new AbsoluteConvergenceFactory<TConfig>);
ConvergenceFactory<TConfig>::registerFactory("RELATIVE_INI_CORE", new RelativeIniConvergenceFactory<TConfig>);
}
inline void registerClassesClassical()
{
//Register Classical Interpolators
InterpolatorFactory<TConfig>::registerFactory("D1", new Distance1_InterpolatorFactory<TConfig>);
InterpolatorFactory<TConfig>::registerFactory("D2", new Distance2_InterpolatorFactory<TConfig>);
//Register Classical Selectors
classical::SelectorFactory<TConfig>::registerFactory("PMIS", new classical::PMIS_SelectorFactory<TConfig>);
//Register Classical Strength
StrengthFactory<TConfig>::registerFactory("AHAT", new Strength_Ahat_StrengthFactory<TConfig>);
StrengthFactory<TConfig>::registerFactory("ALL", new Strength_All_StrengthFactory<TConfig>);
}
inline void unregisterClasses()
{
AMG_LevelFactory<TConfig>::unregisterFactory(CLASSICAL);
AMG_LevelFactory<TConfig>::unregisterFactory(AGGREGATION);
AMG_LevelFactory<TConfig>::unregisterFactory(ENERGYMIN);
    // Unregister MatrixColoring schemes
MatrixColoringFactory<TConfig>::unregisterFactory("MIN_MAX");
MatrixColoringFactory<TConfig>::unregisterFactory("ROUND_ROBIN");
MatrixColoringFactory<TConfig>::unregisterFactory("MULTI_HASH");
    //Unregister Cycles
CycleFactory<TConfig>::unregisterFactory("V");
CycleFactory<TConfig>::unregisterFactory("F");
CycleFactory<TConfig>::unregisterFactory("W");
CycleFactory<TConfig>::unregisterFactory("CG");
CycleFactory<TConfig>::unregisterFactory("CGF");
    //Unregister Solvers
SolverFactory<TConfig>::unregisterFactory("PCGF");
SolverFactory<TConfig>::unregisterFactory("CG");
SolverFactory<TConfig>::unregisterFactory("PCG");
SolverFactory<TConfig>::unregisterFactory("AMG");
SolverFactory<TConfig>::unregisterFactory("PBICGSTAB");
SolverFactory<TConfig>::unregisterFactory("BICGSTAB");
SolverFactory<TConfig>::unregisterFactory("FGMRES");
SolverFactory<TConfig>::unregisterFactory("JACOBI_L1");
SolverFactory<TConfig>::unregisterFactory("GS");
SolverFactory<TConfig>::unregisterFactory("POLYNOMIAL");
SolverFactory<TConfig>::unregisterFactory("KPZ_POLYNOMIAL");
SolverFactory<TConfig>::unregisterFactory("BLOCK_JACOBI");
SolverFactory<TConfig>::unregisterFactory("MULTICOLOR_GS");
SolverFactory<TConfig>::unregisterFactory("MULTICOLOR_DILU");
    //Unregister Aggregation Selectors
aggregation::SelectorFactory<TConfig>::unregisterFactory("SIZE_2");
aggregation::SelectorFactory<TConfig>::unregisterFactory("SIZE_4");
aggregation::SelectorFactory<TConfig>::unregisterFactory("SIZE_8");
aggregation::SelectorFactory<TConfig>::unregisterFactory("DUMMY");
    //Unregister Energymin Selectors
classical::SelectorFactory<TConfig>::unregisterFactory("CR");
    //Unregister Aggregation Coarse Generators
aggregation::CoarseAGeneratorFactory<TConfig>::unregisterFactory("LOW_DEG");
aggregation::CoarseAGeneratorFactory<TConfig>::unregisterFactory("HYBRID");
aggregation::CoarseAGeneratorFactory<TConfig>::unregisterFactory("THRUST");
    //Unregister Energymin Interpolators
energymin::InterpolatorFactory<TConfig>::unregisterFactory("EM");
ConvergenceFactory<TConfig>::unregisterFactory("ABSOLUTE");
ConvergenceFactory<TConfig>::unregisterFactory("RELATIVE_INI_CORE");
//MatrixIO<TConfig>::unregisterReaders();
}
inline void unregisterClassesClassical()
{
    //Unregister Classical Interpolators
InterpolatorFactory<TConfig>::unregisterFactory("D1");
InterpolatorFactory<TConfig>::unregisterFactory("D2");
    //Unregister Classical Selectors
classical::SelectorFactory<TConfig>::unregisterFactory("PMIS");
    //Unregister Classical Strength
StrengthFactory<TConfig>::unregisterFactory("AHAT");
StrengthFactory<TConfig>::unregisterFactory("ALL");
}
AMGX_ERROR test_finalize()
{
try
{
unregisterClasses();
unregisterClassesClassical();
}
    catch (const amgx_exception &e)
{
return AMGX_ERR_CORE;
}
return AMGX_OK;
}
AMGX_ERROR test_initialize()
{
try
{
registerClasses();
registerClassesClassical();
}
    catch (const amgx_exception &e)
{
return AMGX_ERR_CORE;
}
return AMGX_OK;
}
void run()
{
AMGX_finalize_plugins();
AMGX_finalize();
UnitTest::amgx_intialized = false;
AMGX_ERROR errorCode;
errorCode = test_initialize();
UNITTEST_ASSERT_EQUAL(errorCode, AMGX_OK);
errorCode = test_initialize();
UNITTEST_ASSERT_EQUAL(errorCode, AMGX_ERR_CORE);
errorCode = test_finalize();
UNITTEST_ASSERT_EQUAL(errorCode, AMGX_OK);
errorCode = test_initialize();
UNITTEST_ASSERT_EQUAL(errorCode, AMGX_OK);
errorCode = test_finalize();
UNITTEST_ASSERT_EQUAL(errorCode, AMGX_OK);
errorCode = test_finalize();
UNITTEST_ASSERT_EQUAL(errorCode, AMGX_ERR_CORE);
AMGX_initialize();
AMGX_initialize_plugins();
UnitTest::amgx_intialized = true;
}
DECLARE_UNITTEST_END(FactoriesTest);
#define AMGX_CASE_LINE(CASE) FactoriesTest <TemplateMode<CASE>::Type> FactoriesTest_##CASE;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
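// For reference, AMGX_FORALL_BUILDS is an X-macro: it expands AMGX_CASE_LINE once per
// enabled build configuration, so the two lines above instantiate one FactoriesTest
// object per template mode, e.g. (indicatively)
//   FactoriesTest<TemplateMode<AMGX_mode_dDDI>::Type> FactoriesTest_AMGX_mode_dDDI;
// for a double-precision device build.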
} //namespace amgx
#include "nnpooling.hpp"
#include "datacu.hpp"
#include <assert.h>
#include <float.h>
#include <sm_20_atomic_functions.h>
// -------------------------------------------------------------------
// Max pooling helpers
// -------------------------------------------------------------------
template<typename T> __global__ void
pooling_max_kernel
(T* output,
const T* data,
const int outputWidth,
const int outputHeight,
const int outputVolume,
const int width,
const int height,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY,
const int padLeft,
const int padTop)
{
int outputIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (outputIndex < outputVolume) {
int px = outputIndex ;
int py = px / outputWidth ;
int pz = py / outputHeight ;
px %= outputWidth ;
py %= outputHeight ;
data += pz * (width*height) ;
int x1 = px * strideX - padLeft ;
int y1 = py * strideY - padTop ;
int x2 = min(x1 + poolWidth, width) ;
int y2 = min(y1 + poolHeight, height) ;
x1 = max(x1, 0) ;
y1 = max(y1, 0) ;
T bestValue = data[y1 * width + x1] ;
for (int y = y1 ; y < y2 ; ++y) {
for (int x = x1 ; x < x2 ; ++x) {
bestValue = max(bestValue, data[y * width + x]) ;
}
}
output[outputIndex] = bestValue ;
}
}
#ifdef VLNN_CAFFELIKE_BPPOOL
// In order to be able to use this, BP would need to have access to both
// bottom data and output data (currently only passed bottom data...)
template <typename T> __global__ void
pooling_max_backward_with_output_data
(T* derData,
const T* data,
const T* output,
const T* derOutput,
const int nthreads,
const int outputWidth,
const int outputHeight,
const int width,
const int height,
const int depth,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads) {
// find out the local index
// find out the local offset
int x = index % width;
int y = (index / width) % height;
int z = (index / width / height) % depth;
int py1 = (y < poolHeight) ? 0 : (y - poolHeight) / strideY + 1;
int py2 = min(y / strideY + 1, outputHeight);
int px1 = (x < poolWidth) ? 0 : (x - poolWidth) / strideX + 1;
int px2 = min(x / strideX + 1, outputWidth);
T gradient = 0;
T datum = data[(z * height + y) * width + x];
output += z * outputHeight * outputWidth;
    derOutput += z * outputHeight * outputWidth;
for (int py = py1; py < py2; ++py) {
for (int px = px1; px < px2; ++px) {
    gradient += derOutput[py * outputWidth + px] *
(datum == output[py * outputWidth + px]);
}
}
    derData[index] = gradient;
}
}
#endif
template<typename T> __global__ void
pooling_max_backward_kernel
(T* derData,
const T* data,
const T* derOutput,
const int outputWidth,
const int outputHeight,
const int outputVolume,
const int width,
const int height,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY,
const int padLeft,
const int padTop)
{
int outputIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (outputIndex < outputVolume) {
int px = outputIndex ;
int py = px / outputWidth ;
int pz = py / outputHeight ;
px %= outputWidth ;
py %= outputHeight ;
data += pz * (width*height) ;
derData += pz * (width*height) ;
int x1 = px * strideX - padLeft ;
int y1 = py * strideY - padTop ;
int x2 = min(x1 + poolWidth, width) ;
int y2 = min(y1 + poolHeight, height) ;
x1 = max(x1, 0) ;
y1 = max(y1, 0) ;
int bestIndex = y1 * width + x1 ;
T bestValue = data[bestIndex] ;
for (int y = y1 ; y < y2 ; ++y) {
for (int x = x1 ; x < x2 ; ++x) {
int index = y * width + x ;
T value = data[index] ;
if (value > bestValue) {
bestValue = value ;
bestIndex = index ;
}
}
}
/*
This is bad, but required to eliminate a race condition when writing
to bottom_diff.
    Caffe goes the other way around, but requires remembering the layer
output, or the maximal indexes.
atomicAdd(add, val)
*/
atomicAdd(derData + bestIndex, derOutput[outputIndex]) ;
}
}
// -------------------------------------------------------------------
// Average pooling helpers
// -------------------------------------------------------------------
template<typename T> __global__ void
pooling_average_kernel
(T* output,
const T* data,
const int outputWidth,
const int outputHeight,
const int outputVolume,
const int width,
const int height,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY,
const int padLeft,
const int padTop)
{
/* outputIndex = x + y * outputWidth + z * (outputWidth * outputHeight) */
int outputIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (outputIndex < outputVolume) {
int px = outputIndex ;
int py = px / outputWidth ;
int pz = py / outputHeight ;
px %= outputWidth ;
py %= outputHeight ;
int x1 = px * strideX - padLeft ;
int y1 = py * strideY - padTop ;
int x2 = min(x1 + poolWidth, width) ;
int y2 = min(y1 + poolHeight, height) ;
x1 = max(x1, 0) ;
y1 = max(y1, 0) ;
data += pz * (width*height) ;
T accum = 0;
T poolSize = (y2 - y1)*(x2 - x1);
for (int y = y1 ; y < y2 ; ++y) {
for (int x = x1 ; x < x2 ; ++x) {
accum += data[y * width + x] ;
}
}
output[outputIndex] = accum / poolSize ;
}
}
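// Note on the kernel above: poolSize is computed from the clamped window
// (y2 - y1) * (x2 - x1), so padded samples are excluded from the average rather
// than contributing zeros (the backward kernel below divides by the same count).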
template <typename T> __global__ void
pooling_average_backward_kernel
(T* derData,
const T* derOutput,
const int nthreads,
const int outputWidth,
const int outputHeight,
const int width,
const int height,
const int depth,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY,
const int padLeft,
const int padTop)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads) {
/* To understand the logic of this piece of code see the
    comments of the row2im backward kernel */
int x_data = index ;
int y_data = x_data / width ;
int z = y_data / height ;
x_data %= width ;
y_data %= height ;
int dx = x_data + padLeft - poolWidth ;
int dy = y_data + padTop - poolHeight ;
int px1 = (dx >= 0) ? dx/strideX + 1 : 0 ;
int py1 = (dy >= 0) ? dy/strideY + 1 : 0 ;
int px2 = min((x_data + padLeft) / strideX, outputWidth - 1) ;
int py2 = min((y_data + padTop) / strideY, outputHeight - 1) ;
T accumulator = 0 ;
derOutput += z * outputHeight * outputWidth;
for (int py = py1 ; py <= py2 ; ++py) {
for (int px = px1 ; px <= px2 ; ++px) {
int x1 = px * strideX - padLeft ;
int y1 = py * strideY - padTop ;
int x2 = min(x1 + poolWidth, width) ;
int y2 = min(y1 + poolHeight, height) ;
x1 = max(x1, 0) ;
y1 = max(y1, 0) ;
T poolSize = (y2 - y1) * (x2 - x1);
accumulator += derOutput[py * outputWidth + px] / poolSize ;
}
}
derData[index] = accumulator ;
}
}
// -------------------------------------------------------------------
// Forward
// -------------------------------------------------------------------
template<DataType dataType, Pooling::Method method>
struct PoolingForwardGPU
{
vl::ErrorCode operator()(Pooling &op,
Tensor &output,
Tensor const &input)
{
typedef typename vl::DataTypeTraits<dataType>::type type ;
auto height = input.getHeight() ;
auto width = input.getWidth() ;
auto depth = input.getDepth() ;
auto size = input.getSize() ;
auto inputData = (type const*)input.getMemory() ;
auto outputData = (type*)output.getMemory() ;
auto outputWidth = (width + (op.padLeft + op.padRight) - op.poolWidth)/op.strideX + 1 ;
auto outputHeight = (height + (op.padTop + op.padBottom) - op.poolHeight)/op.strideY + 1 ;
auto outputVolume = outputWidth * outputHeight * depth * size ;
if (method == Pooling::Max) {
pooling_max_kernel<type>
<<< divideAndRoundUp(outputVolume, (size_t)VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(outputData, inputData,
outputHeight, outputWidth, outputVolume,
height, width,
op.poolHeight, op.poolWidth,
op.strideY, op.strideX,
op.padTop, op.padLeft);
}
else if (method == Pooling::Average) {
pooling_average_kernel<type>
<<< divideAndRoundUp(outputVolume, (size_t)VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(outputData, inputData,
outputHeight, outputWidth, outputVolume,
height, width,
op.poolHeight, op.poolWidth,
op.strideY, op.strideX,
op.padTop, op.padLeft);
}
else {
assert(false) ;
}
cudaError_t status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
} ;
template<DataType dataType>
struct PoolingForward<VLDT_GPU,dataType>
{
vl::ErrorCode operator()(Pooling &op,
Tensor output,
Tensor input)
{
switch (op.method) {
case Pooling::Max:
return
PoolingForwardGPU<dataType,Pooling::Max>
()(op,output,input) ;
case Pooling::Average:
return
PoolingForwardGPU<dataType,Pooling::Average>
()(op,output,input) ;
default:
return VLE_IllegalArgument ;
}
}
} ;
// -------------------------------------------------------------------
// Backward
// -------------------------------------------------------------------
template<DataType dataType, Pooling::Method method>
struct PoolingBackwardGPU
{
vl::ErrorCode operator()(Pooling &op,
Tensor &derInput,
Tensor const &input,
Tensor const &derOutput)
{
typedef typename vl::DataTypeTraits<dataType>::type type ;
auto height = input.getHeight() ;
auto width = input.getWidth() ;
auto depth = input.getDepth() ;
auto size = input.getSize() ;
auto inputData = (type const*)input.getMemory() ;
auto derOutputData = (type const*)derOutput.getMemory() ;
auto derInputData = (type*)derInput.getMemory() ;
auto outputWidth = (width + (op.padLeft + op.padRight) - op.poolWidth)/op.strideX + 1 ;
auto outputHeight = (height + (op.padTop + op.padBottom) - op.poolHeight)/op.strideY + 1 ;
auto outputVolume = outputWidth * outputHeight * depth * size ;
auto inputVolume = width * height * size * depth ;
if (method == Pooling::Max) {
pooling_max_backward_kernel<type>
<<< divideAndRoundUp(outputVolume, (size_t)VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(derInputData, inputData, derOutputData,
outputHeight, outputWidth, outputVolume,
height, width,
op.poolHeight, op.poolWidth,
op.strideY, op.strideX,
op.padTop, op.padLeft);
}
else if (method == Pooling::Average) {
pooling_average_backward_kernel<type>
<<< divideAndRoundUp(inputVolume, (size_t)VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(derInputData, derOutputData, inputVolume,
outputHeight, outputWidth,
height, width, size * depth,
op.poolHeight, op.poolWidth,
op.strideY, op.strideX,
op.padTop, op.padLeft) ;
}
else {
assert(false) ;
}
cudaError_t status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
} ; // PoolingBackwardGPU
template<DataType dataType>
struct PoolingBackward<VLDT_GPU,dataType>
{
vl::ErrorCode operator()(Pooling &op,
Tensor &derInput,
Tensor const &input,
Tensor const &derOutput)
{
switch (op.method) {
case Pooling::Max:
return
PoolingBackwardGPU<dataType,Pooling::Max>
()(op,derInput,input,derOutput) ;
case Pooling::Average:
return
PoolingBackwardGPU<dataType,Pooling::Average>
()(op,derInput,input,derOutput) ;
default:
return VLE_IllegalArgument ;
}
}
} ;
#pragma once
#include <gunrock/util/array_utils.cuh>
#include <gunrock/app/problem_base.cuh>
#include <gunrock/oprtr/1D_oprtr/for_all.cuh>
#include <unordered_set>
namespace gunrock {
namespace app {
namespace knn {
/**
 * @brief Specifying parameters for KNN Problem
 * @param parameters The util::Parameter<...> structure holding all parameter info
 * \return cudaError_t error message(s), if any
*/
cudaError_t UseParameters_problem(util::Parameters &parameters) {
cudaError_t retval = cudaSuccess;
GUARD_CU(gunrock::app::UseParameters_problem(parameters));
return retval;
}
/**
* @brief KNN Problem structure.
* @tparam _GraphT Type of the graph
* @tparam _FLAG Problem flags
*/
template <typename _GraphT, ProblemFlag _FLAG = Problem_None>
struct Problem : ProblemBase<_GraphT, _FLAG> {
typedef _GraphT GraphT;
static const ProblemFlag FLAG = _FLAG;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::ValueT ValueT;
typedef typename GraphT::SizeT SizeT;
typedef typename GraphT::CsrT CsrT;
typedef typename GraphT::GpT GpT;
typedef typename util::Array1D<SizeT, ValueT> ArrayT;
typedef ProblemBase<GraphT, FLAG> BaseProblem;
typedef DataSliceBase<GraphT, FLAG> BaseDataSlice;
// ----------------------------------------------------------------
// Dataslice structure
/**
   * @brief Data structure containing problem-specific data on an individual GPU.
*/
struct DataSlice : BaseDataSlice {
util::Array1D<SizeT, ValueT> points;
// Nearest Neighbors
util::Array1D<SizeT, SizeT> knns;
util::Array1D<SizeT, int> sem;
// Number of neighbors
SizeT k;
// Number of points
SizeT num_points;
// Dimension of points labels
SizeT dim;
// Sorted
util::Array1D<SizeT, ValueT> distance_out;
/*
* @brief Default constructor
*/
DataSlice() : BaseDataSlice() {
points.SetName("points");
knns.SetName("knns");
sem.SetName("sem");
distance_out.SetName("distance_out");
}
/*
* @brief Default destructor
*/
virtual ~DataSlice() { Release(); }
/*
* @brief Releasing allocated memory space
* @param[in] target The location to release memory from
* \return cudaError_t Error message(s), if any
*/
cudaError_t Release(util::Location target = util::LOCATION_ALL) {
cudaError_t retval = cudaSuccess;
if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx));
GUARD_CU(knns.Release(target));
GUARD_CU(distance_out.Release(target));
GUARD_CU(sem.Release(target));
GUARD_CU(BaseDataSlice ::Release(target));
return retval;
}
/**
* @brief initializing KNN-specific Data Slice on each gpu
* @param sub_graph Sub graph on the GPU.
* @param num_points_ Number of points
* @param k_ Number of Nearest Neighbors
* @param dim_ Dimension of the points labels
* @param num_gpus Number of GPU devices
* @param[in] gpu_idx GPU device index
* @param[in] target Targeting device location
     * @param[in] flag Problem flag containing options
* \return cudaError_t Error message(s), if any
*/
cudaError_t Init(GraphT sub_graph, SizeT num_points_, SizeT k_, SizeT dim_,
int num_gpus = 1, int gpu_idx = 0, util::Location target = util::DEVICE,
ProblemFlag flag = Problem_None) {
cudaError_t retval = cudaSuccess;
GUARD_CU(BaseDataSlice::Init(sub_graph, num_gpus, gpu_idx, target, flag));
// Basic problem parameters
num_points = num_points_;
k = k_;
dim = dim_;
// k-nearest neighbors
GUARD_CU(knns.Allocate(k * num_points, target));
GUARD_CU(distance_out.Allocate(k * num_points, target));
GUARD_CU(sem.Allocate(num_points, target));
return retval;
}
/**
* @brief Reset problem function. Must be called prior to each run.
* @param[in] target Targeting device location
* \return cudaError_t Error message(s), if any
*/
cudaError_t Reset(ValueT* h_points, util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
SizeT num_points = this->num_points;
typedef typename GraphT::CsrT CsrT;
// K-Nearest Neighbors
GUARD_CU(knns.EnsureSize_(k * num_points, target));
GUARD_CU(knns.ForAll(
[] __host__ __device__(SizeT * k_, const SizeT &p) {
k_[p] = util::PreDefinedValues<SizeT>::InvalidValue;
},
k * num_points, target, this->stream));
GUARD_CU(distance_out.EnsureSize_(k * num_points, target));
GUARD_CU(distance_out.ForAll(
[] __host__ __device__(ValueT * d, const SizeT &p) {
d[p] = util::PreDefinedValues<ValueT>::MaxValue;
},
k * num_points, target, this->stream));
GUARD_CU(sem.EnsureSize_(num_points, target));
GUARD_CU(sem.ForAll(
[] __host__ __device__(int* d, const SizeT &p) {
d[p] = 0;
},
num_points, target, this->stream));
//int k_ = k;
GUARD_CU(util::SetDevice(this->gpu_idx));
GUARD_CU(points.SetPointer(h_points, num_points*dim, util::HOST));
GUARD_CU(points.Move(util::HOST, target, num_points*dim, 0, this->stream));
return retval;
}
}; // DataSlice
// Set of data slices (one for each GPU)
util::Array1D<SizeT, DataSlice> *data_slices;
SizeT k;
SizeT num_points;
SizeT dim;
bool transpose;
bool use_shared_mem;
int num_threads;
int block_size;
int grid_size;
int data_size;
int points_size;
int dist_size;
int keys_size;
int shared_point_size;
int shared_mem_size;
// ----------------------------------------------------------------
// Problem Methods
/**
* @brief KNN Problem default constructor
*/
Problem(util::Parameters &_parameters, ProblemFlag _flag = Problem_None)
: BaseProblem(_parameters, _flag), data_slices(NULL) {}
/**
* @brief KNN Problem default destructor
*/
virtual ~Problem() { Release(); }
/*
* @brief Releasing KNN Problem allocated memory space
* @param[in] target The location to release memory from
* \return cudaError_t Error message(s), if any
*/
cudaError_t Release(util::Location target = util::LOCATION_ALL) {
cudaError_t retval = cudaSuccess;
if (data_slices == NULL) return retval;
for (int i = 0; i < this->num_gpus; i++)
GUARD_CU(data_slices[i].Release(target));
if ((target & util::HOST) != 0 &&
data_slices[0].GetPointer(util::DEVICE) == NULL) {
delete[] data_slices;
data_slices = NULL;
}
GUARD_CU(BaseProblem::Release(target));
return retval;
}
/**
* @brief Copy result k Nearest Neighbors computed on GPUs back to host-side arrays.
   * @param[in] h_knns Empty host array to store the kNNs computed on the GPU
   * @param[in] target Targeting device location
* \return cudaError_t Error message(s), if any
*/
cudaError_t Extract(SizeT *h_knns, util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
auto &data_slice = data_slices[0][0];
if (this->num_gpus == 1) {
// Set device
if (target == util::DEVICE) {
GUARD_CU(util::SetDevice(this->gpu_idx[0]));
        // Extract the computed knns array into the host buffer
        GUARD_CU(data_slice.knns.SetPointer(h_knns, num_points * k, util::HOST));
GUARD_CU(data_slice.knns.Move(util::DEVICE, util::HOST));
      } else if (target == util::HOST) {
        GUARD_CU(data_slice.knns.ForEach(
            h_knns,
            [] __host__ __device__(const SizeT &device_val, SizeT &host_val) {
              host_val = device_val;
            },
            num_points * k, util::HOST));
      }
    }
return retval;
}
/**
* @brief initialization function (Problem struct)
* @param graph The graph that KNN processes on
   * @param[in] target Memory location to work on
* \return cudaError_t Error message(s), if any
*/
cudaError_t Init(GraphT &graph, util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
GUARD_CU(BaseProblem::Init(graph, target));
data_slices = new util::Array1D<SizeT, DataSlice>[this->num_gpus];
// Assign the parameters to problem
this->k = this->parameters.template Get<SizeT>("k");
this->num_points = this->parameters.template Get<SizeT>("n");
this->dim = this->parameters.template Get<SizeT>("dim");
    this->transpose = this->parameters.template Get<bool>("transpose");
    bool use_shared_mem = this->parameters.template Get<bool>("use-shared-mem");
    int num_threads = this->parameters.template Get<int>("NUM-THREADS");
if (num_threads == 0) num_threads = 128;
int block_size = (num_points < num_threads ? num_points : num_threads);
int data_size = sizeof(ValueT);
printf("data_size = %d\n", data_size);
int points_size = ((((block_size + 1) * dim * data_size) + 127)/128) * 128;
int dist_size = ((((block_size + 1) * k * data_size) + 127)/128) * 128;
int keys_size = ((((block_size + 1) * k * sizeof(int)) + 127)/128) * 128;
int shared_point_size = dim * data_size;
int shared_mem_size = points_size + dist_size + keys_size + shared_point_size;
if (use_shared_mem){
auto dev = this->parameters.template Get <int>("device");
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
while (shared_mem_size > deviceProp.sharedMemPerBlock){
block_size -= 32;
points_size = (((block_size + 1) * dim * data_size + 127)/128) * 128;
dist_size = (((block_size + 1) * k * data_size + 127)/128) * 128;
keys_size = (((block_size + 1) * k * sizeof(int) + 127)/128) * 128;
shared_point_size = dim * data_size;
shared_mem_size = points_size + dist_size + keys_size + shared_point_size;
}
      // No feasible block size fits in shared memory; fall back to the global-memory path
      if (block_size <= 0){
        use_shared_mem = false;
        block_size = 128;
      }
}
int grid_size = 65536/block_size;
this->use_shared_mem = use_shared_mem;
this->num_threads = num_threads;
this->block_size = block_size;
this->grid_size = grid_size;
this->data_size = data_size;
this->points_size = points_size;
this->dist_size = dist_size;
this->keys_size = keys_size;
this->shared_point_size = shared_point_size;
this->shared_mem_size = shared_mem_size;
for (int gpu = 0; gpu < this->num_gpus; gpu++) {
data_slices[gpu].SetName("data_slices[" + std::to_string(gpu) + "]");
if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
GUARD_CU(data_slices[gpu].Allocate(1, target | util::HOST));
auto &data_slice = data_slices[gpu][0];
GUARD_CU(data_slice.Init(this->sub_graphs[gpu], this->num_points, this->k,
this->dim, this->num_gpus, this->gpu_idx[gpu], target, this->flag));
}
return retval;
}
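  // Illustrative sketch (hypothetical helper, not part of the original Gunrock code):
  // the repeated "((x + 127) / 128) * 128" arithmetic in Init above rounds a byte
  // count up to the next multiple of 128 so that each shared-memory region starts
  // on a 128-byte boundary. The same computation expressed as a named helper:
  static constexpr int RoundUpTo128(int bytes) {
    return ((bytes + 127) / 128) * 128;  // e.g. RoundUpTo128(130) == 256, RoundUpTo128(128) == 128
  }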
/**
* @brief Reset problem function. Must be called prior to each run (Problem struct).
* @param[in] points Array of points
   * @param[in] target Memory location to work on
* \return cudaError_t Error message(s), if any
*/
cudaError_t Reset(ValueT* points, util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
// Reset data slices
for (int gpu = 0; gpu < this->num_gpus; ++gpu) {
if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
GUARD_CU(data_slices[gpu]->Reset(points,target));
GUARD_CU(data_slices[gpu].Move(util::HOST, target));
}
GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed");
return retval;
}
};
} // namespace knn
} // namespace app
} // namespace gunrock
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
#include <assert.h>
#define OSD_PATCH_BASIS_CUDA
#include "../osd/patchBasisCommonTypes.h"
#include "../osd/patchBasisCommon.h"
#include "../osd/patchBasisCommonEval.h"
// -----------------------------------------------------------------------------
template<int N> struct DeviceVertex {
float v[N];
__device__ void addWithWeight(DeviceVertex<N> const & src, float weight) {
#pragma unroll
for(int i = 0; i < N; ++i){
v[i] += src.v[i] * weight;
}
}
__device__ void clear() {
#pragma unroll
for(int i = 0; i < N; ++i){
v[i] = 0.0f;
}
}
};
// Specialize DeviceVertex for N=0 to avoid compile error:
// "flexible array member in otherwise empty struct"
template<> struct DeviceVertex<0> {
    __device__ void addWithWeight(DeviceVertex<0> const & src, float weight) {}
__device__ void clear() {}
};
// -----------------------------------------------------------------------------
__device__ void clear(float *dst, int count)
{
for(int i = 0; i < count; ++i) dst[i] = 0;
}
__device__ void addWithWeight(float *dst, float const *src, float weight, int count)
{
for(int i = 0; i < count; ++i) dst[i] += src[i] * weight;
}
// --------------------------------------------------------------------------------------------
template <int NUM_ELEMENTS> __global__ void
computeStencils(float const * cvs, float * vbuffer,
int const * sizes,
int const * offsets,
int const * indices,
float const * weights,
int start, int end) {
DeviceVertex<NUM_ELEMENTS> const * src =
(DeviceVertex<NUM_ELEMENTS> const *)cvs;
DeviceVertex<NUM_ELEMENTS> * verts =
(DeviceVertex<NUM_ELEMENTS> *)vbuffer;
int first = start + threadIdx.x + blockIdx.x*blockDim.x;
for (int i=first; i<end; i += blockDim.x * gridDim.x) {
int const * lindices = indices + offsets[i];
float const * lweights = weights + offsets[i];
DeviceVertex<NUM_ELEMENTS> dst;
dst.clear();
for (int j=0; j<sizes[i]; ++j) {
dst.addWithWeight(src[lindices[j]], lweights[j]);
}
verts[i] = dst;
}
}
__global__ void
computeStencils(float const * cvs, float * dst,
int length,
int srcStride,
int dstStride,
int const * sizes,
int const * offsets,
int const * indices,
float const * weights,
int start, int end) {
int first = start + threadIdx.x + blockIdx.x*blockDim.x;
for (int i=first; i<end; i += blockDim.x * gridDim.x) {
int const * lindices = indices + offsets[i];
float const * lweights = weights + offsets[i];
float * dstVert = dst + i*dstStride;
clear(dstVert, length);
for (int j=0; j<sizes[i]; ++j) {
float const * srcVert = cvs + lindices[j]*srcStride;
addWithWeight(dstVert, srcVert, lweights[j], length);
}
}
}
// -----------------------------------------------------------------------------
#define USE_NVIDIA_OPTIMIZATION
#ifdef USE_NVIDIA_OPTIMIZATION
template< int NUM_ELEMENTS, int NUM_THREADS_PER_BLOCK >
__global__ void computeStencilsNv(float const *__restrict cvs,
float * vbuffer,
int const *__restrict sizes,
int const *__restrict offsets,
int const *__restrict indices,
float const *__restrict weights,
int start,
int end)
{
// Shared memory to stage indices/weights.
__shared__ int smem_indices_buffer[NUM_THREADS_PER_BLOCK];
__shared__ float smem_weights_buffer[NUM_THREADS_PER_BLOCK];
// The size of a single warp.
const int WARP_SIZE = 32;
// The number of warps per block.
const int NUM_WARPS_PER_BLOCK = NUM_THREADS_PER_BLOCK / WARP_SIZE;
// The number of outputs computed by a single warp.
const int NUM_OUTPUTS_PER_WARP = WARP_SIZE / NUM_ELEMENTS;
// The number of outputs computed by a block of threads.
const int NUM_OUTPUTS_PER_BLOCK = NUM_OUTPUTS_PER_WARP*NUM_WARPS_PER_BLOCK;
// The number of active threads in a warp.
const int NUM_ACTIVE_THREADS_PER_WARP = NUM_OUTPUTS_PER_WARP * NUM_ELEMENTS;
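  // Worked example (illustrative): with NUM_ELEMENTS=3 and NUM_THREADS_PER_BLOCK=256,
  // each warp computes 32/3 = 10 outputs using 30 of its 32 lanes (2 lanes stay idle),
  // so a block of 8 warps produces 80 outputs per grid-stride iteration.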
// The number of the warp inside the block.
const int warpId = threadIdx.x / WARP_SIZE;
const int laneId = threadIdx.x % WARP_SIZE;
// We use NUM_ELEMENTS threads per output. Find which output/element a thread works on.
int outputIdx = warpId*NUM_OUTPUTS_PER_WARP + laneId/NUM_ELEMENTS, elementIdx = laneId%NUM_ELEMENTS;
// Each output corresponds to a section of shared memory.
volatile int *smem_indices = &smem_indices_buffer[warpId*WARP_SIZE + (laneId/NUM_ELEMENTS)*NUM_ELEMENTS];
volatile float *smem_weights = &smem_weights_buffer[warpId*WARP_SIZE + (laneId/NUM_ELEMENTS)*NUM_ELEMENTS];
// Disable threads that have nothing to do inside the warp.
int i = end;
if( laneId < NUM_ACTIVE_THREADS_PER_WARP )
i = start + blockIdx.x*NUM_OUTPUTS_PER_BLOCK + outputIdx;
// Iterate over the vertices.
for( ; i < end ; i += gridDim.x*NUM_OUTPUTS_PER_BLOCK )
{
// Each thread computes an element of the final vertex.
float x = 0.f;
// Load the offset and the size for each vertex. We have NUM_THREADS_PER_VERTEX threads loading the same value.
const int offset_i = offsets[i], size_i = sizes[i];
// Iterate over the stencil.
for( int j = offset_i, j_end = offset_i+size_i ; j < j_end ; )
{
int j_it = j + elementIdx;
// Load some indices and some weights. The transaction is coalesced.
smem_indices[elementIdx] = j_it < j_end ? indices[j_it] : 0;
smem_weights[elementIdx] = j_it < j_end ? weights[j_it] : 0.f;
// Thread now collaborates to load the vertices.
#pragma unroll
for( int k = 0 ; k < NUM_ELEMENTS ; ++k, ++j )
if( j < j_end )
x += smem_weights[k] * cvs[smem_indices[k]*NUM_ELEMENTS + elementIdx];
}
// Store the vertex.
vbuffer[NUM_ELEMENTS*i + elementIdx] = x;
}
}
template< int NUM_THREADS_PER_BLOCK >
__global__ void computeStencilsNv_v4(float const *__restrict cvs,
float * vbuffer,
int const *__restrict sizes,
int const *__restrict offsets,
int const *__restrict indices,
float const *__restrict weights,
int start,
int end)
{
// Iterate over the vertices.
for( int i = start + blockIdx.x*NUM_THREADS_PER_BLOCK + threadIdx.x ; i < end ; i += gridDim.x*NUM_THREADS_PER_BLOCK )
{
// Each thread computes an element of the final vertex.
float4 x = make_float4(0.f, 0.f, 0.f, 0.f);
// Iterate over the stencil.
for( int j = offsets[i], j_end = offsets[i]+sizes[i] ; j < j_end ; ++j )
{
float w = weights[j];
float4 tmp = reinterpret_cast<const float4 *>(cvs)[indices[j]];
x.x += w*tmp.x;
x.y += w*tmp.y;
x.z += w*tmp.z;
x.w += w*tmp.w;
}
// Store the vertex.
reinterpret_cast<float4*>(vbuffer)[i] = x;
}
}
#endif // USE_NVIDIA_OPTIMIZATION
// -----------------------------------------------------------------------------
__global__ void
computePatches(const float *src, float *dst,
float *dstDu, float *dstDv,
float *dstDuu, float *dstDuv, float *dstDvv,
int length, int srcStride, int dstStride,
int dstDuStride, int dstDvStride,
int dstDuuStride, int dstDuvStride, int dstDvvStride,
int numPatchCoords, const OsdPatchCoord *patchCoords,
const OsdPatchArray *patchArrayBuffer,
const int *patchIndexBuffer,
const OsdPatchParam *patchParamBuffer) {
int first = threadIdx.x + blockIdx.x * blockDim.x;
// PERFORMANCE: not yet optimized
for (int i = first; i < numPatchCoords; i += blockDim.x * gridDim.x) {
OsdPatchCoord const &coord = patchCoords[i];
int arrayIndex = coord.arrayIndex;
int patchIndex = coord.patchIndex;
OsdPatchArray const &array = patchArrayBuffer[arrayIndex];
OsdPatchParam const ¶m = patchParamBuffer[patchIndex];
int patchType = OsdPatchParamIsRegular(param)
? array.regDesc : array.desc;
float wP[20], wDu[20], wDv[20], wDuu[20], wDuv[20], wDvv[20];
int nPoints = OsdEvaluatePatchBasis(patchType, param,
coord.s, coord.t, wP, wDu, wDv, wDuu, wDuv, wDvv);
int indexBase = array.indexBase + array.stride *
(patchIndex - array.primitiveIdBase);
const int *cvs = patchIndexBuffer + indexBase;
float * dstVert = dst + i * dstStride;
clear(dstVert, length);
for (int j = 0; j < nPoints; ++j) {
const float * srcVert = src + cvs[j] * srcStride;
addWithWeight(dstVert, srcVert, wP[j], length);
}
if (dstDu) {
float *d = dstDu + i * dstDuStride;
clear(d, length);
for (int j = 0; j < nPoints; ++j) {
const float * srcVert = src + cvs[j] * srcStride;
addWithWeight(d, srcVert, wDu[j], length);
}
}
if (dstDv) {
float *d = dstDv + i * dstDvStride;
clear(d, length);
for (int j = 0; j < nPoints; ++j) {
const float * srcVert = src + cvs[j] * srcStride;
addWithWeight(d, srcVert, wDv[j], length);
}
}
if (dstDuu) {
float *d = dstDuu + i * dstDuuStride;
clear(d, length);
for (int j = 0; j < nPoints; ++j) {
const float * srcVert = src + cvs[j] * srcStride;
addWithWeight(d, srcVert, wDuu[j], length);
}
}
if (dstDuv) {
float *d = dstDuv + i * dstDuvStride;
clear(d, length);
for (int j = 0; j < nPoints; ++j) {
const float * srcVert = src + cvs[j] * srcStride;
addWithWeight(d, srcVert, wDuv[j], length);
}
}
if (dstDvv) {
float *d = dstDvv + i * dstDvvStride;
clear(d, length);
for (int j = 0; j < nPoints; ++j) {
const float * srcVert = src + cvs[j] * srcStride;
addWithWeight(d, srcVert, wDvv[j], length);
}
}
}
}
// -----------------------------------------------------------------------------
#include "../version.h"
#define OPT_KERNEL(NUM_ELEMENTS, KERNEL, X, Y, ARG) \
if (length==NUM_ELEMENTS && srcStride==length && dstStride==length) { \
KERNEL<NUM_ELEMENTS><<<X,Y>>>ARG; \
return; \
}
#ifdef USE_NVIDIA_OPTIMIZATION
#define OPT_KERNEL_NVIDIA(NUM_ELEMENTS, KERNEL, X, Y, ARG) \
if (length==NUM_ELEMENTS && srcStride==length && dstStride==length) { \
int gridDim = min(X, (end-start+Y-1)/Y); \
KERNEL<NUM_ELEMENTS, Y><<<gridDim, Y>>>ARG; \
return; \
}
#endif
extern "C" {
void CudaEvalStencils(
const float *src, float *dst,
int length, int srcStride, int dstStride,
const int * sizes, const int * offsets, const int * indices,
const float * weights,
int start, int end) {
if (length == 0 || srcStride == 0 || dstStride == 0 || (end <= start)) {
return;
}
#ifdef USE_NVIDIA_OPTIMIZATION
OPT_KERNEL_NVIDIA(3, computeStencilsNv, 2048, 256,
(src, dst, sizes, offsets, indices, weights, start, end));
//OPT_KERNEL_NVIDIA(4, computeStencilsNv, 2048, 256,
// (cvs, dst, sizes, offsets, indices, weights, start, end));
if (length == 4 && srcStride == length && dstStride == length) {
int gridDim = min(2048, (end-start+256-1)/256);
computeStencilsNv_v4<256><<<gridDim, 256>>>(
src, dst, sizes, offsets, indices, weights, start, end);
return;
}
#else
OPT_KERNEL(3, computeStencils, 512, 32,
(src, dst, sizes, offsets, indices, weights, start, end));
OPT_KERNEL(4, computeStencils, 512, 32,
(src, dst, sizes, offsets, indices, weights, start, end));
#endif
// generic case (slow)
computeStencils <<<512, 32>>>(
src, dst, length, srcStride, dstStride,
sizes, offsets, indices, weights, start, end);
}
// -----------------------------------------------------------------------------
void CudaEvalPatches(
const float *src, float *dst,
int length, int srcStride, int dstStride,
int numPatchCoords, const OsdPatchCoord *patchCoords,
const OsdPatchArray *patchArrayBuffer,
const int *patchIndexBuffer,
const OsdPatchParam *patchParamBuffer) {
// PERFORMANCE: not optimized at all
computePatches <<<512, 32>>>(
src, dst, NULL, NULL, NULL, NULL, NULL,
length, srcStride, dstStride, 0, 0, 0, 0, 0,
numPatchCoords, patchCoords,
patchArrayBuffer, patchIndexBuffer, patchParamBuffer);
}
void CudaEvalPatchesWithDerivatives(
const float *src, float *dst,
float *dstDu, float *dstDv,
float *dstDuu, float *dstDuv, float *dstDvv,
int length, int srcStride, int dstStride,
int dstDuStride, int dstDvStride,
int dstDuuStride, int dstDuvStride, int dstDvvStride,
int numPatchCoords, const OsdPatchCoord *patchCoords,
const OsdPatchArray *patchArrayBuffer,
const int *patchIndexBuffer,
const OsdPatchParam *patchParamBuffer) {
// PERFORMANCE: not optimized at all
computePatches <<<512, 32>>>(
src, dst, dstDu, dstDv, dstDuu, dstDuv, dstDvv,
length, srcStride, dstStride,
dstDuStride, dstDvStride, dstDuuStride, dstDuvStride, dstDvvStride,
numPatchCoords, patchCoords,
patchArrayBuffer, patchIndexBuffer, patchParamBuffer);
}
} /* extern "C" */
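// -----------------------------------------------------------------------------
// Illustrative usage sketch (hypothetical, not part of the original OpenSubdiv
// sources). The stencil tables consumed above use a CSR-like layout: output
// vertex i is a weighted sum of sizes[i] control points whose indices and
// weights live at positions offsets[i] .. offsets[i]+sizes[i]-1 of indices[]
// and weights[]. With 3-float vertices, a host-side call might look like this:
static void ExampleCudaEvalStencilsUsage(const float *d_srcPoints,  // numSrcPoints * 3 floats (device)
                                         float *d_dstPoints,        // numStencils * 3 floats (device)
                                         const int *d_sizes,
                                         const int *d_offsets,
                                         const int *d_indices,
                                         const float *d_weights,
                                         int numStencils) {
    // length == srcStride == dstStride == 3 selects the optimized 3-element path.
    CudaEvalStencils(d_srcPoints, d_dstPoints,
                     /*length=*/3, /*srcStride=*/3, /*dstStride=*/3,
                     d_sizes, d_offsets, d_indices, d_weights,
                     /*start=*/0, /*end=*/numStencils);
}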
// For this example, there are quite a few template parameters that are used to generate the actual code.
// In order to simplify passing many parameters, we use the same approach as the CGBN library, which is to
// create a container class with static constants and then pass the class.
// The CGBN context uses the following four parameters:
//   TPB           - threads per block (zero means to use the blockDim.x)
//   MAX_ROTATION  - must be a small power of 2, empirically, 4 works well
//   SHM_LIMIT     - number of bytes of dynamic shared memory available to the kernel
//   CONSTANT_TIME - require constant time algorithms (currently, constant time algorithms are not available)
// Locally it will also be helpful to have several parameters:
// TPI - threads per instance
// BITS - number of bits per instance
// WINDOW_BITS - number of bits to use for the windowed exponentiation
template<uint32_t tpi, uint32_t bits, uint32_t window_bits>
class powm_params_t {
public:
// parameters used by the CGBN context
static const uint32_t TPB=0; // get TPB from blockDim.x
static const uint32_t MAX_ROTATION=4; // good default value
static const uint32_t SHM_LIMIT=0; // no shared mem available
static const bool CONSTANT_TIME=false; // constant time implementations aren't available yet
// parameters used locally in the application
static const uint32_t TPI=tpi; // threads per instance
static const uint32_t BITS=bits; // instance size
static const uint32_t WINDOW_BITS=window_bits; // window size
};
template<class params>
class powm_odd_t {
public:
static const uint32_t window_bits=params::WINDOW_BITS; // used a lot, give it an instance variable
// define the instance structure
typedef struct {
cgbn_mem_t<params::BITS> x;
cgbn_mem_t<params::BITS> power;
cgbn_mem_t<params::BITS> modulus;
cgbn_mem_t<params::BITS> result;
} instance_t;
typedef cgbn_context_t<params::TPI, params> context_t;
typedef cgbn_env_t<context_t, params::BITS> env_t;
typedef typename env_t::cgbn_t bn_t;
typedef typename env_t::cgbn_local_t bn_local_t;
context_t _context;
env_t _env;
int32_t _instance;
__device__ __forceinline__ powm_odd_t(cgbn_monitor_t monitor, cgbn_error_report_t *report, int32_t instance) : _context(monitor, report, (uint32_t)instance), _env(_context), _instance(instance) {
}
__device__ __forceinline__ void fixed_window_powm_odd(bn_t &result, const bn_t &x, const bn_t &power, const bn_t &modulus) {
bn_t t;
bn_local_t window[1<<window_bits];
int32_t index, position, offset;
uint32_t np0;
    // compute x^power mod modulus, using the fixed window algorithm
// requires: x<modulus, modulus is odd
// compute x^0 (in Montgomery space, this is just 2^BITS - modulus)
cgbn_negate(_env, t, modulus);
cgbn_store(_env, window+0, t);
// convert x into Montgomery space, store into window table
np0=cgbn_bn2mont(_env, result, x, modulus);
cgbn_store(_env, window+1, result);
cgbn_set(_env, t, result);
// compute x^2, x^3, ... x^(2^window_bits-1), store into window table
#pragma nounroll
for(index=2;index<(1<<window_bits);index++) {
cgbn_mont_mul(_env, result, result, t, modulus, np0);
cgbn_store(_env, window+index, result);
}
// find leading high bit
position=params::BITS - cgbn_clz(_env, power);
// break the exponent into chunks, each window_bits in length
// load the most significant non-zero exponent chunk
offset=position % window_bits;
if(offset==0)
position=position-window_bits;
else
position=position-offset;
index=cgbn_extract_bits_ui32(_env, power, position, window_bits);
cgbn_load(_env, result, window+index);
// process the remaining exponent chunks
while(position>0) {
// square the result window_bits times
#pragma nounroll
for(int sqr_count=0;sqr_count<window_bits;sqr_count++)
cgbn_mont_sqr(_env, result, result, modulus, np0);
// multiply by next exponent chunk
position=position-window_bits;
index=cgbn_extract_bits_ui32(_env, power, position, window_bits);
cgbn_load(_env, t, window+index);
cgbn_mont_mul(_env, result, result, t, modulus, np0);
}
// we've processed the exponent now, convert back to normal space
cgbn_mont2bn(_env, result, result, modulus, np0);
}
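  // Illustrative reference (hypothetical, host-only, not part of the CGBN sample):
  // the same fixed-window idea on plain 32-bit integers, without the Montgomery
  // arithmetic used above. It precomputes x^0 .. x^(2^w - 1) mod m, then walks the
  // exponent w bits at a time from the most significant end, squaring w times
  // between windows and multiplying in the table entry selected by each window.
  __host__ static uint32_t fixed_window_powm_reference(uint32_t x, uint32_t power, uint32_t modulus) {
    const uint32_t w=4;                              // small fixed window for illustration
    uint64_t table[1u<<w];
    table[0]=1%modulus;
    for(uint32_t i=1;i<(1u<<w);i++)
      table[i]=(table[i-1]*x)%modulus;               // table[i] = x^i mod m
    uint64_t result=1%modulus;
    for(int position=32-w;position>=0;position-=w) {
      for(uint32_t s=0;s<w;s++)
        result=(result*result)%modulus;              // square w times
      uint32_t index=(power>>position)&((1u<<w)-1);  // next w-bit chunk of the exponent
      result=(result*table[index])%modulus;          // multiply by x^chunk
    }
    return (uint32_t)result;
  }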
__device__ __forceinline__ void sliding_window_powm_odd(bn_t &result, const bn_t &x, const bn_t &power, const bn_t &modulus) {
bn_t t, starts;
int32_t index, position, leading;
uint32_t mont_inv;
    bn_local_t odd_powers[1<<(window_bits-1)];
    // compute x^power mod modulus, using Constant Length Non-Zero windows (CLNZ).
// requires: x<modulus, modulus is odd
// find the leading one in the power
leading=params::BITS-1-cgbn_clz(_env, power);
if(leading>=0) {
// convert x into Montgomery space, store in the odd powers table
mont_inv=cgbn_bn2mont(_env, result, x, modulus);
// compute t=x^2 mod modulus
cgbn_mont_sqr(_env, t, result, modulus, mont_inv);
// compute odd powers window table: x^1, x^3, x^5, ...
cgbn_store(_env, odd_powers, result);
#pragma nounroll
      for(index=1;index<(1<<(window_bits-1));index++) {
cgbn_mont_mul(_env, result, result, t, modulus, mont_inv);
cgbn_store(_env, odd_powers+index, result);
}
// starts contains an array of bits indicating the start of a window
cgbn_set_ui32(_env, starts, 0);
// organize p as a sequence of odd window indexes
position=0;
while(true) {
if(cgbn_extract_bits_ui32(_env, power, position, 1)==0)
position++;
else {
cgbn_insert_bits_ui32(_env, starts, starts, position, 1, 1);
if(position+window_bits>leading)
break;
position=position+window_bits;
}
}
// load first window. Note, since the window index must be odd, we have to
// divide it by two before indexing the window table. Instead, we just don't
// load the index LSB from power
index=cgbn_extract_bits_ui32(_env, power, position+1, window_bits-1);
cgbn_load(_env, result, odd_powers+index);
position--;
// Process remaining windows
while(position>=0) {
cgbn_mont_sqr(_env, result, result, modulus, mont_inv);
if(cgbn_extract_bits_ui32(_env, starts, position, 1)==1) {
// found a window, load the index
index=cgbn_extract_bits_ui32(_env, power, position+1, window_bits-1);
cgbn_load(_env, t, odd_powers+index);
cgbn_mont_mul(_env, result, result, t, modulus, mont_inv);
}
position--;
}
// convert result from Montgomery space
cgbn_mont2bn(_env, result, result, modulus, mont_inv);
}
else {
// p=0, thus x^p mod modulus=1
cgbn_set_ui32(_env, result, 1);
}
}
__host__ static instance_t *generate_instances(uint32_t count) {
instance_t *instances=(instance_t *)malloc(sizeof(instance_t)*count);
int index;
for(index=0;index<count;index++) {
random_words(instances[index].x._limbs, params::BITS/32);
random_words(instances[index].power._limbs, params::BITS/32);
random_words(instances[index].modulus._limbs, params::BITS/32);
// ensure modulus is odd
instances[index].modulus._limbs[0] |= 1;
      // ensure modulus is greater than x (swap them if it is not)
if(compare_words(instances[index].x._limbs, instances[index].modulus._limbs, params::BITS/32)>0) {
swap_words(instances[index].x._limbs, instances[index].modulus._limbs, params::BITS/32);
// modulus might now be even, ensure it's odd
instances[index].modulus._limbs[0] |= 1;
}
else if(compare_words(instances[index].x._limbs, instances[index].modulus._limbs, params::BITS/32)==0) {
// since modulus is odd and modulus = x, we can just subtract 1 from x
instances[index].x._limbs[0] -= 1;
}
}
return instances;
}
__host__ static void verify_results(instance_t *instances, uint32_t count) {
mpz_t x, p, m, computed, correct;
mpz_init(x);
mpz_init(p);
mpz_init(m);
mpz_init(computed);
mpz_init(correct);
for(int index=0;index<count;index++) {
to_mpz(x, instances[index].x._limbs, params::BITS/32);
to_mpz(p, instances[index].power._limbs, params::BITS/32);
to_mpz(m, instances[index].modulus._limbs, params::BITS/32);
to_mpz(computed, instances[index].result._limbs, params::BITS/32);
mpz_powm(correct, x, p, m);
if(mpz_cmp(correct, computed)!=0) {
printf("gpu inverse kernel failed on instance %d\n", index);
return;
}
}
mpz_clear(x);
mpz_clear(p);
mpz_clear(m);
mpz_clear(computed);
mpz_clear(correct);
printf("All results match\n");
}
};
// kernel implementation using cgbn
//
// Unfortunately, the kernel must be separate from the powm_odd_t class
template<class params>
__global__ void kernel_powm_odd(cgbn_error_report_t *report, typename powm_odd_t<params>::instance_t *instances, uint32_t count) {
int32_t instance;
// decode an instance number from the blockIdx and threadIdx
instance=(blockIdx.x*blockDim.x + threadIdx.x)/params::TPI;
if(instance>=count)
return;
powm_odd_t<params> po(cgbn_report_monitor, report, instance);
typename powm_odd_t<params>::bn_t r, x, p, m;
// the loads and stores can go in the class, but it seems more natural to have them
// here and to pass in and out bignums
cgbn_load(po._env, x, &(instances[instance].x));
cgbn_load(po._env, p, &(instances[instance].power));
cgbn_load(po._env, m, &(instances[instance].modulus));
// this can be either fixed_window_powm_odd or sliding_window_powm_odd.
// when TPI<32, fixed window runs much faster because it is less divergent, so we use it here
po.fixed_window_powm_odd(r, x, p, m);
// OR
// po.sliding_window_powm_odd(r, x, p, m);
cgbn_store(po._env, &(instances[instance].result), r);
}
template<class params>
void run_test(uint32_t instance_count) {
typedef typename powm_odd_t<params>::instance_t instance_t;
instance_t *instances, *gpuInstances;
cgbn_error_report_t *report;
int32_t TPB=(params::TPB==0) ? 128 : params::TPB; // default threads per block to 128
int32_t TPI=params::TPI, IPB=TPB/TPI; // IPB is instances per block
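  // Worked example (illustrative): main() below instantiates powm_params_t<8, 1024, 5>,
  // so TPI=8 and TPB defaults to 128, giving IPB=16 instances per block; running
  // 10000 instances therefore launches (10000+15)/16 = 625 blocks of 128 threads.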
printf("Genereating instances ...\n");
instances=powm_odd_t<params>::generate_instances(instance_count);
printf("Copying instances to the GPU ...\n");
CUDA_CHECK(cudaSetDevice(0));
CUDA_CHECK(cudaMalloc((void **)&gpuInstances, sizeof(instance_t)*instance_count));
CUDA_CHECK(cudaMemcpy(gpuInstances, instances, sizeof(instance_t)*instance_count, cudaMemcpyHostToDevice));
// create a cgbn_error_report for CGBN to report back errors
CUDA_CHECK(cgbn_error_report_alloc(&report));
printf("Running GPU kernel ...\n");
// launch kernel with blocks=ceil(instance_count/IPB) and threads=TPB
kernel_powm_odd<params><<<(instance_count+IPB-1)/IPB, TPB>>>(report, gpuInstances, instance_count);
// error report uses managed memory, so we sync the device (or stream) and check for cgbn errors
CUDA_CHECK(cudaDeviceSynchronize());
CGBN_CHECK(report);
// copy the instances back from gpuMemory
printf("Copying results back to CPU ...\n");
CUDA_CHECK(cudaMemcpy(instances, gpuInstances, sizeof(instance_t)*instance_count, cudaMemcpyDeviceToHost));
printf("Verifying the results ...\n");
powm_odd_t<params>::verify_results(instances, instance_count);
// clean up
free(instances);
CUDA_CHECK(cudaFree(gpuInstances));
CUDA_CHECK(cgbn_error_report_free(report));
}
int main() {
typedef powm_params_t<8, 1024, 5> params;
run_test<params>(10000);
}
__device__ void kernel_ecc( const fp timeinst,
const fp* d_initvalu,
fp *d_finavalu,
const int valu_offset,
const fp* d_params){
//=====================================================================
// VARIABLES
//=====================================================================
// input parameters
fp cycleLength;
	// variable references (get variables from memory and save locally)
int offset_1;
int offset_2;
int offset_3;
int offset_4;
int offset_5;
int offset_6;
int offset_7;
int offset_8;
int offset_9;
int offset_10;
int offset_11;
int offset_12;
int offset_13;
int offset_14;
int offset_15;
int offset_16;
int offset_17;
int offset_18;
int offset_19;
int offset_20;
int offset_21;
int offset_22;
int offset_23;
int offset_24;
int offset_25;
int offset_26;
int offset_27;
int offset_28;
int offset_29;
int offset_30;
int offset_31;
int offset_32;
int offset_33;
int offset_34;
int offset_35;
int offset_36;
int offset_37;
int offset_38;
int offset_39;
int offset_40;
int offset_41;
int offset_42;
int offset_43;
int offset_44;
int offset_45;
int offset_46;
// stored input array
fp d_initvalu_1;
fp d_initvalu_2;
fp d_initvalu_3;
fp d_initvalu_4;
fp d_initvalu_5;
fp d_initvalu_6;
fp d_initvalu_7;
fp d_initvalu_8;
fp d_initvalu_9;
fp d_initvalu_10;
fp d_initvalu_11;
fp d_initvalu_12;
fp d_initvalu_13;
fp d_initvalu_14;
fp d_initvalu_15;
fp d_initvalu_16;
fp d_initvalu_17;
fp d_initvalu_18;
fp d_initvalu_19;
fp d_initvalu_20;
fp d_initvalu_21;
// fp d_initvalu_22;
fp d_initvalu_23;
fp d_initvalu_24;
fp d_initvalu_25;
fp d_initvalu_26;
fp d_initvalu_27;
fp d_initvalu_28;
fp d_initvalu_29;
fp d_initvalu_30;
fp d_initvalu_31;
fp d_initvalu_32;
fp d_initvalu_33;
fp d_initvalu_34;
fp d_initvalu_35;
fp d_initvalu_36;
fp d_initvalu_37;
fp d_initvalu_38;
fp d_initvalu_39;
fp d_initvalu_40;
// fp d_initvalu_41;
// fp d_initvalu_42;
// fp d_initvalu_43;
// fp d_initvalu_44;
// fp d_initvalu_45;
// fp d_initvalu_46;
// matlab constants undefined in c
fp pi;
// Constants
fp R; // [J/kmol*K]
fp Frdy; // [C/mol]
fp Temp; // [K] 310
fp FoRT; //
fp Cmem; // [F] membrane capacitance
fp Qpow;
// Cell geometry
fp cellLength; // cell length [um]
fp cellRadius; // cell radius [um]
// fp junctionLength; // junc length [um]
// fp junctionRadius; // junc radius [um]
// fp distSLcyto; // dist. SL to cytosol [um]
// fp distJuncSL; // dist. junc to SL [um]
// fp DcaJuncSL; // Dca junc to SL [cm^2/sec]
// fp DcaSLcyto; // Dca SL to cyto [cm^2/sec]
// fp DnaJuncSL; // Dna junc to SL [cm^2/sec]
// fp DnaSLcyto; // Dna SL to cyto [cm^2/sec]
fp Vcell; // [L]
fp Vmyo;
fp Vsr;
fp Vsl;
fp Vjunc;
// fp SAjunc; // [um^2]
// fp SAsl; // [um^2]
fp J_ca_juncsl; // [L/msec]
fp J_ca_slmyo; // [L/msec]
fp J_na_juncsl; // [L/msec]
fp J_na_slmyo; // [L/msec]
// Fractional currents in compartments
fp Fjunc;
fp Fsl;
fp Fjunc_CaL;
fp Fsl_CaL;
// Fixed ion concentrations
fp Cli; // Intracellular Cl [mM]
fp Clo; // Extracellular Cl [mM]
fp Ko; // Extracellular K [mM]
fp Nao; // Extracellular Na [mM]
fp Cao; // Extracellular Ca [mM]
fp Mgi; // Intracellular Mg [mM]
// Nernst Potentials
fp ena_junc; // [mV]
fp ena_sl; // [mV]
fp ek; // [mV]
fp eca_junc; // [mV]
fp eca_sl; // [mV]
fp ecl; // [mV]
// Na transport parameters
fp GNa; // [mS/uF]
fp GNaB; // [mS/uF]
fp IbarNaK; // [uA/uF]
fp KmNaip; // [mM]
fp KmKo; // [mM]
// fp Q10NaK;
// fp Q10KmNai;
// K current parameters
fp pNaK;
fp GtoSlow; // [mS/uF]
fp GtoFast; // [mS/uF]
fp gkp;
// Cl current parameters
fp GClCa; // [mS/uF]
fp GClB; // [mS/uF]
	fp KdClCa; // [mM]
// I_Ca parameters
fp pNa; // [cm/sec]
fp pCa; // [cm/sec]
fp pK; // [cm/sec]
// fp KmCa; // [mM]
fp Q10CaL;
// Ca transport parameters
fp IbarNCX; // [uA/uF]
fp KmCai; // [mM]
fp KmCao; // [mM]
fp KmNai; // [mM]
fp KmNao; // [mM]
fp ksat; // [none]
fp nu; // [none]
fp Kdact; // [mM]
fp Q10NCX; // [none]
fp IbarSLCaP; // [uA/uF]
fp KmPCa; // [mM]
fp GCaB; // [uA/uF]
	fp Q10SLCaP; // [none]
// SR flux parameters
fp Q10SRCaP; // [none]
fp Vmax_SRCaP; // [mM/msec] (mmol/L cytosol/msec)
fp Kmf; // [mM]
fp Kmr; // [mM]L cytosol
fp hillSRCaP; // [mM]
fp ks; // [1/ms]
fp koCa; // [mM^-2 1/ms]
fp kom; // [1/ms]
fp kiCa; // [1/mM/ms]
fp kim; // [1/ms]
fp ec50SR; // [mM]
// Buffering parameters
fp Bmax_Naj; // [mM]
fp Bmax_Nasl; // [mM]
fp koff_na; // [1/ms]
fp kon_na; // [1/mM/ms]
fp Bmax_TnClow; // [mM], TnC low affinity
fp koff_tncl; // [1/ms]
fp kon_tncl; // [1/mM/ms]
fp Bmax_TnChigh; // [mM], TnC high affinity
fp koff_tnchca; // [1/ms]
fp kon_tnchca; // [1/mM/ms]
fp koff_tnchmg; // [1/ms]
fp kon_tnchmg; // [1/mM/ms]
// fp Bmax_CaM; // [mM], CaM buffering
// fp koff_cam; // [1/ms]
// fp kon_cam; // [1/mM/ms]
fp Bmax_myosin; // [mM], Myosin buffering
fp koff_myoca; // [1/ms]
fp kon_myoca; // [1/mM/ms]
fp koff_myomg; // [1/ms]
fp kon_myomg; // [1/mM/ms]
fp Bmax_SR; // [mM]
fp koff_sr; // [1/ms]
fp kon_sr; // [1/mM/ms]
fp Bmax_SLlowsl; // [mM], SL buffering
fp Bmax_SLlowj; // [mM]
fp koff_sll; // [1/ms]
fp kon_sll; // [1/mM/ms]
fp Bmax_SLhighsl; // [mM]
fp Bmax_SLhighj; // [mM]
fp koff_slh; // [1/ms]
fp kon_slh; // [1/mM/ms]
fp Bmax_Csqn; // 140e-3*Vmyo/Vsr; [mM]
fp koff_csqn; // [1/ms]
fp kon_csqn; // [1/mM/ms]
// I_Na: Fast Na Current
fp am;
fp bm;
fp ah;
fp bh;
fp aj;
fp bj;
fp I_Na_junc;
fp I_Na_sl;
// fp I_Na;
// I_nabk: Na Background Current
fp I_nabk_junc;
fp I_nabk_sl;
// fp I_nabk;
// I_nak: Na/K Pump Current
fp sigma;
fp fnak;
fp I_nak_junc;
fp I_nak_sl;
fp I_nak;
// I_kr: Rapidly Activating K Current
fp gkr;
fp xrss;
fp tauxr;
fp rkr;
fp I_kr;
// I_ks: Slowly Activating K Current
fp pcaks_junc;
fp pcaks_sl;
fp gks_junc;
fp gks_sl;
fp eks;
fp xsss;
fp tauxs;
fp I_ks_junc;
fp I_ks_sl;
fp I_ks;
// I_kp: Plateau K current
fp kp_kp;
fp I_kp_junc;
fp I_kp_sl;
fp I_kp;
// I_to: Transient Outward K Current (slow and fast components)
fp xtoss;
fp ytoss;
fp rtoss;
fp tauxtos;
fp tauytos;
fp taurtos;
fp I_tos;
//
fp tauxtof;
fp tauytof;
fp I_tof;
fp I_to;
// I_ki: Time-Independent K Current
fp aki;
fp bki;
fp kiss;
fp I_ki;
// I_ClCa: Ca-activated Cl Current, I_Clbk: background Cl Current
fp I_ClCa_junc;
fp I_ClCa_sl;
fp I_ClCa;
fp I_Clbk;
// I_Ca: L-type Calcium Current
fp dss;
fp taud;
fp fss;
fp tauf;
//
fp ibarca_j;
fp ibarca_sl;
fp ibark;
fp ibarna_j;
fp ibarna_sl;
fp I_Ca_junc;
fp I_Ca_sl;
fp I_Ca;
fp I_CaK;
fp I_CaNa_junc;
fp I_CaNa_sl;
// fp I_CaNa;
// fp I_Catot;
// I_ncx: Na/Ca Exchanger flux
fp Ka_junc;
fp Ka_sl;
fp s1_junc;
fp s1_sl;
fp s2_junc;
fp s3_junc;
fp s2_sl;
fp s3_sl;
fp I_ncx_junc;
fp I_ncx_sl;
fp I_ncx;
// I_pca: Sarcolemmal Ca Pump Current
fp I_pca_junc;
fp I_pca_sl;
fp I_pca;
// I_cabk: Ca Background Current
fp I_cabk_junc;
fp I_cabk_sl;
fp I_cabk;
// SR fluxes: Calcium Release, SR Ca pump, SR Ca leak
fp MaxSR;
fp MinSR;
fp kCaSR;
fp koSRCa;
fp kiSRCa;
fp RI;
fp J_SRCarel; // [mM/ms]
fp J_serca;
fp J_SRleak; // [mM/ms]
// Cytosolic Ca Buffers
fp J_CaB_cytosol;
// Junctional and SL Ca Buffers
fp J_CaB_junction;
fp J_CaB_sl;
// SR Ca Concentrations
fp oneovervsr;
// Sodium Concentrations
fp I_Na_tot_junc; // [uA/uF]
fp I_Na_tot_sl; // [uA/uF]
fp oneovervsl;
// Potassium Concentration
fp I_K_tot;
// Calcium Concentrations
fp I_Ca_tot_junc; // [uA/uF]
fp I_Ca_tot_sl; // [uA/uF]
// fp junc_sl;
// fp sl_junc;
// fp sl_myo;
// fp myo_sl;
// Simulation type
int state; // 0-none; 1-pace; 2-vclamp
fp I_app;
fp V_hold;
fp V_test;
fp V_clamp;
fp R_clamp;
// Membrane Potential
fp I_Na_tot; // [uA/uF]
fp I_Cl_tot; // [uA/uF]
fp I_Ca_tot;
fp I_tot;
//=====================================================================
// EXECUTION
//=====================================================================
// input parameters
cycleLength = d_params[15];
// variable references
offset_1 = valu_offset;
offset_2 = valu_offset+1;
offset_3 = valu_offset+2;
offset_4 = valu_offset+3;
offset_5 = valu_offset+4;
offset_6 = valu_offset+5;
offset_7 = valu_offset+6;
offset_8 = valu_offset+7;
offset_9 = valu_offset+8;
offset_10 = valu_offset+9;
offset_11 = valu_offset+10;
offset_12 = valu_offset+11;
offset_13 = valu_offset+12;
offset_14 = valu_offset+13;
offset_15 = valu_offset+14;
offset_16 = valu_offset+15;
offset_17 = valu_offset+16;
offset_18 = valu_offset+17;
offset_19 = valu_offset+18;
offset_20 = valu_offset+19;
offset_21 = valu_offset+20;
offset_22 = valu_offset+21;
offset_23 = valu_offset+22;
offset_24 = valu_offset+23;
offset_25 = valu_offset+24;
offset_26 = valu_offset+25;
offset_27 = valu_offset+26;
offset_28 = valu_offset+27;
offset_29 = valu_offset+28;
offset_30 = valu_offset+29;
offset_31 = valu_offset+30;
offset_32 = valu_offset+31;
offset_33 = valu_offset+32;
offset_34 = valu_offset+33;
offset_35 = valu_offset+34;
offset_36 = valu_offset+35;
offset_37 = valu_offset+36;
offset_38 = valu_offset+37;
offset_39 = valu_offset+38;
offset_40 = valu_offset+39;
offset_41 = valu_offset+40;
offset_42 = valu_offset+41;
offset_43 = valu_offset+42;
offset_44 = valu_offset+43;
offset_45 = valu_offset+44;
offset_46 = valu_offset+45;
// stored input array
d_initvalu_1 = d_initvalu[offset_1];
d_initvalu_2 = d_initvalu[offset_2];
d_initvalu_3 = d_initvalu[offset_3];
d_initvalu_4 = d_initvalu[offset_4];
d_initvalu_5 = d_initvalu[offset_5];
d_initvalu_6 = d_initvalu[offset_6];
d_initvalu_7 = d_initvalu[offset_7];
d_initvalu_8 = d_initvalu[offset_8];
d_initvalu_9 = d_initvalu[offset_9];
d_initvalu_10 = d_initvalu[offset_10];
d_initvalu_11 = d_initvalu[offset_11];
d_initvalu_12 = d_initvalu[offset_12];
d_initvalu_13 = d_initvalu[offset_13];
d_initvalu_14 = d_initvalu[offset_14];
d_initvalu_15 = d_initvalu[offset_15];
d_initvalu_16 = d_initvalu[offset_16];
d_initvalu_17 = d_initvalu[offset_17];
d_initvalu_18 = d_initvalu[offset_18];
d_initvalu_19 = d_initvalu[offset_19];
d_initvalu_20 = d_initvalu[offset_20];
d_initvalu_21 = d_initvalu[offset_21];
// d_initvalu_22 = d_initvalu[offset_22];
d_initvalu_23 = d_initvalu[offset_23];
d_initvalu_24 = d_initvalu[offset_24];
d_initvalu_25 = d_initvalu[offset_25];
d_initvalu_26 = d_initvalu[offset_26];
d_initvalu_27 = d_initvalu[offset_27];
d_initvalu_28 = d_initvalu[offset_28];
d_initvalu_29 = d_initvalu[offset_29];
d_initvalu_30 = d_initvalu[offset_30];
d_initvalu_31 = d_initvalu[offset_31];
d_initvalu_32 = d_initvalu[offset_32];
d_initvalu_33 = d_initvalu[offset_33];
d_initvalu_34 = d_initvalu[offset_34];
d_initvalu_35 = d_initvalu[offset_35];
d_initvalu_36 = d_initvalu[offset_36];
d_initvalu_37 = d_initvalu[offset_37];
d_initvalu_38 = d_initvalu[offset_38];
d_initvalu_39 = d_initvalu[offset_39];
d_initvalu_40 = d_initvalu[offset_40];
// d_initvalu_41 = d_initvalu[offset_41];
// d_initvalu_42 = d_initvalu[offset_42];
// d_initvalu_43 = d_initvalu[offset_43];
// d_initvalu_44 = d_initvalu[offset_44];
// d_initvalu_45 = d_initvalu[offset_45];
// d_initvalu_46 = d_initvalu[offset_46];
// matlab constants undefined in c
pi = 3.1416;
// Constants
R = 8314; // [J/kmol*K]
Frdy = 96485; // [C/mol]
Temp = 310; // [K] 310
FoRT = Frdy/R/Temp; //
Cmem = 1.3810e-10; // [F] membrane capacitance
Qpow = (Temp-310)/10;
// Cell geometry
cellLength = 100; // cell length [um]
cellRadius = 10.25; // cell radius [um]
// junctionLength = 160e-3; // junc length [um]
// junctionRadius = 15e-3; // junc radius [um]
// distSLcyto = 0.45; // dist. SL to cytosol [um]
// distJuncSL = 0.5; // dist. junc to SL [um]
// DcaJuncSL = 1.64e-6; // Dca junc to SL [cm^2/sec]
// DcaSLcyto = 1.22e-6; // Dca SL to cyto [cm^2/sec]
// DnaJuncSL = 1.09e-5; // Dna junc to SL [cm^2/sec]
// DnaSLcyto = 1.79e-5; // Dna SL to cyto [cm^2/sec]
Vcell = pi*pow(cellRadius,2)*cellLength*1e-15; // [L]
Vmyo = 0.65*Vcell;
Vsr = 0.035*Vcell;
Vsl = 0.02*Vcell;
Vjunc = 0.0539*0.01*Vcell;
// SAjunc = 20150*pi*2*junctionLength*junctionRadius; // [um^2]
// SAsl = pi*2*cellRadius*cellLength; // [um^2]
J_ca_juncsl = 1/1.2134e12; // [L/msec]
J_ca_slmyo = 1/2.68510e11; // [L/msec]
J_na_juncsl = 1/(1.6382e12/3*100); // [L/msec]
J_na_slmyo = 1/(1.8308e10/3*100); // [L/msec]
// Fractional currents in compartments
Fjunc = 0.11;
Fsl = 1-Fjunc;
Fjunc_CaL = 0.9;
Fsl_CaL = 1-Fjunc_CaL;
// Fixed ion concentrations
Cli = 15; // Intracellular Cl [mM]
Clo = 150; // Extracellular Cl [mM]
Ko = 5.4; // Extracellular K [mM]
Nao = 140; // Extracellular Na [mM]
Cao = 1.8; // Extracellular Ca [mM]
Mgi = 1; // Intracellular Mg [mM]
// Nernst Potentials
ena_junc = (1/FoRT)*log(Nao/d_initvalu_32); // [mV]
ena_sl = (1/FoRT)*log(Nao/d_initvalu_33); // [mV]
ek = (1/FoRT)*log(Ko/d_initvalu_35); // [mV]
eca_junc = (1/FoRT/2)*log(Cao/d_initvalu_36); // [mV]
eca_sl = (1/FoRT/2)*log(Cao/d_initvalu_37); // [mV]
ecl = (1/FoRT)*log(Cli/Clo); // [mV]
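	// Note (illustrative): the expressions above are the Nernst equation
	// E_X = (R*T/(z*F)) * ln([X]_out/[X]_in), written with FoRT = Frdy/(R*Temp);
	// the factor 1/2 in eca_junc/eca_sl is the valence z = 2 of Ca2+, and ecl uses
	// the inverted ratio ln(Cli/Clo) because Cl- carries valence z = -1.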
// Na transport parameters
GNa = 16.0; // [mS/uF]
GNaB = 0.297e-3; // [mS/uF]
IbarNaK = 1.90719; // [uA/uF]
KmNaip = 11; // [mM]
KmKo = 1.5; // [mM]
// Q10NaK = 1.63;
// Q10KmNai = 1.39;
// K current parameters
pNaK = 0.01833;
GtoSlow = 0.06; // [mS/uF]
GtoFast = 0.02; // [mS/uF]
gkp = 0.001;
// Cl current parameters
GClCa = 0.109625; // [mS/uF]
GClB = 9e-3; // [mS/uF]
KdClCa = 100e-3; // [mM]
// I_Ca parameters
pNa = 1.5e-8; // [cm/sec]
pCa = 5.4e-4; // [cm/sec]
pK = 2.7e-7; // [cm/sec]
// KmCa = 0.6e-3; // [mM]
Q10CaL = 1.8;
// Ca transport parameters
IbarNCX = 9.0; // [uA/uF]
KmCai = 3.59e-3; // [mM]
KmCao = 1.3; // [mM]
KmNai = 12.29; // [mM]
KmNao = 87.5; // [mM]
ksat = 0.27; // [none]
nu = 0.35; // [none]
Kdact = 0.256e-3; // [mM]
Q10NCX = 1.57; // [none]
IbarSLCaP = 0.0673; // [uA/uF]
KmPCa = 0.5e-3; // [mM]
GCaB = 2.513e-4; // [uA/uF]
Q10SLCaP = 2.35; // [none]
// SR flux parameters
Q10SRCaP = 2.6; // [none]
Vmax_SRCaP = 2.86e-4; // [mM/msec] (mmol/L cytosol/msec)
Kmf = 0.246e-3; // [mM]
Kmr = 1.7; // [mM]L cytosol
hillSRCaP = 1.787; // [mM]
ks = 25; // [1/ms]
koCa = 10; // [mM^-2 1/ms]
kom = 0.06; // [1/ms]
kiCa = 0.5; // [1/mM/ms]
kim = 0.005; // [1/ms]
ec50SR = 0.45; // [mM]
// Buffering parameters
Bmax_Naj = 7.561; // [mM]
Bmax_Nasl = 1.65; // [mM]
koff_na = 1e-3; // [1/ms]
kon_na = 0.1e-3; // [1/mM/ms]
Bmax_TnClow = 70e-3; // [mM], TnC low affinity
koff_tncl = 19.6e-3; // [1/ms]
kon_tncl = 32.7; // [1/mM/ms]
Bmax_TnChigh = 140e-3; // [mM], TnC high affinity
koff_tnchca = 0.032e-3; // [1/ms]
kon_tnchca = 2.37; // [1/mM/ms]
koff_tnchmg = 3.33e-3; // [1/ms]
kon_tnchmg = 3e-3; // [1/mM/ms]
// Bmax_CaM = 24e-3; // [mM], CaM buffering
// koff_cam = 238e-3; // [1/ms]
// kon_cam = 34; // [1/mM/ms]
Bmax_myosin = 140e-3; // [mM], Myosin buffering
koff_myoca = 0.46e-3; // [1/ms]
kon_myoca = 13.8; // [1/mM/ms]
koff_myomg = 0.057e-3; // [1/ms]
kon_myomg = 0.0157; // [1/mM/ms]
Bmax_SR = 19*0.9e-3; // [mM]
koff_sr = 60e-3; // [1/ms]
kon_sr = 100; // [1/mM/ms]
Bmax_SLlowsl = 37.38e-3*Vmyo/Vsl; // [mM], SL buffering
Bmax_SLlowj = 4.62e-3*Vmyo/Vjunc*0.1; // [mM]
koff_sll = 1300e-3; // [1/ms]
kon_sll = 100; // [1/mM/ms]
Bmax_SLhighsl = 13.35e-3*Vmyo/Vsl; // [mM]
Bmax_SLhighj = 1.65e-3*Vmyo/Vjunc*0.1; // [mM]
koff_slh = 30e-3; // [1/ms]
kon_slh = 100; // [1/mM/ms]
Bmax_Csqn = 2.7; // 140e-3*Vmyo/Vsr; [mM]
koff_csqn = 65; // [1/ms]
kon_csqn = 100; // [1/mM/ms]
// I_Na: Fast Na Current
am = 0.32*(d_initvalu_39+47.13)/(1-exp(-0.1*(d_initvalu_39+47.13)));
bm = 0.08*exp(-d_initvalu_39/11);
if(d_initvalu_39 >= -40){
ah = 0; aj = 0;
bh = 1/(0.13*(1+exp(-(d_initvalu_39+10.66)/11.1)));
bj = 0.3*exp(-2.535e-7*d_initvalu_39)/(1+exp(-0.1*(d_initvalu_39+32)));
}
else{
ah = 0.135*exp((80+d_initvalu_39)/-6.8);
bh = 3.56*exp(0.079*d_initvalu_39)+3.1e5*exp(0.35*d_initvalu_39);
aj = (-127140*exp(0.2444*d_initvalu_39)-3.474e-5*exp(-0.04391*d_initvalu_39))*(d_initvalu_39+37.78)/(1+exp(0.311*(d_initvalu_39+79.23)));
bj = 0.1212*exp(-0.01052*d_initvalu_39)/(1+exp(-0.1378*(d_initvalu_39+40.14)));
}
d_finavalu[offset_1] = am*(1-d_initvalu_1)-bm*d_initvalu_1;
d_finavalu[offset_2] = ah*(1-d_initvalu_2)-bh*d_initvalu_2;
d_finavalu[offset_3] = aj*(1-d_initvalu_3)-bj*d_initvalu_3;
I_Na_junc = Fjunc*GNa*pow(d_initvalu_1,3)*d_initvalu_2*d_initvalu_3*(d_initvalu_39-ena_junc);
I_Na_sl = Fsl*GNa*pow(d_initvalu_1,3)*d_initvalu_2*d_initvalu_3*(d_initvalu_39-ena_sl);
// I_Na = I_Na_junc+I_Na_sl;
// I_nabk: Na Background Current
I_nabk_junc = Fjunc*GNaB*(d_initvalu_39-ena_junc);
I_nabk_sl = Fsl*GNaB*(d_initvalu_39-ena_sl);
// I_nabk = I_nabk_junc+I_nabk_sl;
// I_nak: Na/K Pump Current
sigma = (exp(Nao/67.3)-1)/7;
fnak = 1/(1+0.1245*exp(-0.1*d_initvalu_39*FoRT)+0.0365*sigma*exp(-d_initvalu_39*FoRT));
I_nak_junc = Fjunc*IbarNaK*fnak*Ko /(1+pow((KmNaip/d_initvalu_32),4)) /(Ko+KmKo);
I_nak_sl = Fsl*IbarNaK*fnak*Ko /(1+pow((KmNaip/d_initvalu_33),4)) /(Ko+KmKo);
I_nak = I_nak_junc+I_nak_sl;
// I_kr: Rapidly Activating K Current
gkr = 0.03*sqrt(Ko/5.4);
xrss = 1/(1+exp(-(d_initvalu_39+50)/7.5));
tauxr = 1/(0.00138*(d_initvalu_39+7)/(1-exp(-0.123*(d_initvalu_39+7)))+6.1e-4*(d_initvalu_39+10)/(exp(0.145*(d_initvalu_39+10))-1));
d_finavalu[offset_12] = (xrss-d_initvalu_12)/tauxr;
rkr = 1/(1+exp((d_initvalu_39+33)/22.4));
I_kr = gkr*d_initvalu_12*rkr*(d_initvalu_39-ek);
// I_ks: Slowly Activating K Current
pcaks_junc = -log10(d_initvalu_36)+3.0;
pcaks_sl = -log10(d_initvalu_37)+3.0;
gks_junc = 0.07*(0.057 +0.19/(1+ exp((-7.2+pcaks_junc)/0.6)));
gks_sl = 0.07*(0.057 +0.19/(1+ exp((-7.2+pcaks_sl)/0.6)));
eks = (1/FoRT)*log((Ko+pNaK*Nao)/(d_initvalu_35+pNaK*d_initvalu_34));
xsss = 1/(1+exp(-(d_initvalu_39-1.5)/16.7));
tauxs = 1/(7.19e-5*(d_initvalu_39+30)/(1-exp(-0.148*(d_initvalu_39+30)))+1.31e-4*(d_initvalu_39+30)/(exp(0.0687*(d_initvalu_39+30))-1));
d_finavalu[offset_13] = (xsss-d_initvalu_13)/tauxs;
I_ks_junc = Fjunc*gks_junc*pow(d_initvalu_12,2)*(d_initvalu_39-eks);
I_ks_sl = Fsl*gks_sl*pow(d_initvalu_13,2)*(d_initvalu_39-eks);
I_ks = I_ks_junc+I_ks_sl;
// I_kp: Plateau K current
kp_kp = 1/(1+exp(7.488-d_initvalu_39/5.98));
I_kp_junc = Fjunc*gkp*kp_kp*(d_initvalu_39-ek);
I_kp_sl = Fsl*gkp*kp_kp*(d_initvalu_39-ek);
I_kp = I_kp_junc+I_kp_sl;
// I_to: Transient Outward K Current (slow and fast components)
xtoss = 1/(1+exp(-(d_initvalu_39+3.0)/15));
ytoss = 1/(1+exp((d_initvalu_39+33.5)/10));
rtoss = 1/(1+exp((d_initvalu_39+33.5)/10));
tauxtos = 9/(1+exp((d_initvalu_39+3.0)/15))+0.5;
tauytos = 3e3/(1+exp((d_initvalu_39+60.0)/10))+30;
taurtos = 2800/(1+exp((d_initvalu_39+60.0)/10))+220;
d_finavalu[offset_8] = (xtoss-d_initvalu_8)/tauxtos;
d_finavalu[offset_9] = (ytoss-d_initvalu_9)/tauytos;
d_finavalu[offset_40]= (rtoss-d_initvalu_40)/taurtos;
I_tos = GtoSlow*d_initvalu_8*(d_initvalu_9+0.5*d_initvalu_40)*(d_initvalu_39-ek); // [uA/uF]
//
tauxtof = 3.5*exp(-d_initvalu_39*d_initvalu_39/30/30)+1.5;
tauytof = 20.0/(1+exp((d_initvalu_39+33.5)/10))+20.0;
d_finavalu[offset_10] = (xtoss-d_initvalu_10)/tauxtof;
d_finavalu[offset_11] = (ytoss-d_initvalu_11)/tauytof;
I_tof = GtoFast*d_initvalu_10*d_initvalu_11*(d_initvalu_39-ek);
I_to = I_tos + I_tof;
// I_ki: Time-Independent K Current
aki = 1.02/(1+exp(0.2385*(d_initvalu_39-ek-59.215)));
bki =(0.49124*exp(0.08032*(d_initvalu_39+5.476-ek)) + exp(0.06175*(d_initvalu_39-ek-594.31))) /(1 + exp(-0.5143*(d_initvalu_39-ek+4.753)));
kiss = aki/(aki+bki);
I_ki = 0.9*sqrt(Ko/5.4)*kiss*(d_initvalu_39-ek);
// I_ClCa: Ca-activated Cl Current, I_Clbk: background Cl Current
I_ClCa_junc = Fjunc*GClCa/(1+KdClCa/d_initvalu_36)*(d_initvalu_39-ecl);
I_ClCa_sl = Fsl*GClCa/(1+KdClCa/d_initvalu_37)*(d_initvalu_39-ecl);
I_ClCa = I_ClCa_junc+I_ClCa_sl;
I_Clbk = GClB*(d_initvalu_39-ecl);
// I_Ca: L-type Calcium Current
dss = 1/(1+exp(-(d_initvalu_39+14.5)/6.0));
taud = dss*(1-exp(-(d_initvalu_39+14.5)/6.0))/(0.035*(d_initvalu_39+14.5));
fss = 1/(1+exp((d_initvalu_39+35.06)/3.6))+0.6/(1+exp((50-d_initvalu_39)/20));
tauf = 1/(0.0197*exp(-pow(0.0337*(d_initvalu_39+14.5),2))+0.02);
d_finavalu[offset_4] = (dss-d_initvalu_4)/taud;
d_finavalu[offset_5] = (fss-d_initvalu_5)/tauf;
d_finavalu[offset_6] = 1.7*d_initvalu_36*(1-d_initvalu_6)-11.9e-3*d_initvalu_6; // fCa_junc
d_finavalu[offset_7] = 1.7*d_initvalu_37*(1-d_initvalu_7)-11.9e-3*d_initvalu_7; // fCa_sl
//
ibarca_j = pCa*4*(d_initvalu_39*Frdy*FoRT) * (0.341*d_initvalu_36*exp(2*d_initvalu_39*FoRT)-0.341*Cao) /(exp(2*d_initvalu_39*FoRT)-1);
ibarca_sl = pCa*4*(d_initvalu_39*Frdy*FoRT) * (0.341*d_initvalu_37*exp(2*d_initvalu_39*FoRT)-0.341*Cao) /(exp(2*d_initvalu_39*FoRT)-1);
ibark = pK*(d_initvalu_39*Frdy*FoRT)*(0.75*d_initvalu_35*exp(d_initvalu_39*FoRT)-0.75*Ko) /(exp(d_initvalu_39*FoRT)-1);
ibarna_j = pNa*(d_initvalu_39*Frdy*FoRT) *(0.75*d_initvalu_32*exp(d_initvalu_39*FoRT)-0.75*Nao) /(exp(d_initvalu_39*FoRT)-1);
ibarna_sl = pNa*(d_initvalu_39*Frdy*FoRT) *(0.75*d_initvalu_33*exp(d_initvalu_39*FoRT)-0.75*Nao) /(exp(d_initvalu_39*FoRT)-1);
I_Ca_junc = (Fjunc_CaL*ibarca_j*d_initvalu_4*d_initvalu_5*(1-d_initvalu_6)*pow(Q10CaL,Qpow))*0.45;
I_Ca_sl = (Fsl_CaL*ibarca_sl*d_initvalu_4*d_initvalu_5*(1-d_initvalu_7)*pow(Q10CaL,Qpow))*0.45;
I_Ca = I_Ca_junc+I_Ca_sl;
d_finavalu[offset_43]=-I_Ca*Cmem/(Vmyo*2*Frdy)*1e3;
I_CaK = (ibark*d_initvalu_4*d_initvalu_5*(Fjunc_CaL*(1-d_initvalu_6)+Fsl_CaL*(1-d_initvalu_7))*pow(Q10CaL,Qpow))*0.45;
I_CaNa_junc = (Fjunc_CaL*ibarna_j*d_initvalu_4*d_initvalu_5*(1-d_initvalu_6)*pow(Q10CaL,Qpow))*0.45;
I_CaNa_sl = (Fsl_CaL*ibarna_sl*d_initvalu_4*d_initvalu_5*(1-d_initvalu_7)*pow(Q10CaL,Qpow))*0.45;
// I_CaNa = I_CaNa_junc+I_CaNa_sl;
// I_Catot = I_Ca+I_CaK+I_CaNa;
// I_ncx: Na/Ca Exchanger flux
Ka_junc = 1/(1+pow((Kdact/d_initvalu_36),3));
Ka_sl = 1/(1+pow((Kdact/d_initvalu_37),3));
s1_junc = exp(nu*d_initvalu_39*FoRT)*pow(d_initvalu_32,3)*Cao;
s1_sl = exp(nu*d_initvalu_39*FoRT)*pow(d_initvalu_33,3)*Cao;
s2_junc = exp((nu-1)*d_initvalu_39*FoRT)*pow(Nao,3)*d_initvalu_36;
s3_junc = (KmCai*pow(Nao,3)*(1+pow((d_initvalu_32/KmNai),3))+pow(KmNao,3)*d_initvalu_36+ pow(KmNai,3)*Cao*(1+d_initvalu_36/KmCai)+KmCao*pow(d_initvalu_32,3)+pow(d_initvalu_32,3)*Cao+pow(Nao,3)*d_initvalu_36)*(1+ksat*exp((nu-1)*d_initvalu_39*FoRT));
s2_sl = exp((nu-1)*d_initvalu_39*FoRT)*pow(Nao,3)*d_initvalu_37;
s3_sl = (KmCai*pow(Nao,3)*(1+pow((d_initvalu_33/KmNai),3)) + pow(KmNao,3)*d_initvalu_37+pow(KmNai,3)*Cao*(1+d_initvalu_37/KmCai)+KmCao*pow(d_initvalu_33,3)+pow(d_initvalu_33,3)*Cao+pow(Nao,3)*d_initvalu_37)*(1+ksat*exp((nu-1)*d_initvalu_39*FoRT));
I_ncx_junc = Fjunc*IbarNCX*pow(Q10NCX,Qpow)*Ka_junc*(s1_junc-s2_junc)/s3_junc;
I_ncx_sl = Fsl*IbarNCX*pow(Q10NCX,Qpow)*Ka_sl*(s1_sl-s2_sl)/s3_sl;
I_ncx = I_ncx_junc+I_ncx_sl;
d_finavalu[offset_45]=2*I_ncx*Cmem/(Vmyo*2*Frdy)*1e3;
// I_pca: Sarcolemmal Ca Pump Current
I_pca_junc = Fjunc*pow(Q10SLCaP,Qpow)*IbarSLCaP*pow(d_initvalu_36,fp(1.6))/(pow(KmPCa,fp(1.6))+pow(d_initvalu_36,fp(1.6)));
I_pca_sl = Fsl*pow(Q10SLCaP,Qpow)*IbarSLCaP*pow(d_initvalu_37,fp(1.6))/(pow(KmPCa,fp(1.6))+pow(d_initvalu_37,fp(1.6)));
I_pca = I_pca_junc+I_pca_sl;
d_finavalu[offset_44]=-I_pca*Cmem/(Vmyo*2*Frdy)*1e3;
// I_cabk: Ca Background Current
I_cabk_junc = Fjunc*GCaB*(d_initvalu_39-eca_junc);
I_cabk_sl = Fsl*GCaB*(d_initvalu_39-eca_sl);
I_cabk = I_cabk_junc+I_cabk_sl;
d_finavalu[offset_46]=-I_cabk*Cmem/(Vmyo*2*Frdy)*1e3;
// SR fluxes: Calcium Release, SR Ca pump, SR Ca leak
MaxSR = 15;
MinSR = 1;
kCaSR = MaxSR - (MaxSR-MinSR)/(1+pow(ec50SR/d_initvalu_31,fp(2.5)));
koSRCa = koCa/kCaSR;
kiSRCa = kiCa*kCaSR;
RI = 1-d_initvalu_14-d_initvalu_15-d_initvalu_16;
d_finavalu[offset_14] = (kim*RI-kiSRCa*d_initvalu_36*d_initvalu_14)-(koSRCa*pow(d_initvalu_36,2)*d_initvalu_14-kom*d_initvalu_15); // R
d_finavalu[offset_15] = (koSRCa*pow(d_initvalu_36,2)*d_initvalu_14-kom*d_initvalu_15)-(kiSRCa*d_initvalu_36*d_initvalu_15-kim*d_initvalu_16); // O
d_finavalu[offset_16] = (kiSRCa*d_initvalu_36*d_initvalu_15-kim*d_initvalu_16)-(kom*d_initvalu_16-koSRCa*pow(d_initvalu_36,2)*RI); // I
J_SRCarel = ks*d_initvalu_15*(d_initvalu_31-d_initvalu_36); // [mM/ms]
J_serca = pow(Q10SRCaP,Qpow)*Vmax_SRCaP*(pow((d_initvalu_38/Kmf),hillSRCaP)-pow((d_initvalu_31/Kmr),hillSRCaP))
/(1+pow((d_initvalu_38/Kmf),hillSRCaP)+pow((d_initvalu_31/Kmr),hillSRCaP));
J_SRleak = 5.348e-6*(d_initvalu_31-d_initvalu_36); // [mM/ms]
// Sodium and Calcium Buffering
d_finavalu[offset_17] = kon_na*d_initvalu_32*(Bmax_Naj-d_initvalu_17)-koff_na*d_initvalu_17; // NaBj [mM/ms]
d_finavalu[offset_18] = kon_na*d_initvalu_33*(Bmax_Nasl-d_initvalu_18)-koff_na*d_initvalu_18; // NaBsl [mM/ms]
// Cytosolic Ca Buffers
d_finavalu[offset_19] = kon_tncl*d_initvalu_38*(Bmax_TnClow-d_initvalu_19)-koff_tncl*d_initvalu_19; // TnCL [mM/ms]
d_finavalu[offset_20] = kon_tnchca*d_initvalu_38*(Bmax_TnChigh-d_initvalu_20-d_initvalu_21)-koff_tnchca*d_initvalu_20; // TnCHc [mM/ms]
d_finavalu[offset_21] = kon_tnchmg*Mgi*(Bmax_TnChigh-d_initvalu_20-d_initvalu_21)-koff_tnchmg*d_initvalu_21; // TnCHm [mM/ms]
d_finavalu[offset_22] = 0; // CaM [mM/ms]
d_finavalu[offset_23] = kon_myoca*d_initvalu_38*(Bmax_myosin-d_initvalu_23-d_initvalu_24)-koff_myoca*d_initvalu_23; // Myosin_ca [mM/ms]
d_finavalu[offset_24] = kon_myomg*Mgi*(Bmax_myosin-d_initvalu_23-d_initvalu_24)-koff_myomg*d_initvalu_24; // Myosin_mg [mM/ms]
d_finavalu[offset_25] = kon_sr*d_initvalu_38*(Bmax_SR-d_initvalu_25)-koff_sr*d_initvalu_25; // SRB [mM/ms]
J_CaB_cytosol = d_finavalu[offset_19] + d_finavalu[offset_20] + d_finavalu[offset_21] + d_finavalu[offset_22] + d_finavalu[offset_23] + d_finavalu[offset_24] + d_finavalu[offset_25];
// Junctional and SL Ca Buffers
d_finavalu[offset_26] = kon_sll*d_initvalu_36*(Bmax_SLlowj-d_initvalu_26)-koff_sll*d_initvalu_26; // SLLj [mM/ms]
d_finavalu[offset_27] = kon_sll*d_initvalu_37*(Bmax_SLlowsl-d_initvalu_27)-koff_sll*d_initvalu_27; // SLLsl [mM/ms]
d_finavalu[offset_28] = kon_slh*d_initvalu_36*(Bmax_SLhighj-d_initvalu_28)-koff_slh*d_initvalu_28; // SLHj [mM/ms]
d_finavalu[offset_29] = kon_slh*d_initvalu_37*(Bmax_SLhighsl-d_initvalu_29)-koff_slh*d_initvalu_29; // SLHsl [mM/ms]
J_CaB_junction = d_finavalu[offset_26]+d_finavalu[offset_28];
J_CaB_sl = d_finavalu[offset_27]+d_finavalu[offset_29];
// SR Ca Concentrations
d_finavalu[offset_30] = kon_csqn*d_initvalu_31*(Bmax_Csqn-d_initvalu_30)-koff_csqn*d_initvalu_30; // Csqn [mM/ms]
oneovervsr = 1/Vsr;
d_finavalu[offset_31] = J_serca*Vmyo*oneovervsr-(J_SRleak*Vmyo*oneovervsr+J_SRCarel)-d_finavalu[offset_30]; // Ca_sr [mM/ms] %Ratio 3 leak current
// Sodium Concentrations
I_Na_tot_junc = I_Na_junc+I_nabk_junc+3*I_ncx_junc+3*I_nak_junc+I_CaNa_junc; // [uA/uF]
I_Na_tot_sl = I_Na_sl+I_nabk_sl+3*I_ncx_sl+3*I_nak_sl+I_CaNa_sl; // [uA/uF]
d_finavalu[offset_32] = -I_Na_tot_junc*Cmem/(Vjunc*Frdy)+J_na_juncsl/Vjunc*(d_initvalu_33-d_initvalu_32)-d_finavalu[offset_17];
oneovervsl = 1/Vsl;
d_finavalu[offset_33] = -I_Na_tot_sl*Cmem*oneovervsl/Frdy+J_na_juncsl*oneovervsl*(d_initvalu_32-d_initvalu_33)+J_na_slmyo*oneovervsl*(d_initvalu_34-d_initvalu_33)-d_finavalu[offset_18];
d_finavalu[offset_34] = J_na_slmyo/Vmyo*(d_initvalu_33-d_initvalu_34); // [mM/msec]
// Potassium Concentration
I_K_tot = I_to+I_kr+I_ks+I_ki-2*I_nak+I_CaK+I_kp; // [uA/uF]
d_finavalu[offset_35] = 0; // [mM/msec]
// Calcium Concentrations
I_Ca_tot_junc = I_Ca_junc+I_cabk_junc+I_pca_junc-2*I_ncx_junc; // [uA/uF]
I_Ca_tot_sl = I_Ca_sl+I_cabk_sl+I_pca_sl-2*I_ncx_sl; // [uA/uF]
d_finavalu[offset_36] = -I_Ca_tot_junc*Cmem/(Vjunc*2*Frdy)+J_ca_juncsl/Vjunc*(d_initvalu_37-d_initvalu_36)
- J_CaB_junction+(J_SRCarel)*Vsr/Vjunc+J_SRleak*Vmyo/Vjunc; // Ca_j
d_finavalu[offset_37] = -I_Ca_tot_sl*Cmem/(Vsl*2*Frdy)+J_ca_juncsl/Vsl*(d_initvalu_36-d_initvalu_37)
+ J_ca_slmyo/Vsl*(d_initvalu_38-d_initvalu_37)-J_CaB_sl; // Ca_sl
d_finavalu[offset_38] = -J_serca-J_CaB_cytosol +J_ca_slmyo/Vmyo*(d_initvalu_37-d_initvalu_38);
// junc_sl=J_ca_juncsl/Vsl*(d_initvalu_36-d_initvalu_37);
// sl_junc=J_ca_juncsl/Vjunc*(d_initvalu_37-d_initvalu_36);
// sl_myo=J_ca_slmyo/Vsl*(d_initvalu_38-d_initvalu_37);
// myo_sl=J_ca_slmyo/Vmyo*(d_initvalu_37-d_initvalu_38);
// Simulation type
state = 1;
switch(state){
case 0:
I_app = 0;
break;
case 1: // pace w/ current injection at cycleLength 'cycleLength'
if(fmod(timeinst,cycleLength) <= 5){
I_app = 9.5;
}
else{
I_app = 0.0;
}
break;
case 2:
V_hold = -55;
V_test = 0;
			if(timeinst>0.5 && timeinst<200.5){
V_clamp = V_test;
}
else{
V_clamp = V_hold;
}
R_clamp = 0.04;
I_app = (V_clamp-d_initvalu_39)/R_clamp;
break;
}
// Membrane Potential
I_Na_tot = I_Na_tot_junc + I_Na_tot_sl; // [uA/uF]
I_Cl_tot = I_ClCa+I_Clbk; // [uA/uF]
I_Ca_tot = I_Ca_tot_junc+I_Ca_tot_sl;
I_tot = I_Na_tot+I_Cl_tot+I_Ca_tot+I_K_tot;
d_finavalu[offset_39] = -(I_tot-I_app);
// Set unused output values to 0 (MATLAB does it by default)
d_finavalu[offset_41] = 0;
d_finavalu[offset_42] = 0;
}
#include <stdint.h>
#include "miner.h"
#include "cuda_helper.h"
void bitcoin_cpu_init(int thr_id);
void bitcoin_cpu_hash(int thr_id, uint32_t threads, uint32_t startNounce, const uint32_t *const ms, uint32_t merkle, uint32_t time, uint32_t compacttarget, uint32_t *const h_nounce);
void bitcoin_midstate(const uint32_t *data, uint32_t *midstate);
__constant__ uint32_t pTarget[8];
static uint32_t *d_result[MAX_GPUS];
#define TPB 512
#define NONCES_PER_THREAD 32
#define rrot(x, n) (((x) >> (n)) | ((x) << (32 - (n))))
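// Illustrative helpers (hypothetical, not used by the kernel below): the message
// schedule expansion hand-inlined in bitcoin_gpu_hash follows the SHA-256 recurrence
//   w[i] = w[i-16] + s0(w[i-15]) + w[i-7] + s1(w[i-2])
// where the two "small sigma" functions are:
__device__ __forceinline__ uint32_t sha256_s0(uint32_t x) { return rrot(x, 7) ^ rrot(x, 18) ^ (x >> 3); }
__device__ __forceinline__ uint32_t sha256_s1(uint32_t x) { return rrot(x, 17) ^ rrot(x, 19) ^ (x >> 10); }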
__global__ __launch_bounds__(TPB, 2)
void bitcoin_gpu_hash(const uint32_t threads, const uint32_t startNounce, uint32_t *const result, const uint32_t t1c, const uint32_t t2c, const uint32_t w16, const uint32_t w16rot, const uint32_t w17, const uint32_t w17rot, const uint32_t b2, const uint32_t c2, const uint32_t d2, const uint32_t f2, const uint32_t g2, const uint32_t h2, const uint32_t ms0, const uint32_t ms1, const uint32_t ms2, const uint32_t ms3, const uint32_t ms4, const uint32_t ms5, const uint32_t ms6, const uint32_t ms7, const uint32_t compacttarget)
{
uint32_t threadindex = (blockDim.x * blockIdx.x + threadIdx.x);
if (threadindex < threads)
{
uint32_t t1, a, b, c, d, e, f, g, h;
uint32_t w[64];
const uint32_t numberofthreads = blockDim.x*gridDim.x;
const uint32_t maxnonce = startNounce + threadindex + numberofthreads*NONCES_PER_THREAD - 1;
#pragma unroll
for (uint32_t nonce = startNounce + threadindex; nonce <= maxnonce; nonce += numberofthreads)
{
w[18] = (rrot(nonce, 7) ^ rrot(nonce, 18) ^ (nonce >> 3)) + w16rot;
w[19] = nonce + w17rot;
w[20] = 0x80000000U + (rrot(w[18], 17) ^ rrot(w[18], 19) ^ (w[18] >> 10));
w[21] = (rrot(w[19], 17) ^ rrot(w[19], 19) ^ (w[19] >> 10));
w[22] = 0x280U + (rrot(w[20], 17) ^ rrot(w[20], 19) ^ (w[20] >> 10));
w[23] = w16 + (rrot(w[21], 17) ^ rrot(w[21], 19) ^ (w[21] >> 10));
w[24] = w17 + (rrot(w[22], 17) ^ rrot(w[22], 19) ^ (w[22] >> 10));
w[25] = w[18] + (rrot(w[23], 17) ^ rrot(w[23], 19) ^ (w[23] >> 10));
w[26] = w[19] + (rrot(w[24], 17) ^ rrot(w[24], 19) ^ (w[24] >> 10));
w[27] = w[20] + (rrot(w[25], 17) ^ rrot(w[25], 19) ^ (w[25] >> 10));
w[28] = w[21] + (rrot(w[26], 17) ^ rrot(w[26], 19) ^ (w[26] >> 10));
w[29] = w[22] + (rrot(w[27], 17) ^ rrot(w[27], 19) ^ (w[27] >> 10));
w[30] = w[23] + 0xa00055U + (rrot(w[28], 17) ^ rrot(w[28], 19) ^ (w[28] >> 10));
w[31] = 0x280U + w[24] + (rrot(w16, 7) ^ rrot(w16, 18) ^ (w16 >> 3)) + (rrot(w[29], 17) ^ rrot(w[29], 19) ^ (w[29] >> 10));
w[32] = w16 + w[25] + (rrot(w17, 7) ^ rrot(w17, 18) ^ (w17 >> 3)) + (rrot(w[30], 17) ^ rrot(w[30], 19) ^ (w[30] >> 10));
w[33] = w17 + w[26] + (rrot(w[18], 7) ^ rrot(w[18], 18) ^ (w[18] >> 3)) + (rrot(w[31], 17) ^ rrot(w[31], 19) ^ (w[31] >> 10));
#pragma unroll
for (int i = 34; i < 62; i++)
w[i] = w[i-16] + w[i-7] + (rrot(w[i-15], 7) ^ rrot(w[i-15], 18) ^ (w[i-15] >> 3)) + (rrot(w[i-2], 17) ^ rrot(w[i-2], 19) ^ (w[i-2] >> 10));
t1 = t1c + (uint32_t)nonce;
a = ms0 + t1;
e = t1 + t2c;
//
t1 = d2 + (rrot(a, 6) ^ rrot(a, 11) ^ rrot(a, 25)) + (c2 ^ (a & (b2 ^ c2))) + 0xb956c25bU;
h = h2 + t1;
d = t1 + (rrot(e, 2) ^ rrot(e, 13) ^ rrot(e, 22)) + ((g2 & f2) | (e & (g2 | f2)));
//
t1 = c2 + (rrot(h, 6) ^ rrot(h, 11) ^ rrot(h, 25)) + (b2 ^ (h & (a ^ b2))) + 0x59f111f1U;
g = g2 + t1;
c = t1 + (rrot(d, 2) ^ rrot(d, 13) ^ rrot(d, 22)) + ((f2 & e) | (d & (f2 | e)));
//
t1 = b2 + (rrot(g, 6) ^ rrot(g, 11) ^ rrot(g, 25)) + (a ^ (g & (h ^ a))) + 0x923f82a4U;
f = f2 + t1;
b = t1 + (rrot(c, 2) ^ rrot(c, 13) ^ rrot(c, 22)) + ((e & d) | (c & (e | d)));
//
t1 = a + (rrot(f, 6) ^ rrot(f, 11) ^ rrot(f, 25)) + (h ^ (f & (g ^ h))) + 0xab1c5ed5U;
e += t1;
a = t1 + (rrot(b, 2) ^ rrot(b, 13) ^ rrot(b, 22)) + ((d & c) | (b & (d | c)));
//
t1 = h + (rrot(e, 6) ^ rrot(e, 11) ^ rrot(e, 25)) + (g ^ (e & (f ^ g))) + 0xd807aa98U;
d += t1;
h = t1 + (rrot(a, 2) ^ rrot(a, 13) ^ rrot(a, 22)) + ((c & b) | (a & (c | b)));
//
t1 = g + (rrot(d, 6) ^ rrot(d, 11) ^ rrot(d, 25)) + (f ^ (d & (e ^ f))) + 0x12835b01U;
c += t1;
g = t1 + (rrot(h, 2) ^ rrot(h, 13) ^ rrot(h, 22)) + ((b & a) | (h & (b | a)));
//
t1 = f + (rrot(c, 6) ^ rrot(c, 11) ^ rrot(c, 25)) + (e ^ (c & (d ^ e))) + 0x243185beU;
b += t1;
f = t1 + (rrot(g, 2) ^ rrot(g, 13) ^ rrot(g, 22)) + ((a & h) | (g & (a | h)));
//
t1 = e + (rrot(b, 6) ^ rrot(b, 11) ^ rrot(b, 25)) + (d ^ (b & (c ^ d))) + 0x550c7dc3U;
a += t1;
e = t1 + (rrot(f, 2) ^ rrot(f, 13) ^ rrot(f, 22)) + ((h & g) | (f & (h | g)));
//
t1 = d + (rrot(a, 6) ^ rrot(a, 11) ^ rrot(a, 25)) + (c ^ (a & (b ^ c))) + 0x72be5d74U;
h += t1;
d = t1 + (rrot(e, 2) ^ rrot(e, 13) ^ rrot(e, 22)) + ((g & f) | (e & (g | f)));
//
t1 = c + (rrot(h, 6) ^ rrot(h, 11) ^ rrot(h, 25)) + (b ^ (h & (a ^ b))) + 0x80deb1feU;
g += t1;
c = t1 + (rrot(d, 2) ^ rrot(d, 13) ^ rrot(d, 22)) + ((f & e) | (d & (f | e)));
//
t1 = b + (rrot(g, 6) ^ rrot(g, 11) ^ rrot(g, 25)) + (a ^ (g & (h ^ a))) + 0x9bdc06a7U;
f += t1;
b = t1 + (rrot(c, 2) ^ rrot(c, 13) ^ rrot(c, 22)) + ((e & d) | (c & (e | d)));
//
t1 = a + (rrot(f, 6) ^ rrot(f, 11) ^ rrot(f, 25)) + (h ^ (f & (g ^ h))) + 0xc19bf3f4U;
e += t1;
a = t1 + (rrot(b, 2) ^ rrot(b, 13) ^ rrot(b, 22)) + ((d & c) | (b & (d | c)));
//
t1 = h + (rrot(e, 6) ^ rrot(e, 11) ^ rrot(e, 25)) + (g ^ (e & (f ^ g))) + 0xe49b69c1U + w16;
d += t1;
h = t1 + (rrot(a, 2) ^ rrot(a, 13) ^ rrot(a, 22)) + ((c & b) | (a & (c | b)));
//
t1 = g + (rrot(d, 6) ^ rrot(d, 11) ^ rrot(d, 25)) + (f ^ (d & (e ^ f))) + 0xefbe4786U + w17;
c += t1;
g = t1 + (rrot(h, 2) ^ rrot(h, 13) ^ rrot(h, 22)) + ((b & a) | (h & (b | a)));
//
t1 = f + (rrot(c, 6) ^ rrot(c, 11) ^ rrot(c, 25)) + (e ^ (c & (d ^ e))) + 0x0fc19dc6U + w[18];
b += t1;
f = t1 + (rrot(g, 2) ^ rrot(g, 13) ^ rrot(g, 22)) + ((a & h) | (g & (a | h)));
//
t1 = e + (rrot(b, 6) ^ rrot(b, 11) ^ rrot(b, 25)) + (d ^ (b & (c ^ d))) + 0x240ca1ccU + w[19];
a += t1;
e = t1 + (rrot(f, 2) ^ rrot(f, 13) ^ rrot(f, 22)) + ((h & g) | (f & (h | g)));
//
t1 = d + (rrot(a, 6) ^ rrot(a, 11) ^ rrot(a, 25)) + (c ^ (a & (b ^ c))) + 0x2de92c6fU + w[20];
h += t1;
d = t1 + (rrot(e, 2) ^ rrot(e, 13) ^ rrot(e, 22)) + ((g & f) | (e & (g | f)));
//
t1 = c + (rrot(h, 6) ^ rrot(h, 11) ^ rrot(h, 25)) + (b ^ (h & (a ^ b))) + 0x4a7484aaU + w[21];
g += t1;
c = t1 + (rrot(d, 2) ^ rrot(d, 13) ^ rrot(d, 22)) + ((f & e) | (d & (f | e)));
//
t1 = b + (rrot(g, 6) ^ rrot(g, 11) ^ rrot(g, 25)) + (a ^ (g & (h ^ a))) + 0x5cb0a9dcU + w[22];
f += t1;
b = t1 + (rrot(c, 2) ^ rrot(c, 13) ^ rrot(c, 22)) + ((e & d) | (c & (e | d)));
//
t1 = a + (rrot(f, 6) ^ rrot(f, 11) ^ rrot(f, 25)) + (h ^ (f & (g ^ h))) + 0x76f988daU + w[23];
e += t1;
a = t1 + (rrot(b, 2) ^ rrot(b, 13) ^ rrot(b, 22)) + ((d & c) | (b & (d | c)));
//
t1 = h + (rrot(e, 6) ^ rrot(e, 11) ^ rrot(e, 25)) + (g ^ (e & (f ^ g))) + 0x983e5152U + w[24];
d += t1;
h = t1 + (rrot(a, 2) ^ rrot(a, 13) ^ rrot(a, 22)) + ((c & b) | (a & (c | b)));
//
t1 = g + (rrot(d, 6) ^ rrot(d, 11) ^ rrot(d, 25)) + (f ^ (d & (e ^ f))) + 0xa831c66dU + w[25];
c += t1;
g = t1 + (rrot(h, 2) ^ rrot(h, 13) ^ rrot(h, 22)) + ((b & a) | (h & (b | a)));
//
t1 = f + (rrot(c, 6) ^ rrot(c, 11) ^ rrot(c, 25)) + (e ^ (c & (d ^ e))) + 0xb00327c8U + w[26];
b += t1;
f = t1 + (rrot(g, 2) ^ rrot(g, 13) ^ rrot(g, 22)) + ((a & h) | (g & (a | h)));
//
t1 = e + (rrot(b, 6) ^ rrot(b, 11) ^ rrot(b, 25)) + (d ^ (b & (c ^ d))) + 0xbf597fc7U + w[27];
a += t1;
e = t1 + (rrot(f, 2) ^ rrot(f, 13) ^ rrot(f, 22)) + ((h & g) | (f & (h | g)));
//
t1 = d + (rrot(a, 6) ^ rrot(a, 11) ^ rrot(a, 25)) + (c ^ (a & (b ^ c))) + 0xc6e00bf3U + w[28];
h += t1;
d = t1 + (rrot(e, 2) ^ rrot(e, 13) ^ rrot(e, 22)) + ((g & f) | (e & (g | f)));
//
t1 = c + (rrot(h, 6) ^ rrot(h, 11) ^ rrot(h, 25)) + (b ^ (h & (a ^ b))) + 0xd5a79147U + w[29];
g += t1;
c = t1 + (rrot(d, 2) ^ rrot(d, 13) ^ rrot(d, 22)) + ((f & e) | (d & (f | e)));
//
t1 = b + (rrot(g, 6) ^ rrot(g, 11) ^ rrot(g, 25)) + (a ^ (g & (h ^ a))) + 0x06ca6351U + w[30];
f += t1;
b = t1 + (rrot(c, 2) ^ rrot(c, 13) ^ rrot(c, 22)) + ((e & d) | (c & (e | d)));
//
t1 = a + (rrot(f, 6) ^ rrot(f, 11) ^ rrot(f, 25)) + (h ^ (f & (g ^ h))) + 0x14292967U + w[31];
e += t1;
a = t1 + (rrot(b, 2) ^ rrot(b, 13) ^ rrot(b, 22)) + ((d & c) | (b & (d | c)));
//
t1 = h + (rrot(e, 6) ^ rrot(e, 11) ^ rrot(e, 25)) + (g ^ (e & (f ^ g))) + 0x27b70a85U + w[32];
d += t1;
h = t1 + (rrot(a, 2) ^ rrot(a, 13) ^ rrot(a, 22)) + ((c & b) | (a & (c | b)));
//
t1 = g + (rrot(d, 6) ^ rrot(d, 11) ^ rrot(d, 25)) + (f ^ (d & (e ^ f))) + 0x2e1b2138U + w[33];
c += t1;
g = t1 + (rrot(h, 2) ^ rrot(h, 13) ^ rrot(h, 22)) + ((b & a) | (h & (b | a)));
//
t1 = f + (rrot(c, 6) ^ rrot(c, 11) ^ rrot(c, 25)) + (e ^ (c & (d ^ e))) + 0x4d2c6dfcU + w[34];
b += t1;
f = t1 + (rrot(g, 2) ^ rrot(g, 13) ^ rrot(g, 22)) + ((a & h) | (g & (a | h)));
//
t1 = e + (rrot(b, 6) ^ rrot(b, 11) ^ rrot(b, 25)) + (d ^ (b & (c ^ d))) + 0x53380d13U + w[35];
a += t1;
e = t1 + (rrot(f, 2) ^ rrot(f, 13) ^ rrot(f, 22)) + ((h & g) | (f & (h | g)));
//
t1 = d + (rrot(a, 6) ^ rrot(a, 11) ^ rrot(a, 25)) + (c ^ (a & (b ^ c))) + 0x650a7354U + w[36];
h += t1;
d = t1 + (rrot(e, 2) ^ rrot(e, 13) ^ rrot(e, 22)) + ((g & f) | (e & (g | f)));
//
t1 = c + (rrot(h, 6) ^ rrot(h, 11) ^ rrot(h, 25)) + (b ^ (h & (a ^ b))) + 0x766a0abbU + w[37];
g += t1;
c = t1 + (rrot(d, 2) ^ rrot(d, 13) ^ rrot(d, 22)) + ((f & e) | (d & (f | e)));
//
t1 = b + (rrot(g, 6) ^ rrot(g, 11) ^ rrot(g, 25)) + (a ^ (g & (h ^ a))) + 0x81c2c92eU + w[38];
f += t1;
b = t1 + (rrot(c, 2) ^ rrot(c, 13) ^ rrot(c, 22)) + ((e & d) | (c & (e | d)));
//
t1 = a + (rrot(f, 6) ^ rrot(f, 11) ^ rrot(f, 25)) + (h ^ (f & (g ^ h))) + 0x92722c85U + w[39];
e += t1;
a = t1 + (rrot(b, 2) ^ rrot(b, 13) ^ rrot(b, 22)) + ((d & c) | (b & (d | c)));
//
t1 = h + (rrot(e, 6) ^ rrot(e, 11) ^ rrot(e, 25)) + (g ^ (e & (f ^ g))) + 0xa2bfe8a1U + w[40];
d += t1;
h = t1 + (rrot(a, 2) ^ rrot(a, 13) ^ rrot(a, 22)) + ((c & b) | (a & (c | b)));
//
t1 = g + (rrot(d, 6) ^ rrot(d, 11) ^ rrot(d, 25)) + (f ^ (d & (e ^ f))) + 0xa81a664bU + w[41];
c += t1;
g = t1 + (rrot(h, 2) ^ rrot(h, 13) ^ rrot(h, 22)) + ((b & a) | (h & (b | a)));
//
t1 = f + (rrot(c, 6) ^ rrot(c, 11) ^ rrot(c, 25)) + (e ^ (c & (d ^ e))) + 0xc24b8b70U + w[42];
b += t1;
f = t1 + (rrot(g, 2) ^ rrot(g, 13) ^ rrot(g, 22)) + ((a & h) | (g & (a | h)));
//
t1 = e + (rrot(b, 6) ^ rrot(b, 11) ^ rrot(b, 25)) + (d ^ (b & (c ^ d))) + 0xc76c51a3U + w[43];
a += t1;
e = t1 + (rrot(f, 2) ^ rrot(f, 13) ^ rrot(f, 22)) + ((h & g) | (f & (h | g)));
//
t1 = d + (rrot(a, 6) ^ rrot(a, 11) ^ rrot(a, 25)) + (c ^ (a & (b ^ c))) + 0xd192e819U + w[44];
h += t1;
d = t1 + (rrot(e, 2) ^ rrot(e, 13) ^ rrot(e, 22)) + ((g & f) | (e & (g | f)));
//
t1 = c + (rrot(h, 6) ^ rrot(h, 11) ^ rrot(h, 25)) + (b ^ (h & (a ^ b))) + 0xd6990624U + w[45];
g += t1;
c = t1 + (rrot(d, 2) ^ rrot(d, 13) ^ rrot(d, 22)) + ((f & e) | (d & (f | e)));
//
t1 = b + (rrot(g, 6) ^ rrot(g, 11) ^ rrot(g, 25)) + (a ^ (g & (h ^ a))) + 0xf40e3585U + w[46];
f += t1;
b = t1 + (rrot(c, 2) ^ rrot(c, 13) ^ rrot(c, 22)) + ((e & d) | (c & (e | d)));
//
t1 = a + (rrot(f, 6) ^ rrot(f, 11) ^ rrot(f, 25)) + (h ^ (f & (g ^ h))) + 0x106aa070U + w[47];
e += t1;
a = t1 + (rrot(b, 2) ^ rrot(b, 13) ^ rrot(b, 22)) + ((d & c) | (b & (d | c)));
//
t1 = h + (rrot(e, 6) ^ rrot(e, 11) ^ rrot(e, 25)) + (g ^ (e & (f ^ g))) + 0x19a4c116U + w[48];
d += t1;
h = t1 + (rrot(a, 2) ^ rrot(a, 13) ^ rrot(a, 22)) + ((c & b) | (a & (c | b)));
//
t1 = g + (rrot(d, 6) ^ rrot(d, 11) ^ rrot(d, 25)) + (f ^ (d & (e ^ f))) + 0x1e376c08U + w[49];
c += t1;
g = t1 + (rrot(h, 2) ^ rrot(h, 13) ^ rrot(h, 22)) + ((b & a) | (h & (b | a)));
//
t1 = f + (rrot(c, 6) ^ rrot(c, 11) ^ rrot(c, 25)) + (e ^ (c & (d ^ e))) + 0x2748774cU + w[50];
b += t1;
f = t1 + (rrot(g, 2) ^ rrot(g, 13) ^ rrot(g, 22)) + ((a & h) | (g & (a | h)));
//
t1 = e + (rrot(b, 6) ^ rrot(b, 11) ^ rrot(b, 25)) + (d ^ (b & (c ^ d))) + 0x34b0bcb5U + w[51];
a += t1;
e = t1 + (rrot(f, 2) ^ rrot(f, 13) ^ rrot(f, 22)) + ((h & g) | (f & (h | g)));
//
t1 = d + (rrot(a, 6) ^ rrot(a, 11) ^ rrot(a, 25)) + (c ^ (a & (b ^ c))) + 0x391c0cb3U + w[52];
h += t1;
d = t1 + (rrot(e, 2) ^ rrot(e, 13) ^ rrot(e, 22)) + ((g & f) | (e & (g | f)));
//
t1 = c + (rrot(h, 6) ^ rrot(h, 11) ^ rrot(h, 25)) + (b ^ (h & (a ^ b))) + 0x4ed8aa4aU + w[53];
g += t1;
c = t1 + (rrot(d, 2) ^ rrot(d, 13) ^ rrot(d, 22)) + ((f & e) | (d & (f | e)));
//
t1 = b + (rrot(g, 6) ^ rrot(g, 11) ^ rrot(g, 25)) + (a ^ (g & (h ^ a))) + 0x5b9cca4fU + w[54];
f += t1;
b = t1 + (rrot(c, 2) ^ rrot(c, 13) ^ rrot(c, 22)) + ((e & d) | (c & (e | d)));
//
t1 = a + (rrot(f, 6) ^ rrot(f, 11) ^ rrot(f, 25)) + (h ^ (f & (g ^ h))) + 0x682e6ff3U + w[55];
e += t1;
a = t1 + (rrot(b, 2) ^ rrot(b, 13) ^ rrot(b, 22)) + ((d & c) | (b & (d | c)));
//
t1 = h + (rrot(e, 6) ^ rrot(e, 11) ^ rrot(e, 25)) + (g ^ (e & (f ^ g))) + 0x748f82eeU + w[56];
d += t1;
h = t1 + (rrot(a, 2) ^ rrot(a, 13) ^ rrot(a, 22)) + ((c & b) | (a & (c | b)));
//
t1 = g + (rrot(d, 6) ^ rrot(d, 11) ^ rrot(d, 25)) + (f ^ (d & (e ^ f))) + 0x78a5636fU + w[57];
c += t1;
g = t1 + (rrot(h, 2) ^ rrot(h, 13) ^ rrot(h, 22)) + ((b & a) | (h & (b | a)));
//
t1 = f + (rrot(c, 6) ^ rrot(c, 11) ^ rrot(c, 25)) + (e ^ (c & (d ^ e))) + 0x84c87814U + w[58];
b += t1;
f = t1 + (rrot(g, 2) ^ rrot(g, 13) ^ rrot(g, 22)) + ((a & h) | (g & (a | h)));
//
t1 = e + (rrot(b, 6) ^ rrot(b, 11) ^ rrot(b, 25)) + (d ^ (b & (c ^ d))) + 0x8cc70208U + w[59];
a += t1;
e = t1 + (rrot(f, 2) ^ rrot(f, 13) ^ rrot(f, 22)) + ((h & g) | (f & (h | g)));
//
t1 = d + (rrot(a, 6) ^ rrot(a, 11) ^ rrot(a, 25)) + (c ^ (a & (b ^ c))) + 0x90befffaU + w[60];
h += t1;
d = t1 + (rrot(e, 2) ^ rrot(e, 13) ^ rrot(e, 22)) + ((g & f) | (e & (g | f)));
//
t1 = c + (rrot(h, 6) ^ rrot(h, 11) ^ rrot(h, 25)) + (b ^ (h & (a ^ b))) + 0xa4506cebU + w[61];
g += t1;
c = t1 + (rrot(d, 2) ^ rrot(d, 13) ^ rrot(d, 22)) + ((f & e) | (d & (f | e)));
//
t1 = b + (rrot(g, 6) ^ rrot(g, 11) ^ rrot(g, 25)) + (a ^ (g & (h ^ a))) + 0xbef9a3f7U + w[46] + w[55] + (rrot(w[47], 7) ^ rrot(w[47], 18) ^ (w[47] >> 3)) + (rrot(w[60], 17) ^ rrot(w[60], 19) ^ (w[60] >> 10));
f += t1;
b = t1 + (rrot(c, 2) ^ rrot(c, 13) ^ rrot(c, 22)) + ((e & d) | (c & (e | d)));
//
t1 = a + (rrot(f, 6) ^ rrot(f, 11) ^ rrot(f, 25)) + (h ^ (f & (g ^ h))) + 0xc67178f2U + w[47] + w[56] + (rrot(w[48], 7) ^ rrot(w[48], 18) ^ (w[48] >> 3)) + (rrot(w[61], 17) ^ rrot(w[61], 19) ^ (w[61] >> 10));
e += t1;
a = t1 + (rrot(b, 2) ^ rrot(b, 13) ^ rrot(b, 22)) + ((d & c) | (b & (d | c)));
//
w[0] = a + ms0; w[1] = b + ms1; w[2] = c + ms2; w[3] = d + ms3;
w[4] = e + ms4; w[5] = f + ms5; w[6] = g + ms6; w[7] = h + ms7;
// hash the hash ***************************************************************
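// The second compression hashes the 32-byte digest of the first pass, padded to one
// 64-byte block: w[8] = 0x80000000 (padding bit), w[9..14] = 0, w[15] = 0x100 (256-bit
// length). Those fixed words are folded into the round constants below, e.g. round 8
// uses 0xd807aa98 + 0x80000000 = 0x5807aa98 and round 15 uses 0xc19bf174 + 0x100 = 0xc19bf274.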
w[16] = w[0] + (rrot(w[1], 7) ^ rrot(w[1], 18) ^ (w[1] >> 3));
w[17] = w[1] + (rrot(w[2], 7) ^ rrot(w[2], 18) ^ (w[2] >> 3)) + (rrot(0x100, 17) ^ rrot(0x100, 19) ^ (0x100 >> 10));
w[18] = w[2] + (rrot(w[3], 7) ^ rrot(w[3], 18) ^ (w[3] >> 3)) + (rrot(w[16], 17) ^ rrot(w[16], 19) ^ (w[16] >> 10));
w[19] = w[3] + (rrot(w[4], 7) ^ rrot(w[4], 18) ^ (w[4] >> 3)) + (rrot(w[17], 17) ^ rrot(w[17], 19) ^ (w[17] >> 10));
w[20] = w[4] + (rrot(w[5], 7) ^ rrot(w[5], 18) ^ (w[5] >> 3)) + (rrot(w[18], 17) ^ rrot(w[18], 19) ^ (w[18] >> 10));
w[21] = w[5] + (rrot(w[6], 7) ^ rrot(w[6], 18) ^ (w[6] >> 3)) + (rrot(w[19], 17) ^ rrot(w[19], 19) ^ (w[19] >> 10));
w[22] = w[6] + 0x100 + (rrot(w[7], 7) ^ rrot(w[7], 18) ^ (w[7] >> 3)) + (rrot(w[20], 17) ^ rrot(w[20], 19) ^ (w[20] >> 10));
w[23] = w[7] + w[16] + 0x11002000U + (rrot(w[21], 17) ^ rrot(w[21], 19) ^ (w[21] >> 10));
w[24] = 0x80000000U + w[17] + (rrot(w[22], 17) ^ rrot(w[22], 19) ^ (w[22] >> 10));
w[25] = w[18] + (rrot(w[23], 17) ^ rrot(w[23], 19) ^ (w[23] >> 10));
w[26] = w[19] + (rrot(w[24], 17) ^ rrot(w[24], 19) ^ (w[24] >> 10));
w[27] = w[20] + (rrot(w[25], 17) ^ rrot(w[25], 19) ^ (w[25] >> 10));
w[28] = w[21] + (rrot(w[26], 17) ^ rrot(w[26], 19) ^ (w[26] >> 10));
w[29] = w[22] + (rrot(w[27], 17) ^ rrot(w[27], 19) ^ (w[27] >> 10));
w[30] = w[23] + (rrot(0x100, 7) ^ rrot(0x100, 18) ^ (0x100 >> 3)) + (rrot(w[28], 17) ^ rrot(w[28], 19) ^ (w[28] >> 10));
w[31] = 0x100 + w[24] + (rrot(w[16], 7) ^ rrot(w[16], 18) ^ (w[16] >> 3)) + (rrot(w[29], 17) ^ rrot(w[29], 19) ^ (w[29] >> 10));
#pragma unroll
for (int i = 32; i < 59; i++)
w[i] = w[i - 16] + w[i - 7] + (rrot(w[i - 15], 7) ^ rrot(w[i - 15], 18) ^ (w[i - 15] >> 3)) + (rrot(w[i - 2], 17) ^ rrot(w[i - 2], 19) ^ (w[i - 2] >> 10));
d = 0x98c7e2a2U + w[0];
h = 0xfc08884dU + w[0];
//
t1 = (rrot(d, 6) ^ rrot(d, 11) ^ rrot(d, 25)) + (0x9b05688cU ^ (d & 0xca0b3af3)) + 0x90bb1e3cU + w[1];
c = 0x3c6ef372U + t1;
g = t1 + (rrot(h, 2) ^ rrot(h, 13) ^ rrot(h, 22)) + (0x2A01A605 | (h & 0xfb6feee7));
//
t1 = (rrot(c, 6) ^ rrot(c, 11) ^ rrot(c, 25)) + (0x510e527fU ^ (c & (d ^ 0x510e527fU))) + 0x50C6645BU + w[2];
b = 0xbb67ae85U + t1;
f = t1 + (rrot(g, 2) ^ rrot(g, 13) ^ rrot(g, 22)) + ((0x6a09e667U & h) | (g & (0x6a09e667U | h)));
//
t1 = (rrot(b, 6) ^ rrot(b, 11) ^ rrot(b, 25)) + (d ^ (b & (c ^ d))) + 0x3AC42E24U + w[3];
a = 0x6a09e667U + t1;
e = t1 + (rrot(f, 2) ^ rrot(f, 13) ^ rrot(f, 22)) + ((h & g) | (f & (h | g)));
//
t1 = d + (rrot(a, 6) ^ rrot(a, 11) ^ rrot(a, 25)) + (c ^ (a & (b ^ c))) + 0x3956c25bU + w[4];
h += t1;
d = t1 + (rrot(e, 2) ^ rrot(e, 13) ^ rrot(e, 22)) + ((g & f) | (e & (g | f)));
//
t1 = c + (rrot(h, 6) ^ rrot(h, 11) ^ rrot(h, 25)) + (b ^ (h & (a ^ b))) + 0x59f111f1U + w[5];
g += t1;
c = t1 + (rrot(d, 2) ^ rrot(d, 13) ^ rrot(d, 22)) + ((f & e) | (d & (f | e)));
//
t1 = b + (rrot(g, 6) ^ rrot(g, 11) ^ rrot(g, 25)) + (a ^ (g & (h ^ a))) + 0x923f82a4U + w[6];
f += t1;
b = t1 + (rrot(c, 2) ^ rrot(c, 13) ^ rrot(c, 22)) + ((e & d) | (c & (e | d)));
//
t1 = a + (rrot(f, 6) ^ rrot(f, 11) ^ rrot(f, 25)) + (h ^ (f & (g ^ h))) + 0xab1c5ed5U + w[7];
e += t1;
a = t1 + (rrot(b, 2) ^ rrot(b, 13) ^ rrot(b, 22)) + ((d & c) | (b & (d | c)));
//
t1 = h + (rrot(e, 6) ^ rrot(e, 11) ^ rrot(e, 25)) + (g ^ (e & (f ^ g))) + 0x5807aa98U;
d += t1;
h = t1 + (rrot(a, 2) ^ rrot(a, 13) ^ rrot(a, 22)) + ((c & b) | (a & (c | b)));
//
t1 = g + (rrot(d, 6) ^ rrot(d, 11) ^ rrot(d, 25)) + (f ^ (d & (e ^ f))) + 0x12835b01U;
c += t1;
g = t1 + (rrot(h, 2) ^ rrot(h, 13) ^ rrot(h, 22)) + ((b & a) | (h & (b | a)));
//
t1 = f + (rrot(c, 6) ^ rrot(c, 11) ^ rrot(c, 25)) + (e ^ (c & (d ^ e))) + 0x243185beU;
b += t1;
f = t1 + (rrot(g, 2) ^ rrot(g, 13) ^ rrot(g, 22)) + ((a & h) | (g & (a | h)));
//
t1 = e + (rrot(b, 6) ^ rrot(b, 11) ^ rrot(b, 25)) + (d ^ (b & (c ^ d))) + 0x550c7dc3U;
a += t1;
e = t1 + (rrot(f, 2) ^ rrot(f, 13) ^ rrot(f, 22)) + ((h & g) | (f & (h | g)));
//
t1 = d + (rrot(a, 6) ^ rrot(a, 11) ^ rrot(a, 25)) + (c ^ (a & (b ^ c))) + 0x72be5d74U;
h += t1;
d = t1 + (rrot(e, 2) ^ rrot(e, 13) ^ rrot(e, 22)) + ((g & f) | (e & (g | f)));
//
t1 = c + (rrot(h, 6) ^ rrot(h, 11) ^ rrot(h, 25)) + (b ^ (h & (a ^ b))) + 0x80deb1feU;
g += t1;
c = t1 + (rrot(d, 2) ^ rrot(d, 13) ^ rrot(d, 22)) + ((f & e) | (d & (f | e)));
//
t1 = b + (rrot(g, 6) ^ rrot(g, 11) ^ rrot(g, 25)) + (a ^ (g & (h ^ a))) + 0x9bdc06a7U;
f += t1;
b = t1 + (rrot(c, 2) ^ rrot(c, 13) ^ rrot(c, 22)) + ((e & d) | (c & (e | d)));
//
t1 = a + (rrot(f, 6) ^ rrot(f, 11) ^ rrot(f, 25)) + (h ^ (f & (g ^ h))) + 0xc19bf274U;
e += t1;
a = t1 + (rrot(b, 2) ^ rrot(b, 13) ^ rrot(b, 22)) + ((d & c) | (b & (d | c)));
//
t1 = h + (rrot(e, 6) ^ rrot(e, 11) ^ rrot(e, 25)) + (g ^ (e & (f ^ g))) + 0xe49b69c1U + w[16];
d += t1;
h = t1 + (rrot(a, 2) ^ rrot(a, 13) ^ rrot(a, 22)) + ((c & b) | (a & (c | b)));
//
t1 = g + (rrot(d, 6) ^ rrot(d, 11) ^ rrot(d, 25)) + (f ^ (d & (e ^ f))) + 0xefbe4786U + w[17];
c += t1;
g = t1 + (rrot(h, 2) ^ rrot(h, 13) ^ rrot(h, 22)) + ((b & a) | (h & (b | a)));
//
t1 = f + (rrot(c, 6) ^ rrot(c, 11) ^ rrot(c, 25)) + (e ^ (c & (d ^ e))) + 0x0fc19dc6U + w[18];
b += t1;
f = t1 + (rrot(g, 2) ^ rrot(g, 13) ^ rrot(g, 22)) + ((a & h) | (g & (a | h)));
//
t1 = e + (rrot(b, 6) ^ rrot(b, 11) ^ rrot(b, 25)) + (d ^ (b & (c ^ d))) + 0x240ca1ccU + w[19];
a += t1;
e = t1 + (rrot(f, 2) ^ rrot(f, 13) ^ rrot(f, 22)) + ((h & g) | (f & (h | g)));
//
t1 = d + (rrot(a, 6) ^ rrot(a, 11) ^ rrot(a, 25)) + (c ^ (a & (b ^ c))) + 0x2de92c6fU + w[20];
h += t1;
d = t1 + (rrot(e, 2) ^ rrot(e, 13) ^ rrot(e, 22)) + ((g & f) | (e & (g | f)));
//
t1 = c + (rrot(h, 6) ^ rrot(h, 11) ^ rrot(h, 25)) + (b ^ (h & (a ^ b))) + 0x4a7484aaU + w[21];
g += t1;
c = t1 + (rrot(d, 2) ^ rrot(d, 13) ^ rrot(d, 22)) + ((f & e) | (d & (f | e)));
//
t1 = b + (rrot(g, 6) ^ rrot(g, 11) ^ rrot(g, 25)) + (a ^ (g & (h ^ a))) + 0x5cb0a9dcU + w[22];
f += t1;
b = t1 + (rrot(c, 2) ^ rrot(c, 13) ^ rrot(c, 22)) + ((e & d) | (c & (e | d)));
//
t1 = a + (rrot(f, 6) ^ rrot(f, 11) ^ rrot(f, 25)) + (h ^ (f & (g ^ h))) + 0x76f988daU + w[23];
e += t1;
a = t1 + (rrot(b, 2) ^ rrot(b, 13) ^ rrot(b, 22)) + ((d & c) | (b & (d | c)));
//
t1 = h + (rrot(e, 6) ^ rrot(e, 11) ^ rrot(e, 25)) + (g ^ (e & (f ^ g))) + 0x983e5152U + w[24];
d += t1;
h = t1 + (rrot(a, 2) ^ rrot(a, 13) ^ rrot(a, 22)) + ((c & b) | (a & (c | b)));
//
t1 = g + (rrot(d, 6) ^ rrot(d, 11) ^ rrot(d, 25)) + (f ^ (d & (e ^ f))) + 0xa831c66dU + w[25];
c += t1;
g = t1 + (rrot(h, 2) ^ rrot(h, 13) ^ rrot(h, 22)) + ((b & a) | (h & (b | a)));
//
t1 = f + (rrot(c, 6) ^ rrot(c, 11) ^ rrot(c, 25)) + (e ^ (c & (d ^ e))) + 0xb00327c8U + w[26];
b += t1;
f = t1 + (rrot(g, 2) ^ rrot(g, 13) ^ rrot(g, 22)) + ((a & h) | (g & (a | h)));
//
t1 = e + (rrot(b, 6) ^ rrot(b, 11) ^ rrot(b, 25)) + (d ^ (b & (c ^ d))) + 0xbf597fc7U + w[27];
a += t1;
e = t1 + (rrot(f, 2) ^ rrot(f, 13) ^ rrot(f, 22)) + ((h & g) | (f & (h | g)));
//
t1 = d + (rrot(a, 6) ^ rrot(a, 11) ^ rrot(a, 25)) + (c ^ (a & (b ^ c))) + 0xc6e00bf3U + w[28];
h += t1;
d = t1 + (rrot(e, 2) ^ rrot(e, 13) ^ rrot(e, 22)) + ((g & f) | (e & (g | f)));
//
t1 = c + (rrot(h, 6) ^ rrot(h, 11) ^ rrot(h, 25)) + (b ^ (h & (a ^ b))) + 0xd5a79147U + w[29];
g += t1;
c = t1 + (rrot(d, 2) ^ rrot(d, 13) ^ rrot(d, 22)) + ((f & e) | (d & (f | e)));
//
t1 = b + (rrot(g, 6) ^ rrot(g, 11) ^ rrot(g, 25)) + (a ^ (g & (h ^ a))) + 0x06ca6351U + w[30];
f += t1;
b = t1 + (rrot(c, 2) ^ rrot(c, 13) ^ rrot(c, 22)) + ((e & d) | (c & (e | d)));
//
t1 = a + (rrot(f, 6) ^ rrot(f, 11) ^ rrot(f, 25)) + (h ^ (f & (g ^ h))) + 0x14292967U + w[31];
e += t1;
a = t1 + (rrot(b, 2) ^ rrot(b, 13) ^ rrot(b, 22)) + ((d & c) | (b & (d | c)));
//
t1 = h + (rrot(e, 6) ^ rrot(e, 11) ^ rrot(e, 25)) + (g ^ (e & (f ^ g))) + 0x27b70a85U + w[32];
d += t1;
h = t1 + (rrot(a, 2) ^ rrot(a, 13) ^ rrot(a, 22)) + ((c & b) | (a & (c | b)));
//
t1 = g + (rrot(d, 6) ^ rrot(d, 11) ^ rrot(d, 25)) + (f ^ (d & (e ^ f))) + 0x2e1b2138U + w[33];
c += t1;
g = t1 + (rrot(h, 2) ^ rrot(h, 13) ^ rrot(h, 22)) + ((b & a) | (h & (b | a)));
//
t1 = f + (rrot(c, 6) ^ rrot(c, 11) ^ rrot(c, 25)) + (e ^ (c & (d ^ e))) + 0x4d2c6dfcU + w[34];
b += t1;
f = t1 + (rrot(g, 2) ^ rrot(g, 13) ^ rrot(g, 22)) + ((a & h) | (g & (a | h)));
//
t1 = e + (rrot(b, 6) ^ rrot(b, 11) ^ rrot(b, 25)) + (d ^ (b & (c ^ d))) + 0x53380d13U + w[35];
a += t1;
e = t1 + (rrot(f, 2) ^ rrot(f, 13) ^ rrot(f, 22)) + ((h & g) | (f & (h | g)));
//
t1 = d + (rrot(a, 6) ^ rrot(a, 11) ^ rrot(a, 25)) + (c ^ (a & (b ^ c))) + 0x650a7354U + w[36];
h += t1;
d = t1 + (rrot(e, 2) ^ rrot(e, 13) ^ rrot(e, 22)) + ((g & f) | (e & (g | f)));
//
t1 = c + (rrot(h, 6) ^ rrot(h, 11) ^ rrot(h, 25)) + (b ^ (h & (a ^ b))) + 0x766a0abbU + w[37];
g += t1;
c = t1 + (rrot(d, 2) ^ rrot(d, 13) ^ rrot(d, 22)) + ((f & e) | (d & (f | e)));
//
t1 = b + (rrot(g, 6) ^ rrot(g, 11) ^ rrot(g, 25)) + (a ^ (g & (h ^ a))) + 0x81c2c92eU + w[38];
f += t1;
b = t1 + (rrot(c, 2) ^ rrot(c, 13) ^ rrot(c, 22)) + ((e & d) | (c & (e | d)));
//
t1 = a + (rrot(f, 6) ^ rrot(f, 11) ^ rrot(f, 25)) + (h ^ (f & (g ^ h))) + 0x92722c85U + w[39];
e += t1;
a = t1 + (rrot(b, 2) ^ rrot(b, 13) ^ rrot(b, 22)) + ((d & c) | (b & (d | c)));
//
t1 = h + (rrot(e, 6) ^ rrot(e, 11) ^ rrot(e, 25)) + (g ^ (e & (f ^ g))) + 0xa2bfe8a1U + w[40];
d += t1;
h = t1 + (rrot(a, 2) ^ rrot(a, 13) ^ rrot(a, 22)) + ((c & b) | (a & (c | b)));
//
t1 = g + (rrot(d, 6) ^ rrot(d, 11) ^ rrot(d, 25)) + (f ^ (d & (e ^ f))) + 0xa81a664bU + w[41];
c += t1;
g = t1 + (rrot(h, 2) ^ rrot(h, 13) ^ rrot(h, 22)) + ((b & a) | (h & (b | a)));
//
t1 = f + (rrot(c, 6) ^ rrot(c, 11) ^ rrot(c, 25)) + (e ^ (c & (d ^ e))) + 0xc24b8b70U + w[42];
b += t1;
f = t1 + (rrot(g, 2) ^ rrot(g, 13) ^ rrot(g, 22)) + ((a & h) | (g & (a | h)));
//
t1 = e + (rrot(b, 6) ^ rrot(b, 11) ^ rrot(b, 25)) + (d ^ (b & (c ^ d))) + 0xc76c51a3U + w[43];
a += t1;
e = t1 + (rrot(f, 2) ^ rrot(f, 13) ^ rrot(f, 22)) + ((h & g) | (f & (h | g)));
//
t1 = d + (rrot(a, 6) ^ rrot(a, 11) ^ rrot(a, 25)) + (c ^ (a & (b ^ c))) + 0xd192e819U + w[44];
h += t1;
d = t1 + (rrot(e, 2) ^ rrot(e, 13) ^ rrot(e, 22)) + ((g & f) | (e & (g | f)));
//
t1 = c + (rrot(h, 6) ^ rrot(h, 11) ^ rrot(h, 25)) + (b ^ (h & (a ^ b))) + 0xd6990624U + w[45];
g += t1;
c = t1 + (rrot(d, 2) ^ rrot(d, 13) ^ rrot(d, 22)) + ((f & e) | (d & (f | e)));
//
t1 = b + (rrot(g, 6) ^ rrot(g, 11) ^ rrot(g, 25)) + (a ^ (g & (h ^ a))) + 0xf40e3585U + w[46];
f += t1;
b = t1 + (rrot(c, 2) ^ rrot(c, 13) ^ rrot(c, 22)) + ((e & d) | (c & (e | d)));
//
t1 = a + (rrot(f, 6) ^ rrot(f, 11) ^ rrot(f, 25)) + (h ^ (f & (g ^ h))) + 0x106aa070U + w[47];
e += t1;
a = t1 + (rrot(b, 2) ^ rrot(b, 13) ^ rrot(b, 22)) + ((d & c) | (b & (d | c)));
//
t1 = h + (rrot(e, 6) ^ rrot(e, 11) ^ rrot(e, 25)) + (g ^ (e & (f ^ g))) + 0x19a4c116U + w[48];
d += t1;
h = t1 + (rrot(a, 2) ^ rrot(a, 13) ^ rrot(a, 22)) + ((c & b) | (a & (c | b)));
//
t1 = g + (rrot(d, 6) ^ rrot(d, 11) ^ rrot(d, 25)) + (f ^ (d & (e ^ f))) + 0x1e376c08U + w[49];
c += t1;
g = t1 + (rrot(h, 2) ^ rrot(h, 13) ^ rrot(h, 22)) + ((b & a) | (h & (b | a)));
//
t1 = f + (rrot(c, 6) ^ rrot(c, 11) ^ rrot(c, 25)) + (e ^ (c & (d ^ e))) + 0x2748774cU + w[50];
b += t1;
f = t1 + (rrot(g, 2) ^ rrot(g, 13) ^ rrot(g, 22)) + ((a & h) | (g & (a | h)));
//
t1 = e + (rrot(b, 6) ^ rrot(b, 11) ^ rrot(b, 25)) + (d ^ (b & (c ^ d))) + 0x34b0bcb5U + w[51];
a += t1;
e = t1 + (rrot(f, 2) ^ rrot(f, 13) ^ rrot(f, 22)) + ((h & g) | (f & (h | g)));
//
t1 = d + (rrot(a, 6) ^ rrot(a, 11) ^ rrot(a, 25)) + (c ^ (a & (b ^ c))) + 0x391c0cb3U + w[52];
h += t1;
d = t1 + (rrot(e, 2) ^ rrot(e, 13) ^ rrot(e, 22)) + ((g & f) | (e & (g | f)));
//
t1 = c + (rrot(h, 6) ^ rrot(h, 11) ^ rrot(h, 25)) + (b ^ (h & (a ^ b))) + 0x4ed8aa4aU + w[53];
g += t1;
c = t1 + (rrot(d, 2) ^ rrot(d, 13) ^ rrot(d, 22)) + ((f & e) | (d & (f | e)));
//
t1 = b + (rrot(g, 6) ^ rrot(g, 11) ^ rrot(g, 25)) + (a ^ (g & (h ^ a))) + 0x5b9cca4fU + w[54];
f += t1;
b = t1 + (rrot(c, 2) ^ rrot(c, 13) ^ rrot(c, 22)) + ((e & d) | (c & (e | d)));
//
t1 = a + (rrot(f, 6) ^ rrot(f, 11) ^ rrot(f, 25)) + (h ^ (f & (g ^ h))) + 0x682e6ff3U + w[55];
e += t1;
a = t1 + (rrot(b, 2) ^ rrot(b, 13) ^ rrot(b, 22)) + ((d & c) | (b & (d | c)));
//
t1 = h + (rrot(e, 6) ^ rrot(e, 11) ^ rrot(e, 25)) + (g ^ (e & (f ^ g))) + 0x748f82eeU + w[56];
d += t1;
h = t1 + (rrot(a, 2) ^ rrot(a, 13) ^ rrot(a, 22)) + ((c & b) | (a & (c | b)));
//
c += g + (rrot(d, 6) ^ rrot(d, 11) ^ rrot(d, 25)) + (f ^ (d & (e ^ f))) + 0x78a5636fU + w[57];
//
b += f + (rrot(c, 6) ^ rrot(c, 11) ^ rrot(c, 25)) + (e ^ (c & (d ^ e))) + 0x84c87814U + w[58];
//
a += e + (rrot(b, 6) ^ rrot(b, 11) ^ rrot(b, 25)) + (d ^ (b & (c ^ d))) + 0x8cc70208U + w[43] + w[52] + (rrot(w[44], 7) ^ rrot(w[44], 18) ^ (w[44] >> 3)) + (rrot(w[57], 17) ^ rrot(w[57], 19) ^ (w[57] >> 10));
//
h += d + (rrot(a, 6) ^ rrot(a, 11) ^ rrot(a, 25)) + (c ^ (a & (b ^ c))) + 0x90befffaU + w[44] + w[53] + (rrot(w[45], 7) ^ rrot(w[45], 18) ^ (w[45] >> 3)) + (rrot(w[58], 17) ^ rrot(w[58], 19) ^ (w[58] >> 10));
//
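// Only the last state word is needed here: the final hash word H7 equals h + 0x5be0cd19
// (the deferred IV addition), which is zero exactly when h == 0xa41f32e7. A zero H7 is
// necessary for any difficulty-1-or-higher target, so it serves as a cheap on-GPU filter;
// candidate nonces are presumably re-verified against the full target by the caller.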
if (h == 0xa41f32e7)
{
uint32_t tmp = atomicCAS(result, 0xffffffff, nonce);
if (tmp != 0xffffffff)
result[1] = nonce;
}
} // nonce loop
} // if thread<threads
}
__host__
void bitcoin_midstate(const uint32_t *data, uint32_t *midstate)
{
int i;
uint32_t s0, s1, t1, t2, maj, ch, a, b, c, d, e, f, g, h;
uint32_t w[64];
const uint32_t k[64] = {
0x428a2f98U, 0x71374491U, 0xb5c0fbcfU, 0xe9b5dba5U, 0x3956c25bU, 0x59f111f1U, 0x923f82a4U, 0xab1c5ed5U,
0xd807aa98U, 0x12835b01U, 0x243185beU, 0x550c7dc3U, 0x72be5d74U, 0x80deb1feU, 0x9bdc06a7U, 0xc19bf174U,
0xe49b69c1U, 0xefbe4786U, 0x0fc19dc6U, 0x240ca1ccU, 0x2de92c6fU, 0x4a7484aaU, 0x5cb0a9dcU, 0x76f988daU,
0x983e5152U, 0xa831c66dU, 0xb00327c8U, 0xbf597fc7U, 0xc6e00bf3U, 0xd5a79147U, 0x06ca6351U, 0x14292967U,
0x27b70a85U, 0x2e1b2138U, 0x4d2c6dfcU, 0x53380d13U, 0x650a7354U, 0x766a0abbU, 0x81c2c92eU, 0x92722c85U,
0xa2bfe8a1U, 0xa81a664bU, 0xc24b8b70U, 0xc76c51a3U, 0xd192e819U, 0xd6990624U, 0xf40e3585U, 0x106aa070U,
0x19a4c116U, 0x1e376c08U, 0x2748774cU, 0x34b0bcb5U, 0x391c0cb3U, 0x4ed8aa4aU, 0x5b9cca4fU, 0x682e6ff3U,
0x748f82eeU, 0x78a5636fU, 0x84c87814U, 0x8cc70208U, 0x90befffaU, 0xa4506cebU, 0xbef9a3f7U, 0xc67178f2U
};
const uint32_t hc[8] = {
0x6a09e667U, 0xbb67ae85U, 0x3c6ef372U, 0xa54ff53aU,
0x510e527fU, 0x9b05688cU, 0x1f83d9abU, 0x5be0cd19U
};
for (i = 0; i <= 15; i++)
{
w[i] = data[i];
}
for (i = 16; i <= 63; i++)
{
s0 = rrot(w[i - 15], 7) ^ rrot(w[i - 15], 18) ^ (w[i - 15] >> 3);
s1 = rrot(w[i - 2], 17) ^ rrot(w[i - 2], 19) ^ (w[i - 2] >> 10);
w[i] = w[i - 16] + s0 + w[i - 7] + s1;
}
a = hc[0];
b = hc[1];
c = hc[2];
d = hc[3];
e = hc[4];
f = hc[5];
g = hc[6];
h = hc[7];
for (i = 0; i <= 63; i++)
{
s0 = rrot(a, 2) ^ rrot(a, 13) ^ rrot(a, 22);
maj = (a & b) ^ (a & c) ^ (b & c);
t2 = s0 + maj;
s1 = rrot(e, 6) ^ rrot(e, 11) ^ rrot(e, 25);
ch = (e & f) ^ ((~e) & g);
t1 = h + s1 + ch + k[i] + w[i];
h = g;
g = f;
f = e;
e = d + t1;
d = c;
c = b;
b = a;
a = t1 + t2;
}
midstate[0] = a + hc[0];
midstate[1] = b + hc[1];
midstate[2] = c + hc[2];
midstate[3] = d + hc[3];
midstate[4] = e + hc[4];
midstate[5] = f + hc[5];
midstate[6] = g + hc[6];
midstate[7] = h + hc[7];
}
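// bitcoin_midstate runs one standard SHA-256 compression over the first 64-byte chunk of
// the block header. The resulting 8-word midstate depends only on the header prefix, so it
// is computed once on the host and reused for every nonce tried on the GPU.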
__host__
void bitcoin_cpu_hash(int thr_id, uint32_t threads, uint32_t startNounce, const uint32_t *const ms, uint32_t merkle, uint32_t time, uint32_t compacttarget, uint32_t *const h_nounce)
{
uint32_t b2, c2, d2, f2, g2, h2, t1, w16, w17, t1c, t2c, w16rot, w17rot;
cudaMemset(d_result[thr_id], 0xffffffff, 2 * sizeof(uint32_t));
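// The second header chunk starts with the merkle-root tail, time and compact target,
// followed by the nonce. The first three rounds and the first two message-schedule words
// (w16, w17) do not depend on the nonce, so they are precomputed here and passed to the
// kernel; t1c/t2c carry the nonce-independent part of the next round.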
t1 = ms[7] + (rrot(ms[4], 6) ^ rrot(ms[4], 11) ^ rrot(ms[4], 25)) + (ms[6] ^ (ms[4] & (ms[5] ^ ms[6]))) + 0x428a2f98U + merkle;
d2 = ms[3] + t1;
h2 = t1 + (rrot(ms[0], 2) ^ rrot(ms[0], 13) ^ rrot(ms[0], 22)) + ((ms[2] & ms[1]) | (ms[0] & (ms[2] | ms[1])));
//
t1 = ms[6] + (rrot(d2, 6) ^ rrot(d2, 11) ^ rrot(d2, 25)) + (ms[5] ^ (d2 & (ms[4] ^ ms[5]))) + 0x71374491U + time;
c2 = ms[2] + t1;
g2 = t1 + (rrot(h2, 2) ^ rrot(h2, 13) ^ rrot(h2, 22)) + ((ms[1] & ms[0]) | (h2 & (ms[1] | ms[0])));
//
t1 = ms[5] + (rrot(c2, 6) ^ rrot(c2, 11) ^ rrot(c2, 25)) + (ms[4] ^ (c2 & (d2 ^ ms[4]))) + 0xb5c0fbcfU + compacttarget;
b2 = ms[1] + t1;
f2 = t1 + (rrot(g2, 2) ^ rrot(g2, 13) ^ rrot(g2, 22)) + ((ms[0] & h2) | (g2 & (ms[0] | h2)));
w16 = merkle + (rrot(time, 7) ^ rrot(time, 18) ^ (time >> 3));
w16rot = (rrot(w16, 17) ^ rrot(w16, 19) ^ (w16 >> 10)) + compacttarget;
w17 = time + (rrot(compacttarget, 7) ^ rrot(compacttarget, 18) ^ (compacttarget >> 3)) + 0x01100000U;
w17rot = (rrot(w17, 17) ^ rrot(w17, 19) ^ (w17 >> 10)) + 0x11002000U;
t2c = (rrot(f2, 2) ^ rrot(f2, 13) ^ rrot(f2, 22)) + ((h2 & g2) | (f2 & (h2 | g2)));
t1c = ms[4] + (rrot(b2, 6) ^ rrot(b2, 11) ^ rrot(b2, 25)) + (d2 ^ (b2 & (c2 ^ d2))) + 0xe9b5dba5U;
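// Each block runs TPB threads and every thread tests NONCES_PER_THREAD nonces, so the
// grid below is sized (rounding up) to cover 'threads' nonces per launch.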
dim3 grid((threads + TPB*NONCES_PER_THREAD - 1) / TPB / NONCES_PER_THREAD);
dim3 block(TPB);
bitcoin_gpu_hash<<<grid, block>>>(threads, startNounce, d_result[thr_id], t1c, t2c, w16, w16rot, w17, w17rot, b2, c2, d2, f2, g2, h2, ms[0], ms[1], ms[2], ms[3], ms[4], ms[5], ms[6], ms[7], compacttarget);
CUDA_SAFE_CALL(cudaMemcpy(h_nounce, d_result[thr_id], 2 * sizeof(uint32_t), cudaMemcpyDeviceToHost));
}
__host__
void bitcoin_cpu_init(int thr_id)
{
CUDA_SAFE_CALL(cudaMalloc(&d_result[thr_id], 4 * sizeof(uint32_t)));
}
#include "gpu/image/blur.hpp"
#include "../deviceBuffer.hpp"
#include "../deviceStream.hpp"
#include "cuda/util.hpp"
#include "image/transpose.hpp"
#include "libvideostitch/profile.hpp"
#include <cuda_runtime.h>
#include <cassert>
#define RGBA_BOX_BLUR_1D_BLOCK_SIZE (4 * 32)
#define RGBA_BOX_BLUR_SS_1D_BLOCK_SIZE (4 * 32)
template <typename Type>
struct ScalarPixel {
typedef Type T;
};
#include "image/kernels/blurKernel.cu"
#include "image/kernels/blurKernelSmallSupport.cu"
#include "image/kernels/unrolledGaussianKernels.cu"
namespace VideoStitch {
namespace Image {
namespace {
template <typename T>
void swap(T& a, T& b) {
T tmp = a;
a = b;
b = tmp;
}
} // namespace
template <typename T>
Status boxBlur1DNoWrap(GPU::Buffer<T> dst, GPU::Buffer<const T> src, std::size_t width, std::size_t height,
unsigned radius, unsigned blockSize, GPU::Stream gpuStream) {
cudaStream_t stream = gpuStream.get();
dim3 dimBlock(blockSize, 1, 1);
dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), 1, 1);
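// Pick the most specialized kernel the radius allows: dedicated kernels when the stencil
// spans the whole column (huge/large radius), the shared-memory column kernel when the
// halo fits, and the generic 1D kernel otherwise.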
if ((std::size_t)radius >= height) {
blur1DKernelNoWrapHugeRadius<<<dimGrid, dimBlock, 0, stream>>>(dst.get().raw(), src.get().raw(), (unsigned)width,
(unsigned)height, radius);
} else if ((std::size_t)(2 * radius) >= height) {
blur1DKernelNoWrapLargeRadius<<<dimGrid, dimBlock, 0, stream>>>(dst.get().raw(), src.get().raw(), (unsigned)width,
(unsigned)height, radius);
} else if (COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= radius) { // if radius is short enough for blurColumnsKernel
dim3 blocks((unsigned)Cuda::ceilDiv(width, COLUMNS_BLOCKDIM_X),
(unsigned)Cuda::ceilDiv(height, (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)));
dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y);
blurColumnsKernelNoWrap<T><<<blocks, threads, 0, stream>>>(dst.get().raw(), src.get().raw(), (unsigned)width,
(unsigned)height, (unsigned)width, radius);
} else {
blur1DKernelNoWrap<<<dimGrid, dimBlock, 0, stream>>>(dst.get().raw(), src.get().raw(), (unsigned)width,
(unsigned)height, radius);
}
return CUDA_STATUS;
}
template <typename T>
Status boxBlur1DWrap(GPU::Buffer<T> dst, GPU::Buffer<const T> src, std::size_t width, std::size_t height,
unsigned radius, unsigned blockSize, GPU::Stream stream) {
if ((std::size_t)(2 * radius) >= height) {
// the blur takes the whole buffer for all pixels since the stencil is larger than the patchlet,
// so just resize the stencil
radius = (unsigned)(height / 2 - 1);
}
if (COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= radius) { // if radius is short enough for blurColumnsKernel
dim3 blocks((unsigned)Cuda::ceilDiv(width, COLUMNS_BLOCKDIM_X),
(unsigned)Cuda::ceilDiv(height, (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)));
dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y);
blurColumnsKernelWrap<<<blocks, threads, 0, stream.get()>>>(dst.get().raw(), src.get().raw(), (unsigned)width,
(unsigned)height, (unsigned)width, radius);
} else {
dim3 dimBlock(blockSize, 1, 1);
dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), 1, 1);
blur1DKernelWrap<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get().raw(), src.get().raw(), (unsigned)width,
(unsigned)height, radius);
}
return CUDA_STATUS;
}
template Status boxBlur1DNoWrap(GPU::Buffer<float> dst, GPU::Buffer<const float> src, std::size_t width,
std::size_t height, unsigned radius, unsigned blockSize, GPU::Stream stream);
template Status boxBlur1DNoWrap(GPU::Buffer<float2> dst, GPU::Buffer<const float2> src, std::size_t width,
std::size_t height, unsigned radius, unsigned blockSize, GPU::Stream stream);
template Status boxBlur1DNoWrap(GPU::Buffer<unsigned char> dst, GPU::Buffer<const unsigned char> src, std::size_t width,
std::size_t height, unsigned radius, unsigned blockSize, GPU::Stream stream);
template Status boxBlur1DWrap(GPU::Buffer<unsigned char> dst, GPU::Buffer<const unsigned char> src, std::size_t width,
std::size_t height, unsigned radius, unsigned blockSize, GPU::Stream stream);
template <typename T>
Status gaussianBlur2D(GPU::Buffer<T> dst, GPU::Buffer<const T> src, GPU::Buffer<T> work, std::size_t width,
std::size_t height, unsigned radius, unsigned passes, bool wrap, GPU::Stream stream) {
assert(passes > 0);
const unsigned blockSize = RGBA_BOX_BLUR_1D_BLOCK_SIZE;
// First pass is from src to work;
PROPAGATE_FAILURE_STATUS(boxBlur1DNoWrap(work, src, width, height, radius, blockSize, stream));
// Other passes ping-pong between work buffers.
GPU::Buffer<T> srcBuf = work;
GPU::Buffer<T> dstBuf = dst;
for (unsigned i = 1; i < passes; ++i) {
PROPAGATE_FAILURE_STATUS(boxBlur1DNoWrap(dstBuf, srcBuf.as_const(), width, height, radius, blockSize, stream));
swap(dstBuf, srcBuf);
}
// transpose
PROPAGATE_FAILURE_STATUS(transpose(dstBuf.get().raw(), srcBuf.get().raw(), width, height, stream));
swap(dstBuf, srcBuf);
if (wrap) {
for (unsigned i = 0; i < passes; ++i) {
PROPAGATE_FAILURE_STATUS(boxBlur1DWrap(dstBuf, srcBuf.as_const(), height, width, radius, blockSize, stream));
swap(dstBuf, srcBuf);
}
} else {
for (unsigned i = 0; i < passes; ++i) {
PROPAGATE_FAILURE_STATUS(boxBlur1DNoWrap(dstBuf, srcBuf.as_const(), height, width, radius, blockSize, stream));
swap(dstBuf, srcBuf);
}
}
PROPAGATE_FAILURE_STATUS(transpose(dstBuf.get().raw(), srcBuf.get().raw(), height, width, stream));
// There are (passes - 1) swaps, then the transpose swap, then passes swaps.
// i.e. 2 * passes swaps. So overall srcBuf and dstBuf are unchanged from their first state.
assert(dstBuf == dst);
return CUDA_STATUS;
}
template Status gaussianBlur2D(GPU::Buffer<unsigned char> dst, GPU::Buffer<const unsigned char> src,
GPU::Buffer<unsigned char> work, std::size_t width, std::size_t height, unsigned radius,
unsigned passes, bool wrap, GPU::Stream stream);
template Status gaussianBlur2D(GPU::Buffer<float2> dst, GPU::Buffer<const float2> src, GPU::Buffer<float2> work,
std::size_t width, std::size_t height, unsigned radius, unsigned passes, bool wrap,
GPU::Stream stream);
Status boxBlurColumnsWrapRGBA210(GPU::Buffer<uint32_t> dst, GPU::Buffer<const uint32_t> src, std::size_t width,
std::size_t height, unsigned radius, GPU::Stream stream) {
if ((std::size_t)(2 * radius) >= height) {
// the blur takes the whole buffer for all pixels since the stencil is larger than the patchlet,
// so just resize the stencil
radius = (unsigned)(height / 2 - 1);
}
if (COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= radius) { // if radius is short enough for blurColumnsKernel
dim3 blocks((unsigned)Cuda::ceilDiv(width, COLUMNS_BLOCKDIM_X),
(unsigned)Cuda::ceilDiv(height, (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)));
dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y);
blurColumnsKernelWrap<uint32_t><<<blocks, threads, 0, stream.get()>>>(
dst.get().raw(), src.get().raw(), (unsigned)width, (unsigned)height, (unsigned)width, radius);
} else {
dim3 dimBlock(RGBA_BOX_BLUR_1D_BLOCK_SIZE, 1, 1);
dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), 1, 1);
blur1DKernelWrap<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get().raw(), src.get().raw(), (unsigned)width,
(unsigned)height, radius);
}
return CUDA_STATUS;
}
Status boxBlurColumnsNoWrapRGBA210(GPU::Buffer<uint32_t> dst, GPU::Buffer<const uint32_t> src, std::size_t width,
std::size_t height, unsigned radius, GPU::Stream gpuStream) {
cudaStream_t stream = gpuStream.get();
dim3 dimBlock(RGBA_BOX_BLUR_1D_BLOCK_SIZE, 1, 1);
dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), 1, 1);
if ((std::size_t)radius >= height) {
blur1DKernelNoWrapHugeRadius<<<dimGrid, dimBlock, 0, stream>>>(dst.get().raw(), src.get().raw(), (unsigned)width,
(unsigned)height, radius);
} else if ((std::size_t)(2 * radius) >= height) {
blur1DKernelNoWrapLargeRadius<<<dimGrid, dimBlock, 0, stream>>>(dst.get().raw(), src.get().raw(), (unsigned)width,
(unsigned)height, radius);
} else if (COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= radius) { // if radius is short enough for blurColumnsKernel
dim3 blocks((unsigned)Cuda::ceilDiv(width, COLUMNS_BLOCKDIM_X),
(unsigned)Cuda::ceilDiv(height, (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)));
dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y);
blurColumnsKernelNoWrap<uint32_t><<<blocks, threads, 0, stream>>>(dst.get().raw(), src.get().raw(), (unsigned)width,
(unsigned)height, (unsigned)width, radius);
} else {
blur1DKernelNoWrap<<<dimGrid, dimBlock, 0, stream>>>(dst.get().raw(), src.get().raw(), (unsigned)width,
(unsigned)height, radius);
}
return CUDA_STATUS;
}
Status boxBlurRowsRGBA210(GPU::Buffer<uint32_t> dst, GPU::Buffer<const uint32_t> src, std::size_t width,
std::size_t height, unsigned radius, GPU::Stream stream, bool wrap) {
dim3 blocks((unsigned)Cuda::ceilDiv(width, (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X)),
(unsigned)Cuda::ceilDiv(height, ROWS_BLOCKDIM_Y));
dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y);
if ((std::size_t)(2 * radius) >= width) {
// the blur takes the whole buffer for all pixels since the stencil is larger than the patchlet,
// so just resize the stencil
radius = (unsigned)(width / 2 - 1);
}
if (wrap) {
blurRowsKernelWrap<<<blocks, threads, 0, stream.get()>>>(dst.get().raw(), src.get().raw(), width, height, width,
radius);
} else {
blurRowsKernelNoWrap<<<blocks, threads, 0, stream.get()>>>(dst.get().raw(), src.get().raw(), width, height, width,
radius);
}
return CUDA_STATUS;
}
Status gaussianBlur2DRGBA210(GPU::Buffer<uint32_t> dst, GPU::Buffer<const uint32_t> src, GPU::Buffer<uint32_t> work,
std::size_t width, std::size_t height, unsigned radius, unsigned passes, bool wrap,
GPU::Stream stream) {
assert(passes > 0);
// First pass is from src to work;
PROPAGATE_FAILURE_STATUS(boxBlurColumnsNoWrapRGBA210(work, src, width, height, radius, stream));
// Other passes ping-pong between work buffers.
GPU::Buffer<uint32_t> srcBuf = work;
GPU::Buffer<uint32_t> dstBuf = dst;
for (unsigned i = 1; i < passes; ++i) {
PROPAGATE_FAILURE_STATUS(boxBlurColumnsNoWrapRGBA210(dstBuf, srcBuf.as_const(), width, height, radius, stream));
swap(dstBuf, srcBuf);
}
if ((ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= radius) &&
((std::size_t)2 * radius < height)) { // boxBlurRowsRGBA210 works only in this case
for (unsigned i = 0; i < passes; ++i) {
PROPAGATE_FAILURE_STATUS(boxBlurRowsRGBA210(dstBuf, srcBuf.as_const(), width, height, radius, stream, wrap));
swap(dstBuf, srcBuf);
}
swap(dstBuf, srcBuf);
assert(dstBuf == dst);
} else {
// transpose
PROPAGATE_FAILURE_STATUS(transpose(dstBuf.get().raw(), srcBuf.get().raw(), width, height, stream));
swap(dstBuf, srcBuf);
if (wrap) {
for (unsigned i = 0; i < passes; ++i) {
PROPAGATE_FAILURE_STATUS(boxBlurColumnsWrapRGBA210(dstBuf, srcBuf.as_const(), height, width, radius, stream));
swap(dstBuf, srcBuf);
}
} else {
for (unsigned i = 0; i < passes; ++i) {
PROPAGATE_FAILURE_STATUS(boxBlurColumnsNoWrapRGBA210(dstBuf, srcBuf.as_const(), height, width, radius, stream));
swap(dstBuf, srcBuf);
}
}
PROPAGATE_FAILURE_STATUS(transpose(dstBuf.get().raw(), srcBuf.get().raw(), height, width, stream));
// There are (passes - 1) swaps, then the transpose swap, then passes swaps.
// i.e. 2 * passes swaps. So overall srcBuf and dstBuf are unchanged from their first state.
assert(dstBuf == dst);
}
return CUDA_STATUS;
}
Status gaussianBlur1DRGBA210SS(uint32_t* dst, const uint32_t* src, std::size_t width, std::size_t height,
unsigned radius, bool wrap, GPU::Stream gpuStream) {
cudaStream_t stream = gpuStream.get();
// Block organization is as follows for a 5x3 image and dimBlock.x == 3
// 00 00 00 10 10
// 01 01 01 11 11
// 02 02 02 12 12
// Handle the interior
if ((unsigned)width > 2 * radius) {
dim3 dimBlock(RGBA_BOX_BLUR_SS_1D_BLOCK_SIZE, 1, 1);
dim3 dimGrid((unsigned)Cuda::ceilDiv(width - 2 * radius, dimBlock.x), (unsigned)height, 1);
assert(2 * radius < dimBlock.x);
switch (radius) {
case 1:
gaussianBlur1DRGBA210SSKernelInterior<unrolledGaussianKernel1>
<<<dimGrid, dimBlock, 16 * (dimBlock.x + 2 * radius), stream>>>(dst, src, (unsigned)width, (unsigned)height,
radius);
break;
case 2:
gaussianBlur1DRGBA210SSKernelInterior<unrolledGaussianKernel2>
<<<dimGrid, dimBlock, 16 * (dimBlock.x + 2 * radius), stream>>>(dst, src, (unsigned)width, (unsigned)height,
radius);
break;
case 3:
gaussianBlur1DRGBA210SSKernelInterior<unrolledGaussianKernel3>
<<<dimGrid, dimBlock, 16 * (dimBlock.x + 2 * radius), stream>>>(dst, src, (unsigned)width, (unsigned)height,
radius);
break;
case 4:
gaussianBlur1DRGBA210SSKernelInterior<unrolledGaussianKernel4>
<<<dimGrid, dimBlock, 16 * (dimBlock.x + 2 * radius), stream>>>(dst, src, (unsigned)width, (unsigned)height,
radius);
break;
case 5:
gaussianBlur1DRGBA210SSKernelInterior<unrolledGaussianKernel5>
<<<dimGrid, dimBlock, 16 * (dimBlock.x + 2 * radius), stream>>>(dst, src, (unsigned)width, (unsigned)height,
radius);
break;
case 6:
gaussianBlur1DRGBA210SSKernelInterior<unrolledGaussianKernel6>
<<<dimGrid, dimBlock, 16 * (dimBlock.x + 2 * radius), stream>>>(dst, src, (unsigned)width, (unsigned)height,
radius);
break;
default:
assert(false);
break;
}
}
// There are exactly radius pixels on each border (left and right) + radius pixels before and after them.
assert(4 * radius <= 32);
dim3 dimBlock(4 * radius, 1, 1);
dim3 dimGrid(1, (unsigned)height, 1);
if (wrap) {
switch (radius) {
case 1:
gaussianBlur1DRGBA210SSKernelWrap<unrolledGaussianKernel1>
<<<dimGrid, dimBlock, 16 * dimBlock.x, stream>>>(dst, src, (unsigned)width, (unsigned)height, radius);
break;
case 2:
gaussianBlur1DRGBA210SSKernelWrap<unrolledGaussianKernel2>
<<<dimGrid, dimBlock, 16 * dimBlock.x, stream>>>(dst, src, (unsigned)width, (unsigned)height, radius);
break;
case 3:
gaussianBlur1DRGBA210SSKernelWrap<unrolledGaussianKernel3>
<<<dimGrid, dimBlock, 16 * dimBlock.x, stream>>>(dst, src, (unsigned)width, (unsigned)height, radius);
break;
case 4:
gaussianBlur1DRGBA210SSKernelWrap<unrolledGaussianKernel4>
<<<dimGrid, dimBlock, 16 * dimBlock.x, stream>>>(dst, src, (unsigned)width, (unsigned)height, radius);
break;
case 5:
gaussianBlur1DRGBA210SSKernelWrap<unrolledGaussianKernel5>
<<<dimGrid, dimBlock, 16 * dimBlock.x, stream>>>(dst, src, (unsigned)width, (unsigned)height, radius);
break;
case 6:
gaussianBlur1DRGBA210SSKernelWrap<unrolledGaussianKernel6>
<<<dimGrid, dimBlock, 16 * dimBlock.x, stream>>>(dst, src, (unsigned)width, (unsigned)height, radius);
break;
default:
assert(false);
break;
}
} else {
switch (radius) {
case 1:
gaussianBlur1DRGBA210SSKernelNoWrap<unrolledGaussianKernel1>
<<<dimGrid, dimBlock, 16 * dimBlock.x, stream>>>(dst, src, (unsigned)width, (unsigned)height, radius);
break;
case 2:
gaussianBlur1DRGBA210SSKernelNoWrap<unrolledGaussianKernel2>
<<<dimGrid, dimBlock, 16 * dimBlock.x, stream>>>(dst, src, (unsigned)width, (unsigned)height, radius);
break;
case 3:
gaussianBlur1DRGBA210SSKernelNoWrap<unrolledGaussianKernel3>
<<<dimGrid, dimBlock, 16 * dimBlock.x, stream>>>(dst, src, (unsigned)width, (unsigned)height, radius);
break;
case 4:
gaussianBlur1DRGBA210SSKernelNoWrap<unrolledGaussianKernel4>
<<<dimGrid, dimBlock, 16 * dimBlock.x, stream>>>(dst, src, (unsigned)width, (unsigned)height, radius);
break;
case 5:
gaussianBlur1DRGBA210SSKernelNoWrap<unrolledGaussianKernel5>
<<<dimGrid, dimBlock, 16 * dimBlock.x, stream>>>(dst, src, (unsigned)width, (unsigned)height, radius);
break;
case 6:
gaussianBlur1DRGBA210SSKernelNoWrap<unrolledGaussianKernel6>
<<<dimGrid, dimBlock, 16 * dimBlock.x, stream>>>(dst, src, (unsigned)width, (unsigned)height, radius);
break;
default:
assert(false);
break;
}
}
return CUDA_STATUS;
}
Status gaussianBlur2DRGBA210SS(uint32_t* buf, uint32_t* work, std::size_t width, std::size_t height, unsigned radius,
bool wrap, GPU::Stream stream) {
// Vertical pass, never wraps.
PROPAGATE_FAILURE_STATUS(gaussianBlur1DRGBA210SS(work, buf, width, height, radius, false, stream));
// transpose
PROPAGATE_FAILURE_STATUS(transpose(buf, work, width, height, stream));
PROPAGATE_FAILURE_STATUS(gaussianBlur1DRGBA210SS(work, buf, height, width, radius, wrap, stream));
return transpose(buf, work, height, width, stream);
}
// TODO_GPU_DEPRECATE
// only used in test currently
Status gaussianBlur2D(GPU::Buffer<unsigned char> buf, GPU::Buffer<unsigned char> work, std::size_t width,
std::size_t height, unsigned radius, unsigned passes, bool wrap, unsigned blockSize,
GPU::Stream stream) {
// Avoid copy: force even passes
assert((passes & 1) == 0);
for (unsigned i = 0; i < passes / 2; ++i) {
PROPAGATE_FAILURE_STATUS(boxBlur1DNoWrap(work, buf.as_const(), width, height, radius, blockSize, stream));
PROPAGATE_FAILURE_STATUS(boxBlur1DNoWrap(buf, work.as_const(), width, height, radius, blockSize, stream));
}
// transpose
PROPAGATE_FAILURE_STATUS(transpose(work.get().raw(), buf.as_const().get().raw(), width, height, stream));
for (unsigned i = 0; i < passes / 2; ++i) {
if (wrap) {
PROPAGATE_FAILURE_STATUS(boxBlur1DWrap(buf, work.as_const(), height, width, radius, blockSize, stream));
PROPAGATE_FAILURE_STATUS(boxBlur1DWrap(work, buf.as_const(), height, width, radius, blockSize, stream));
} else {
PROPAGATE_FAILURE_STATUS(boxBlur1DNoWrap(buf, work.as_const(), height, width, radius, blockSize, stream));
PROPAGATE_FAILURE_STATUS(boxBlur1DNoWrap(work, buf.as_const(), height, width, radius, blockSize, stream));
}
}
return transpose(buf.get().raw(), work.as_const().get().raw(), height, width, stream);
}
Status gaussianBlur2DRGBA(GPU::Buffer<uint32_t> dst, GPU::Buffer<const uint32_t> src, GPU::Buffer<uint32_t> work,
std::size_t width, std::size_t height, unsigned /*radius*/, unsigned /*passes*/, bool wrap,
GPU::Stream stream) {
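// Fixed separable 5-tap binomial kernel (1 4 6 4 1), a small-Gaussian approximation;
// the radius and passes parameters are intentionally ignored by this variant.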
uint32_t* h_Kernel = (uint32_t*)malloc((2 * KERNEL_RADIUS + 1) * sizeof(uint32_t));
h_Kernel[0] = 1;
h_Kernel[1] = 4;
h_Kernel[2] = 6;
h_Kernel[3] = 4;
h_Kernel[4] = 1;
setConvolutionKernel(h_Kernel);
{
dim3 blocks((unsigned)Cuda::ceilDiv(width, ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X),
(unsigned)Cuda::ceilDiv(height, ROWS_BLOCKDIM_Y));
dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y);
if (wrap) {
convolutionRowsKernel<true><<<blocks, threads, 0, stream.get()>>>(
work.get().raw(), src.get().raw(), (unsigned)width, (unsigned)height, (unsigned)width);
} else {
convolutionRowsKernel<false><<<blocks, threads, 0, stream.get()>>>(
work.get().raw(), src.get().raw(), (unsigned)width, (unsigned)height, (unsigned)width);
}
}
{
dim3 blocks((unsigned)Cuda::ceilDiv(width, COLUMNS_BLOCKDIM_X),
(unsigned)Cuda::ceilDiv(height, COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y));
dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y);
convolutionColumnsKernel<<<blocks, threads, 0, stream.get()>>>(dst.get().raw(), work.get().raw(), (unsigned)width,
(unsigned)height, (unsigned)width);
}
free(h_Kernel);
return CUDA_STATUS;
}
} // namespace Image
} // namespace VideoStitch
namespace at {
namespace native {
#if defined(USE_ROCM)
constexpr int CAT_ARRAY_BATCH_SIZE = 1024;
#else
constexpr int CAT_ARRAY_BATCH_SIZE = 128;
#endif
constexpr int CAT_ARRAY_MAX_INPUT_DIMS = 4;
namespace {
inline bool getCatGrid(ptrdiff_t nTensors, dim3& grid) {
const int numSM = at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
//X dim of grid for cat array cooperates on a single tensor in the cat.
//Given half of the GPU, full utilization will always occur.
grid = dim3( 2LL * numSM, (long long) nTensors );
return true;
}
// Similar to any other IndexToOffset calculation for copying along a given
// dimension.
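// Given the output tensor's sizes/strides, with the concat dimension's extent replaced by
// this input's dimSize, compute() maps an element index of one input to its offset inside
// the output; the caller separately adds offset * dimStride to shift the block to its
// position along the concat dimension.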
template <typename IndexType, int Dims>
struct CatArrIndexToOffset {
static inline __device__ IndexType compute(
const IndexType tensorSize[Dims],
const IndexType tensorStride[Dims],
const IndexType dimSize,
const unsigned int concatDim,
IndexType linearIndex) {
// linearIndex is not really a linear index, but rather the offset into the
// input tensor. If the input tensor is contiguous, this offset is the linear
// index, but if the input tensor is channels-last, it is the linear index of
// the permuted contiguous tensor.
IndexType offset = 0;
#pragma unroll
for (int i = Dims - 1; i >= 1; --i) {
IndexType curDimSize = i == concatDim ? dimSize : tensorSize[i];
IndexType nextDimIndex = linearIndex / curDimSize;
IndexType curDimIndex = linearIndex - curDimSize * nextDimIndex;
IndexType curDimOffset = curDimIndex * tensorStride[i];
offset += curDimOffset;
linearIndex = nextDimIndex;
}
return offset + linearIndex * tensorStride[0];
}
};
template<typename IndexType, unsigned int MaxDims>
struct TensorSizeStride {
IndexType tensorSize[MaxDims];
IndexType tensorStride[MaxDims];
};
/**
 * Kernel used to concatenate gridDim.y tensors into an output tensor. Uses a
* grid-stride loop based off of the blockIdx.x, threadIdx.x for each input to
* copy each element from each input tensor into the output.
*
* output: base pointer to the storage associated with the output tensor
* inputs: GPU-allocated array of input metadata for each input to concatenate
* in the kernel
* os: the size/stride vectors for the output tensor
* concatDim: dimension along which we are concatenating
* dimStride: the stride of the output tensor at the concatDim
*
* The most important assumption made is that the input tensors are contiguous.
*/
// Use pinned memory and pass the struct by pointer on ROCm
template <typename T, typename IndexType>
struct CatArrInputTensor {
T* input;
IndexType offset;
IndexType dimSize;
IndexType nElements;
};
template <typename T, typename IndexType, int Dims>
C10_LAUNCH_BOUNDS_1(512)
__global__ void HIP_CatArrayBatchedCopy(
T* output,
CatArrInputTensor<T, IndexType>* inputs,
TensorSizeStride<IndexType, CAT_ARRAY_MAX_INPUT_DIMS> os,
const int concatDim,
IndexType dimStride) {
IndexType tid = blockIdx.x * blockDim.x + threadIdx.x;
IndexType nElements = inputs[blockIdx.y].nElements;
if(tid >= nElements) return;
T* data = inputs[blockIdx.y].input;
IndexType offset = inputs[blockIdx.y].offset;
IndexType dimSize = inputs[blockIdx.y].dimSize;
IndexType dataOffset = offset * dimStride;
IndexType stride = gridDim.x * blockDim.x;
while( tid < nElements){
IndexType elementOffset = CatArrIndexToOffset<IndexType, Dims>::compute(
os.tensorSize, os.tensorStride, dimSize, concatDim, tid);
output[dataOffset + elementOffset] = data[tid];
tid += stride;
}
}
// Pass metadata directly through kernel arguments instead of pinned memory.
// In the contiguous case we do not need per-input strides, so stride_size is set to 1
// as a placeholder to keep the code compiling.
template <typename T, typename IndexType, int n, int stride_size>
struct CatArrInputTensorMetadata {
T* input[n];
IndexType offset[n];
IndexType dimSize[n];
IndexType nElements[n];
bool isContiguous[n];
TensorSizeStride<IndexType, CAT_ARRAY_MAX_INPUT_DIMS> tensorStride[stride_size];
};
template <typename T, typename IndexType, int Dims, int batch_size, int stride_size>
__global__ void CatArrayBatchedCopy(
T* output,
CatArrInputTensorMetadata<T, IndexType, batch_size, stride_size> inputs,
TensorSizeStride<IndexType, CAT_ARRAY_MAX_INPUT_DIMS> os,
const int concatDim,
IndexType dimStride) {
IndexType tid = blockIdx.x * blockDim.x + threadIdx.x;
IndexType nElements = inputs.nElements[blockIdx.y];
TensorSizeStride<IndexType, CAT_ARRAY_MAX_INPUT_DIMS> ins = stride_size > 1 ? inputs.tensorStride[blockIdx.y] : inputs.tensorStride[0];
bool isContig = inputs.isContiguous[blockIdx.y];
if(tid >= nElements) return;
T* data = inputs.input[blockIdx.y];
IndexType offset = inputs.offset[blockIdx.y];
IndexType dimSize = inputs.dimSize[blockIdx.y];
IndexType dataOffset = offset * dimStride;
IndexType stride = gridDim.x * blockDim.x;
while( tid < nElements){
IndexType elementOffset = CatArrIndexToOffset<IndexType, Dims>::compute(
os.tensorSize, os.tensorStride, dimSize, concatDim, tid);
if (isContig) {
output[dataOffset + elementOffset] = data[tid];
} else {
IndexType inElementOffset = CatArrIndexToOffset<IndexType, Dims>::compute(
ins.tensorSize, ins.tensorStride, dimSize, concatDim, tid);
output[dataOffset + elementOffset] = data[inElementOffset];
}
tid += stride;
}
}
template <typename scalar_t>
void hip_parallel_cat(Tensor &out, const TensorList &inputs, int64_t dimension,
int nDims, c10::MemoryFormat memory_format) {
// First, let's set up our kernel parameters. We start with a raw pointer to
// the storage for the output Tensor.
scalar_t *data = out.data_ptr<scalar_t>();
// Kernel Parameter
long tensorMetadataSize =
sizeof(CatArrInputTensor<scalar_t, unsigned int>) * CAT_ARRAY_BATCH_SIZE;
auto d_inputs_storage = at::empty(
{tensorMetadataSize}, out.options().dtype(at::kByte));
auto d_inputs = static_cast<CatArrInputTensor<scalar_t, unsigned int> *>(
d_inputs_storage.data_ptr());
TensorSizeStride<unsigned int, CAT_ARRAY_MAX_INPUT_DIMS> outputParam;
// Next, let's initialize the size, stride arrays for the output Tensor.
if (memory_format == c10::MemoryFormat::Contiguous) {
for (int i = 0; i < nDims; ++i) {
outputParam.tensorSize[i] = at::native::size(out, i);
outputParam.tensorStride[i] = out.stride(i);
}
} else if (memory_format == c10::MemoryFormat::ChannelsLast || memory_format == c10::MemoryFormat::ChannelsLast3d) {
// permute the semantics of dims from NCHW to NHWC so that the input
// tensor is now contiguous
outputParam.tensorSize[0] = at::native::size(out, 0);
outputParam.tensorStride[0] = out.stride(0);
for (int i = 1; i < nDims - 1; ++i) {
outputParam.tensorSize[i] = at::native::size(out, i + 1);
outputParam.tensorStride[i] = out.stride(i + 1);
}
outputParam.tensorSize[nDims - 1] = at::native::size(out, 1);
outputParam.tensorStride[nDims - 1] = out.stride(1);
} else {
TORCH_CHECK(false, "unsupported memory format");
}
at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream();
// Now we loop
int batchCounter = 0;
int64_t offset = 0;
for (int i = 0; i < inputs.size() ; i += CAT_ARRAY_BATCH_SIZE) {
// Re-allocate stackInputs every iteration to avoid read-after-write hazard
{
auto stackInputs_storage = at::empty({tensorMetadataSize},
out.options().dtype(at::kByte).device(at::kCPU).pinned_memory(true));
auto stackInputs =
static_cast<CatArrInputTensor<scalar_t, unsigned int> *>(
stackInputs_storage.data_ptr());
for (batchCounter = 0;
batchCounter < CAT_ARRAY_BATCH_SIZE &&
(i+batchCounter) < inputs.size();
++batchCounter) {
int64_t dimSize = 0;
// There is a legacy case where a 1-D empty tensor can be concat with
// high-dimensional tensor
if (inputs[i+batchCounter].numel() > 0) {
dimSize = at::native::size(inputs[i+batchCounter], dimension);
}
stackInputs[batchCounter].input =
inputs[i+batchCounter].data_ptr<scalar_t>();
stackInputs[batchCounter].offset = offset;
stackInputs[batchCounter].dimSize = dimSize;
stackInputs[batchCounter].nElements = inputs[i+batchCounter].numel();
// update offset
offset += dimSize;
}
at::native::copy_(d_inputs_storage, stackInputs_storage,
/* non_blocking= */ true);
}
// Next, let's consider how we set our kernel launch parameters.
// We borrow from THCApply, which the kernel's internal indexing
// is based on.
dim3 applyBlock = dim3(32*16);
// Get a grid whose x dim fills half the GPU and whose y dim is the number of tensors.
// This lets concatenating two tensors fill the entire grid, but prevents
// many threads from needlessly loading metadata when their sizes are small.
dim3 catGrid;
getCatGrid(batchCounter, catGrid);
if (memory_format != c10::MemoryFormat::Contiguous) {
switch (dimension) {
case 0:
break;
case 1:
dimension = nDims - dimension;
break;
default:
dimension--;
}
}
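    // Worked example of the remapping above: for a 4-D channels-last output
    // (nDims == 4), the NCHW concat dimension maps onto the NHWC index used by
    // outputParam as  0 (N) -> 0,  1 (C) -> nDims - 1 = 3,  2 (H) -> 1,
    // 3 (W) -> 2, matching the size/stride permutation performed earlier.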
// Template Declarations for dim = 1, 2, 3, 4
#define HANDLE_CASE(DIMS) \
HIP_CatArrayBatchedCopy<scalar_t, unsigned int, DIMS><<<\
catGrid, applyBlock, 0, stream.stream()>>>(\
data, d_inputs, outputParam, dimension, outputParam.tensorStride[dimension]); \
C10_CUDA_KERNEL_LAUNCH_CHECK();
switch (nDims) {
case 1:
HANDLE_CASE(1);
break;
case 2:
HANDLE_CASE(2);
break;
case 3:
HANDLE_CASE(3);
break;
case 4:
HANDLE_CASE(4);
break;
}
#undef HANDLE_CASE
}
}
template <typename scalar_t, int batch_size, int stride_size>
void parallel_cat(Tensor &out, const TensorList &inputs, int64_t dimension,
int nDims, c10::MemoryFormat memory_format) {
// First, let's set up our kernel parameters. We start with a raw pointer to
// the storage for the output Tensor.
scalar_t *data = out.data_ptr<scalar_t>();
CatArrInputTensorMetadata<scalar_t, unsigned int, batch_size, stride_size> catMetaData;
TensorSizeStride<unsigned int, CAT_ARRAY_MAX_INPUT_DIMS> outputParam;
// Next, let's initialize the size, stride arrays for the output Tensor.
if (memory_format == c10::MemoryFormat::Contiguous) {
for (int i = 0; i < nDims; ++i) {
outputParam.tensorSize[i] = at::native::size(out, i);
outputParam.tensorStride[i] = out.stride(i);
}
} else if (memory_format == c10::MemoryFormat::ChannelsLast || memory_format == c10::MemoryFormat::ChannelsLast3d) {
// permute the semantics of dims from NCHW to NHWC so that the input
// tensor is now contiguous
outputParam.tensorSize[0] = at::native::size(out, 0);
outputParam.tensorStride[0] = out.stride(0);
for (int i = 1; i < nDims - 1; ++i) {
outputParam.tensorSize[i] = at::native::size(out, i + 1);
outputParam.tensorStride[i] = out.stride(i + 1);
}
outputParam.tensorSize[nDims - 1] = at::native::size(out, 1);
outputParam.tensorStride[nDims - 1] = out.stride(1);
} else {
TORCH_CHECK(false, "unsupported memory format");
}
at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream();
// Now we loop
int batchCounter = 0;
int64_t offset = 0;
for (int i = 0; i < inputs.size() ; i += batch_size) {
for (batchCounter = 0;
batchCounter < batch_size &&
(i+batchCounter) < inputs.size();
++batchCounter) {
int64_t dimSize = 0;
      // There is a legacy case where a 1-D empty tensor can be concatenated
      // with a high-dimensional tensor
if (inputs[i+batchCounter].numel() > 0) {
dimSize = at::native::size(inputs[i+batchCounter], dimension);
}
catMetaData.input[batchCounter] = inputs[i+batchCounter].data_ptr<scalar_t>();
catMetaData.offset[batchCounter] = offset;
catMetaData.dimSize[batchCounter] = dimSize;
catMetaData.nElements[batchCounter] = inputs[i+batchCounter].numel();
if (stride_size > 1) {
auto strides = inputs[i+batchCounter].strides();
auto sizes = inputs[i+batchCounter].sizes();
for(int j = 0; j < nDims; j++){
catMetaData.tensorStride[batchCounter].tensorSize[j] = sizes[j];
catMetaData.tensorStride[batchCounter].tensorStride[j] = strides[j];
}
catMetaData.isContiguous[batchCounter] = false;
} else {
catMetaData.isContiguous[batchCounter] = true;
}
// update offset
offset += dimSize;
}
    // Next, let's consider how we set our kernel launch parameters.
    // We borrow from THCApply, on which the kernel's internal indexing
    // is based.
    dim3 applyBlock = dim3(32*16);
    // Get a grid whose x dim fills half the GPU and whose y dim is the number
    // of tensors. Concatenating two tensors then fills the entire grid, while
    // small tensors avoid having many threads needlessly load metadata.
dim3 catGrid;
getCatGrid(batchCounter, catGrid);
if (memory_format != c10::MemoryFormat::Contiguous) {
switch (dimension) {
case 0:
break;
case 1:
dimension = nDims - dimension;
break;
default:
dimension--;
}
}
// Template Declarations for dim = 1, 2, 3, 4
#define HANDLE_CASE(DIMS) \
CatArrayBatchedCopy<scalar_t, unsigned int, DIMS, batch_size, stride_size><<<\
catGrid, applyBlock, 0, stream.stream()>>>(\
data, catMetaData, outputParam, dimension, outputParam.tensorStride[dimension]); \
C10_CUDA_KERNEL_LAUNCH_CHECK();
switch (nDims) {
case 1:
HANDLE_CASE(1);
break;
case 2:
HANDLE_CASE(2);
break;
case 3:
HANDLE_CASE(3);
break;
case 4:
HANDLE_CASE(4);
break;
}
#undef HANDLE_CASE
}
}
} // namespace
Tensor cat_cuda(TensorList inputs, int64_t dimension) {
ScalarType high_type = result_type(inputs);
Tensor out = at::empty({0}, inputs.front().options().dtype(high_type));
at::native::cat_out_cuda(inputs, dimension, out);
return out;
}
inline c10::MemoryFormat compute_output_memory_format(const TensorList &inputs) {
c10::optional<c10::MemoryFormat> format = c10::nullopt;
for (auto &t : inputs) {
auto f = t.suggest_memory_format();
if (!format.has_value()) {
format = f;
continue;
}
if (format.value() == f) {
continue;
}
bool contiguous = (format.value() == c10::MemoryFormat::Contiguous || f == c10::MemoryFormat::Contiguous || format.value() != f);
if (contiguous) {
return c10::MemoryFormat::Contiguous;
}
}
return format.value();
}
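// Behavior of compute_output_memory_format in brief: if every input suggests
// the same memory format, that format is returned; as soon as two inputs
// disagree, the function falls back to c10::MemoryFormat::Contiguous.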
Tensor& cat_out_cuda(TensorList inputs, int64_t dimension, Tensor& out) {
// previously, size [0] tensors were the only possible empty tensors; thus, it
// wasn't possible to cat empty tensors unless all the other tensors were
// 1-dimensional, so we allowed these tensors to be "skipped". We maintain
// this behavior for backwards compatibility, but only for this specific size
// (i.e. other empty sizes are not skipped).
// FIXME: warn if this is the case
auto should_skip = [](const Tensor &t) {
return t.dim() == 1 && at::native::size(t, 0) == 0;
};
const Tensor *notSkippedTensor = NULL; // non-owning reference
int nDims = 0;
// Check for type promotion
TORCH_CHECK(canCast(result_type(inputs), out.scalar_type()), "torch.cat(): input types ",
" can't be cast to the desired output type ",
out.scalar_type());
// Inputs cannot alias the output tensor
for (int i = 0; i < inputs.size(); i++) {
auto lap = at::get_overlap_status(out, inputs[i]);
TORCH_CHECK(lap != at::MemOverlapStatus::PARTIAL &&
lap != at::MemOverlapStatus::FULL,
"torch.cat(): unsupported operation: the input tensors cannot refer to any "
"of the output memory locations. Found overlap in input "
"tensor ", i);
}
at::assert_no_internal_overlap(out);
for (int i = 0; i < inputs.size(); i++) {
if (should_skip(inputs[i])) {
continue;
}
nDims = inputs[i].dim();
notSkippedTensor = &inputs[i];
break;
}
// If all inputs are empty tensors, return an empty tensor
if (notSkippedTensor == NULL) {
return out;
}
TORCH_CHECK(inputs.size() > 0, "torch.cat(): invalid number of inputs ", inputs.size());
TORCH_CHECK(dimension >= 0, "torch.cat(): invalid dimension ", dimension);
for (const Tensor& t: inputs) {
TORCH_CHECK(t.device() == notSkippedTensor->device(),
"torch.cat(): all input tensors must be on the same device. Received ",
t.device(), " and ", notSkippedTensor->device());
}
TORCH_CHECK(
out.device() == notSkippedTensor->device(),
"torch.cat(): all input tensors and out must be on the same device, but inputs are on ",
notSkippedTensor->device(), " and out is on ", out.device());
c10::MemoryFormat memory_format = compute_output_memory_format(inputs);
std::vector<int64_t> size(notSkippedTensor->sizes().vec());
// Compute size of the result in the cat dimension
int64_t cat_dim_size = 0;
for (int i = 0; i < inputs.size(); i++) {
const Tensor &tensor = inputs[i];
if (should_skip(tensor)) {
continue;
}
check_cat_shape_except_dim(*notSkippedTensor, tensor, dimension, i);
cat_dim_size += at::native::size(tensor, dimension);
}
// Compute the size of the result
size[dimension] = cat_dim_size;
// skip resizing if size of result is same as expected
// raise a warning while resizing if output has one or more elements
// See https://github.com/pytorch/pytorch/pull/62560#discussion_r687363362
// for understanding why at::native::resize_output is not called directly.
// if (at::native::resize_output_check(out, size)) {
// TODO: restore the above, see https://github.com/pytorch/pytorch/issues/64709
if (out.sizes() != size) {
out.resize_(size, memory_format);
}
if (out.numel() == 0) {
return out;
}
  // We parallelize the copy if all 6 conditions pass:
  //
  // 1. There is more than one input tensor
  // 2. The out tensor is 32-bit indexable
  // 3. The number of dimensions is <= 4
  // 4. All input tensors are contiguous (output tensor may be non-contig)
  // 5. All input tensors can use 32-bit indexing
  // 6. All input tensors and the out tensor share the same data type
const bool all32BitIndexable = std::all_of(inputs.begin(), inputs.end(),
[] (const Tensor& t) {
return at::cuda::detail::canUse32BitIndexMath(t);
});
const bool allContiguous = std::all_of(inputs.begin(), inputs.end(),
[=](const Tensor& t) {
return !t.defined() || t.is_contiguous(memory_format);
});
ScalarType firstType = inputs[0].scalar_type();
bool allSameType = std::all_of(inputs.begin(), inputs.end(),
[firstType](const Tensor& t) {
return t.scalar_type() == firstType;
});
allSameType = allSameType && (out.scalar_type() == firstType);
#if defined(USE_ROCM)
if (inputs.size() > 1 &&
out.dim() <= CAT_ARRAY_MAX_INPUT_DIMS &&
at::cuda::detail::canUse32BitIndexMath(out) &&
allContiguous &&
all32BitIndexable &&
allSameType) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
out.scalar_type(), "cat_cuda", [&]() {
hip_parallel_cat<scalar_t>(out, inputs, dimension, nDims, memory_format);
});
#else
  // We handle contiguous inputs and non-contiguous inputs (<= 4 dims) differently.
  // For contiguous inputs, we do not need to pass stride metadata to the cuda kernel
  // through constant memory, so we can pass more inputs per launch to the cuda threads.
  // For non-contiguous inputs, we reduce the number of inputs passed to the cuda kernel
  // because of the limited amount of constant memory.
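  // Concretely: the contiguous path below dispatches
  // parallel_cat<scalar_t, CAT_ARRAY_BATCH_SIZE, 1>, while the non-contiguous
  // path dispatches parallel_cat<scalar_t, CAT_ARRAY_BATCH_SIZE/2, CAT_ARRAY_BATCH_SIZE/2>;
  // halving the batch makes room for the per-input TensorSizeStride entries that
  // CatArrInputTensorMetadata carries when stride_size > 1.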
if (inputs.size() > 1 &&
out.dim() <= CAT_ARRAY_MAX_INPUT_DIMS &&
at::cuda::detail::canUse32BitIndexMath(out) &&
allContiguous &&
all32BitIndexable &&
allSameType) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
out.scalar_type(), "cat_cuda", [&]() {
parallel_cat<scalar_t, CAT_ARRAY_BATCH_SIZE, 1>(out, inputs, dimension, nDims, memory_format);
});
} else if (inputs.size() > 1 &&
out.dim() <= CAT_ARRAY_MAX_INPUT_DIMS &&
at::cuda::detail::canUse32BitIndexMath(out) &&
nDims <= CAT_ARRAY_MAX_INPUT_DIMS &&
all32BitIndexable &&
allSameType &&
memory_format == c10::MemoryFormat::Contiguous) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16,
out.scalar_type(), "cat_cuda", [&]() {
parallel_cat<scalar_t, CAT_ARRAY_BATCH_SIZE/2, CAT_ARRAY_BATCH_SIZE/2>(out, inputs, dimension, nDims, memory_format);
});
#endif
} else {
int64_t offset = 0;
for (int j = 0; j < inputs.size(); j++)
{
if (should_skip(inputs[j])) continue;
int64_t dimSize = at::native::size(inputs[j], dimension);
Tensor nt = at::narrow(out, dimension, offset, dimSize);
copy_(nt, inputs[j]);
offset += dimSize;
}
}
return out;
}
} // namespace native
} // namespace at
|
the_stack
|
#include <iostream>
#include <algorithm>
#include "cuhnsw.hpp"
#include "cuda_search_kernels.cuh"
#include "cuda_build_kernels.cuh"
namespace cuhnsw {
void CuHNSW::GetDeviceInfo() {
CHECK_CUDA(cudaGetDevice(&devId_));
cudaDeviceProp prop;
CHECK_CUDA(cudaGetDeviceProperties(&prop, devId_));
mp_cnt_ = prop.multiProcessorCount;
major_ = prop.major;
minor_ = prop.minor;
cores_ = -1;
}
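// Note on GetDeviceInfo above: cores_ is left at -1 (unknown). If the total
// core count were needed, it could be derived from the compute capability
// queried above; a rough, illustrative sketch (per-SM values depend on the
// architecture and are assumptions here, not part of this codebase):
//
//   int cores_per_sm = 64;                               // e.g. Volta/Turing, GA100
//   if (major_ == 8 && minor_ >= 6) cores_per_sm = 128;  // e.g. Ampere GA10x
//   cores_ = cores_per_sm * mp_cnt_;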
void CuHNSW::GetEntryPoints(
const std::vector<int>& nodes,
std::vector<int>& entries,
int level, bool search) {
int size = nodes.size();
// process input data for kernel
LevelGraph& graph = level_graphs_[level];
const std::vector<int>& upper_nodes = graph.GetNodes();
int upper_size = upper_nodes.size();
std::vector<int> deg(upper_size);
std::vector<int> neighbors(upper_size * max_m_);
for (int i = 0; i < upper_size; ++i) {
const std::vector<std::pair<float, int>>& _neighbors = graph.GetNeighbors(upper_nodes[i]);
deg[i] = _neighbors.size();
int offset = max_m_ * i;
for (int j = 0; j < deg[i]; ++j) {
neighbors[offset + j] = graph.GetNodeId(_neighbors[j].second);
}
}
for (int i = 0; i < size; ++i) {
int entryid = graph.GetNodeId(entries[i]);
entries[i] = entryid;
}
// copy to gpu mem
thrust::device_vector<int> dev_nodes(size), dev_entries(size);
thrust::device_vector<int> dev_upper_nodes(upper_size), dev_deg(upper_size);
thrust::device_vector<int> dev_neighbors(upper_size * max_m_);
thrust::copy(nodes.begin(), nodes.end(), dev_nodes.begin());
thrust::copy(entries.begin(), entries.end(), dev_entries.begin());
thrust::copy(upper_nodes.begin(), upper_nodes.end(), dev_upper_nodes.begin());
thrust::copy(deg.begin(), deg.end(), dev_deg.begin());
thrust::copy(neighbors.begin(), neighbors.end(), dev_neighbors.begin());
thrust::device_vector<bool> dev_visited(upper_size * block_cnt_, false);
thrust::device_vector<int> dev_visited_list(visited_list_size_ * block_cnt_);
thrust::device_vector<int64_t> dev_acc_visited_cnt(block_cnt_, 0);
thrust::device_vector<cuda_scalar>& qdata = search? device_qdata_: device_data_;
// run kernel
GetEntryPointsKernel<<<block_cnt_, block_dim_>>>(
thrust::raw_pointer_cast(qdata.data()),
thrust::raw_pointer_cast(dev_nodes.data()),
thrust::raw_pointer_cast(device_data_.data()),
thrust::raw_pointer_cast(dev_upper_nodes.data()),
num_dims_, size, upper_size, max_m_, dist_type_,
thrust::raw_pointer_cast(dev_neighbors.data()),
thrust::raw_pointer_cast(dev_deg.data()),
thrust::raw_pointer_cast(dev_visited.data()),
thrust::raw_pointer_cast(dev_visited_list.data()),
visited_list_size_,
thrust::raw_pointer_cast(dev_entries.data()),
thrust::raw_pointer_cast(dev_acc_visited_cnt.data())
);
CHECK_CUDA(cudaDeviceSynchronize());
// el_[GPU] += sw_[GPU].CheckPoint();
thrust::copy(dev_entries.begin(), dev_entries.end(), entries.begin());
std::vector<int64_t> acc_visited_cnt(block_cnt_);
thrust::copy(dev_acc_visited_cnt.begin(), dev_acc_visited_cnt.end(), acc_visited_cnt.begin());
CHECK_CUDA(cudaDeviceSynchronize());
  int64_t full_visited_cnt = std::accumulate(acc_visited_cnt.begin(), acc_visited_cnt.end(), 0LL);
DEBUG("full visited cnt: {}", full_visited_cnt);
// set output
for (int i = 0; i < size; ++i) {
entries[i] = upper_nodes[entries[i]];
}
}
void CuHNSW::BuildGraph() {
visited_ = new bool[batch_size_ * num_data_];
for (int level = max_level_; level >= 0; --level) {
DEBUG("build graph of level: {}", level);
BuildLevelGraph(level);
}
}
void CuHNSW::BuildLevelGraph(int level) {
std::set<int> upper_nodes;
std::vector<int> new_nodes;
LevelGraph& graph = level_graphs_[level];
const std::vector<int>& nodes = graph.GetNodes();
int size = nodes.size();
int max_m = level > 0? max_m_: max_m0_;
thrust::host_vector<int> graph_vec(size * max_m, 0);
thrust::host_vector<int> deg(size, 0);
if (level < max_level_) {
LevelGraph& upper_graph = level_graphs_[level + 1];
for (auto& node: upper_graph.GetNodes()) {
upper_nodes.insert(node);
int srcid = graph.GetNodeId(node);
int idx = 0;
for (auto& nb: upper_graph.GetNeighbors(node)) {
int dstid = graph.GetNodeId(nb.second);
graph_vec[max_m * srcid + (idx++)] = dstid;
}
deg[srcid] = idx;
}
}
for (auto& node: graph.GetNodes()) {
if (upper_nodes.count(node)) continue;
new_nodes.push_back(node);
}
// initialize entries
std::vector<int> entries(new_nodes.size(), enter_point_);
for (int l = max_level_; l > level; --l)
GetEntryPoints(new_nodes, entries, l, false);
for (int i = 0; i < new_nodes.size(); ++i) {
int srcid = graph.GetNodeId(new_nodes[i]);
int dstid = graph.GetNodeId(entries[i]);
graph_vec[max_m * srcid] = dstid;
deg[srcid] = 1;
}
thrust::device_vector<int> device_graph(max_m * size);
thrust::device_vector<float> device_distances(max_m * size);
thrust::device_vector<int> device_deg(size);
thrust::device_vector<int> device_nodes(size);
thrust::device_vector<int> device_visited_table(visited_table_size_ * block_cnt_, -1);
thrust::device_vector<int> device_visited_list(visited_list_size_ * block_cnt_);
thrust::device_vector<int> device_mutex(size, 0);
thrust::device_vector<int64_t> device_acc_visited_cnt(block_cnt_, 0);
thrust::device_vector<Neighbor> device_neighbors(ef_construction_ * block_cnt_);
thrust::device_vector<int> device_cand_nodes(ef_construction_ * block_cnt_);
thrust::device_vector<cuda_scalar> device_cand_distances(ef_construction_ * block_cnt_);
thrust::device_vector<int> device_backup_neighbors(max_m * block_cnt_);
thrust::device_vector<cuda_scalar> device_backup_distances(max_m * block_cnt_);
thrust::device_vector<bool> device_went_through_heuristic(size, false);
thrust::copy(graph_vec.begin(), graph_vec.end(), device_graph.begin());
thrust::copy(deg.begin(), deg.end(), device_deg.begin());
thrust::copy(nodes.begin(), nodes.end(), device_nodes.begin());
BuildLevelGraphKernel<<<block_cnt_, block_dim_>>>(
thrust::raw_pointer_cast(device_data_.data()),
thrust::raw_pointer_cast(device_nodes.data()),
num_dims_, size, max_m, dist_type_, save_remains_,
ef_construction_,
thrust::raw_pointer_cast(device_graph.data()),
thrust::raw_pointer_cast(device_distances.data()),
thrust::raw_pointer_cast(device_deg.data()),
thrust::raw_pointer_cast(device_visited_table.data()),
thrust::raw_pointer_cast(device_visited_list.data()),
visited_table_size_, visited_list_size_,
thrust::raw_pointer_cast(device_mutex.data()),
thrust::raw_pointer_cast(device_acc_visited_cnt.data()),
reverse_cand_,
thrust::raw_pointer_cast(device_neighbors.data()),
thrust::raw_pointer_cast(device_cand_nodes.data()),
thrust::raw_pointer_cast(device_cand_distances.data()),
heuristic_coef_,
thrust::raw_pointer_cast(device_backup_neighbors.data()),
thrust::raw_pointer_cast(device_backup_distances.data()),
thrust::raw_pointer_cast(device_went_through_heuristic.data())
);
CHECK_CUDA(cudaDeviceSynchronize());
thrust::copy(device_deg.begin(), device_deg.end(), deg.begin());
thrust::copy(device_graph.begin(), device_graph.end(), graph_vec.begin());
std::vector<float> distances(max_m * size);
thrust::copy(device_distances.begin(), device_distances.end(), distances.begin());
std::vector<int64_t> acc_visited_cnt(block_cnt_);
thrust::copy(device_acc_visited_cnt.begin(), device_acc_visited_cnt.end(), acc_visited_cnt.begin());
CHECK_CUDA(cudaDeviceSynchronize());
int64_t full_visited_cnt = std::accumulate(acc_visited_cnt.begin(), acc_visited_cnt.end(), 0LL);
DEBUG("full number of visited nodes: {}", full_visited_cnt);
for (auto& node: graph.GetNodes()) {
graph.ClearEdges(node);
}
for (int i = 0; i < size; ++i) {
int src = nodes[i];
for (int j = 0; j < deg[i]; ++j) {
int dst = nodes[graph_vec[i * max_m + j]];
float dist = distances[i * max_m + j];
graph.AddEdge(src, dst, dist);
}
}
}
void CuHNSW::SearchGraph(const float* qdata, const int num_queries, const int topk, const int ef_search,
int* nns, float* distances, int* found_cnt) {
device_qdata_.resize(num_queries * num_dims_);
#ifdef HALF_PRECISION
std::vector<cuda_scalar> hdata(num_queries * num_dims_);
for (int i = 0; i < num_queries * num_dims_; ++i)
hdata[i] = conversion(qdata[i]);
thrust::copy(hdata.begin(), hdata.end(), device_qdata_.begin());
#else
thrust::copy(qdata, qdata + num_queries * num_dims_, device_qdata_.begin());
#endif
std::vector<int> qnodes(num_queries);
std::iota(qnodes.begin(), qnodes.end(), 0);
std::vector<int> entries(num_queries, enter_point_);
for (int l = max_level_; l > 0; --l)
GetEntryPoints(qnodes, entries, l, true);
std::vector<int> graph_vec(max_m0_ * num_data_);
std::vector<int> deg(num_data_);
  LevelGraph& graph = level_graphs_[0];
for (int i = 0; i < num_data_; ++i) {
const std::vector<std::pair<float, int>>& neighbors = graph.GetNeighbors(i);
int nbsize = neighbors.size();
int offset = i * max_m0_;
for (int j = 0; j < nbsize; ++j)
graph_vec[offset + j] = neighbors[j].second;
deg[i] = nbsize;
}
thrust::device_vector<int> device_graph(max_m0_ * num_data_);
thrust::device_vector<int> device_deg(num_data_);
thrust::device_vector<int> device_entries(num_queries);
thrust::device_vector<int> device_nns(num_queries * topk);
thrust::device_vector<float> device_distances(num_queries * topk);
thrust::device_vector<int> device_found_cnt(num_queries);
thrust::device_vector<int> device_visited_table(visited_table_size_ * block_cnt_, -1);
thrust::device_vector<int> device_visited_list(visited_list_size_ * block_cnt_);
thrust::device_vector<int64_t> device_acc_visited_cnt(block_cnt_, 0);
thrust::device_vector<Neighbor> device_neighbors(ef_search * block_cnt_);
thrust::device_vector<int> device_cand_nodes(ef_search * block_cnt_);
thrust::device_vector<cuda_scalar> device_cand_distances(ef_search * block_cnt_);
thrust::copy(graph_vec.begin(), graph_vec.end(), device_graph.begin());
thrust::copy(deg.begin(), deg.end(), device_deg.begin());
thrust::copy(entries.begin(), entries.end(), device_entries.begin());
SearchGraphKernel<<<block_cnt_, block_dim_>>>(
thrust::raw_pointer_cast(device_qdata_.data()),
num_queries,
thrust::raw_pointer_cast(device_data_.data()),
num_data_, num_dims_, max_m0_, dist_type_, ef_search,
thrust::raw_pointer_cast(device_entries.data()),
thrust::raw_pointer_cast(device_graph.data()),
thrust::raw_pointer_cast(device_deg.data()),
topk,
thrust::raw_pointer_cast(device_nns.data()),
thrust::raw_pointer_cast(device_distances.data()),
thrust::raw_pointer_cast(device_found_cnt.data()),
thrust::raw_pointer_cast(device_visited_table.data()),
thrust::raw_pointer_cast(device_visited_list.data()),
visited_table_size_, visited_list_size_,
thrust::raw_pointer_cast(device_acc_visited_cnt.data()),
reverse_cand_,
thrust::raw_pointer_cast(device_neighbors.data()),
thrust::raw_pointer_cast(device_cand_nodes.data()),
thrust::raw_pointer_cast(device_cand_distances.data())
);
CHECK_CUDA(cudaDeviceSynchronize());
std::vector<int64_t> acc_visited_cnt(block_cnt_);
thrust::copy(device_acc_visited_cnt.begin(), device_acc_visited_cnt.end(), acc_visited_cnt.begin());
thrust::copy(device_nns.begin(), device_nns.end(), nns);
thrust::copy(device_distances.begin(), device_distances.end(), distances);
thrust::copy(device_found_cnt.begin(), device_found_cnt.end(), found_cnt);
CHECK_CUDA(cudaDeviceSynchronize());
int64_t full_visited_cnt = std::accumulate(acc_visited_cnt.begin(), acc_visited_cnt.end(), 0LL);
DEBUG("full number of visited nodes: {}", full_visited_cnt);
if (labelled_)
for (int i = 0; i < num_queries * topk; ++i)
nns[i] = labels_[nns[i]];
device_qdata_.clear();
device_qdata_.shrink_to_fit();
}
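// A minimal host-side usage sketch for SearchGraph (illustrative only; assumes
// an already built CuHNSW instance `index`, `num_queries` query vectors laid
// out contiguously with the index's dimensionality, and chosen `topk` and
// `ef_search` values):
//
//   std::vector<int>   nns(num_queries * topk);
//   std::vector<float> distances(num_queries * topk);
//   std::vector<int>   found_cnt(num_queries);
//   index.SearchGraph(queries.data(), num_queries, topk, ef_search,
//                     nns.data(), distances.data(), found_cnt.data());
//
// found_cnt[i] then holds how many of the requested topk neighbors were
// actually found for query i.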
} // namespace cuhnsw
|
the_stack
|
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
// includes, project
#include "cutil_inline.h"
#include "config.h"
#include "structs.h"
#include "util.h"
#include "matlab.h"
#include "bisect_large.cuh"
// includes, kernels
#include "bisect_kernel_large.cu"
#include "bisect_kernel_large_onei.cu"
#include "bisect_kernel_large_multi.cu"
////////////////////////////////////////////////////////////////////////////////
//! Initialize variables and memory for result
//! @param result handles to memory
//! @param matrix_size size of the matrix
////////////////////////////////////////////////////////////////////////////////
void
initResultDataLargeMatrix( ResultDataLarge& result, const unsigned int mat_size) {
// helper variables to initialize memory
unsigned int zero = 0;
unsigned int mat_size_f = sizeof(float) * mat_size;
unsigned int mat_size_ui = sizeof(unsigned int) * mat_size;
float* tempf = (float*) malloc( mat_size_f);
unsigned int* tempui = (unsigned int*) malloc( mat_size_ui);
for( unsigned int i = 0; i < mat_size; ++i) {
tempf[i] = 0.0f;
tempui[i] = 0;
}
// number of intervals containing only one eigenvalue after the first step
cutilSafeCall( cudaMalloc( (void**) &result.g_num_one,
sizeof( unsigned int)) );
cutilSafeCall( cudaMemcpy( result.g_num_one, &zero, sizeof(unsigned int),
cudaMemcpyHostToDevice));
// number of (thread) blocks of intervals with multiple eigenvalues after
// the first iteration
cutilSafeCall( cudaMalloc( (void**) &result.g_num_blocks_mult,
sizeof(unsigned int)));
cutilSafeCall( cudaMemcpy( result.g_num_blocks_mult, &zero,
sizeof(unsigned int),
cudaMemcpyHostToDevice ));
cutilSafeCall( cudaMalloc( (void**) &result.g_left_one, mat_size_f));
cutilSafeCall( cudaMalloc( (void**) &result.g_right_one, mat_size_f));
cutilSafeCall( cudaMalloc( (void**) &result.g_pos_one, mat_size_ui));
cutilSafeCall( cudaMalloc( (void**) &result.g_left_mult, mat_size_f));
cutilSafeCall( cudaMalloc( (void**) &result.g_right_mult, mat_size_f));
cutilSafeCall( cudaMalloc( (void**) &result.g_left_count_mult,
mat_size_ui));
cutilSafeCall( cudaMalloc( (void**) &result.g_right_count_mult,
mat_size_ui));
cutilSafeCall( cudaMemcpy( result.g_left_one, tempf, mat_size_f,
cudaMemcpyHostToDevice));
cutilSafeCall( cudaMemcpy( result.g_right_one, tempf, mat_size_f,
cudaMemcpyHostToDevice));
cutilSafeCall( cudaMemcpy( result.g_pos_one, tempui, mat_size_ui,
cudaMemcpyHostToDevice));
cutilSafeCall( cudaMemcpy( result.g_left_mult, tempf, mat_size_f,
cudaMemcpyHostToDevice));
cutilSafeCall( cudaMemcpy( result.g_right_mult, tempf, mat_size_f,
cudaMemcpyHostToDevice));
cutilSafeCall( cudaMemcpy( result.g_left_count_mult, tempui, mat_size_ui,
cudaMemcpyHostToDevice));
cutilSafeCall( cudaMemcpy( result.g_right_count_mult, tempui, mat_size_ui,
cudaMemcpyHostToDevice));
cutilSafeCall( cudaMalloc( (void**) &result.g_blocks_mult, mat_size_ui));
cutilSafeCall( cudaMemcpy( result.g_blocks_mult, tempui, mat_size_ui,
cudaMemcpyHostToDevice ));
cutilSafeCall(cudaMalloc((void**) &result.g_blocks_mult_sum, mat_size_ui));
cutilSafeCall( cudaMemcpy( result.g_blocks_mult_sum, tempui, mat_size_ui,
cudaMemcpyHostToDevice ));
cutilSafeCall( cudaMalloc( (void**) &result.g_lambda_mult, mat_size_f));
cutilSafeCall( cudaMemcpy( result.g_lambda_mult, tempf, mat_size_f,
cudaMemcpyHostToDevice ));
cutilSafeCall( cudaMalloc( (void**) &result.g_pos_mult, mat_size_ui));
    cutilSafeCall( cudaMemcpy( result.g_pos_mult, tempui, mat_size_ui,
                               cudaMemcpyHostToDevice ));
}
////////////////////////////////////////////////////////////////////////////////
//! Cleanup result memory
//! @param result handles to memory
////////////////////////////////////////////////////////////////////////////////
void
cleanupResultDataLargeMatrix( ResultDataLarge& result) {
cutilSafeCall( cudaFree( result.g_num_one));
cutilSafeCall( cudaFree( result.g_num_blocks_mult));
cutilSafeCall( cudaFree( result.g_left_one));
cutilSafeCall( cudaFree( result.g_right_one));
cutilSafeCall( cudaFree( result.g_pos_one));
cutilSafeCall( cudaFree( result.g_left_mult));
cutilSafeCall( cudaFree( result.g_right_mult));
cutilSafeCall( cudaFree( result.g_left_count_mult));
cutilSafeCall( cudaFree( result.g_right_count_mult));
cutilSafeCall( cudaFree( result.g_blocks_mult));
cutilSafeCall( cudaFree( result.g_blocks_mult_sum));
cutilSafeCall( cudaFree( result.g_lambda_mult));
cutilSafeCall( cudaFree( result.g_pos_mult));
}
////////////////////////////////////////////////////////////////////////////////
//! Run the kernels to compute the eigenvalues for large matrices
//! @param input handles to input data
//! @param result handles to result data
//! @param mat_size matrix size
//! @param precision desired precision of eigenvalues
//! @param lg lower limit of Gerschgorin interval
//! @param ug upper limit of Gerschgorin interval
//! @param iterations number of iterations (for timing)
////////////////////////////////////////////////////////////////////////////////
void
computeEigenvaluesLargeMatrix( const InputData& input, const ResultDataLarge& result,
const unsigned int mat_size, const float precision,
const float lg, const float ug,
const unsigned int iterations )
{
dim3 blocks( 1, 1, 1);
dim3 threads( MAX_THREADS_BLOCK, 1, 1);
unsigned int timer_step1 = 0;
unsigned int timer_step2_one = 0;
unsigned int timer_step2_mult = 0;
unsigned int timer_total = 0;
cutilCheckError( cutCreateTimer( &timer_step1));
cutilCheckError( cutCreateTimer( &timer_step2_one));
cutilCheckError( cutCreateTimer( &timer_step2_mult));
cutilCheckError( cutCreateTimer( &timer_total));
cutilCheckError( cutStartTimer( timer_total));
// do for multiple iterations to improve timing accuracy
for( unsigned int iter = 0; iter < iterations; ++iter) {
cutilCheckError( cutStartTimer( timer_step1));
bisectKernelLarge<<< blocks, threads >>>
( input.g_a, input.g_b, mat_size,
lg, ug, 0, mat_size, precision,
result.g_num_one, result.g_num_blocks_mult,
result.g_left_one, result.g_right_one, result.g_pos_one,
result.g_left_mult, result.g_right_mult,
result.g_left_count_mult, result.g_right_count_mult,
result.g_blocks_mult, result.g_blocks_mult_sum
);
cutilSafeCall( cudaThreadSynchronize());
cutilCheckError( cutStopTimer( timer_step1));
cutilCheckMsg( "Kernel launch failed.");
// get the number of intervals containing one eigenvalue after the first
// processing step
unsigned int num_one_intervals;
cutilSafeCall( cudaMemcpy( &num_one_intervals, result.g_num_one,
sizeof(unsigned int),
cudaMemcpyDeviceToHost));
dim3 grid_onei;
grid_onei.x = getNumBlocksLinear( num_one_intervals, MAX_THREADS_BLOCK);
dim3 threads_onei;
// use always max number of available threads to better balance load times
// for matrix data
threads_onei.x = MAX_THREADS_BLOCK;
// compute eigenvalues for intervals that contained only one eigenvalue
// after the first processing step
cutilCheckError( cutStartTimer( timer_step2_one));
bisectKernelLarge_OneIntervals<<< grid_onei , threads_onei >>>
( input.g_a, input.g_b, mat_size, num_one_intervals,
result.g_left_one, result.g_right_one, result.g_pos_one,
precision
);
cutilSafeCall( cudaThreadSynchronize());
cutilCheckError( cutStopTimer( timer_step2_one));
// process intervals that contained more than one eigenvalue after
// the first processing step
// get the number of blocks of intervals that contain, in total when
// each interval contains only one eigenvalue, not more than
// MAX_THREADS_BLOCK threads
unsigned int num_blocks_mult = 0;
cutilSafeCall( cudaMemcpy( &num_blocks_mult, result.g_num_blocks_mult,
sizeof( unsigned int),
cudaMemcpyDeviceToHost));
// setup the execution environment
dim3 grid_mult( num_blocks_mult, 1, 1);
dim3 threads_mult( MAX_THREADS_BLOCK, 1, 1);
cutilCheckError( cutStartTimer( timer_step2_mult));
bisectKernelLarge_MultIntervals<<< grid_mult, threads_mult >>>
( input.g_a, input.g_b, mat_size,
result.g_blocks_mult, result.g_blocks_mult_sum,
result.g_left_mult, result.g_right_mult,
result.g_left_count_mult, result.g_right_count_mult,
result.g_lambda_mult, result.g_pos_mult,
precision
);
cutilCheckError( cutStopTimer( timer_step2_mult));
cutilCheckMsg( "bisectKernelLarge_MultIntervals() FAILED.");
}
cutilCheckError( cutStopTimer( timer_total));
printf( "Average time step 1: %f ms\n",
cutGetTimerValue( timer_step1) / (float) iterations );
printf( "Average time step 2, one intervals: %f ms\n",
cutGetTimerValue( timer_step2_one) / (float) iterations );
printf( "Average time step 2, mult intervals: %f ms\n",
cutGetTimerValue( timer_step2_mult) / (float) iterations );
printf( "Average time TOTAL: %f ms\n",
cutGetTimerValue( timer_total) / (float) iterations );
cutilCheckError( cutDeleteTimer( timer_step1));
cutilCheckError( cutDeleteTimer( timer_step2_one));
cutilCheckError( cutDeleteTimer( timer_step2_mult));
cutilCheckError( cutDeleteTimer( timer_total));
}
////////////////////////////////////////////////////////////////////////////////
//! Process the result, that is obtain result from device and do simple sanity
//! checking
//! @param input handles to input data
//! @param result handles to result data
//! @param mat_size matrix size
//! @param filename output filename
////////////////////////////////////////////////////////////////////////////////
void
processResultDataLargeMatrix( const InputData& input, const ResultDataLarge& result,
const unsigned int mat_size,
const char* filename,
const unsigned int user_defined, char* exec_path)
{
const unsigned int mat_size_ui = sizeof(unsigned int) * mat_size;
const unsigned int mat_size_f = sizeof(float) * mat_size;
// copy data from intervals that contained more than one eigenvalue after
// the first processing step
float* lambda_mult = (float*) malloc( sizeof(float) * mat_size);
cutilSafeCall( cudaMemcpy( lambda_mult, result.g_lambda_mult,
sizeof(float) * mat_size,
cudaMemcpyDeviceToHost ));
unsigned int* pos_mult =
(unsigned int*) malloc( sizeof(unsigned int) * mat_size);
cutilSafeCall( cudaMemcpy( pos_mult, result.g_pos_mult,
sizeof(unsigned int) * mat_size,
cudaMemcpyDeviceToHost ));
unsigned int* blocks_mult_sum =
(unsigned int*) malloc( sizeof(unsigned int) * mat_size);
cutilSafeCall( cudaMemcpy( blocks_mult_sum, result.g_blocks_mult_sum,
sizeof( unsigned int) * mat_size,
cudaMemcpyDeviceToHost ));
unsigned int num_one_intervals;
cutilSafeCall( cudaMemcpy( &num_one_intervals, result.g_num_one,
sizeof(unsigned int),
cudaMemcpyDeviceToHost));
unsigned int sum_blocks_mult = mat_size - num_one_intervals;
// copy data for intervals that contained one eigenvalue after the first
// processing step
float* left_one = (float*) malloc( mat_size_f);
float* right_one = (float*) malloc( mat_size_f);
unsigned int* pos_one = (unsigned int*) malloc( mat_size_ui);
cutilSafeCall( cudaMemcpy( left_one, result.g_left_one, mat_size_f,
cudaMemcpyDeviceToHost) );
cutilSafeCall( cudaMemcpy( right_one, result.g_right_one, mat_size_f,
cudaMemcpyDeviceToHost) );
cutilSafeCall( cudaMemcpy( pos_one, result.g_pos_one, mat_size_ui,
cudaMemcpyDeviceToHost) );
// extract eigenvalues
float* eigenvals = (float*) malloc( mat_size_f);
// singleton intervals generated in the second step
for( unsigned int i = 0; i < sum_blocks_mult; ++i) {
eigenvals[pos_mult[i] - 1] = lambda_mult[i];
}
// singleton intervals generated in the first step
unsigned int index = 0;
for( unsigned int i = 0; i < num_one_intervals; ++i, ++index) {
eigenvals[pos_one[i] - 1] = left_one[i];
}
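    // The positions written by the kernels appear to be one-based (they come
    // from the Sturm-sequence eigenvalue count for an interval bound), hence
    // the "- 1" when scattering into the zero-based eigenvals array above.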
if( 1 == user_defined) {
// store result
writeTridiagSymMatlab( filename, input.a, input.b+1, eigenvals, mat_size);
// cutilCheckError( cutWriteFilef( filename, eigenvals, mat_size, 0.0f));
}
else {
// compare with reference solution
float* reference = NULL;
unsigned int input_data_size = 0;
char* ref_path = cutFindFilePath( "reference.dat", exec_path);
cutilCondition( 0 != ref_path);
cutilCheckError( cutReadFilef( ref_path, &reference, &input_data_size));
cutilCondition( input_data_size == mat_size);
// there's an imprecision of Sturm count computation which makes an
// additional offset necessary
float tolerance = 1.0e-5f + 5.0e-6f;
if( CUTTrue == cutComparefe( reference, eigenvals, mat_size, tolerance)) {
printf( "\nTEST PASSED.\n");
}
else {
printf( "FAILED.\n");
}
cutFree( ref_path);
cutFree( reference);
}
freePtr( eigenvals);
freePtr( lambda_mult);
freePtr( pos_mult);
freePtr( blocks_mult_sum);
freePtr( left_one);
freePtr( right_one);
freePtr( pos_one);
}
|
the_stack
|
#pragma once
#include <gunrock/util/select_device.cuh>
#include <gunrock/app/enactor_base.cuh>
#include <gunrock/app/enactor_iteration.cuh>
#include <gunrock/app/enactor_loop.cuh>
#include <gunrock/app/sm/sm_problem.cuh>
#include <gunrock/oprtr/oprtr.cuh>
namespace gunrock {
namespace app {
namespace sm {
/**
 * @brief Specifying parameters for Subgraph Matching Enactor
 * @param parameters The util::Parameter<...> structure holding all parameter info
 * \return cudaError_t error message(s), if any
 */
cudaError_t UseParameters_enactor(util::Parameters &parameters) {
cudaError_t retval = cudaSuccess;
GUARD_CU(app::UseParameters_enactor(parameters));
return retval;
}
/**
 * @brief definition of SM iteration loop
* @tparam EnactorT Type of enactor
*/
template <typename EnactorT>
struct SMIterationLoop : public IterationLoopBase<EnactorT, Use_FullQ | Push> {
typedef typename EnactorT::VertexT VertexT;
typedef typename EnactorT::SizeT SizeT;
typedef typename EnactorT::ValueT ValueT;
typedef typename EnactorT::Problem::GraphT::CsrT CsrT;
typedef IterationLoopBase<EnactorT, Use_FullQ | Push> BaseIterationLoop;
SMIterationLoop() : BaseIterationLoop() {}
/**
* @brief Core computation of sm, one iteration
* @param[in] peer_ Which GPU peers to work on, 0 means local
* \return cudaError_t error message(s), if any
*/
cudaError_t Core(int peer_ = 0) {
auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0];
auto &enactor_slice =
this->enactor
->enactor_slices[this->gpu_num * this->enactor->num_gpus + peer_];
auto &enactor_stats = enactor_slice.enactor_stats;
auto &graph = data_slice.sub_graph[0];
auto &subgraphs = data_slice.subgraphs;
auto &constrain = data_slice.constrain;
auto &isValid = data_slice.isValid;
auto &NS = data_slice.NS;
auto &NN = data_slice.NN;
auto &NT = data_slice.NT;
auto &NT_offset = data_slice.NT_offset;
auto &query_ro = data_slice.query_ro;
auto &query_ci = data_slice.query_ci;
auto &counter = data_slice.counter;
auto &temp_count = data_slice.temp_count;
auto &indices = data_slice.indices;
auto &results = data_slice.results;
auto &flags_write = data_slice.flags_write;
auto &row_offsets = graph.CsrT::row_offsets;
auto &col_indices = graph.CsrT::column_indices;
auto &oprtr_parameters = enactor_slice.oprtr_parameters;
auto &retval = enactor_stats.retval;
auto &stream = oprtr_parameters.stream;
auto target = util::DEVICE;
size_t nodes_query = data_slice.nodes_query;
unsigned long nodes_data = graph.nodes;
unsigned long edges_data = graph.edges;
unsigned long mem_limit = nodes_data * nodes_data;
util::Array1D<SizeT, VertexT> *null_frontier = NULL;
auto complete_graph = null_frontier;
// Store data graph degrees to subgraphs
GUARD_CU(subgraphs.ForAll(
[row_offsets] __host__ __device__(VertexT * subgraphs_,
const SizeT &v) {
subgraphs_[v] = row_offsets[v + 1] - row_offsets[v];
},
graph.nodes, target, stream));
    // advance to filter out data graph nodes which don't satisfy the constraint
auto advance_op = [subgraphs, constrain, isValid] __host__ __device__(
const VertexT &src, VertexT &dest,
const SizeT &edge_id, const VertexT &input_item,
const SizeT &input_pos, SizeT &output_pos) -> bool {
if (isValid[src]) {
if (subgraphs[src] >= constrain[0]) {
return true;
} else {
isValid[src] = false;
atomicAdd(subgraphs + dest, -1);
}
}
return false;
}; // advance_op
auto prune_op =
[subgraphs, row_offsets, col_indices, isValid, NS, NN, NT, NT_offset,
query_ro, query_ci, flags_write, counter, results, temp_count,
nodes_data, nodes_query, mem_limit] __host__
__device__(const VertexT &src, VertexT &dest, const SizeT &edge_id,
const VertexT &input_item, const SizeT &input_pos,
SizeT &output_pos) -> bool {
if (src < 0 || src >= nodes_data) return false;
if ((!isValid[src]) || (!isValid[dest])) return false;
// NS has query node id sequence from pos 0 to pos nodes_query - 1; min
// degree of neighbors from pos nodes_query to end
VertexT query_id = NS[counter[0]]; // node id of current query node
SizeT min_degree = NS[counter[0] + nodes_query];
int nn = NN[counter[0]]; // pos of previously visited neighbor in NS
unsigned long n = nodes_data;
// first iteration (counter[0] = 0), src nodes are candidates
if (nn == -1) {
// check candidates' degrees
if (subgraphs[src] < (query_ro[query_id + 1] - query_ro[query_id]))
return false;
// 1 way look ahead
if (subgraphs[dest] < min_degree) {
return false;
}
flags_write[src] = true;
return true;
}
// flags represent all possible node combinations in each iteration
// with no consideration of repeating nodes
// each flag's pos represent a combination of nodes
// we calculate the node ids based on the value of pos
// The largest pos is n ^ counter - 1
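      // Worked example: with n = 5 data nodes and counter[0] == 2, a flag /
      // result value of 13 encodes the ordered pair (13 / 5, 13 % 5) = (2, 3);
      // the digit-extraction loop below recovers exactly these base-n digits.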
// Check NN
int total = 1;
for (int i = 0; i < counter[0]; ++i) {
total = total * n;
}
int stride_src = 1;
for (int i = nn + 1; i < counter[0]; ++i) {
stride_src = stride_src * n;
}
int combination[50]; // 50 is the largest nodes_query value
for (unsigned long i = 0; i < temp_count[0]; ++i) {
// src is not at nn pos of current combination
if (src != (results[i] / stride_src) % n) continue;
// src satisfies, later iterations counter[0] > 0, dest nodes are
// candidates check candidates' degrees
if (subgraphs[dest] < (query_ro[query_id + 1] - query_ro[query_id]))
continue;
// printf("src: %d, dest:%d, results:%d, dest degree: %d, query
// degree:%d\n", src, dest, results[i], subgraphs[dest],
// (query_ro[query_id + 1] - query_ro[query_id]));
// Fill combination with current result[i]'s representation
int stride = total;
int temp = results[i];
int j = 0;
for (j = 0; j < counter[0]; ++j) {
stride = stride / n;
combination[j] = temp / stride;
temp = temp - combination[j] * stride;
}
// First check: check if dest is duplicated with any of the member
for (j = 0; j < counter[0]; ++j) {
if (dest == combination[j]) break;
}
if (j < counter[0]) continue; // dest is a duplicate, aborted
// Second check: check if dest has any matched NT
int k = 0;
for (k = NT_offset[counter[0]]; k < NT_offset[counter[0] + 1]; ++k) {
int nt = NT[k]; // non-tree edge's other node pos in NS
// check if dest is connected to nt's node in combination
int nt_node = combination[nt];
int offset = 0;
for (offset = row_offsets[dest]; offset < row_offsets[dest + 1];
++offset) {
if (nt_node == col_indices[offset]) break;
}
if (offset >=
row_offsets[dest + 1]) { // dest has no neighbor nt_node
break; // dest doesn't satisfy nt node connections
}
}
if (k <
NT_offset[counter[0] + 1]) { // dest doesn't satisfy all NT edges
continue;
}
// Checks finished, add dest to combination and write to new flags pos
unsigned long pos = (unsigned long)i * nodes_data + (unsigned long)dest;
if (pos >= mem_limit) {
continue;
}
flags_write[pos] = true;
}
return false;
}; // prune_op
// first iteration, filter by basic constrain, and update valid degree,
// could run multiple iterations to do more filter
for (int iter = 0; iter < 1; ++iter) {
GUARD_CU(oprtr::Advance<oprtr::OprtrType_V2V>(
graph.csr(), complete_graph, complete_graph, oprtr_parameters,
advance_op));
}
unsigned long total = 1;
for (int iter = 0; iter < nodes_query; ++iter) {
// set counter to be equal to iter
GUARD_CU(counter.ForAll(
[iter] __host__ __device__(VertexT * counter_, const SizeT &v) {
counter_[v] = iter;
},
1, target, stream));
// First iteration
if (iter == 0) {
GUARD_CU(oprtr::Advance<oprtr::OprtrType_V2V>(
graph.csr(), complete_graph, complete_graph, oprtr_parameters,
prune_op));
} else {
// total is the largest combination value this iteration could have
total = total * nodes_data;
GUARD_CU(flags_write.ForAll(
[] __device__(bool *x, const unsigned long &pos) {
x[pos] = false;
},
mem_limit, target, stream));
// Second and later iterations
GUARD_CU(oprtr::Advance<oprtr::OprtrType_V2V>(
graph.csr(), complete_graph, complete_graph, oprtr_parameters,
prune_op));
GUARD_CU2(cudaStreamSynchronize(stream),
"cudaStreamSynchronize failed");
GUARD_CU(temp_count.Move(util::DEVICE, util::HOST));
// Update indices and reset results for compression
unsigned long size = min(temp_count[0] * nodes_data, mem_limit);
GUARD_CU(indices.ForAll(
[results, nodes_data] __device__(unsigned long *x,
const unsigned long &pos) {
x[pos] =
results[pos / nodes_data] * nodes_data + (pos % nodes_data);
},
size, target, stream));
GUARD_CU(results.ForAll(
[] __host__ __device__(unsigned long *x, const unsigned long &pos) {
x[pos] = 0;
},
mem_limit, target, stream));
}
GUARD_CU(util::CUBSelect_flagged(indices.GetPointer(util::DEVICE),
flags_write.GetPointer(util::DEVICE),
results.GetPointer(util::DEVICE),
temp_count.GetPointer(util::DEVICE),
mem_limit));
// counter.Print();
// indices.Print();
// flags_write.Print();
// results.Print();
// temp_count.Print();
} // results and temp_count contains final results
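    // Each surviving value in `results` encodes an ordered tuple of candidate
    // vertex ids as base-nodes_data digits (most significant digit first), so
    // it can be decoded with the same digit-extraction scheme used in prune_op.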
return retval;
}
/**
* @brief Routine to combine received data and local data
* @tparam NUM_VERTEX_ASSOCIATES Number of data associated with each
   * transmission item, typed VertexT
   * @tparam NUM_VALUE__ASSOCIATES Number of data associated with each
   * transmission item, typed ValueT
   * @param received_length The number of transmission items received
* @param[in] peer_ which peer GPU the data came from
* \return cudaError_t error message(s), if any
*/
template <int NUM_VERTEX_ASSOCIATES, int NUM_VALUE__ASSOCIATES>
cudaError_t ExpandIncoming(SizeT &received_length, int peer_) {
auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0];
auto &enactor_slice =
this->enactor
->enactor_slices[this->gpu_num * this->enactor->num_gpus + peer_];
auto expand_op = [] __host__ __device__(
VertexT & key, const SizeT &in_pos,
VertexT *vertex_associate_ins,
ValueT *value__associate_ins) -> bool { return true; };
cudaError_t retval =
BaseIterationLoop::template ExpandIncomingBase<NUM_VERTEX_ASSOCIATES,
NUM_VALUE__ASSOCIATES>(
received_length, peer_, expand_op);
return retval;
}
cudaError_t Compute_OutputLength(int peer_) {
return cudaSuccess; // No need to load balance or get output size
}
cudaError_t Check_Queue_Size(int peer_) {
    return cudaSuccess;  // no need to check queue size for SM
}
bool Stop_Condition(int gpu_num = 0) {
auto &enactor_slice = this->enactor->enactor_slices[0];
auto &enactor_stats = enactor_slice.enactor_stats;
auto &data_slice = this->enactor->problem->data_slices[this->gpu_num][0];
auto &iter = enactor_stats.iteration;
if (iter == 1)
return true;
else
return false;
}
}; // end of SMIteration
/**
* @brief SM enactor class.
* @tparam _Problem Problem type we process on
* @tparam ARRAY_FLAG Flags for util::Array1D used in the enactor
* @tparam cudaHostRegisterFlag Flags for util::Array1D used in the enactor
*/
template <typename _Problem, util::ArrayFlag ARRAY_FLAG = util::ARRAY_NONE,
unsigned int cudaHostRegisterFlag = cudaHostRegisterDefault>
class Enactor
: public EnactorBase<typename _Problem::GraphT, typename _Problem::LabelT,
typename _Problem::ValueT, ARRAY_FLAG,
cudaHostRegisterFlag> {
public:
  // Definitions
typedef _Problem Problem;
typedef typename Problem::SizeT SizeT;
typedef typename Problem::VertexT VertexT;
typedef typename Problem::ValueT ValueT;
typedef typename Problem::GraphT GraphT;
typedef typename Problem::LabelT LabelT;
typedef EnactorBase<GraphT, LabelT, ValueT, ARRAY_FLAG, cudaHostRegisterFlag>
BaseEnactor;
typedef Enactor<Problem, ARRAY_FLAG, cudaHostRegisterFlag> EnactorT;
typedef SMIterationLoop<EnactorT> IterationT;
// Members
Problem *problem;
IterationT *iterations;
/**
* \addtogroup PublicInterface
* @{
*/
/**
* @brief SMEnactor constructor
*/
Enactor() : BaseEnactor("sm"), problem(NULL) {
this->max_num_vertex_associates = 0;
this->max_num_value__associates = 1;
}
/**
* @brief SMEnactor destructor
*/
virtual ~Enactor() {
// Release();
}
/*
* @brief Releasing allocated memory space
* @param target The location to release memory from
* \return cudaError_t error message(s), if any
*/
cudaError_t Release(util::Location target = util::LOCATION_ALL) {
cudaError_t retval = cudaSuccess;
GUARD_CU(BaseEnactor::Release(target));
delete[] iterations;
iterations = NULL;
problem = NULL;
return retval;
}
/**
* @brief Initialize the enactor.
* @param[in] problem The problem object.
* @param[in] target Target location of data
* \return cudaError_t error message(s), if any
*/
cudaError_t Init(Problem &problem, util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
this->problem = &problem;
GUARD_CU(BaseEnactor::Init(problem, Enactor_None, 2, NULL, target, false));
for (int gpu = 0; gpu < this->num_gpus; gpu++) {
GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
auto &enactor_slice = this->enactor_slices[gpu * this->num_gpus + 0];
auto &graph = problem.sub_graphs[gpu];
GUARD_CU(enactor_slice.frontier.Allocate(graph.nodes, graph.edges,
this->queue_factors));
}
iterations = new IterationT[this->num_gpus];
for (int gpu = 0; gpu < this->num_gpus; gpu++) {
GUARD_CU(iterations[gpu].Init(this, gpu));
}
GUARD_CU(this->Init_Threads(
this, (CUT_THREADROUTINE) & (GunrockThread<EnactorT>)));
return retval;
}
/**
   * @brief one run of sm, to be called within GunrockThread
* @param thread_data Data for the CPU thread
* \return cudaError_t error message(s), if any
*/
cudaError_t Run(ThreadSlice &thread_data) {
gunrock::app::Iteration_Loop<
// TODO: change to how many {VertexT, ValueT} data need to communicate
// per element in the inter-GPU sub-frontiers
0, 1, IterationT>(thread_data, iterations[thread_data.thread_num]);
return cudaSuccess;
}
/**
* @brief Reset enactor
* @param[in] src Source node to start primitive.
* @param[in] target Target location of data
* \return cudaError_t error message(s), if any
*/
cudaError_t Reset(util::Location target = util::DEVICE) {
typedef typename GraphT::GpT GpT;
cudaError_t retval = cudaSuccess;
GUARD_CU(BaseEnactor::Reset(target));
SizeT nodes = this->problem->data_slices[0][0].sub_graph[0].nodes;
for (int gpu = 0; gpu < this->num_gpus; gpu++) {
if (this->num_gpus == 1) {
this->thread_slices[gpu].init_size = nodes;
for (int peer_ = 0; peer_ < this->num_gpus; peer_++) {
auto &frontier =
this->enactor_slices[gpu * this->num_gpus + peer_].frontier;
frontier.queue_length = (peer_ == 0) ? nodes : 0;
if (peer_ == 0) {
util::Array1D<SizeT, VertexT> tmp;
tmp.Allocate(nodes, target | util::HOST);
for (SizeT i = 0; i < nodes; ++i) {
tmp[i] = (VertexT)i % nodes;
}
GUARD_CU(tmp.Move(util::HOST, target));
GUARD_CU(frontier.V_Q()->ForEach(
tmp,
[] __host__ __device__(VertexT & v, VertexT & i) { v = i; },
nodes, target, 0));
tmp.Release();
}
}
} else {
// MULTI_GPU INCOMPLETE
}
}
GUARD_CU(BaseEnactor::Sync());
return retval;
}
/**
* @brief Enacts a SM computing on the specified graph.
* @param[in] src Source node to start primitive.
* \return cudaError_t error message(s), if any
*/
cudaError_t Enact() {
cudaError_t retval = cudaSuccess;
GUARD_CU(this->Run_Threads(this));
util::PrintMsg("GPU SM Done.", this->flag & Debug);
return retval;
}
/** @} */
};
} // namespace sm
} // namespace app
} // namespace gunrock
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
the_stack
|
#include "ICcircleRadii.h"
#include "Image.h"
#include "ErrorCode.h"
// Macros IC_BLOCK_X, IC_BLOCK_Y and IC_BLOCK_Z
// Thread block dimensions used by the kernels below.
#define IC_BLOCK_X 32
#define IC_BLOCK_Y 8
#define IC_BLOCK_Z 8
// Macros IC_MAX_INT and IC_MIN_INT
// Sentinel maximum and minimum distance values.
#define IC_MAX_INT 65536
#define IC_MIN_INT -1
// Kernel function: _getContourKer (collect the coordinates of every contour point)
// Given a contour image, output the coordinates of every point on the contour.
static __global__ void      // no return value
_getContourKer(
        ImageCuda inimg,    // input coordinate set (contour image)
        int *lenth,         // number of points on the contour
        int *contourX,      // X coordinate of each contour point
        int *contourY       // Y coordinate of each contour point
);
// Kernel function: _minMaxKer (point whose farthest distance is minimal)
// Given the coordinate set of a contour, a point inside the region it encloses
// and the number of points required, return the set of points whose farthest
// distance is minimal.
static __global__ void      // no return value
_minMaxKer(
        ImageCuda inimg,    // input coordinate set (contour image)
        int *minMaxDist,    // output point set (distances)
        int lenth,          // number of points on the contour
        int *contourX,      // X coordinates of the contour points
        int *contourY,      // Y coordinates of the contour points
        int *indexX,        // X coordinates of the result point set
        int *indexY,        // Y coordinates of the result point set
        int *lensec         // number of result points
);
// Kernel function: _maxMinKer (point whose nearest distance is maximal)
// Given the coordinate set of a contour, a point inside the region it encloses
// and the number of points required, return the set of points whose nearest
// distance is maximal.
static __global__ void      // no return value
_maxMinKer(
        ImageCuda inimg,    // input coordinate set (contour image)
        int *minMaxDist,    // output point set (distances)
        int lenth,          // number of points on the contour
        int *contourX,      // X coordinates of the contour points
        int *contourY,      // Y coordinates of the contour points
        int *indexX,        // X coordinates of the result point set
        int *indexY,        // Y coordinates of the result point set
        int *lensec         // number of result points
);
// Kernel function: _shearSortDesKer (descending row sort)
// Bitonic-sorts every row of the matrix to be sorted.
static __global__ void
_shearSortDesKer(
        int distDev[],      // vote counts (distances)
        int indexXDev[],    // index values
        int indexYDev[],
        int lensec,         // number of matrix rows
        int judge           // size of the per-block shared memory
);
// Kernel function: _shearSortAscKer (ascending row sort)
// Bitonic-sorts every row of the matrix to be sorted.
static __global__ void
_shearSortAscKer(
        int distDev[],      // vote counts (distances)
        int indexXDev[],    // index values
        int indexYDev[],
        int lensec,         // number of matrix rows
        int judge           // size of the per-block shared memory
);
// Kernel function: _getContourKer (collect the coordinates of every contour point)
static __global__ void _getContourKer(ImageCuda inimg, int *lenth,
                                      int *contourX, int *contourY)
{
    // Compute the thread indices; the image is indexed directly by them.
    int tidc = blockIdx.x * blockDim.x + threadIdx.x;
    int tidr = blockIdx.y * blockDim.y + threadIdx.y;
    // Convert to the linear image offset.
    int id = tidr * inimg.pitchBytes + tidc;
    // Bounds check.
    if (tidc >= inimg.imgMeta.width || tidr >= inimg.imgMeta.height)
        return;
    // If the pixel lies on the contour, record its coordinates. The counter is
    // shared by all threads, so it is advanced atomically.
    if (inimg.imgMeta.imgData[id] == 0) {
        int pos = atomicAdd(lenth, 1);
        contourX[pos] = tidc;
        contourY[pos] = tidr;
    }
}
// Kernel function: _minMaxKer (point whose farthest distance is minimal)
static __global__ void _minMaxKer(
        ImageCuda inimg, int *minMaxDist, int lenth, int *contourX,
        int *contourY, int *indexX, int *indexY, int *lensec)
{
    // Compute the thread indices.
    int tidc = blockIdx.x * blockDim.x + threadIdx.x;
    int tidr = blockIdx.y * blockDim.y + threadIdx.y;
    int z = blockIdx.z * blockDim.z + threadIdx.z;
    // Convert to the linear image offset.
    int id = tidr * inimg.pitchBytes + tidc;
    // Bounds checks.
    if (tidc >= inimg.imgMeta.width || tidr >= inimg.imgMeta.height)
        return;
    if (z >= lenth) return;
    // Coordinate differences between the two points.
    int dx = 0;
    int dy = 0;
    int dist = 0;
    // Check whether the pixel is on the contour.
    if (inimg.imgMeta.imgData[id] == 0) {
        dx = contourX[z] - tidc;
        dy = contourY[z] - tidr;
        dist = dx * dx + dy * dy;
        // Record the minimum distance from the coordinates inside the contour
        // to this point.
        if (dist < minMaxDist[z]) {
            minMaxDist[z] = dist;
            indexX[z] = tidc;
            indexY[z] = tidr;
            if (z > *lensec)
                *lensec = z;
        }
    }
}
// Kernel function: _maxMinKer (point whose nearest distance is maximal)
static __global__ void _maxMinKer(
        ImageCuda inimg, int *maxMinDist, int lenth, int *contourX,
        int *contourY, int *indexX, int *indexY, int *lensec)
{
    // Compute the thread indices.
    int tidc = blockIdx.x * blockDim.x + threadIdx.x;
    int tidr = blockIdx.y * blockDim.y + threadIdx.y;
    int z = blockIdx.z * blockDim.z + threadIdx.z;
    // Convert to the linear image offset.
    int id = tidr * inimg.pitchBytes + tidc;
    // Bounds checks.
    if (tidc >= inimg.imgMeta.width || tidr >= inimg.imgMeta.height)
        return;
    if (z >= lenth) return;
    // Coordinate differences between the two points.
    int dx = 0;
    int dy = 0;
    int dist = 0;
    // Check whether the pixel is on the contour.
    if (inimg.imgMeta.imgData[id] == 0) {
        dx = contourX[z] - tidc;
        dy = contourY[z] - tidr;
        dist = dx * dx + dy * dy;
        // Record the maximum distance from the coordinates inside the contour
        // to this point.
        if (dist > maxMinDist[z]) {
            maxMinDist[z] = dist;
            indexX[z] = tidc;
            indexY[z] = tidr;
            if (z > *lensec)
                *lensec = z;
        }
    }
}
// Kernel function: _shearSortDesKer (descending row sort)
static __global__ void _shearSortDesKer(
        int distDev[], int indexXDev[], int indexYDev[], int lensec, int judge)
{
    // Read the thread index and the block index.
    int cid = threadIdx.x;
    int rid = blockIdx.x;
    extern __shared__ int shared[];
    // Partition the shared memory into the vote counts and the two index arrays.
    int *vote, *indexX, *indexY;
    vote = shared;
    indexX = shared + judge;
    indexY = shared + judge * 2;
    // Initialize the shared memory.
    if (cid < lensec) {
        vote[cid] = distDev[rid * lensec + cid];
        indexX[cid] = indexXDev[rid * lensec + cid];
        indexY[cid] = indexYDev[rid * lensec + cid];
    }
    // Synchronize within the block.
    __syncthreads();
    // Temporary variables.
    int ixj, tempvote, tempindex;
    // Even rows are sorted in descending order.
    if (rid % 2 == 0) {
        for (int k = 2; k <= lensec; k <<= 1) {
            // Bitonic merge.
            for (int j = k >> 1; j > 0; j >>= 1) {
                // ixj is the position compared and swapped with the current position cid.
                ixj = cid ^ j;
                if (ixj > cid) {
                    // If (cid & k) == 0, swap the two items into descending order.
                    if ((cid & k) == 0 && (vote[cid] < vote[ixj])) {
                        // Swap the vote counts.
                        tempvote = vote[cid];
                        vote[cid] = vote[ixj];
                        vote[ixj] = tempvote;
                        // Swap the index values.
                        tempindex = indexX[cid];
                        indexX[cid] = indexX[ixj];
                        indexX[ixj] = tempindex;
                        tempindex = indexY[cid];
                        indexY[cid] = indexY[ixj];
                        indexY[ixj] = tempindex;
                    // If (cid & k) != 0, swap the two items into ascending order.
                    } else if ((cid & k) != 0 && vote[cid] > vote[ixj]) {
                        // Swap the vote counts.
                        tempvote = vote[cid];
                        vote[cid] = vote[ixj];
                        vote[ixj] = tempvote;
                        // Swap the index values.
                        tempindex = indexX[cid];
                        indexX[cid] = indexX[ixj];
                        indexX[ixj] = tempindex;
                        tempindex = indexY[cid];
                        indexY[cid] = indexY[ixj];
                        indexY[ixj] = tempindex;
                    }
                }
                __syncthreads();
            }
        }
    // Odd rows are sorted in ascending order.
    } else {
        for (int k = 2; k <= lensec; k <<= 1) {
            // Bitonic merge.
            for (int j = k >> 1; j > 0; j >>= 1) {
                // ixj is the position compared and swapped with the current position cid.
                ixj = cid ^ j;
                if (ixj > cid) {
                    // If (cid & k) == 0, swap the two items into ascending order.
                    if ((cid & k) == 0 && (vote[cid] > vote[ixj])) {
                        // Swap the vote counts.
                        tempvote = vote[cid];
                        vote[cid] = vote[ixj];
                        vote[ixj] = tempvote;
                        // Swap the index values.
                        tempindex = indexX[cid];
                        indexX[cid] = indexX[ixj];
                        indexX[ixj] = tempindex;
                        tempindex = indexY[cid];
                        indexY[cid] = indexY[ixj];
                        indexY[ixj] = tempindex;
                    // If (cid & k) != 0, swap the two items into descending order.
                    } else if ((cid & k) != 0 && vote[cid] < vote[ixj]) {
                        // Swap the vote counts.
                        tempvote = vote[cid];
                        vote[cid] = vote[ixj];
                        vote[ixj] = tempvote;
                        // Swap the index values.
                        tempindex = indexX[cid];
                        indexX[cid] = indexX[ixj];
                        indexX[ixj] = tempindex;
                        tempindex = indexY[cid];
                        indexY[cid] = indexY[ixj];
                        indexY[ixj] = tempindex;
                    }
                }
                __syncthreads();
            }
        }
    }
    // Copy the sorted arrays from shared memory back to global memory.
    if (cid < lensec) {
        distDev[rid * lensec + cid] = vote[cid];
        indexXDev[rid * lensec + cid] = indexX[cid];
        indexYDev[rid * lensec + cid] = indexY[cid];
    }
}
// Kernel 函数: _shearSortRowAscKer(行升序排序)
static __global__ void _shearSortAscKer(
int distDev[], int indexXDev[], int indexYDev[], int lensec, int judge)
{
// 读取线程号和块号。
int cid = threadIdx.x;
int rid = blockIdx.x;
extern __shared__ int shared[];
// 通过偏移,获得存放得票数和索引值的两部分共享内存空间。
int *vote, *indexX, *indexY;
vote = shared;
indexX = shared + judge;
indexY = shared + judge * 2;
// 为共享内存赋初始值。
if (cid < lensec) {
vote[cid] = distDev[rid * lensec + cid];
indexX[cid] = indexXDev[rid * lensec + cid];
indexY[cid] = indexYDev[rid * lensec + cid];
}
// 块内同步。
__syncthreads();
// 声明临时变量
int ixj, tempvote, tempindex;
// Sort even rows in ascending order.
if (rid % 2 == 0) {
for (int k = 2; k <= lensec; k <<= 1) {
// Bitonic merge.
for (int j = k >> 1; j > 0; j >>= 1) {
// ixj is the position compared (and possibly swapped) with the current position cid.
ixj = cid ^ j;
if (ixj > cid) {
// If (cid & k) == 0, swap the two entries into ascending order.
if ((cid & k) == 0 && (vote[cid] > vote[ixj])) {
// Swap the votes.
tempvote = vote[cid];
vote[cid] = vote[ixj];
vote[ixj] = tempvote;
// Swap the index values.
tempindex = indexX[cid];
indexX[cid] = indexX[ixj];
indexX[ixj] = tempindex;
tempindex = indexY[cid];
indexY[cid] = indexY[ixj];
indexY[ixj] = tempindex;
// If (cid & k) != 0, swap the two entries into descending order.
} else if ((cid & k) != 0 && vote[cid] < vote[ixj]) {
// Swap the votes.
tempvote = vote[cid];
vote[cid] = vote[ixj];
vote[ixj] = tempvote;
// Swap the index values.
tempindex = indexX[cid];
indexX[cid] = indexX[ixj];
indexX[ixj] = tempindex;
tempindex = indexY[cid];
indexY[cid] = indexY[ixj];
indexY[ixj] = tempindex;
}
}
__syncthreads();
}
}
// Sort odd rows in descending order.
} else {
for (int k = 2; k <= lensec; k <<= 1) {
// Bitonic merge.
for (int j = k >> 1; j > 0; j >>= 1) {
// ixj is the position compared (and possibly swapped) with the current position cid.
ixj = cid ^ j;
if (ixj > cid) {
// If (cid & k) == 0, swap the two entries into descending order.
if ((cid & k) == 0 && (vote[cid] < vote[ixj])) {
// Swap the votes.
tempvote = vote[cid];
vote[cid] = vote[ixj];
vote[ixj] = tempvote;
// Swap the index values.
tempindex = indexX[cid];
indexX[cid] = indexX[ixj];
indexX[ixj] = tempindex;
tempindex = indexY[cid];
indexY[cid] = indexY[ixj];
indexY[ixj] = tempindex;
// If (cid & k) != 0, swap the two entries into ascending order.
} else if ((cid & k) != 0 && vote[cid] > vote[ixj]) {
// Swap the votes.
tempvote = vote[cid];
vote[cid] = vote[ixj];
vote[ixj] = tempvote;
// Swap the index values.
tempindex = indexX[cid];
indexX[cid] = indexX[ixj];
indexX[ixj] = tempindex;
tempindex = indexY[cid];
indexY[cid] = indexY[ixj];
indexY[ixj] = tempindex;
}
}
__syncthreads();
}
}
}
// Copy the sorted arrays from shared memory back to global memory.
if (cid < lensec) {
distDev[rid * lensec + cid] = vote[cid];
indexXDev[rid * lensec + cid] = indexX[cid];
indexYDev[rid * lensec + cid] = indexY[cid];
}
}
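// Illustrative sketch (not used by the kernels above): both shear sort kernels
// inline the same three-way swap of the vote and index arrays. The
// compare-and-swap step could be factored into a small device helper such as
// the hypothetical one below, where the ascending flag plays the role of the
// (cid & k) test in the bitonic merge.
static __device__ void _cmpSwapTripleSketch(
        int *vote, int *indexX, int *indexY, int cid, int ixj, bool ascending)
{
    // Swap when the pair is out of order with respect to the requested direction.
    bool outOfOrder = ascending ? (vote[cid] > vote[ixj]) : (vote[cid] < vote[ixj]);
    if (outOfOrder) {
        int tmp = vote[cid]; vote[cid] = vote[ixj]; vote[ixj] = tmp;
        tmp = indexX[cid]; indexX[cid] = indexX[ixj]; indexX[ixj] = tmp;
        tmp = indexY[cid]; indexY[cid] = indexY[ixj]; indexY[ixj] = tmp;
    }
}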
// Host member method: minMax (point whose farthest distance is minimal)
__host__ int ICcircleRadii::minMax(Image *inimg, int picNum, int *minMaxDist,
int *minMaxIndexX, int *minMaxIndexY)
{
// Check whether the input image is null
if (inimg == NULL )
return NULL_POINTER;
// Check whether the image data is empty
if (inimg->imgData == NULL)
return UNMATCH_IMG;
// Copy the input image to the device
int errcode;
errcode = ImageBasicOp::copyToCurrentDevice(inimg);
if (errcode != NO_ERROR)
return errcode;
// Extract the ROI sub-image of the input image
ImageCuda inSubimgCud;
errcode = ImageBasicOp::roiSubImage(inimg, &inSubimgCud);
if (errcode != NO_ERROR)
return errcode;
// Define the number of points in the point set
int jugde = inSubimgCud.imgMeta.height * inSubimgCud.imgMeta.width;
// Compute the block dimensions and the grid dimensions for the kernel launches
dim3 gridsize, blocksize;
blocksize.x = IC_BLOCK_X;
blocksize.y = IC_BLOCK_Y;
blocksize.z = IC_BLOCK_Z;
gridsize.x = (inSubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x;
gridsize.y = (inSubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y;
gridsize.z = (jugde + blocksize.z - 1) / blocksize.z;
// Host-side data used for initialization
int *minMax = NULL;
int lenth = -1;
int lensec = -1;
// Allocate and initialize the host-side array
minMax = (int*)malloc(jugde * sizeof(int));
for (int i = 0; i < jugde; i++)
minMax[i] = IC_MAX_INT;
// Device-side arrays storing the contour points and the number of contour points
int *dev_contourX = NULL;
int *dev_contourY = NULL;
int *dev_lenth = NULL;
// Device-side arrays storing the coordinates and the number of result points
int *dev_indexX = NULL;
int *dev_indexY = NULL;
int *dev_lensec = NULL;
int *dev_minMax = NULL;
// Allocate one contiguous block of GPU memory and hand it out through pointer
// offsets to contourX, contourY, lenth, indexX, indexY, lensec and minMax
cudaMalloc((void**)&dev_contourX, (jugde * 5 + 3) * sizeof(int));
dev_contourY = dev_contourX + jugde;
dev_lenth = dev_contourY + jugde;
dev_indexX = dev_lenth + 1;
dev_indexY = dev_indexX + jugde;
dev_lensec = dev_indexY + jugde;
dev_minMax = dev_lensec + 1;
// Copy the data to the device; dev_minMax is seeded from the host array minMax,
// which was initialized to IC_MAX_INT above
cudaMemcpy(dev_minMax, minMax, jugde * sizeof(int),
cudaMemcpyHostToDevice);
cudaMemcpy(dev_lenth, &lenth, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_lensec, &lensec, sizeof(int), cudaMemcpyHostToDevice);
// Run kernel _getContourKer to obtain the coordinates of the contour points
_getContourKer<<<gridsize, blocksize>>>(inSubimgCud, dev_lenth, dev_contourX,
dev_contourY);
// Copy the contour point count back; dereferencing the device pointer dev_lenth
// directly on the host is invalid
cudaMemcpy(&lenth, dev_lenth, sizeof(int), cudaMemcpyDeviceToHost);
// Run kernel _minMaxKer to find the farthest points
_minMaxKer<<<gridsize, blocksize>>>(inSubimgCud, dev_minMax, lenth,
dev_contourX, dev_contourY, dev_indexX, dev_indexY, dev_lensec);
// Copy the result count back to the host before using it as a kernel argument
cudaMemcpy(&lensec, dev_lensec, sizeof(int), cudaMemcpyDeviceToHost);
// Run kernel _shearSortAscKer to sort the candidate points in ascending order;
// the kernel keeps three int arrays in shared memory, so 3 * jugde ints are needed
_shearSortAscKer<<<1, jugde, 3 * jugde * sizeof(int)>>>(dev_minMax, dev_indexX,
dev_indexY, lensec, jugde);
// Copy the results to the output arrays; the index arrays are copied as well,
// as implied by the minMaxIndexX / minMaxIndexY output parameters
cudaMemcpy(minMaxDist, dev_minMax, picNum * sizeof(int),
cudaMemcpyDeviceToHost);
cudaMemcpy(minMaxIndexX, dev_indexX, picNum * sizeof(int),
cudaMemcpyDeviceToHost);
cudaMemcpy(minMaxIndexY, dev_indexY, picNum * sizeof(int),
cudaMemcpyDeviceToHost);
// Release the host buffer and the device memory block
free(minMax);
cudaFree(dev_contourX);
// Call cudaGetLastError to check whether any of the above calls failed
cudaError_t cuerrcode;
cuerrcode = cudaGetLastError();
if (cuerrcode != cudaSuccess)
return CUDA_ERROR;
// Processing finished, return
return NO_ERROR;
}
// Host member method: maxMin (point whose nearest distance is maximal)
__host__ int ICcircleRadii::maxMin(Image *inimg, int picNum, int *maxMinDist,
int *maxMinIndexX, int *maxMinIndexY)
{
// Check whether the input image is null
if (inimg == NULL )
return NULL_POINTER;
// Check whether the image data is empty
if (inimg->imgData == NULL)
return UNMATCH_IMG;
// Copy the input image to the device
int errcode;
errcode = ImageBasicOp::copyToCurrentDevice(inimg);
if (errcode != NO_ERROR)
return errcode;
// Extract the ROI sub-image of the input image
ImageCuda inSubimgCud;
errcode = ImageBasicOp::roiSubImage(inimg, &inSubimgCud);
if (errcode != NO_ERROR)
return errcode;
// Define the number of points in the point set
int jugde = inSubimgCud.imgMeta.height * inSubimgCud.imgMeta.width;
// Compute the block dimensions and the grid dimensions for the kernel launches
dim3 gridsize, blocksize;
blocksize.x = IC_BLOCK_X;
blocksize.y = IC_BLOCK_Y;
blocksize.z = IC_BLOCK_Z;
gridsize.x = (inSubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x;
gridsize.y = (inSubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y;
gridsize.z = (jugde + blocksize.z - 1) / blocksize.z;
// Host-side data used for initialization
int *maxMin = NULL;
int lenth = -1;
int lensec = -1;
// Allocate the host-side array
maxMin = (int*)malloc(jugde * sizeof(int));
// Device-side arrays storing the contour points and the number of contour points
int *dev_contourX = NULL;
int *dev_contourY = NULL;
int *dev_lenth = NULL;
// Device-side arrays storing the coordinates and the number of result points
int *dev_indexX = NULL;
int *dev_indexY = NULL;
int *dev_lensec = NULL;
int *dev_maxMin = NULL;
// Allocate one contiguous block of GPU memory and hand it out through pointer
// offsets to contourX, contourY, lenth, indexX, indexY, lensec and maxMin
cudaMalloc((void**)&dev_contourX, (jugde * 5 + 3) * sizeof(int));
dev_contourY = dev_contourX + jugde;
dev_lenth = dev_contourY + jugde;
dev_indexX = dev_lenth + 1;
dev_indexY = dev_indexX + jugde;
dev_lensec = dev_indexY + jugde;
dev_maxMin = dev_lensec + 1;
// Initialize the host-side array
for (int i = 0; i < jugde; i++)
maxMin[i] = IC_MIN_INT;
// Copy the data to the device; dev_maxMin is seeded from the host array maxMin,
// which was initialized to IC_MIN_INT above
cudaMemcpy(dev_maxMin, maxMin, jugde * sizeof(int),
cudaMemcpyHostToDevice);
cudaMemcpy(dev_lenth, &lenth, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_lensec, &lensec, sizeof(int), cudaMemcpyHostToDevice);
// Run kernel _getContourKer to obtain the coordinates of the contour points
_getContourKer<<<gridsize, blocksize>>>(inSubimgCud, dev_lenth, dev_contourX,
dev_contourY);
// Copy the contour point count back; dereferencing the device pointer dev_lenth
// directly on the host is invalid
cudaMemcpy(&lenth, dev_lenth, sizeof(int), cudaMemcpyDeviceToHost);
// Run kernel _maxMinKer to find the target points
_maxMinKer<<<gridsize, blocksize>>>(inSubimgCud, dev_maxMin, lenth,
dev_contourX, dev_contourY, dev_indexX, dev_indexY, dev_lensec);
// Copy the result count back to the host before using it as a kernel argument
cudaMemcpy(&lensec, dev_lensec, sizeof(int), cudaMemcpyDeviceToHost);
// Run kernel _shearSortDesKer to sort the candidate points in descending order;
// the kernel keeps three int arrays in shared memory, so 3 * jugde ints are needed
_shearSortDesKer<<<1, jugde, 3 * jugde * sizeof(int)>>>(dev_maxMin, dev_indexX,
dev_indexY, lensec, jugde);
// Copy the results to the output arrays; the index arrays are copied as well,
// as implied by the maxMinIndexX / maxMinIndexY output parameters
cudaMemcpy(maxMinDist, dev_maxMin, picNum * sizeof(int),
cudaMemcpyDeviceToHost);
cudaMemcpy(maxMinIndexX, dev_indexX, picNum * sizeof(int),
cudaMemcpyDeviceToHost);
cudaMemcpy(maxMinIndexY, dev_indexY, picNum * sizeof(int),
cudaMemcpyDeviceToHost);
// Release the host buffer and the device memory block
free(maxMin);
cudaFree(dev_contourX);
// Call cudaGetLastError to check whether any of the above calls failed
cudaError_t cuerrcode;
cuerrcode = cudaGetLastError();
if (cuerrcode != cudaSuccess)
return CUDA_ERROR;
// Processing finished, return
return NO_ERROR;
}
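// Usage sketch (hypothetical caller; the Image setup is assumed to happen
// elsewhere and is not part of this file):
//
//     ICcircleRadii ic;
//     int dist[16], ix[16], iy[16];
//     int err = ic.minMax(img, 16, dist, ix, iy);
//     if (err == NO_ERROR)
//         err = ic.maxMin(img, 16, dist, ix, iy);
//
// picNum bounds how many sorted results are copied back to the host arrays, so
// the caller must size the output arrays accordingly.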
using namespace std;
namespace amgx
{
namespace fixcolor_gauss_seidel_solver
{
// -------------------------
// Kernels
// -------------------------
template<typename IndexType, typename ValueTypeA, int blockrows_per_cta, int blockrows_per_warp, int bsize, int bsize_sq>
__global__
void setupBlockGSSmooth3by3BlockDiaCsrKernel(const IndexType *row_offsets, const IndexType *column_indices, const ValueTypeA *values, const IndexType *dia_indices, ValueTypeA *Dinv, const int num_block_rows)
{
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x & 31;
// padding row blocks to fit in a single warp
if ( warp_thread_id >= blockrows_per_warp * bsize_sq ) { return; }
// new thread id with padding
int tid = warp_id * blockrows_per_warp * bsize_sq + warp_thread_id;
int cta_blockrow_id = tid / bsize_sq;
int blockrow_id = blockIdx.x * blockrows_per_cta + cta_blockrow_id;
const int mat_entry_index = tid - cta_blockrow_id * bsize_sq;
const int i_ind = mat_entry_index / bsize;
const int j_ind = mat_entry_index - i_ind * bsize;
volatile __shared__ ValueTypeA s_Amat[bsize_sq * blockrows_per_cta ];
ValueTypeA e_out;
while (blockrow_id < num_block_rows)
{
int offset = blockrow_id * bsize_sq + mat_entry_index;
// Store the diagonal
e_out = values[bsize_sq * dia_indices[blockrow_id] + mat_entry_index];
// Each thread stores its entry in s_Amat
s_Amat[tid] = e_out;
compute_block_inverse_row_major<IndexType, ValueTypeA, blockrows_per_cta, bsize, bsize_sq>
( s_Amat, cta_blockrow_id * bsize_sq, offset, i_ind, j_ind, Dinv );
blockrow_id += gridDim.x * blockrows_per_cta;
}
}
template<typename IndexType, typename ValueTypeA, int threads_per_block, int halfwarps_per_block, int bsize, int log_bsize, int bsize_sq, int log_bsize_sq>
__global__
void setupBlockGSSmooth4by4BlockDiaCsrKernel_V2(const IndexType *row_offsets, const IndexType *column_indices, const ValueTypeA *values, const IndexType *dia_indices, ValueTypeA *Dinv, const int num_block_rows)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
int halfwarp_id = tid >> log_bsize_sq;
const int block_halfwarp_id = threadIdx.x >> log_bsize_sq;
const int mat_entry_index = threadIdx.x & (bsize_sq - 1);
const int i_ind = mat_entry_index >> log_bsize;
const int j_ind = mat_entry_index & (bsize - 1);
volatile __shared__ ValueTypeA s_Amat[bsize_sq * halfwarps_per_block ];
ValueTypeA e_out;
while (halfwarp_id < num_block_rows)
{
int offset = halfwarp_id * bsize_sq + mat_entry_index;
// Store the diagonal
e_out = values[bsize_sq * dia_indices[halfwarp_id] + mat_entry_index];
// Each thread stores its entry in s_Amat
s_Amat[threadIdx.x] = e_out;
compute_block_inverse_row_major<IndexType, ValueTypeA, halfwarps_per_block, bsize, bsize_sq>
( s_Amat, block_halfwarp_id * bsize_sq, offset, i_ind, j_ind, Dinv );
halfwarp_id += gridDim.x * blockDim.x >> log_bsize_sq;
}
}
template<typename IndexType, typename ValueTypeA, int threads_per_block, int halfwarps_per_block>
__global__
void setupBlockGSSmoothbBybBlockDiaCsrKernel(const IndexType *row_offsets, const IndexType *column_indices, const ValueTypeA *values, const IndexType *dia_indices, ValueTypeA *Dinv, const int num_block_rows, int bsize, int bsize_sq, ValueTypeA *temp1)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
int halfwarp_id = tid >> 4;
const int block_halfwarp_id = threadIdx.x >> 4;
const int mat_entry_index = threadIdx.x & (16 - 1);
const int i_ind = mat_entry_index >> 2;
const int j_ind = mat_entry_index & 3;
extern __shared__ volatile char sharedc[];
volatile ValueTypeA *s_Amat;
s_Amat = (ValueTypeA *)&sharedc[0];
int tile_num = (bsize - 1) / 4 + 1;
ValueTypeA *e_out = &temp1[(blockIdx.x * blockDim.x + threadIdx.x) * tile_num * tile_num];
while (halfwarp_id < num_block_rows)
{
int offset = halfwarp_id * bsize_sq + i_ind * bsize + j_ind;
int s_offset = block_halfwarp_id * bsize_sq;
// Store the diagonal
for (int t1 = 0; t1 < tile_num; t1++)
for (int t2 = 0; t2 < tile_num; t2++)
if ((t1 * 4 + i_ind) < bsize && (t2 * 4 + j_ind) < bsize)
{
e_out[t1 * tile_num + t2] = values[bsize_sq * dia_indices[halfwarp_id] + (t1 * 4 + i_ind) * bsize + t2 * 4 + j_ind];
}
// Each thread stores its entry in s_Amat
for (int t1 = 0; t1 < tile_num; t1++)
for (int t2 = 0; t2 < tile_num; t2++)
if ((t1 * 4 + i_ind) < bsize && (t2 * 4 + j_ind) < bsize)
{
s_Amat[s_offset + (t1 * 4 + i_ind) * bsize + t2 * 4 + j_ind] = e_out[t1 * tile_num + t2];
}
compute_block_inverse2<IndexType, ValueTypeA, halfwarps_per_block>
( s_Amat, s_offset, offset, i_ind, j_ind, Dinv, tile_num, bsize, bsize_sq );
halfwarp_id += gridDim.x * halfwarps_per_block;
}
}
template<typename IndexType, typename ValueTypeA, int blockrows_per_cta, int bsize, int bsize_sq>
__global__
void setupBlockGSSmoothBlockDiaCsrKernel_V2(const IndexType *row_offsets, const IndexType *column_indices, const ValueTypeA *values, const IndexType *dia_indices,
ValueTypeA *Dinv, const int num_block_rows)
{
int cta_blockrow_id = threadIdx.x / bsize_sq;
int blockrow_id = blockIdx.x * blockrows_per_cta + cta_blockrow_id;
const int mat_entry_index = threadIdx.x - cta_blockrow_id * bsize_sq;
const int i_ind = mat_entry_index / bsize;
const int j_ind = mat_entry_index - i_ind * bsize;
volatile __shared__ ValueTypeA s_Amat[bsize_sq * blockrows_per_cta];
int offset, s_offset;
ValueTypeA e_out;
while (blockrow_id < num_block_rows && cta_blockrow_id < blockrows_per_cta)
{
// Store the diagonal
offset = blockrow_id * bsize_sq + mat_entry_index;
e_out = values[bsize_sq * dia_indices[blockrow_id] + mat_entry_index];
// Each thread stores its entry in s_Amat
s_Amat[threadIdx.x] = e_out;
s_offset = cta_blockrow_id * bsize_sq;
#define s_A(ROW,COL) s_Amat[s_offset+ROW*bsize+COL]
ValueTypeA diag;
for (int row = 0; row < bsize; row++)
{
diag = 1.0 / s_A(row, row);
if ((i_ind == 0) && !(j_ind == row))
{
s_A(row, j_ind) = s_A(row, j_ind) * diag;
}
if ((i_ind != row) && !(j_ind == row))
{
s_A(i_ind, j_ind) = -(s_A(i_ind, row) * s_A(row, j_ind)) + s_A(i_ind, j_ind);
}
if (i_ind == 0)
{
s_A(j_ind, row) = (j_ind == row) ? diag : -(s_A(j_ind, row) * diag);
}
}
Dinv[offset] = s_A(i_ind, j_ind);
blockrow_id += (gridDim.x * blockDim.x) / bsize_sq;
}
}
// Kernel to smooth, NAIVE implementation with texture
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int eighthwarps_per_block, int bsize, int log_bsize, int half_bsize>
__global__
void multicolorGSSmooth4by4BlockDiaCsrKernel_NAIVE_tex_readDinv2(const IndexType *row_offsets, const IndexType *column_indices, const IndexType *dia_indices, const ValueTypeA *nonzero_values, const ValueTypeA *Dinv,
const ValueTypeB *b, const ValueTypeB *x, ValueTypeB weight, const int num_rows_per_line, const int color_num, const int num_block_rows, ValueTypeB *xout)
{
const int vec_entry_index = threadIdx.x & (bsize - 1);
const int block_eighthwarp_id = threadIdx.x >> log_bsize;
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
int eighthwarp_id = tid >> log_bsize;
volatile __shared__ ValueTypeB s_xtemp[ bsize * eighthwarps_per_block ];
ValueTypeB bmAx, xin;
ValueTypeB temp[bsize];
int offset, i, s_offset;
int n1d2 = num_rows_per_line * num_rows_per_line;
int pln, tid2, row, col;
int idx_1 = color_num & 1;
int idx_2 = (color_num & 2) >> 1;
int idx_3 = (color_num & 4) >> 2;
while (true)
{
pln = eighthwarp_id / n1d2;
tid2 = eighthwarp_id % n1d2;
row = tid2 / num_rows_per_line;
col = tid2 % num_rows_per_line;
i = 4 * n1d2 * (2 * pln + idx_1) + 2 * num_rows_per_line * (2 * row + idx_2) + 2 * col + idx_3;
//if (pln >= num_rows_per_line) return;
//if (row >= num_rows_per_line) return;
if (i > num_block_rows) { return; }
offset = i * bsize + vec_entry_index;
bmAx = b[offset];
// Contribution from diagonal
xin = x[offset];
s_xtemp[threadIdx.x] = xin;
// Load dia_values and do matrix multiply
s_offset = block_eighthwarp_id * bsize;
loadAsVector<bsize>(nonzero_values + dia_indices[i]*bsize * bsize + vec_entry_index * bsize, temp);
#pragma unroll
for (int m = 0; m < bsize; m++)
{
bmAx -= temp[m] * s_xtemp[s_offset + m];
}
// Contribution from each nonzero column
int jmax = row_offsets[i + 1];
for (int jind = row_offsets[i]; jind < jmax; jind++)
{
IndexType jcol = column_indices[jind];
if (jcol != i)
{
offset = jcol * bsize + vec_entry_index;
s_xtemp[threadIdx.x] = x[offset];
// Load nonzero_values
offset = jind * bsize * bsize + vec_entry_index * bsize;
loadAsVector<bsize>(nonzero_values + offset, temp);
s_offset = block_eighthwarp_id * bsize;
#pragma unroll
for (int m = 0; m < bsize; m++)
{
bmAx -= temp[m] * s_xtemp[s_offset + m];
}
}
}
s_xtemp[threadIdx.x] = bmAx;
bmAx = 0.;
// Load Dinv and multiply to RHS
offset = i * bsize * bsize + vec_entry_index * bsize;
loadAsVector<bsize>(Dinv + offset, temp);
s_offset = block_eighthwarp_id * bsize;
#pragma unroll
for (int m = 0; m < bsize; m++)
{
bmAx += temp[m] * s_xtemp[s_offset + m];
}
xout[i * bsize + vec_entry_index] = xin + weight * bmAx;
eighthwarp_id += (gridDim.x * blockDim.x >> log_bsize);
}
}
// Kernel to smooth, NAIVE implementation with texture
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int blockrows_per_cta, int bsize>
__global__
void multicolorGSSmoothBlockDiaCsrKernel_NAIVE_tex_readDinv2(const IndexType *row_offsets, const IndexType *column_indices, const IndexType *dia_indices, const ValueTypeA *nonzero_values, const ValueTypeA *Dinv,
const ValueTypeB *b, const ValueTypeB *x, ValueTypeB weight, const int *sorted_rows_by_color, const int num_rows_per_color, const int num_block_rows, ValueTypeB *xout)
{
int cta_blockrow_id = (threadIdx.x) / bsize;
int blockrow_id = blockIdx.x * blockrows_per_cta + cta_blockrow_id;
const int vec_entry_index = threadIdx.x - cta_blockrow_id * bsize;
volatile __shared__ ValueTypeB s_xtemp[ bsize * blockrows_per_cta];
ValueTypeB bmAx, xin;
ValueTypeB temp[bsize];
int offset, i, s_offset;
while (blockrow_id < num_rows_per_color && cta_blockrow_id < blockrows_per_cta)
{
i = sorted_rows_by_color[blockrow_id];
offset = i * bsize + vec_entry_index;
bmAx = b[offset];
// Contribution from diagonal
xin = x[offset];
s_xtemp[threadIdx.x] = xin;
// Load dia_values and do matrix multiply
s_offset = cta_blockrow_id * bsize;
offset = dia_indices[i] * bsize * bsize + vec_entry_index * bsize;
#pragma unroll
for (int k = 0; k < bsize; k++)
{
temp[k] = nonzero_values[offset + k];
}
//loadAsVector<bsize>(dia_values+offset,temp);
#pragma unroll
for (int m = 0; m < bsize; m++)
{
bmAx -= temp[m] * s_xtemp[s_offset + m];
}
// Contribution from each nonzero column
int jmax = row_offsets[i + 1];
for (int jind = row_offsets[i]; jind < jmax; jind++)
{
IndexType jcol = column_indices[jind];
if (jcol != i)
{
offset = jcol * bsize + vec_entry_index;
s_xtemp[threadIdx.x] = x[offset];
// Load nonzero_values
offset = jind * bsize * bsize + vec_entry_index * bsize;
#pragma unroll
for (int k = 0; k < bsize; k++)
{
temp[k] = nonzero_values[offset + k];
}
//loadAsVector<bsize>(nonzero_values+offset,temp);
s_offset = cta_blockrow_id * bsize;
#pragma unroll
for (int m = 0; m < bsize; m++)
{
bmAx -= temp[m] * s_xtemp[s_offset + m];
}
}
}
s_xtemp[threadIdx.x] = bmAx;
bmAx = 0.;
// Load Dinv and multiply to RHS
offset = i * bsize * bsize + vec_entry_index * bsize;
#pragma unroll
for (int k = 0; k < bsize; k++)
{
temp[k] = Dinv[offset + k];
}
//loadAsVector<bsize>(Dinv+offset,temp);
s_offset = cta_blockrow_id * bsize;
#pragma unroll
for (int m = 0; m < bsize; m++)
{
bmAx += temp[m] * s_xtemp[s_offset + m];
}
xout[i * bsize + vec_entry_index] = xin + weight * bmAx;
blockrow_id += (gridDim.x * blockDim.x) / bsize;
}
}
// Kernel to smooth, NAIVE implementation with texture
// This is TERRIBLE in terms of memory access pattern
template<typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__
void fixcolorGSSmoothCsrKernel_NAIVE_tex(const IndexType *row_offsets, const IndexType *column_indices, const IndexType *diag, const ValueTypeA *nonzero_values, const ValueTypeA *Dinv,
const ValueTypeB *b, const ValueTypeB *x, ValueTypeB weight, const int num_rows_per_line, const int color_num, const int num_block_rows, ValueTypeB *xout)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int n1d2 = num_rows_per_line * num_rows_per_line;
int pln, tid2, row, col;
int idx_1 = color_num & 1;
int idx_2 = (color_num & 2) >> 1;
int idx_3 = (color_num & 4) >> 2;
int i;
ValueTypeB bmAx, xin;
ValueTypeB temp, s_xtemp;
ValueTypeB dia;
while (true)
{
pln = tid / n1d2;
tid2 = tid % n1d2;
row = tid2 / num_rows_per_line;
col = tid2 % num_rows_per_line;
i = 4 * n1d2 * (2 * pln + idx_1) + 2 * num_rows_per_line * (2 * row + idx_2) + 2 * col + idx_3;
//if (pln >= num_rows_per_line) return;
//if (row >= num_rows_per_line) return;
if (i > num_block_rows) { return; }
bmAx = b[i];
xin = x[i];
dia = nonzero_values[diag[i]];
bmAx -= xin * dia;
// Contribution from each nonzero column
int jmax = row_offsets[i + 1];
for (int jind = row_offsets[i]; jind < jmax; jind++)
{
IndexType jcol = column_indices[jind];
if (jcol != i)
{
s_xtemp = x[jcol];
temp = nonzero_values[jind];
bmAx -= temp * s_xtemp;
}
}
bmAx /= dia;
xout[i] = xin + weight * bmAx;
tid += gridDim.x * blockDim.x;
}
}
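// Note on the index mapping above: with n1d2 = num_rows_per_line^2 the formula
// expands to i = n^2 * (2*pln + idx_1) + n * (2*row + idx_2) + (2*col + idx_3)
// for n = 2 * num_rows_per_line, i.e. the unknowns are assumed to come from a
// structured n x n x n grid and the three bits of color_num select the parity
// in the z, y and x directions. The 8 fixed colors therefore form a 2x2x2
// red-black style coloring, and each sweep only touches rows of one color.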
// -------------------
// Methods
// -------------------
// Constructor
template<class T_Config>
FixcolorGaussSeidelSolver_Base<T_Config>::FixcolorGaussSeidelSolver_Base( AMG_Config &cfg, const std::string &cfg_scope) : Solver<T_Config>( cfg, cfg_scope)
{
this->weight = cfg.AMG_Config::getParameter<double>("relaxation_factor", cfg_scope);
symFlag = cfg.AMG_Config::getParameter<int>("symmetric_GS", cfg_scope);
if (cfg.AMG_Config::getParameter<int>("use_bsrxmv", cfg_scope))
{
this->use_bsrxmv = 1;
}
else
{
this->use_bsrxmv = 0;
}
if (weight == 0)
{
weight = 1.;
amgx_printf("Warning, setting weight to 1 instead of estimating largest_eigen_value in Multicolor GaussSeidel smoother\n");
}
num_colors = 8;
}
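// Summary of the parameters read above: relaxation_factor is the damping weight
// (forced to 1 with a warning if it is 0), symmetric_GS adds a reverse-order
// sweep per iteration, use_bsrxmv selects the Cusparse::bsrmv path used by
// smooth_BxB (solver_setup() below refuses to run without it), and the number
// of colors is fixed at 8.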
// Destructor
template<class T_Config>
FixcolorGaussSeidelSolver_Base<T_Config>::~FixcolorGaussSeidelSolver_Base()
{
}
template<class T_Config>
void
FixcolorGaussSeidelSolver_Base<T_Config>::printSolverParameters() const
{
std::cout << "relaxation_factor= " << this->weight << std::endl;
std::cout << "use_bsrxmv = " << this->use_bsrxmv << std::endl;
std::cout << "symmetric_GS = " << this->symFlag << std::endl;
}
template<class T_Config>
void FixcolorGaussSeidelSolver_Base<T_Config>::computeDinv(Matrix<T_Config> &A)
{
Matrix<T_Config> *A_as_matrix = (Matrix<T_Config> *) this->m_A;
ViewType oldView = A.currentView();
A.setView(A_as_matrix->getViewExterior());
if (A.get_block_dimx() == 4 && A.get_block_dimy() == 4)
{
computeDinv_4x4(A);
}
else if (A.get_block_dimx() == 3 && A.get_block_dimy() == 3)
{
computeDinv_3x3(A);
}
else if (A.get_block_dimx() == 1 && A.get_block_dimy() == 1)
{
computeDinv_1x1(A);
}
else if (A.get_block_dimx() == 2 && A.get_block_dimy() == 2)
{
computeDinv_2x2(A);
}
else if (A.get_block_dimx() == 5 && A.get_block_dimy() == 5)
{
computeDinv_5x5(A);
}
else if (A.get_block_dimx() == A.get_block_dimy())
{
computeDinv_bxb(A, A.get_block_dimx());
}
else
{
FatalError("Unsupported block size for FixcolorGaussSeidelSolver computeEinv", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
A.setView(oldView);
}
// Method to compute the diagonal matrix E in DILU smoother
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void FixcolorGaussSeidelSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_3x3(const Matrix_h &A)
{
FatalError("Multicolor GS smoother not implemented with host format, exiting", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void FixcolorGaussSeidelSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_4x4(const Matrix_h &A)
{
FatalError("Multicolor GS smoother not implemented with host format, exiting", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void FixcolorGaussSeidelSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_2x2(const Matrix_h &A)
{
FatalError("Multicolor GS smoother not implemented with host format, exiting", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void FixcolorGaussSeidelSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_5x5(const Matrix_h &A)
{
FatalError("Multicolor GS smoother not implemented with host format, exiting", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void FixcolorGaussSeidelSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_1x1(const Matrix_h &A)
{
FatalError("Multicolor GS smoother not implemented with host format, exiting", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void FixcolorGaussSeidelSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_bxb(const Matrix_h &A, const int bsize)
{
FatalError("Multicolor GS smoother not implemented with host format, exiting", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void FixcolorGaussSeidelSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_4x4(const Matrix_d &A)
{
//both DIAG supported
this->Dinv.resize(A.get_num_rows()*A.get_block_dimx()*A.get_block_dimy(), 0.0);
const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
const IndexType *A_column_indices_ptr = A.col_indices.raw();
const IndexType *A_dia_idx_ptr = A.diag.raw();
ValueTypeA *Dinv_ptr = this->Dinv.raw();
const ValueTypeA *A_nonzero_values_ptr = A.values.raw();
// MUST BE MULTIPLE OF 16
const int threads_per_block = 512;
const int halfwarps_per_block = threads_per_block / 16;
const int num_blocks = min(AMGX_GRID_MAX_SIZE, (int) (A.get_num_rows() - 1) / halfwarps_per_block + 1);
cudaFuncSetCacheConfig(setupBlockGSSmooth4by4BlockDiaCsrKernel_V2<IndexType, ValueTypeA, threads_per_block, halfwarps_per_block, 4, 2, 16, 4>, cudaFuncCachePreferL1);
setupBlockGSSmooth4by4BlockDiaCsrKernel_V2<IndexType, ValueTypeA, threads_per_block, halfwarps_per_block, 4, 2, 16, 4> <<< num_blocks, threads_per_block>>>
(A_row_offsets_ptr, A_column_indices_ptr, A_nonzero_values_ptr, A_dia_idx_ptr, Dinv_ptr, A.get_num_rows());
cudaCheckError();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void FixcolorGaussSeidelSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_bxb(const Matrix_d &A, const int bsize)
{
//both DIAG supported
this->Dinv.resize(A.get_num_rows()*A.get_block_dimx()*A.get_block_dimy(), 0.0);
const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
const IndexType *A_column_indices_ptr = A.col_indices.raw();
const IndexType *A_dia_idx_ptr = A.diag.raw();
ValueTypeA *Dinv_ptr = this->Dinv.raw();
const ValueTypeA *A_nonzero_values_ptr = A.values.raw();
MVector temp(AMGX_GRID_MAX_SIZE * ((bsize - 1) / 4 + 1) * ((bsize - 1) / 4 + 1));
ValueTypeA *temp_ptr = temp.raw();
// MUST BE MULTIPLE OF 16
const int threads_per_block = 512;
const int halfwarps_per_block = threads_per_block / 16;
const int num_blocks = min(AMGX_GRID_MAX_SIZE, (int) (A.get_num_rows() - 1) / halfwarps_per_block + 1);
cudaFuncSetCacheConfig(setupBlockGSSmoothbBybBlockDiaCsrKernel<IndexType, ValueTypeA, threads_per_block, halfwarps_per_block>, cudaFuncCachePreferL1);
setupBlockGSSmoothbBybBlockDiaCsrKernel<IndexType, ValueTypeA, threads_per_block, halfwarps_per_block> <<< num_blocks, threads_per_block, sizeof(ValueTypeA)*bsize *bsize *halfwarps_per_block>>>
(A_row_offsets_ptr, A_column_indices_ptr, A_nonzero_values_ptr, A_dia_idx_ptr, Dinv_ptr, A.get_num_rows(), bsize, bsize * bsize, temp_ptr);
cudaCheckError();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void FixcolorGaussSeidelSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_3x3(const Matrix_d &A)
{
//both DIAG supported
this->Dinv.resize(A.get_num_rows()*A.get_block_dimx()*A.get_block_dimy(), 0.0);
const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
const IndexType *A_column_indices_ptr = A.col_indices.raw();
const IndexType *A_dia_idx_ptr = A.diag.raw();
ValueTypeA *Dinv_ptr = this->Dinv.raw();
const ValueTypeA *A_nonzero_values_ptr = A.values.raw();
// MUST BE MULTIPLE OF 16
const int threads_per_block = 256;
const int blockrows_per_cta = threads_per_block / 9;
const int num_blocks = min(AMGX_GRID_MAX_SIZE, (int) (A.get_num_rows() - 1) / blockrows_per_cta + 1);
cudaFuncSetCacheConfig(setupBlockGSSmooth3by3BlockDiaCsrKernel < IndexType, ValueTypeA, blockrows_per_cta, 32 / 9, 3, 9 >, cudaFuncCachePreferL1);
setupBlockGSSmooth3by3BlockDiaCsrKernel < IndexType, ValueTypeA, blockrows_per_cta, 32 / 9, 3, 9 > <<< num_blocks, threads_per_block>>>
(A_row_offsets_ptr, A_column_indices_ptr, A_nonzero_values_ptr, A_dia_idx_ptr, Dinv_ptr, A.get_num_rows());
cudaCheckError();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void FixcolorGaussSeidelSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_2x2(const Matrix_d &A)
{
//both DIAG supported
this->Dinv.resize(A.get_num_rows()*A.get_block_dimx()*A.get_block_dimy(), 0.0);
const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
const IndexType *A_column_indices_ptr = A.col_indices.raw();
const IndexType *A_dia_idx_ptr = A.diag.raw();
ValueTypeA *Dinv_ptr = this->Dinv.raw();
const ValueTypeA *A_nonzero_values_ptr = A.values.raw();
// MUST BE MULTIPLE OF 16
const int threads_per_block = 256;
const int blockrows_per_cta = threads_per_block / 4;
const int num_blocks = min(AMGX_GRID_MAX_SIZE, (int) (A.get_num_rows() - 1) / blockrows_per_cta + 1);
cudaFuncSetCacheConfig(setupBlockGSSmooth3by3BlockDiaCsrKernel < IndexType, ValueTypeA, blockrows_per_cta, 32 / 4, 2, 4 >, cudaFuncCachePreferL1);
setupBlockGSSmooth3by3BlockDiaCsrKernel < IndexType, ValueTypeA, blockrows_per_cta, 32 / 4, 2, 4 > <<< num_blocks, threads_per_block>>>
(A_row_offsets_ptr, A_column_indices_ptr, A_nonzero_values_ptr, A_dia_idx_ptr, Dinv_ptr, A.get_num_rows());
cudaCheckError();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void FixcolorGaussSeidelSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_5x5(const Matrix_d &A)
{
//both DIAG supported
this->Dinv.resize(A.get_num_rows()*A.get_block_dimx()*A.get_block_dimy(), 0.0);
const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
const IndexType *A_column_indices_ptr = A.col_indices.raw();
const IndexType *A_dia_idx_ptr = A.diag.raw();
ValueTypeA *Dinv_ptr = this->Dinv.raw();
const ValueTypeA *A_nonzero_values_ptr = A.values.raw();
// MUST BE MULTIPLE OF 16
const int threads_per_block = 256;
const int blockrows_per_cta = threads_per_block / 25;
const int num_blocks = min(AMGX_GRID_MAX_SIZE, (int) (A.get_num_rows() - 1) / blockrows_per_cta + 1);
cudaFuncSetCacheConfig(setupBlockGSSmooth3by3BlockDiaCsrKernel < IndexType, ValueTypeA, blockrows_per_cta, 32 / 25, 5, 25 >, cudaFuncCachePreferL1);
setupBlockGSSmooth3by3BlockDiaCsrKernel < IndexType, ValueTypeA, blockrows_per_cta, 32 / 25, 5, 25 > <<< num_blocks, threads_per_block>>>
(A_row_offsets_ptr, A_column_indices_ptr, A_nonzero_values_ptr, A_dia_idx_ptr, Dinv_ptr, A.get_num_rows());
cudaCheckError();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void FixcolorGaussSeidelSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_1x1(const Matrix_d &A)
{
}
// Solver setup
template<class T_Config>
void
FixcolorGaussSeidelSolver_Base<T_Config>::solver_setup(bool reuse_matrix_structure)
{
Matrix<T_Config> *A_as_matrix = dynamic_cast<Matrix<T_Config>*>(Base::m_A);
if (!A_as_matrix)
{
FatalError("FixcolorGaussSeidelSolver only works with explicit matrices", AMGX_ERR_INTERNAL);
}
if (!this->use_bsrxmv)
{
FatalError("Use bsrxmv implementation, old implementation is buggy for diagonal matrix", AMGX_ERR_NOT_IMPLEMENTED);
}
computeDinv(*A_as_matrix);
}
//
template<class T_Config>
void
FixcolorGaussSeidelSolver_Base<T_Config>::solve_init( VVector &b, VVector &x, bool xIsZero )
{
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void FixcolorGaussSeidelSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1(const Matrix_h &A, const VVector &b, VVector &x, ViewType separation_flag)
{
FatalError("Haven't implemented Block Multicolor Gauss-Seidel smoother for host format", AMGX_ERR_NOT_SUPPORTED_TARGET);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void FixcolorGaussSeidelSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_3x3(const Matrix_h &A, const VVector &b, VVector &x, ViewType separation_flag)
{
FatalError("Haven't implemented Block Multicolor Gauss-Seidel smoother for host format", AMGX_ERR_NOT_SUPPORTED_TARGET);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void FixcolorGaussSeidelSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_4x4(const Matrix_h &A, const VVector &b, VVector &x, ViewType separation_flag)
{
FatalError("Haven't implemented Block Multicolor Gauss-Seidel smoother for host format", AMGX_ERR_NOT_SUPPORTED_TARGET);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void FixcolorGaussSeidelSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_BxB(Matrix_h &A, VVector &b, VVector &x, ViewType separation_flag)
{
FatalError("Haven't implemented Block Multicolor Gauss-Seidel smoother for host format", AMGX_ERR_NOT_SUPPORTED_TARGET);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void FixcolorGaussSeidelSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1(const Matrix_d &A, const VVector &b, VVector &x, ViewType separation_flag)
{
const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
const IndexType *A_column_indices_ptr = A.col_indices.raw();
const ValueTypeA *A_nonzero_values_ptr = A.values.raw();
const ValueTypeB *b_ptr = b.raw();
const IndexType *A_diag_ptr = A.diag.raw();
const ValueTypeA *Dinv_ptr = this->Dinv.raw();
ValueTypeB *x_ptr = x.raw();
int n = A.get_num_rows();
int num_rows_per_line;
num_rows_per_line = (int) ceil(cbrt((double)n) / 2);
for (int i = 0; i < this->num_colors; i++)
{
int num_rows_per_color = n / this->num_colors + 1;
const int threads_per_block = 512;
//const int eightwarps_per_block = threads_per_block/4;
const int num_blocks = min( AMGX_GRID_MAX_SIZE, (int)(num_rows_per_color - 1) / threads_per_block + 1 );
cudaFuncSetCacheConfig(fixcolorGSSmoothCsrKernel_NAIVE_tex<IndexType, ValueTypeA, ValueTypeB>, cudaFuncCachePreferL1);
fixcolorGSSmoothCsrKernel_NAIVE_tex<IndexType, ValueTypeA, ValueTypeB> <<< num_blocks, threads_per_block>>>
(A_row_offsets_ptr, A_column_indices_ptr, A_diag_ptr, A_nonzero_values_ptr, Dinv_ptr,
b_ptr, x_ptr, this->weight, num_rows_per_line, i, n, x_ptr);
cudaCheckError();
}
if (this->symFlag == 1)
{
int num_rows_per_color = n / this->num_colors + 1;
for (int i = this->num_colors - 1; i >= 0; i--)
{
const int threads_per_block = 512;
const int num_blocks = min( AMGX_GRID_MAX_SIZE, (int)(num_rows_per_color - 1) / threads_per_block + 1 );
cudaFuncSetCacheConfig(fixcolorGSSmoothCsrKernel_NAIVE_tex<IndexType, ValueTypeA, ValueTypeB>, cudaFuncCachePreferL1);
fixcolorGSSmoothCsrKernel_NAIVE_tex<IndexType, ValueTypeA, ValueTypeB> <<< num_blocks, threads_per_block>>>
(A_row_offsets_ptr, A_column_indices_ptr, A_diag_ptr, A_nonzero_values_ptr, Dinv_ptr,
b_ptr, x_ptr, this->weight, num_rows_per_line, i, n, x_ptr);
}
}
cudaCheckError();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void FixcolorGaussSeidelSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_4x4(const Matrix_d &A, const VVector &b, VVector &x, ViewType separation_flag)
{
const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
const IndexType *A_column_indices_ptr = A.col_indices.raw();
const IndexType *A_dia_idx_ptr = A.diag.raw();
const ValueTypeA *A_nonzero_values_ptr = A.values.raw();
const ValueTypeB *b_ptr = b.raw();
//const IndexType *A_sorted_rows_by_color_ptr = A.getMatrixColoring().getSortedRowsByColor().raw();
const ValueTypeA *Dinv_ptr = this->Dinv.raw();
ValueTypeB *x_ptr = x.raw();
int n = A.get_num_rows();
int num_rows_per_line;
num_rows_per_line = (int) ceil(cbrt((double)n) / 2);
for (int i = 0; i < this->num_colors; i++)
{
const IndexType num_rows_per_color = n / this->num_colors + 1;
const int threads_per_block = 512;
const int eightwarps_per_block = threads_per_block / 4;
const int num_blocks = min( AMGX_GRID_MAX_SIZE, (int)(num_rows_per_color - 1) / eightwarps_per_block + 1 );
cudaFuncSetCacheConfig(multicolorGSSmooth4by4BlockDiaCsrKernel_NAIVE_tex_readDinv2<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2>, cudaFuncCachePreferL1);
multicolorGSSmooth4by4BlockDiaCsrKernel_NAIVE_tex_readDinv2<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2> <<< num_blocks, threads_per_block>>>
(A_row_offsets_ptr, A_column_indices_ptr, A_dia_idx_ptr, A_nonzero_values_ptr, Dinv_ptr,
b_ptr, x_ptr, this->weight, num_rows_per_line, i, n, x_ptr);
cudaCheckError();
}
if (this->symFlag == 1)
{
for (int i = this->num_colors - 1; i >= 0; i--)
{
const IndexType num_rows_per_color = n / this->num_colors + 1;
const int threads_per_block = 512;
const int eightwarps_per_block = threads_per_block / 4;
const int num_blocks = min( AMGX_GRID_MAX_SIZE, (int)(num_rows_per_color - 1) / eightwarps_per_block + 1 );
cudaFuncSetCacheConfig(multicolorGSSmooth4by4BlockDiaCsrKernel_NAIVE_tex_readDinv2<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2>, cudaFuncCachePreferL1);
multicolorGSSmooth4by4BlockDiaCsrKernel_NAIVE_tex_readDinv2<IndexType, ValueTypeA, ValueTypeB, eightwarps_per_block, 4, 2, 2> <<< num_blocks, threads_per_block>>>
(A_row_offsets_ptr, A_column_indices_ptr, A_dia_idx_ptr, A_nonzero_values_ptr, Dinv_ptr,
b_ptr, x_ptr, this->weight, num_rows_per_line, i, n, x_ptr);
cudaCheckError();
} // End of loop over colors
} // End of if symFlag
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void FixcolorGaussSeidelSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_3x3(const Matrix_d &A, const VVector &b, VVector &x, ViewType separation_flag)
{
Matrix_d *A_as_matrix = (Matrix_d *) this->m_A;
if (!A.hasProps(COLORING)) { FatalError("Matrix is not colored, exiting", AMGX_ERR_BAD_PARAMETERS); }
const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
const IndexType *A_column_indices_ptr = A.col_indices.raw();
const IndexType *A_dia_idx_ptr = A.diag.raw();
const ValueTypeA *A_nonzero_values_ptr = A.values.raw();
const ValueTypeB *b_ptr = b.raw();
//TODO: generate required indices on the fly since coloring is not requested for this solver
const IndexType *A_sorted_rows_by_color_ptr = A.getMatrixColoring().getSortedRowsByColor().raw();
const ValueTypeA *Dinv_ptr = this->Dinv.raw();
ValueTypeB *x_ptr = x.raw();
const int threads_per_block = 64;
int num_colors = this->num_colors;
for (int i = 0; i < num_colors; i++)
{
const IndexType color_offset = ((separation_flag & INTERIOR) == 0) ? A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[i] : A.getMatrixColoring().getOffsetsRowsPerColor()[i];
const IndexType num_rows_per_color = ((separation_flag == A_as_matrix->getViewInterior()) ? A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[i] : A.getMatrixColoring().getOffsetsRowsPerColor()[i + 1]) - color_offset;
if (num_rows_per_color <= 0) { continue; }
const int threads_per_block = 512;
const int blockrows_per_cta = threads_per_block / 3;
const int num_blocks = min( AMGX_GRID_MAX_SIZE, (int) (num_rows_per_color - 1) / blockrows_per_cta + 1);
cudaFuncSetCacheConfig(multicolorGSSmoothBlockDiaCsrKernel_NAIVE_tex_readDinv2<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, 3>, cudaFuncCachePreferL1);
multicolorGSSmoothBlockDiaCsrKernel_NAIVE_tex_readDinv2<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, 3> <<< num_blocks, threads_per_block>>>
(A_row_offsets_ptr, A_column_indices_ptr, A_dia_idx_ptr, A_nonzero_values_ptr, Dinv_ptr,
b_ptr, x_ptr, this->weight, A_sorted_rows_by_color_ptr + color_offset, num_rows_per_color, A.get_num_rows(), x_ptr);
cudaCheckError();
}
if (this->symFlag == 1)
{
for (int i = num_colors - 1; i >= 0; i--)
{
const IndexType color_offset = ((separation_flag & INTERIOR) == 0) ? A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[i] : A.getMatrixColoring().getOffsetsRowsPerColor()[i];
const IndexType num_rows_per_color = ((separation_flag == A_as_matrix->getViewInterior()) ? A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[i] : A.getMatrixColoring().getOffsetsRowsPerColor()[i + 1]) - color_offset;
if (num_rows_per_color == 0) { continue; }
const int blockrows_per_cta = threads_per_block / 3;
const int num_blocks = min( AMGX_GRID_MAX_SIZE, (int) (num_rows_per_color - 1) / blockrows_per_cta + 1);
cudaFuncSetCacheConfig(multicolorGSSmoothBlockDiaCsrKernel_NAIVE_tex_readDinv2<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, 3>, cudaFuncCachePreferL1);
multicolorGSSmoothBlockDiaCsrKernel_NAIVE_tex_readDinv2<IndexType, ValueTypeA, ValueTypeB, blockrows_per_cta, 3> <<< num_blocks, threads_per_block>>>
(A_row_offsets_ptr, A_column_indices_ptr, A_dia_idx_ptr, A_nonzero_values_ptr, Dinv_ptr,
b_ptr, x_ptr, this->weight, A_sorted_rows_by_color_ptr + color_offset, num_rows_per_color, A.get_num_rows(), x_ptr);
cudaCheckError();
} // End of loop over colors
} // End of if symFlag
}
template <>
void FixcolorGaussSeidelSolver<TemplateConfig<AMGX_device, AMGX_vecDouble, AMGX_matFloat, AMGX_indInt> >::smooth_BxB(Matrix<TemplateConfig<AMGX_device, AMGX_vecDouble, AMGX_matFloat, AMGX_indInt> > &A, Vector<TemplateConfig<AMGX_device, AMGX_vecDouble, AMGX_matFloat, AMGX_indInt> > &b, Vector<TemplateConfig<AMGX_device, AMGX_vecDouble, AMGX_matFloat, AMGX_indInt> > &x, ViewType separation_flag)
{
FatalError("Mixed precision is not supported", AMGX_ERR_NOT_IMPLEMENTED);
};
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void FixcolorGaussSeidelSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_BxB(Matrix_d &A, VVector &b, VVector &x, ViewType separation_flag)
{
if (!A.hasProps(COLORING)) { FatalError("Matrix is not colored, exiting", AMGX_ERR_BAD_PARAMETERS); }
int num_colors = this->num_colors;
VVector y(b.size());
thrust::copy(b.begin(), b.end(), y.begin()); // y= b for all colors
cudaCheckError();
for (int color = 0; color < num_colors; color++)
{
Cusparse::bsrmv(Cusparse::ALL_COLORS, color, (ValueTypeB) - 1.0f, A, x, (ValueTypeB)1.0f, y, separation_flag); // y= -A.x + y for current color
Cusparse::bsrmv(color, (ValueTypeB)this->weight, A, this->Dinv, y, (ValueTypeB)1.0f, x, separation_flag); // x= w*Dinv.y + x for current color
}
cudaCheckError();
if (this->symFlag)
{
y = b; // y= b for all colors
for (int color = num_colors - 1; color >= 0; color--)
{
Cusparse::bsrmv(Cusparse::ALL_COLORS, color, (ValueTypeB) - 1.0f, A, x, (ValueTypeB)1.0f, y, separation_flag); // y= -A.x + y for current color
Cusparse::bsrmv(color, (ValueTypeB)this->weight, A, this->Dinv, y, (ValueTypeB)1.0f, x, separation_flag); // x= w*Dinv.y + x for current color
}
}
cudaCheckError();
}
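// In matrix terms, each colored sweep above performs, for the rows of one color c
// and the current iterate x,
//     y_c = b_c - (A * x)_c              (bsrmv with alpha = -1, beta = 1)
//     x_c = x_c + weight * (Dinv * y)_c  (bsrmv restricted to color c)
// so sweeping the colors in sequence yields a weighted block Gauss-Seidel update,
// and the symmetric variant repeats the sweep with the color order reversed.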
// Solve one iteration
template<class T_Config>
bool
FixcolorGaussSeidelSolver_Base<T_Config>::solve_iteration( VVector &b, VVector &x, bool xIsZero)
{
Matrix<T_Config> *A_as_matrix = (Matrix<T_Config> *) this->m_A;
if (xIsZero) { x.dirtybit = 0; }
if (!A_as_matrix->is_matrix_singleGPU())
{
A_as_matrix->manager->exchange_halo_async(x, x.tag);
A_as_matrix->manager->exchange_halo_async(b, b.tag);
}
if (A_as_matrix->getViewExterior() == A_as_matrix->getViewInterior())
{
if (!A_as_matrix->is_matrix_singleGPU())
{
A_as_matrix->manager->exchange_halo_wait(x, x.tag);
A_as_matrix->manager->exchange_halo_wait(b, b.tag);
}
}
if ((A_as_matrix->get_block_dimx() == 4 && A_as_matrix->get_block_dimy() == 4) || (A_as_matrix->get_block_dimx() == 1 && A_as_matrix->get_block_dimy() == 1))
{
if (!A_as_matrix->is_matrix_singleGPU())
{
A_as_matrix->manager->exchange_halo_wait(x, x.tag);
A_as_matrix->manager->exchange_halo_wait(b, b.tag);
}
}
ViewType oldView = A_as_matrix->currentView();
bool latencyHiding = true;
ViewType flags;
if (A_as_matrix->is_matrix_singleGPU() || (x.dirtybit == 0 && b.dirtybit == 0))
{
latencyHiding = false;
A_as_matrix->setViewExterior();
flags = A_as_matrix->getViewExterior();
}
else
{
flags = A_as_matrix->getViewInterior();
A_as_matrix->setViewInterior();
}
if (xIsZero)
{
thrust::fill(x.begin(), x.end(), 0.);
cudaCheckError();
}
if (this->use_bsrxmv && A_as_matrix->get_block_dimx() == A_as_matrix->get_block_dimy())
{
smooth_BxB(*A_as_matrix, b, x, flags);
}
else if ( A_as_matrix->get_block_dimx() == 4 && A_as_matrix->get_block_dimy() == 4 )
{
smooth_4x4(*A_as_matrix, b, x, flags);
}
else if ( A_as_matrix->get_block_dimx() == 3 && A_as_matrix->get_block_dimy() == 3 )
{
smooth_3x3(*A_as_matrix, b, x, flags);
}
else if ( A_as_matrix->get_block_dimx() == 1 && A_as_matrix->get_block_dimy() == 1 )
{
smooth_1x1(*A_as_matrix, b, x, flags);
}
else
{
FatalError("Unsupported block size for MulticolorGaussSeidelSolver smooth", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
if (latencyHiding)
{
if (!A_as_matrix->is_matrix_singleGPU())
{
A_as_matrix->manager->exchange_halo_wait(x, x.tag);
A_as_matrix->manager->exchange_halo_wait(b, b.tag);
}
A_as_matrix->setViewExterior();
flags = (ViewType)(~(A_as_matrix->getViewInterior()) & A_as_matrix->getViewExterior());
if (flags != 0)
{
if (this->use_bsrxmv && A_as_matrix->get_block_dimx() == A_as_matrix->get_block_dimy())
{
smooth_BxB(*A_as_matrix, b, x, flags);
}
else if ( A_as_matrix->get_block_dimx() == 4 && A_as_matrix->get_block_dimy() == 4 )
{
smooth_4x4(*A_as_matrix, b, x, flags);
}
else if ( A_as_matrix->get_block_dimx() == 3 && A_as_matrix->get_block_dimy() == 3 )
{
smooth_3x3(*A_as_matrix, b, x, flags);
}
else if ( A_as_matrix->get_block_dimx() == 1 && A_as_matrix->get_block_dimy() == 1 )
{
smooth_1x1(*A_as_matrix, b, x, flags);
}
}
}
x.dirtybit = 1;
A_as_matrix->setView(oldView);
return this->converged( b, x );
}
template<class T_Config>
void
FixcolorGaussSeidelSolver_Base<T_Config>::solve_finalize(VVector &b, VVector &x)
{
}
/****************************************
* Explicit instantiations
***************************************/
#define AMGX_CASE_LINE(CASE) template class FixcolorGaussSeidelSolver_Base<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class FixcolorGaussSeidelSolver<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
}
} // namespace amgx
#include "cuda_helper.h"
static __constant__ uint64_t c_PaddedMessage80[16]; // padded message (80 bytes + padding)
static __constant__ uint32_t statebufferpre[8];
static __constant__ uint32_t statechainvpre[40];
#define MULT2(a,j) {\
tmp = a[7+(8*j)];\
a[7+(8*j)] = a[6+(8*j)];\
a[6+(8*j)] = a[5+(8*j)];\
a[5+(8*j)] = a[4+(8*j)];\
a[4+(8*j)] = a[3+(8*j)] ^ tmp;\
a[3+(8*j)] = a[2+(8*j)] ^ tmp;\
a[2+(8*j)] = a[1+(8*j)];\
a[1+(8*j)] = a[0+(8*j)] ^ tmp;\
a[0+(8*j)] = tmp;\
}
#define TWEAK(a0,a1,a2,a3,j) { \
a0 = (a0<<(j))|(a0>>(32-j));\
a1 = (a1<<(j))|(a1>>(32-j));\
a2 = (a2<<(j))|(a2>>(32-j));\
a3 = (a3<<(j))|(a3>>(32-j));\
}
#define STEP(c0,c1) { \
SUBCRUMB(chainv[0],chainv[1],chainv[2],chainv[3],tmp);\
SUBCRUMB(chainv[5],chainv[6],chainv[7],chainv[4],tmp);\
MIXWORD(chainv[0],chainv[4]);\
MIXWORD(chainv[1],chainv[5]);\
MIXWORD(chainv[2],chainv[6]);\
MIXWORD(chainv[3],chainv[7]);\
ADD_CONSTANT(chainv[0],chainv[4],c0,c1);\
}
#define SUBCRUMB(a0,a1,a2,a3,a4)\
a4 = a0;\
a0 |= a1;\
a2 ^= a3;\
a1 = ~a1;\
a0 ^= a3;\
a3 &= a4;\
a1 ^= a3;\
a3 ^= a2;\
a2 &= a0;\
a0 = ~a0;\
a2 ^= a1;\
a1 |= a3;\
a4 ^= a1;\
a3 ^= a2;\
a2 &= a1;\
a1 ^= a0;\
a0 = a4;
#define MIXWORD(a0,a4)\
a4 ^= a0;\
a0 = (a0<<2) | (a0>>(30));\
a0 ^= a4;\
a4 = (a4<<14) | (a4>>(18));\
a4 ^= a0;\
a0 = (a0<<10) | (a0>>(22));\
a0 ^= a4;\
a4 = (a4<<1) | (a4>>(31));
#define ADD_CONSTANT(a0,b0,c0,c1)\
a0 ^= c0;\
b0 ^= c1;
/* initial values of chaining variables */
__constant__ uint32_t c_IV[40];
static const uint32_t h_IV[40] = {
0x6d251e69,0x44b051e0,0x4eaa6fb4,0xdbf78465,
0x6e292011,0x90152df4,0xee058139,0xdef610bb,
0xc3b44b95,0xd9d2f256,0x70eee9a0,0xde099fa3,
0x5d9b0557,0x8fc944b3,0xcf1ccf0e,0x746cd581,
0xf7efc89d,0x5dba5781,0x04016ce5,0xad659c05,
0x0306194f,0x666d1836,0x24aa230a,0x8b264ae7,
0x858075d5,0x36d79cce,0xe571f7d7,0x204b1f67,
0x35870c6a,0x57e9e923,0x14bcb808,0x7cde72ce,
0x6c68e9be,0x5ec41e22,0xc825b7c7,0xaffb4363,
0xf5df3999,0x0fc688f1,0xb07224cc,0x03e86cea};
__constant__ uint32_t c_CNS[80];
static const uint32_t h_CNS[80] = {
0x303994a6,0xe0337818,0xc0e65299,0x441ba90d,
0x6cc33a12,0x7f34d442,0xdc56983e,0x9389217f,
0x1e00108f,0xe5a8bce6,0x7800423d,0x5274baf4,
0x8f5b7882,0x26889ba7,0x96e1db12,0x9a226e9d,
0xb6de10ed,0x01685f3d,0x70f47aae,0x05a17cf4,
0x0707a3d4,0xbd09caca,0x1c1e8f51,0xf4272b28,
0x707a3d45,0x144ae5cc,0xaeb28562,0xfaa7ae2b,
0xbaca1589,0x2e48f1c1,0x40a46f3e,0xb923c704,
0xfc20d9d2,0xe25e72c1,0x34552e25,0xe623bb72,
0x7ad8818f,0x5c58a4a4,0x8438764a,0x1e38e2e7,
0xbb6de032,0x78e38b9d,0xedb780c8,0x27586719,
0xd9847356,0x36eda57f,0xa2c78434,0x703aace7,
0xb213afa5,0xe028c9bf,0xc84ebe95,0x44756f91,
0x4e608a22,0x7e8fce32,0x56d858fe,0x956548be,
0x343b138f,0xfe191be2,0xd0ec4e3d,0x3cb226e5,
0x2ceb4882,0x5944a28e,0xb3ad2208,0xa1c4c355,
0xf0d2e9e3,0x5090d577,0xac11d7fa,0x2d1925ab,
0x1bcb66f2,0xb46496ac,0x6f2d9bc9,0xd1925ab0,
0x78602649,0x29131ab6,0x8edae952,0x0fc053c3,
0x3b6ba548,0x3f014f0c,0xedae9520,0xfc053c31};
/***************************************************/
__device__ __forceinline__
void rnd512(uint32_t *statebuffer, uint32_t *statechainv)
{
int i,j;
uint32_t t[40];
uint32_t chainv[8];
uint32_t tmp;
#pragma unroll 8
for(i=0; i<8; i++) {
t[i]=0;
#pragma unroll 5
for(j=0; j<5; j++)
t[i] ^= statechainv[i+8*j];
}
MULT2(t, 0);
#pragma unroll 5
for(j=0; j<5; j++) {
#pragma unroll 8
for(i=0; i<8; i++)
statechainv[i+8*j] ^= t[i];
}
#pragma unroll 5
for(j=0; j<5; j++) {
#pragma unroll 8
for(i=0; i<8; i++)
t[i+8*j] = statechainv[i+8*j];
}
#pragma unroll
for(j=0; j<5; j++)
MULT2(statechainv, j);
#pragma unroll 5
for(j=0; j<5; j++) {
#pragma unroll 8
for(i=0; i<8; i++)
statechainv[8*j+i] ^= t[8*((j+1)%5)+i];
}
#pragma unroll 5
for(j=0; j<5; j++) {
#pragma unroll 8
for(i=0; i<8; i++)
t[i+8*j] = statechainv[i+8*j];
}
#pragma unroll
for(j=0; j<5; j++)
MULT2(statechainv, j);
#pragma unroll 5
for(j=0; j<5; j++) {
#pragma unroll 8
for(i=0; i<8; i++)
statechainv[8*j+i] ^= t[8*((j+4)%5)+i];
}
#pragma unroll 5
for(j=0; j<5; j++) {
#pragma unroll 8
for(i=0; i<8; i++)
statechainv[i+8*j] ^= statebuffer[i];
MULT2(statebuffer, 0);
}
#pragma unroll
for(i=0; i<8; i++)
chainv[i] = statechainv[i];
#pragma unroll
for(i=0; i<8; i++)
STEP(c_CNS[(2*i)], c_CNS[(2*i)+1]);
#pragma unroll
for(i=0; i<8; i++) {
statechainv[i] = chainv[i];
chainv[i] = statechainv[i+8];
}
TWEAK(chainv[4],chainv[5],chainv[6],chainv[7],1);
#pragma unroll
for(i=0; i<8; i++)
STEP(c_CNS[(2*i)+16], c_CNS[(2*i)+16+1]);
#pragma unroll
for(i=0; i<8; i++) {
statechainv[i+8] = chainv[i];
chainv[i] = statechainv[i+16];
}
TWEAK(chainv[4],chainv[5],chainv[6],chainv[7],2);
#pragma unroll
for(i=0; i<8; i++)
STEP(c_CNS[(2*i)+32],c_CNS[(2*i)+32+1]);
#pragma unroll
for(i=0; i<8; i++) {
statechainv[i+16] = chainv[i];
chainv[i] = statechainv[i+24];
}
TWEAK(chainv[4],chainv[5],chainv[6],chainv[7],3);
#pragma unroll
for(i=0; i<8; i++)
STEP(c_CNS[(2*i)+48],c_CNS[(2*i)+48+1]);
#pragma unroll
for(i=0; i<8; i++) {
statechainv[i+24] = chainv[i];
chainv[i] = statechainv[i+32];
}
TWEAK(chainv[4],chainv[5],chainv[6],chainv[7],4);
#pragma unroll
for(i=0; i<8; i++)
STEP(c_CNS[(2*i)+64],c_CNS[(2*i)+64+1]);
#pragma unroll 8
for(i=0; i<8; i++)
statechainv[i+32] = chainv[i];
}
static void rnd512_cpu(uint32_t *statebuffer, uint32_t *statechainv)
{
int i, j;
uint32_t t[40];
uint32_t chainv[8];
uint32_t tmp;
for (i = 0; i<8; i++) {
t[i] = statechainv[i];
for (j = 1; j<5; j++)
t[i] ^= statechainv[i + 8 * j];
}
MULT2(t, 0);
for (j = 0; j<5; j++) {
for (i = 0; i<8; i++)
statechainv[i + 8 * j] ^= t[i];
}
for (j = 0; j<5; j++) {
for (i = 0; i<8; i++)
t[i + 8 * j] = statechainv[i + 8 * j];
}
for (j = 0; j<5; j++)
MULT2(statechainv, j);
for (j = 0; j<5; j++) {
for (i = 0; i<8; i++)
statechainv[8 * j + i] ^= t[8 * ((j + 1) % 5) + i];
}
for (j = 0; j<5; j++) {
for (i = 0; i<8; i++)
t[i + 8 * j] = statechainv[i + 8 * j];
}
for (j = 0; j<5; j++)
MULT2(statechainv, j);
for (j = 0; j<5; j++) {
for (i = 0; i<8; i++)
statechainv[8 * j + i] ^= t[8 * ((j + 4) % 5) + i];
}
for (j = 0; j<5; j++) {
for (i = 0; i<8; i++)
statechainv[i + 8 * j] ^= statebuffer[i];
MULT2(statebuffer, 0);
}
for (i = 0; i<8; i++)
chainv[i] = statechainv[i];
for (i = 0; i<8; i++)
STEP(h_CNS[(2 * i)], h_CNS[(2 * i) + 1]);
for (i = 0; i<8; i++) {
statechainv[i] = chainv[i];
chainv[i] = statechainv[i + 8];
}
TWEAK(chainv[4], chainv[5], chainv[6], chainv[7], 1);
for (i = 0; i<8; i++)
STEP(h_CNS[(2 * i) + 16], h_CNS[(2 * i) + 16 + 1]);
for (i = 0; i<8; i++) {
statechainv[i + 8] = chainv[i];
chainv[i] = statechainv[i + 16];
}
TWEAK(chainv[4], chainv[5], chainv[6], chainv[7], 2);
for (i = 0; i<8; i++)
STEP(h_CNS[(2 * i) + 32], h_CNS[(2 * i) + 32 + 1]);
for (i = 0; i<8; i++) {
statechainv[i + 16] = chainv[i];
chainv[i] = statechainv[i + 24];
}
TWEAK(chainv[4], chainv[5], chainv[6], chainv[7], 3);
for (i = 0; i<8; i++)
STEP(h_CNS[(2 * i) + 48], h_CNS[(2 * i) + 48 + 1]);
for (i = 0; i<8; i++) {
statechainv[i + 24] = chainv[i];
chainv[i] = statechainv[i + 32];
}
TWEAK(chainv[4], chainv[5], chainv[6], chainv[7], 4);
for (i = 0; i<8; i++)
STEP(h_CNS[(2 * i) + 64], h_CNS[(2 * i) + 64 + 1]);
for (i = 0; i<8; i++)
statechainv[i + 32] = chainv[i];
}
/***************************************************/
__device__ __forceinline__
void Update512(uint32_t* statebuffer, uint32_t *statechainv, const uint32_t *const __restrict__ data)
{
#pragma unroll
for (int i = 0; i<8; i++)
statebuffer[i] = cuda_swab32((data[i]));
rnd512(statebuffer, statechainv);
#pragma unroll
for(int i=0; i<8; i++)
statebuffer[i] = cuda_swab32((data[i+8]));
rnd512(statebuffer, statechainv);
#pragma unroll
for(int i=0; i<4; i++)
statebuffer[i] = cuda_swab32((data[i+16]));
}
/***************************************************/
__device__ __forceinline__
void finalization512(uint32_t* statebuffer, uint32_t *statechainv, uint32_t *b)
{
int i,j;
statebuffer[4] = 0x80000000U;
#pragma unroll 3
for(int i=5; i<8; i++)
statebuffer[i] = 0;
rnd512(statebuffer, statechainv);
/*---- blank round with m=0 ----*/
#pragma unroll
for(i=0; i<8; i++)
statebuffer[i] =0;
rnd512(statebuffer, statechainv);
#pragma unroll
for(i=0; i<8; i++) {
b[i] = 0;
#pragma unroll 5
for(j=0; j<5; j++)
b[i] ^= statechainv[i+8*j];
b[i] = cuda_swab32((b[i]));
}
#pragma unroll
for(i=0; i<8; i++)
statebuffer[i]=0;
rnd512(statebuffer, statechainv);
#pragma unroll
for(i=0; i<8; i++)
{
b[8+i] = 0;
#pragma unroll 5
for(j=0; j<5; j++)
b[8+i] ^= statechainv[i+8*j];
b[8+i] = cuda_swab32((b[8+i]));
}
}
/***************************************************/
__global__
void qubit_luffa512_gpu_hash_80(uint32_t threads, uint32_t startNounce, uint32_t *outputHash)
{
uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
if (thread < threads)
{
uint32_t nounce = startNounce + thread;
union {
uint64_t buf64[16];
uint32_t buf32[32];
} buff;
#pragma unroll 8
for (int i=8; i < 16; i++)
buff.buf64[i] = c_PaddedMessage80[i];
// replace the nonce with the thread-specific one
buff.buf64[9] = REPLACE_HIDWORD(buff.buf64[9], cuda_swab32(nounce));
uint32_t statebuffer[8], statechainv[40];
#pragma unroll
for (int i = 0; i<4; i++)
statebuffer[i] = cuda_swab32(buff.buf32[i + 16]);
#pragma unroll 4
for (int i = 4; i<8; i++)
statebuffer[i] = statebufferpre[i];
#pragma unroll
for (int i = 0; i<40; i++)
statechainv[i] = statechainvpre[i];
uint32_t *outHash = &outputHash[thread * 16];
finalization512(statebuffer, statechainv, outHash);
}
}
__host__
void qubit_luffa512_cpu_init(int thr_id, uint32_t threads)
{
CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_IV, h_IV, sizeof(h_IV), 0, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_CNS, h_CNS, sizeof(h_CNS), 0, cudaMemcpyHostToDevice));
}
__host__
void qubit_luffa512_cpu_hash_80(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_outputHash,int order)
{
const uint32_t threadsperblock = 256;
dim3 grid((threads + threadsperblock-1)/threadsperblock);
dim3 block(threadsperblock);
size_t shared_size = 0;
qubit_luffa512_gpu_hash_80 <<<grid, block, shared_size>>> (threads, startNounce, d_outputHash);
}
__host__
void qubit_cpu_precalc(uint32_t* message)
{
uint32_t statebuffer[8];
uint32_t statechainv[40] =
{
0x6d251e69, 0x44b051e0, 0x4eaa6fb4, 0xdbf78465,
0x6e292011, 0x90152df4, 0xee058139, 0xdef610bb,
0xc3b44b95, 0xd9d2f256, 0x70eee9a0, 0xde099fa3,
0x5d9b0557, 0x8fc944b3, 0xcf1ccf0e, 0x746cd581,
0xf7efc89d, 0x5dba5781, 0x04016ce5, 0xad659c05,
0x0306194f, 0x666d1836, 0x24aa230a, 0x8b264ae7,
0x858075d5, 0x36d79cce, 0xe571f7d7, 0x204b1f67,
0x35870c6a, 0x57e9e923, 0x14bcb808, 0x7cde72ce,
0x6c68e9be, 0x5ec41e22, 0xc825b7c7, 0xaffb4363,
0xf5df3999, 0x0fc688f1, 0xb07224cc, 0x03e86cea
};
for (int i = 0; i<8; i++)
statebuffer[i] = cuda_swab32(message[i]);
rnd512_cpu(statebuffer, statechainv);
for (int i = 0; i<8; i++)
statebuffer[i] = cuda_swab32(message[i+8]);
rnd512_cpu(statebuffer, statechainv);
cudaMemcpyToSymbol(statebufferpre, statebuffer, sizeof(statebuffer), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(statechainvpre, statechainv, sizeof(statechainv), 0, cudaMemcpyHostToDevice);
}
__host__
void qubit_luffa512_cpu_setBlock_80(void *pdata)
{
unsigned char PaddedMessage[128];
memcpy(PaddedMessage, pdata, 80);
memset(PaddedMessage+80, 0, 48);
PaddedMessage[80] = 0x80;
PaddedMessage[111] = 1;
PaddedMessage[126] = 0x02;
PaddedMessage[127] = 0x80;
CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_PaddedMessage80, PaddedMessage, sizeof(PaddedMessage), 0, cudaMemcpyHostToDevice));
qubit_cpu_precalc((uint32_t*) PaddedMessage);
}
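/* Usage sketch (illustrative only): the host entry points above are intended to be
   called in this order for each scanning round. thr_id, throughput, pdata, start_nonce
   and d_hash are assumptions standing in for the surrounding miner framework, which is
   not part of this file. */
static void qubit_luffa512_scan_example(int thr_id, uint32_t throughput, uint32_t *pdata,
uint32_t start_nonce, uint32_t *d_hash)
{
qubit_luffa512_cpu_init(thr_id, throughput); // once per device: upload IV and round-constant tables
qubit_luffa512_cpu_setBlock_80(pdata); // per work item: pad the 80-byte header and precompute the first rounds
qubit_luffa512_cpu_hash_80(thr_id, throughput, start_nonce, d_hash, 0); // hash one nonce range
}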
#include "BilateralFilter.h"
#include <iostream>
#include <cmath>
using namespace std;
// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Define the default thread block dimensions.
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 8
// Texture memory can only be declared as a global variable. Plain global memory
// would require explicit boundary checks and, in our tests, is slower than texture memory.
static texture<unsigned char, 2, cudaReadModeElementType> _bilateralInimgTex;
// Host function: _initTexture (initialize texture memory)
// Binds the input image data to the texture reference.
static __host__ int // return value: NO_ERROR on success
_initTexture(
Image *insubimg // input image
);
// Kernel function: _bilateralFilterKer (bilateral filtering implemented with ImageCuda)
// The spatial parameter only affects the Gaussian table, so it is enough to
// initialize the Gaussian table before calling this kernel.
static __global__ void // kernel functions have no return value
_bilateralFilterKer(
ImageCuda outimg, // output image
int radius, // bilateral filter radius
TemplateCuda gauCud, // Gaussian table
TemplateCuda euCud // Euclidean distance table
);
// Host function: _initTexture (initialize texture memory)
static __host__ int _initTexture(Image *inimg)
{
cudaError_t cuerrcode;
int errcode; // local variable, error code
// Copy the input image to device memory.
errcode = ImageBasicOp::copyToCurrentDevice(inimg);
if (errcode != NO_ERROR)
return errcode;
// Extract the ROI sub-image of the input image.
ImageCuda insubimgCud;
errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
if (errcode != NO_ERROR)
return errcode;
// Set up the channel format descriptor. The image has a single (grayscale) channel,
// so only the first component of the descriptor carries data. The descriptor is
// used when binding the texture.
struct cudaChannelFormatDesc chndesc;
chndesc = cudaCreateChannelDesc(sizeof (unsigned char) * 8, 0, 0, 0,
cudaChannelFormatKindUnsigned);
// Bind the input image data to texture memory.
cuerrcode = cudaBindTexture2D(
NULL, &_bilateralInimgTex, insubimgCud.imgMeta.imgData, &chndesc,
insubimgCud.imgMeta.width, insubimgCud.imgMeta.height,
insubimgCud.pitchBytes);
if (cuerrcode != cudaSuccess)
return CUDA_ERROR;
return NO_ERROR;
}
// Kernel function: _bilateralFilterKer (bilateral filtering implemented with ImageCuda)
static __global__ void _bilateralFilterKer(
ImageCuda outimg, int radius, TemplateCuda gauCud, TemplateCuda euCud)
{
// Return immediately if the given radius is out of range.
if (radius <= 0 || radius > DEF_FILTER_RANGE)
return;
// Number of entries in the Gaussian template for this radius.
int gi = (2 * radius + 1) * (2 * radius + 1);
// Compute the output pixel position handled by this thread. dstc and dstr are the
// x and y components of that pixel (c stands for column, r for row). Because we use
// a parallelism-reduction strategy, each thread processes 4 output pixels that lie
// in the same column on 4 adjacent rows, so dstr has to be multiplied by 4.
int dstc = blockIdx.x * blockDim.x + threadIdx.x;
int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4;
// Check whether the first pixel is out of bounds. If it is, skip it: this saves
// computation and prevents the out-of-bounds access from crashing the kernel.
if (dstc >= outimg.imgMeta.width || dstr >= outimg.imgMeta.height)
return;
// Compute the array index of the first output pixel in the image data.
int dstidx = dstr * outimg.pitchBytes + dstc;
// Accumulated sum of the filter weights for each of the 4 output pixels.
float sum[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
// Temporary variable holding the current weight.
float factor;
// Accumulated sum of the weighted neighborhood pixel values.
float t[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
// Fetch the pixel values of the center points: the four pixels in the same column
// that this thread processes.
unsigned char center[4];
// First center point.
center[0] = tex2D(_bilateralInimgTex, dstc, dstr);
// Second center point, directly below the first.
center[1] = tex2D(_bilateralInimgTex, dstc, dstr + 1);
// Third center point in the same column.
center[2] = tex2D(_bilateralInimgTex, dstc, dstr + 2);
// Fourth center point in the same column.
center[3] = tex2D(_bilateralInimgTex, dstc, dstr + 3);
for (int col = 0; col < gi; col++)
{
// Get the row/column offsets of the current template point.
int i = gauCud.tplMeta.tplData[2 * col],
j = gauCud.tplMeta.tplData[2 * col + 1];
// Fetch the pixel value of the current neighborhood point.
unsigned char curPix = tex2D(_bilateralInimgTex,
dstc + j, dstr + i);
// Absolute intensity difference between this point and the center point.
unsigned char euindex = curPix > center[0] ? curPix - center[0] :
center[0] - curPix;
// Product of the spatial (Gaussian) weight and the range (Euclidean) weight.
factor = gauCud.attachedData[col] * euCud.attachedData[euindex];
t[0] += factor * curPix;
sum[0] += factor;
// The next three pixels each lie one row below the previous one while the x
// coordinate stays the same, so only the y component needs a bounds check.
// Because we are inside the loop body we cannot simply ++dstr here, and we must
// not return when a row is out of range, otherwise the neighborhood scan would
// stop and the output pixels would never be written.
if (dstr + 1 >= outimg.imgMeta.height)
continue;
// Fetch the neighborhood pixel for the second row.
curPix = tex2D(_bilateralInimgTex, dstc + j, dstr + i + 1);
// Absolute intensity difference from the second center point.
euindex = curPix > center[1] ? curPix - center[1] :
center[1] - curPix;
// Product of the spatial and range weights.
factor = gauCud.attachedData[col] * euCud.attachedData[euindex];
t[1] += factor * curPix;
sum[1] += factor;
// Same bounds check for the third row (see the note above).
if (dstr + 2 >= outimg.imgMeta.height)
continue;
// Fetch the neighborhood pixel for the third row.
curPix = tex2D(_bilateralInimgTex, dstc + j, dstr + i + 2);
// Absolute intensity difference from the third center point.
euindex = curPix > center[2] ? curPix - center[2] :
center[2] - curPix;
// Product of the spatial and range weights.
factor = gauCud.attachedData[col] * euCud.attachedData[euindex];
t[2] += factor * curPix;
sum[2] += factor;
// Same bounds check for the fourth row (see the note above).
if (dstr + 3 >= outimg.imgMeta.height)
continue;
// Fetch the neighborhood pixel for the fourth row.
curPix = tex2D(_bilateralInimgTex, dstc + j, dstr + i + 3);
// Absolute intensity difference from the fourth center point.
euindex = curPix > center[3] ? curPix - center[3] :
center[3] - curPix;
// Product of the spatial and range weights.
factor = gauCud.attachedData[col] * euCud.attachedData[euindex];
t[3] += factor * curPix;
sum[3] += factor;
}
// Write the first output pixel.
outimg.imgMeta.imgData[dstidx] = (unsigned char)(t[0] / sum[0]);
// If the next row is out of range we can simply return here.
if (++dstr >= outimg.imgMeta.height)
return;
// Advance the data index by one row.
dstidx += outimg.pitchBytes;
// Write the second output pixel.
outimg.imgMeta.imgData[dstidx] = (unsigned char)(t[1] / sum[1]);
// Prepare to process the third pixel.
if (++dstr >= outimg.imgMeta.height)
return;
dstidx += outimg.pitchBytes;
outimg.imgMeta.imgData[dstidx] = (unsigned char)(t[2] / sum[2]);
// Process the fourth pixel.
if (++dstr >= outimg.imgMeta.height)
return;
dstidx += outimg.pitchBytes;
outimg.imgMeta.imgData[dstidx] = (unsigned char)(t[3] / sum[3]);
}
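/* Reference implementation (illustrative sketch only, not part of the library): the
   per-pixel weighting that _bilateralFilterKer accumulates above, written as plain host
   code for a single output pixel. The flat neighborhood arrays and their length are
   hypothetical stand-ins for the template data used by the kernel. */
static inline unsigned char _bilateralPixelReference(
const unsigned char *neighborhood, // neighborhood pixel values, one per template entry
const float *spatialWeight, // Gaussian (spatial) weight per template entry
const float *rangeWeight, // range weight indexed by absolute intensity difference (0..255)
int count, // number of template entries, i.e. (2 * radius + 1) * (2 * radius + 1)
unsigned char center) // center pixel value
{
float t = 0.0f, sum = 0.0f;
for (int k = 0; k < count; k++) {
unsigned char diff = neighborhood[k] > center ? neighborhood[k] - center :
center - neighborhood[k];
float factor = spatialWeight[k] * rangeWeight[diff]; // spatial weight times range weight
t += factor * neighborhood[k]; // weighted pixel sum
sum += factor; // weight sum
}
return (unsigned char)(t / sum); // normalized bilateral result
}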
// Host member method: doFilter (perform filtering, in place)
__host__ int BilateralFilter::doFilter(Image *inoutimg)
{
// Return immediately if the given radius is out of range.
if (radius <= 0 || radius > DEF_FILTER_RANGE)
return INVALID_DATA;
// If the number of filter repetitions is not positive, return an error without filtering.
if (repeat <= 0)
return INVALID_DATA;
// Check whether the input image is NULL; if so, report an error and return.
if (inoutimg == NULL)
return NULL_POINTER;
// Check the template data.
if (gaussian == NULL || euclid == NULL)
return INVALID_DATA;
int errcode; // local variable, error code
// Initialize texture memory and bind the input image to it.
_initTexture(inoutimg);
// Copy the Gaussian template data to the device so the kernel can access it.
errcode = TemplateBasicOp::copyToCurrentDevice(gaussian);
if (errcode != NO_ERROR)
return errcode;
// Copy the Euclidean distance template data to the device so the kernel can access it.
errcode = TemplateBasicOp::copyToCurrentDevice(euclid);
if (errcode != NO_ERROR)
return errcode;
// Extract the ROI sub-image of the input image.
ImageCuda inoutsubimgCud;
errcode = ImageBasicOp::roiSubImage(inoutimg, &inoutsubimgCud);
if (errcode != NO_ERROR)
return errcode;
// Compute the thread block size and grid size for the kernel launch.
dim3 blocksize, gridsize;
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
gridsize.x = (inoutsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x;
gridsize.y = (inoutsubimgCud.imgMeta.height + blocksize.y * 4 - 1) /
(blocksize.y * 4);
// Repeat the filtering to improve quality.
for (int i = 0; i < repeat; i++) {
// Launch the kernel to perform the filtering.
_bilateralFilterKer<<<gridsize, blocksize>>>(
inoutsubimgCud, radius, *TEMPLATE_CUDA(gaussian),
*TEMPLATE_CUDA(euclid));
}
return NO_ERROR;
}
// Host member method: doFilter (perform filtering, out of place)
__host__ int BilateralFilter::doFilter(Image *inimg, Image *outimg)
{
// Return immediately if the given radius is out of range.
if (radius <= 0 || radius > DEF_FILTER_RANGE)
return INVALID_DATA;
// If the number of filter repetitions is not positive, return an error without filtering.
if (repeat <= 0)
return INVALID_DATA;
// Check whether the images are NULL; if so, report an error and return.
if (inimg == NULL || outimg == NULL)
return NULL_POINTER;
// Check the template data.
if (gaussian == NULL || euclid == NULL)
return INVALID_DATA;
int errcode; // local variable, error code
// Initialize texture memory and bind the input image to it. The first pass writes
// its result to outimg; the following repetitions are then effectively in-place
// passes on outimg, which keeps the data in inimg unchanged.
_initTexture(inimg);
// Copy the Gaussian template data to the device so the kernel can access it.
errcode = TemplateBasicOp::copyToCurrentDevice(gaussian);
if (errcode != NO_ERROR)
return errcode;
// Copy the Euclidean distance template data to the device so the kernel can access it.
errcode = TemplateBasicOp::copyToCurrentDevice(euclid);
if (errcode != NO_ERROR)
return errcode;
// Extract the ROI sub-image of the input image.
ImageCuda insubimgCud;
errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
if (errcode != NO_ERROR)
return errcode;
// Copy the output image to device memory.
errcode = ImageBasicOp::copyToCurrentDevice(outimg);
if (errcode != NO_ERROR) {
errcode = ImageBasicOp::makeAtCurrentDevice(
outimg, inimg->roiX2 - inimg->roiX1,
inimg->roiY2 - inimg->roiY1);
if (errcode != NO_ERROR)
return errcode;
}
// Extract the ROI sub-image of the output image.
ImageCuda outsubimgCud;
errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud);
if (errcode != NO_ERROR)
return errcode;
// Harmonize the two sub-images: for both width and height, use the smaller of the two values.
if (insubimgCud.imgMeta.width > outsubimgCud.imgMeta.width)
insubimgCud.imgMeta.width = outsubimgCud.imgMeta.width;
else
outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width;
if (insubimgCud.imgMeta.height > outsubimgCud.imgMeta.height)
insubimgCud.imgMeta.height = outsubimgCud.imgMeta.height;
else
outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height;
// Compute the thread block size and grid size for the kernel launch.
dim3 blocksize, gridsize;
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x;
gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) /
(blocksize.y * 4);
// Launch the kernel to perform the first filtering pass.
_bilateralFilterKer<<<gridsize, blocksize>>>(
outsubimgCud, radius, *TEMPLATE_CUDA(gaussian),
*TEMPLATE_CUDA(euclid));
// Repeat the filtering to improve quality.
for (int i = 1; i < repeat; i++) {
// Launch the kernel for another filtering pass.
_bilateralFilterKer<<<gridsize, blocksize>>>(
outsubimgCud, radius, *TEMPLATE_CUDA(gaussian),
*TEMPLATE_CUDA(euclid));
}
return NO_ERROR;
}
#include "../shaders/tonemap.cuh"
#include "../shaders/uniforms.cuh"
#define FRAMEBUFFER_GLOBAL
#include "framebuffer.cuh"
#include "viewport.cuh"
__device__ void clamp_d(math::int2& v, const math::int2& lim)
{
v.x = max(0, min(lim.x, v.x));
v.y = max(0, min(lim.y, v.y));
}
__device__ void add(math::float4& sum, float weight, float4& color)
{
sum.x += weight * color.x;
sum.y += weight * color.y;
sum.z += weight * color.z;
}
extern "C"
{
__global__ void smoothTest1(cudaSurfaceObject_t target, cudaTextureObject_t source)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < color_buffer_size.x && y < color_buffer_size.y)
{
math::float2 c;
int left = 8 * (x / 8);
int bottom = 8 * (y / 8);
int right = left + 8;
int up = bottom + 8;
c.x = (left + right) * 0.5f * pixel_scale.x + pixel_scale.z;
c.y = (bottom + up) * 0.5f * pixel_scale.y + pixel_scale.w;
if (dot(c, c) > uniform[2])
{
math::float4 sum = { 0, 0, 0, 0 };
math::float2 candidate;
if (uniform[1] == 1)
{
math::uint2 in_rep(x % 4, y % 4);
math::float2 close;
close.x = (in_rep.x % 2 ? 1.f : -1.f);
close.y = (in_rep.y % 2 ? 1.f : -1.f);
if ((in_rep.x < 2) ^ (in_rep.y < 2)) // in unfilled
{
math::float2 loc = { x + 0.5f, y + 0.5f };
float4 sample;
candidate.x = loc.x + close.x;
candidate.y = loc.y;
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.5f, sample);
candidate.x = loc.x;
candidate.y = loc.y + close.y;
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.5f, sample);
uchar4 color = { static_cast<unsigned char>(255 * sum.x), static_cast<unsigned char>(255 * sum.y), static_cast<unsigned char>(255 * sum.z), static_cast<unsigned char>(255 * sum.w) };
FrameBuffer::writeColor(x, y, color);
}
}
}
}
}
__global__ void smoothTest2(cudaSurfaceObject_t target, cudaTextureObject_t source)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < color_buffer_size.x && y < color_buffer_size.y)
{
math::float2 c;
int left = 8 * (x / 8);
int bottom = 8 * (y / 8);
int right = left + 8;
int up = bottom + 8;
c.x = (left + right) * 0.5f * pixel_scale.x + pixel_scale.z;
c.y = (bottom + up) * 0.5f * pixel_scale.y + pixel_scale.w;
if (dot(c, c) > uniform[2])
{
math::float4 sum = { 0, 0, 0, 0 };
math::float2 candidate;
if (uniform[1] == 1)
{
math::uint2 in_rep(x % 4, y % 4);
math::float2 close;
close.x = (in_rep.x % 2 ? 1.f : -1.f);
close.y = (in_rep.y % 2 ? 1.f : -1.f);
if ((in_rep.x < 2) ^ (in_rep.y >= 2)) // in filled
{
if (close.y < 0)
{
math::float2 loc = { x + 0.5f, y + 0.5f };
float4 sample;
candidate.x = loc.x;
candidate.y = loc.y;
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.5f, sample);
candidate.x = loc.x + close.x;
candidate.y = loc.y + close.y;
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.5f, sample);
uchar4 color = { static_cast<unsigned char>(255 * sum.x), static_cast<unsigned char>(255 * sum.y), static_cast<unsigned char>(255 * sum.z), static_cast<unsigned char>(255 * sum.w) };
FrameBuffer::writeColor(x, y, color);
FrameBuffer::writeColor(x + close.x, y + close.y, color);
}
}
}
}
}
}
__global__ void smoothTest3(cudaSurfaceObject_t target, cudaTextureObject_t source)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < color_buffer_size.x && y < color_buffer_size.y)
{
math::float2 c;
int left = 8 * (x / 8);
int bottom = 8 * (y / 8);
int right = left + 8;
int up = bottom + 8;
c.x = (left + right) * 0.5f * pixel_scale.x + pixel_scale.z;
c.y = (bottom + up) * 0.5f * pixel_scale.y + pixel_scale.w;
if (dot(c, c) > uniform[2])
{
math::float4 sum = { 0, 0, 0, 0 };
math::float2 candidate;
if (uniform[1] == 1)
{
math::float2 loc = { x + 0.5f, y + 0.5f };
float4 sample;
candidate = { loc.x - 1, loc.y - 1 };
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.0625f, sample);
candidate = { loc.x, loc.y - 1 };
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.125f, sample);
candidate = { loc.x + 1, loc.y - 1 };
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.0625f, sample);
//
candidate = { loc.x - 1, loc.y };
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.125f, sample);
candidate = { loc.x, loc.y };
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.25f, sample);
candidate = { loc.x + 1, loc.y };
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.125f, sample);
//
candidate = { loc.x - 1, loc.y + 1 };
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.0625f, sample);
candidate = { loc.x, loc.y + 1 };
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.125f, sample);
candidate = { loc.x + 1, loc.y + 1 };
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.0625f, sample);
//
uchar4 color = { static_cast<unsigned char>(255 * sum.x), static_cast<unsigned char>(255 * sum.y), static_cast<unsigned char>(255 * sum.z), static_cast<unsigned char>(255 * sum.w) };
FrameBuffer::writeColor(x, y, color);
}
}
}
}
__global__ void smoothImageQuad(cudaSurfaceObject_t target, cudaTextureObject_t source)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < color_buffer_size.x && y < color_buffer_size.y)
{
int left = 8 * (x / 8);
int bottom = 8 * (y / 8);
math::float2 c { left + 4 - (viewport.left + viewport.right) * 0.5f, bottom + 4 - (viewport.top + viewport.bottom) * 0.5f };
if (dot(c, c) > CHECKERBOARD_RADIUS * CHECKERBOARD_RADIUS)
{
math::float4 sum = { 0, 0, 0, 1 };
math::float2 candidate;
math::uint2 in_rep(x % 4, y % 4);
math::float2 close((in_rep.x % 2 ? 1 : -1), (in_rep.y % 2 ? 1 : -1));
if ((in_rep.x < 2) ^ (in_rep.y < 2)) // in unfilled
{
math::float2 loc = { x + 0.5f, y + 0.5f };
float4 sample;
candidate.x = loc.x + close.x;
candidate.y = loc.y - close.y * 0.25f;
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.375f, sample);
candidate.x = loc.x - close.x * 0.25f;
candidate.y = loc.y + close.y;
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.375f, sample);
candidate.x = loc.x - close.x * 0.25f;
candidate.y = loc.y - 2.f * close.y;
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.125f, sample);
candidate.x = loc.x - 2.f * close.x;
candidate.y = loc.y - close.y * 0.25f;
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.125f, sample);
FrameBuffer::writeColor(target, x, y, sum);
}
else
{
math::float2 loc = { x + 0.5f, y + 0.5f };
float4 sample;
candidate.x = loc.x - 0.25f * close.x;
candidate.y = loc.y - 0.25f * close.y;
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.5f, sample);
candidate.x = loc.x + close.x;
candidate.y = loc.y + close.y;
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.28125f, sample);
candidate.x = loc.x - 2 * close.x;
candidate.y = loc.y + close.y;
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.09375f, sample);
candidate.x = loc.x + close.x;
candidate.y = loc.y - 2 * close.y;
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.09375f, sample);
candidate.x = loc.x - 2 * close.x;
candidate.y = loc.y - 2 * close.y;
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.03125f, sample);
FrameBuffer::writeColor(target, x, y, sum);
}
}
else
{
math::float2 loc = { x + 0.5f, y + 0.5f };
float4 sample = tex2D<float4>(source, loc.x, loc.y);
FrameBuffer::writeColor(target, x, y, math::float4(sample.x, sample.y, sample.z, sample.w));
}
}
}
__device__ float inside(const math::int2& in_tile, float offx, float offy, math::float2 c)
{
math::float2 unit;
unit.x = (in_tile.x + offx < 0 ? -1.f : in_tile.x + offx >= 8);
unit.y = (in_tile.y + offy < 0 ? -1.f : in_tile.y + offy >= 8);
c.x += unit.x * 8;
c.y += unit.y * 8;
return (dot(c, c) < CHECKERBOARD_RADIUS * CHECKERBOARD_RADIUS ? 1.f : 0.0f);
}
__global__ void smoothImage(cudaSurfaceObject_t target, cudaTextureObject_t source)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < color_buffer_size.x && y < color_buffer_size.y)
{
int left = 8 * (x / 8);
int bottom = 8 * (y / 8);
math::float2 c { left + 4 - (viewport.left + viewport.right) * 0.5f, bottom + 4 - (viewport.top + viewport.bottom) * 0.5f };
if (dot(c, c) > CHECKERBOARD_RADIUS * CHECKERBOARD_RADIUS)
{
math::float4 sum = { 0, 0, 0, 1 };
math::float2 candidate;
math::float2 c2 { abs(c.x) - 8, abs(c.y) - 8 };
if (dot(c2, c2) > CHECKERBOARD_RADIUS * CHECKERBOARD_RADIUS)
{
if ((x % 2) ^ (y % 2)) // in unfilled
{
math::float2 loc(x, y);
float4 sample;
candidate.x = loc.x;
candidate.y = loc.y;
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.875f, sample);
candidate.x = loc.x + 1;
candidate.y = loc.y + 1;
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.875f, sample);
candidate.x = loc.x - 1;
candidate.y = loc.y - 1;
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.0625F, sample);
candidate.x = loc.x + 2;
candidate.y = loc.y - 1;
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.0625F, sample);
candidate.x = loc.x + 2;
candidate.y = loc.y + 2;
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.0625F, sample);
candidate.x = loc.x - 1;
candidate.y = loc.y + 2;
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.0625F, sample);
FrameBuffer::writeColor(target, x, y, sum);
}
else
{
math::float2 loc = { x + 0.5f, y + 0.5f };
float4 sample;
float third = 1.f / 3.f;
candidate.x = loc.x;
candidate.y = loc.y;
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.375f, sample);
candidate.x = loc.x - 1 + third;
candidate.y = loc.y - 1 - third;
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.28125f, sample);
candidate.x = loc.x + 1 + third;
candidate.y = loc.y - 1 + third;
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.28125f, sample);
candidate.x = loc.x + 1 - third;
candidate.y = loc.y + 1 + third;
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.28125f, sample);
candidate.x = loc.x - 1 - third;
candidate.y = loc.y + 1 - third;
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.28125f, sample);
FrameBuffer::writeColor(target, x, y, sum);
}
}
else
{
math::int2 in_tile = { x - left, y - bottom };
float4 sample;
float in, in2;
if ((x % 2) ^ (y % 2)) // in unfilled
{
math::float2 loc(x, y);
candidate.x = loc.x;
candidate.y = loc.y;
sample = tex2D<float4>(source, candidate.x, candidate.y);
in = inside(in_tile, -1, -1, c);
add(sum, 0.875f - (in * 0.2916666f), sample);
candidate.x = loc.x - 1;
candidate.y = loc.y - 1;
sample = tex2D<float4>(source, candidate.x, candidate.y);
in2 = inside(in_tile, -2, -2, c);
add(sum, 0.0625F - (in2 * 0.02083333f) - (in * 0.010416666f), sample);
candidate.x = loc.x + 1;
candidate.y = loc.y + 1;
sample = tex2D<float4>(source, candidate.x, candidate.y);
in = inside(in_tile, 1, 1, c);
add(sum, 0.875f - (in * 0.2916666f), sample);
candidate.x = loc.x + 2;
candidate.y = loc.y + 2;
sample = tex2D<float4>(source, candidate.x, candidate.y);
in2 = inside(in_tile, 2, 2, c);
add(sum, 0.0625F - (in2 * 0.02083333f) - (in * 0.010416666f), sample);
candidate.x = loc.x + 2;
candidate.y = loc.y - 1;
sample = tex2D<float4>(source, candidate.x, candidate.y);
in = inside(in_tile, 1, -1, c);
in2 = inside(in_tile, 2, -2, c);
add(sum, 0.0625F - (in2 * 0.02083333f) - (in * 0.010416666f), sample);
candidate.x = loc.x - 1;
candidate.y = loc.y + 2;
sample = tex2D<float4>(source, candidate.x, candidate.y);
in = inside(in_tile, -1, 1, c);
in2 = inside(in_tile, -2, 2, c);
add(sum, 0.0625F - (in2 * 0.02083333f) - (in * 0.010416666f), sample);
FrameBuffer::writeColor(target, x, y, sum);
}
else
{
math::float2 loc = { x + 0.5f, y + 0.5f };
float third = 1.f / 3.f;
candidate.x = loc.x;
candidate.y = loc.y;
sample = tex2D<float4>(source, candidate.x, candidate.y);
add(sum, 0.375f, sample);
candidate.x = loc.x - 1 + third;
candidate.y = loc.y - 1 - third;
sample = tex2D<float4>(source, candidate.x, candidate.y);
in = inside(in_tile, 0, -1, c);
in2 = inside(in_tile, -1, -2, c);
add(sum, 0.28125f - in2 * 0.1026785f - in * 0.022321f, sample);
candidate.x = loc.x + 1 + third;
candidate.y = loc.y - 1 + third;
sample = tex2D<float4>(source, candidate.x, candidate.y);
in = inside(in_tile, 1, 0, c);
in2 = inside(in_tile, 2, -1, c);
add(sum, 0.28125f - in2 * 0.1026785f - in * 0.022321f, sample);
candidate.x = loc.x + 1 - third;
candidate.y = loc.y + 1 + third;
sample = tex2D<float4>(source, candidate.x, candidate.y);
in = inside(in_tile, 0, 1, c);
in2 = inside(in_tile, 1, 2, c);
add(sum, 0.28125f - in2 * 0.1026785f - in * 0.022321f, sample);
candidate.x = loc.x - 1 - third;
candidate.y = loc.y + 1 - third;
sample = tex2D<float4>(source, candidate.x, candidate.y);
in = inside(in_tile, -1, 0, c);
in2 = inside(in_tile, -2, 1, c);
add(sum, 0.28125f - in2 * 0.1026785f - in * 0.022321f, sample);
FrameBuffer::writeColor(target, x, y, sum);
}
}
}
else
{
math::float2 loc = { x + 0.5f, y + 0.5f };
float4 sample = tex2D<float4>(source, loc.x, loc.y);
FrameBuffer::writeColor(target, x, y, math::float4(sample.x, sample.y, sample.z, sample.w));
}
}
}
__global__ void clearColorBuffer(uchar4 color)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < color_buffer_size.x && y < color_buffer_size.y)
{
FrameBuffer::writeColor(x, y, color);
}
}
__global__ void clearColorBufferCheckers(uchar4 c1, uchar4 c2, unsigned int s)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < color_buffer_size.x && y < color_buffer_size.y)
{
FrameBuffer::writeColor(x, y, (((x >> s) ^ (y >> s)) & 0x1U) == 0U ? c1 : c2);
}
}
__global__ void clearColorBufferTexture()
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < color_buffer_size.x && y < color_buffer_size.y)
{
math::float2 px = { 2.f * ((x + 0.5f) / ((float)color_buffer_size.x)) - 1.f,
2.f * ((y + 0.5f) / ((float)color_buffer_size.y)) - 1.f };
math::float4 on_near = { px.x, px.y, 0, 1.f };
math::float4 on_far = { px.x, px.y, 1, 1.f };
on_near = camera.PV_inv * on_near;
on_far = camera.PV_inv * on_far;
math::float3 r = normalize(on_far.xyz() / on_far.w - on_near.xyz() / on_near.w);
math::float2 uv;
uv.x = (1.f / (2.f * math::constants<float>::pi())) * (atan2(r.z, r.x) + math::constants<float>::pi());
uv.y = 1.f - (2.f / (math::constants<float>::pi()) * asin(r.y));
float4 tex_color_ = tex2D(texf, uv.x, uv.y);
math::float3 tex_color(tex_color_.x, tex_color_.y, tex_color_.z);
tex_color = tonemap(tex_color);
//uchar4 color = { fminf(1.f, tex_color.x) * 255, fminf(1.f, tex_color.y) * 255, fminf(1.f, tex_color.z) * 255, 255 };
FrameBuffer::writeColor(x, y, { tex_color, 1.0f });
}
}
__global__ void clearDepthBuffer(float depth)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < depth_buffer_size.x && y < depth_buffer_size.y)
{
FrameBuffer::writeDepth(x, y, depth);
}
}
}
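/* Illustrative host-side launch for the clear kernels above (a sketch only; the real
   engine's framebuffer dimensions, streams and error handling live elsewhere, and the
   width/height parameters here are assumptions). */
inline void clearCheckersExample(unsigned int width, unsigned int height)
{
dim3 block(16, 16);
dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);
uchar4 dark = { 32, 32, 32, 255 };
uchar4 light = { 64, 64, 64, 255 };
clearColorBufferCheckers<<<grid, block>>>(dark, light, 3U); // 8x8-pixel checker tiles (shift s = 3)
}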
namespace amgx
{
namespace locally_downwind_kernels
{
// ---------------------------
// Kernels
// ---------------------------
// Kernel to color the rows of the matrix, using min-max approach
template <typename IndexType>
__global__
void colorRowsKernel(IndexType *row_colors, const int num_colors, const int num_rows)
{
for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < num_rows; i += gridDim.x * blockDim.x)
{
row_colors[i] = i % num_colors;
}
}
// Is this edge an outgoing edge?
template <typename IndexType, typename ValueType>
__host__ __device__
bool outgoing_edge( const IndexType *ia, const IndexType *ja, const ValueType *aa, IndexType i, IndexType ii, int blocksize )
{
IndexType j = ja[ii];
ValueType weight = 0.0;
for (IndexType iii = ii * blocksize; iii < (ii + 1)*blocksize; iii++)
{
weight += (aa[iii] * aa[iii]);
}
for (IndexType jj = ia[j]; jj < ia[j + 1]; jj++)
{
if ( ja[jj] == i )
{
ValueType counter_weight = 0.0;
for (IndexType jjj = jj * blocksize; jjj < (jj + 1)*blocksize; jjj++)
{
counter_weight += (aa[jjj] * aa[jjj]);
}
return (weight >= counter_weight);
}
}
return true;
}
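/* Toy host-side example (illustrative only, not AMGX code): a 2-node system with a strong
   edge 0->1 (value 2) and a weak edge 1->0 (value 1), block size 1. The arrays below are
   hypothetical and exist only to show how outgoing_edge() reads the CSR data. */
inline void outgoing_edge_example()
{
const int ia[3] = { 0, 1, 2 }; // row offsets: row 0 owns edge 0, row 1 owns edge 1
const int ja[2] = { 1, 0 }; // column indices
const double aa[2] = { 2.0, 1.0 }; // one value per edge (blocksize == 1)
bool e01 = outgoing_edge(ia, ja, aa, 0, 0, 1); // true: 2*2 >= 1*1, so 0->1 is outgoing
bool e10 = outgoing_edge(ia, ja, aa, 1, 1, 1); // false: 1*1 < 2*2, so 1->0 is incoming
(void)e01;
(void)e10;
}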
// Simple per-thread ring buffer used as a work queue for the modified BFS below.
// Each thread owns a segment of length buffersize inside a shared memory array.
template <typename IndexType>
struct Buffer
{
IndexType *buffer; // backing storage (shared memory), shared by all threads of the block
int buffersize; // capacity of one thread's segment
int loptr; // read position
int hiptr; // write position (wraps modulo buffersize)
int offset; // start of this thread's segment inside buffer
__device__ int size();
__device__ IndexType pop();
__device__ void push( IndexType node );
};
template <typename IndexType>
__device__
IndexType Buffer<IndexType>::pop()
{
int pos = loptr % buffersize;
loptr++;
return buffer[offset + pos];
}
template <typename IndexType>
__device__
void Buffer<IndexType>::push(IndexType element )
{
buffer[offset + hiptr] = element;
hiptr = (hiptr + 1) % buffersize;
}
template <typename IndexType>
__device__
int Buffer<IndexType>::size()
{
return (hiptr - loptr) % buffersize;
}
template <typename IndexType, typename ValueType>
__global__
void traverse( const IndexType *ia, const IndexType *ja, const ValueType *aa, const IndexType *ria, const IndexType *rja, const IndexType *agg, IndexType *color, IndexType numAggregates, int buffersize, int blocksize )
{
extern __shared__ IndexType tr_smem[];
Buffer<IndexType> ring;
ring.buffer = &tr_smem[0];
ring.buffersize = buffersize;
ring.loptr = 0;
ring.hiptr = 0;
ring.offset = threadIdx.x * buffersize;
int aggregate = threadIdx.x + blockDim.x * blockIdx.x;
if ( aggregate < numAggregates )
{
bool nodesLeft = true;
for (int allowed_incoming_edges = 0; nodesLeft; allowed_incoming_edges++)
{
nodesLeft = false;
for (IndexType ii = ria[aggregate]; ii < ria[aggregate + 1]; ii++)
{
//find possible root node for modified BFS coloring
IndexType node = rja[ii];
int found_edges = 0;
//not colored yet
if ( color[node] == -1 )
{
for (IndexType jj = ia[node]; jj < ia[node + 1]; jj++)
{
if ( agg[ja[jj]] == aggregate && !outgoing_edge( ia, ja, aa, node, jj, blocksize ) )
{
found_edges++;
}
}
nodesLeft = true;
}
else
{
found_edges = allowed_incoming_edges + 1;
}
//start modified BFS
if ( found_edges <= allowed_incoming_edges )
{
color[node] = 0; //TODO: make the node look around for already set neighbors first
ring.push(node);
while ( ring.size() > 0 )
{
//this node is already colored.
node = ring.pop();
int myInitialColor = color[node];
//traverse all neighbors to determine the smallest valid color for this node
int myColor = myInitialColor;
for (IndexType jj = ia[node]; jj < ia[node + 1]; jj++)
{
IndexType j = ja[jj];
if ( color[j] == myColor && j != node)
{
//try next color
myColor++;
jj = ia[node];
}
}
//repair own color
//color[node] = myColor;
//traverse all children
for (IndexType jj = ia[node]; jj < ia[node + 1]; jj++)
{
IndexType child = ja[jj];
if ( agg[child] != aggregate || //only set colors in own aggregate
!outgoing_edge( ia, ja, aa, node, jj, blocksize )) //only process outgoing edges
{
continue;
}
//unset -> set and push
if ( color[child] == -1)
{
color[child] = myColor + 1;
ring.push( child );
}
if ( color[child] >= myInitialColor )
{
color[child] = myColor + 1; //max( myColor+1, color[child] );
ring.push( child );
}
/*
//same color: reset and reset children, no push
if( color[child] == myColor )
{
color[child] = myColor+1;
for(IndexType kk = ia[child]; kk < ia[child+1]; kk++)
{
//same restrictions for setting colors apply here:
//stay in aggregate, only set outgoing edges, only set uncolored or same level
IndexType grandchild = ja[kk];
if( agg[grandchild] != aggregate || !outgoing_edge( ia, ja, aa, child, kk, blocksize ) )
continue;
if( color[grandchild] == -1 )
{
color[grandchild] = myColor+2;
ring.push( grandchild );
}
if( color[grandchild] == myColor+1 )
color[grandchild] = myColor+2;
}
}
*/
}
}
}
}
}
}
}
template <typename IndexType, typename ValueType>
__global__
void repair( const IndexType *ia, const IndexType *ja, const ValueType *aa, const IndexType *ria, const IndexType *rja, const IndexType *agg, IndexType *color, IndexType numAggregates, int buffersize, int blocksize )
{
extern __shared__ IndexType tr_smem[];
Buffer<IndexType> ring;
ring.buffer = &tr_smem[0];
ring.buffersize = buffersize;
ring.loptr = 0;
ring.hiptr = 0;
ring.offset = threadIdx.x * buffersize;
int aggregate = threadIdx.x + blockDim.x * blockIdx.x;
if ( aggregate < numAggregates )
{
bool nodesLeft = true;
for (int allowed_incoming_edges = 0; nodesLeft; allowed_incoming_edges++)
{
nodesLeft = false;
for (IndexType ii = ria[aggregate]; ii < ria[aggregate + 1]; ii++)
{
//find possible root node for modified BFS coloring
IndexType node = rja[ii];
int found_edges = 0;
//not colored yet
if ( color[node] == -1 )
{
for (IndexType jj = ia[node]; jj < ia[node + 1]; jj++)
{
if ( agg[ja[jj]] == aggregate && !outgoing_edge( ia, ja, aa, node, jj, blocksize ) )
{
found_edges++;
}
}
nodesLeft = true;
}
else
{
found_edges = allowed_incoming_edges + 1;
}
//start modified BFS
if ( found_edges <= allowed_incoming_edges )
{
color[node] = 0; //TODO: make the node look around for already set neighbors first
ring.push(node);
while ( ring.size() > 0 )
{
//this node is already colored.
node = ring.pop();
int myInitialColor = color[node];
//traverse all neighbors to determine the smallest valid color for this node
int myColor = myInitialColor;
for (IndexType jj = ia[node]; jj < ia[node + 1]; jj++)
{
IndexType j = ja[jj];
if ( color[j] == myColor && j != node)
{
//try next color
myColor++;
jj = ia[node];
}
}
//traverse all children
for (IndexType jj = ia[node]; jj < ia[node + 1]; jj++)
{
IndexType child = ja[jj];
if ( agg[child] != aggregate || //only set colors in own aggregate
!outgoing_edge( ia, ja, aa, node, jj, blocksize )) //only process outgoing edges
{
continue;
}
//unset -> set and push
if ( color[child] == -1)
{
color[child] = myColor + 1;
ring.push( child );
}
if ( color[child] >= myInitialColor )
{
color[child] = myColor + 1; //max( myColor+1, color[child] );
ring.push( child );
}
/*
//same color: reset and reset children, no push
if( color[child] == myColor )
{
color[child] = myColor+1;
for(IndexType kk = ia[child]; kk < ia[child+1]; kk++)
{
//same restrictions for setting colors apply here:
//stay in aggregate, only set outgoing edges, only set uncolored or same level
IndexType grandchild = ja[kk];
if( agg[grandchild] != aggregate || !outgoing_edge( ia, ja, aa, child, kk, blocksize ) )
continue;
if( color[grandchild] == -1 )
{
color[grandchild] = myColor+2;
ring.push( grandchild );
}
if( color[grandchild] == myColor+1 )
color[grandchild] = myColor+2;
}
}
*/
}
}
}
}
}
}
}
} // locally_downwind_kernels namespace
// ---------------------------
// Methods
// ---------------------------
template<class T_Config>
LocallyDownwindColoringBase<T_Config>::LocallyDownwindColoringBase(AMG_Config &cfg, const std::string &cfg_scope) : MatrixColoring<T_Config>(cfg, cfg_scope)
{
this->m_num_colors = cfg.AMG_Config::getParameter<int>("num_colors", cfg_scope);
}
template<class TConfig>
void LocallyDownwindColoringBase<TConfig>::colorMatrix(Matrix<TConfig> &A)
{
//wait for colorMatrixUsingAggregates to be called
std::cout << "coloring denied" << std::endl;
return;
}
template<class TConfig>
void LocallyDownwindColoringBase<TConfig>::colorMatrixUsingAggregates(Matrix<TConfig> &A, IVector &R_row_offsets, IVector &R_col_indices, IVector &aggregates )
{
#define CPU_VERSION
#ifdef CPU_VERSION
IndexType numRows = A.get_num_rows();
IndexType nnz = A.get_num_nz();
int blockdim = A.get_block_dimx() * A.get_block_dimy();
//allocate memory on host
IndexType *ia = new IndexType[numRows + 1];
IndexType *ja = new IndexType[A.col_indices.size()];
ValueType *aa = new ValueType[nnz * A.get_block_dimx()*A.get_block_dimy()];
IndexType *ria = new IndexType[R_row_offsets.size()];
IndexType *rja = new IndexType[R_col_indices.size()];
IndexType *agg = new IndexType[aggregates.size()];
IndexType *color = new IndexType[numRows];
//copy data from device to host
cudaMemcpy(ia, A.row_offsets.raw(), (numRows + 1)*sizeof(IndexType), cudaMemcpyDeviceToHost );
cudaMemcpy(ja, A.col_indices.raw(), A.col_indices.size()*sizeof(IndexType), cudaMemcpyDeviceToHost );
cudaMemcpy(aa, A.values.raw(), nnz * blockdim * sizeof(ValueType), cudaMemcpyDeviceToHost );
cudaMemcpy(ria, R_row_offsets.raw(), R_row_offsets.size()*sizeof(IndexType), cudaMemcpyDeviceToHost );
cudaMemcpy(rja, R_col_indices.raw(), R_col_indices.size()*sizeof(IndexType), cudaMemcpyDeviceToHost );
cudaMemcpy(agg, aggregates.raw(), aggregates.size()*sizeof(IndexType), cudaMemcpyDeviceToHost );
for (IndexType i = 0; i < numRows; i++)
{
color[i] = -1;
}
//color aggregate by aggregate
for (IndexType aggregate = 0; aggregate < R_row_offsets.size() - 1; aggregate++)
{
std::queue<IndexType> q;
bool nodesLeft = true;
while ( nodesLeft )
{
//find uncolored node to start with (i.e. with minimum in degree)
IndexType min_in_degree = numRows;
IndexType next_node = -1;
nodesLeft = false;
for (IndexType node_index = ria[aggregate]; node_index < ria[aggregate + 1]; node_index++)
{
IndexType node = rja[node_index];
int in_degree = 0;
if ( color[node] == -1 )
{
nodesLeft = true;
for (IndexType ii = ia[node]; ii < ia[node + 1]; ii++)
{
if ( agg[ja[ii]] == aggregate && !locally_downwind_kernels::outgoing_edge( ia, ja, aa, node, ii, blockdim ) )
{
in_degree++;
}
}
if ( in_degree < min_in_degree )
{
min_in_degree = in_degree;
next_node = node;
}
}
}
if (!nodesLeft)
{
break;
}
//start modified BFS
color[next_node] = 0;
q.push( next_node );
while ( q.size() > 0 )
{
IndexType node = q.front();
q.pop();
int myInitialColor = color[node];
int myColor = myInitialColor;
//find valid color for this node
for (IndexType ii = ia[node]; ii < ia[node + 1]; ii++)
{
if ( color[ja[ii]] == myColor && ja[ii] != node && (agg[ja[ii]] != aggregate || !locally_downwind_kernels::outgoing_edge( ia, ja, aa, node, ii, blockdim )) )
{
myColor++;
ii = ia[node] - 1;
}
}
//set color
color[node] = myColor;
//update children
for (IndexType jj = ia[node]; jj < ia[node + 1]; jj++)
{
IndexType child = ja[jj];
if ( agg[child] != aggregate || //only set colors in own aggregate
!locally_downwind_kernels::outgoing_edge( ia, ja, aa, node, jj, blockdim ) ||//only process outgoing edges
child == node) // don't mess with yourself
{
continue;
}
//unset -> set and push
if ( color[child] >= myInitialColor )
{
color[child] = max( myColor + 1, color[child] );
q.push( child );
}
if ( color[child] == -1)
{
color[child] = myColor + 1;
q.push( child );
}
}
}
}
}
//copy back results
this->m_row_colors.resize(A.row_offsets.size() - 1, 0);
cudaMemcpy( this->m_row_colors.raw(), color, numRows * sizeof(IndexType), cudaMemcpyHostToDevice );
//free all the others
delete [] ia;
delete [] ja;
delete [] aa;
delete [] agg;
delete [] ria;
delete [] rja;
delete [] color;
#else
std::cout << "coloring with aggregate information" << std::endl;
ViewType oldView = A.currentView();
this->m_row_colors.resize(A.row_offsets.size() - 1, 0);
if (this->m_halo_coloring == SYNC_COLORS) { A.setView(ALL); }
else { A.setViewExterior(); }
if (this->m_coloring_level == 0)
{
FatalError("Calling coloring scheme but coloring level==0", AMGX_ERR_NOT_IMPLEMENTED);
}
else if (this->m_coloring_level == 1)
{
IndexType numRows = A.get_num_rows();
IndexType numAggregates = R_row_offsets.size() - 1;
int blocksize = A.get_block_dimx() * A.get_block_dimy();
int max_aggregate_size = 100;
const int threads_per_block = 64;
const int num_blocks = (numAggregates - 1) / threads_per_block + 1;
const int smem_size = max_aggregate_size * threads_per_block * sizeof(IndexType);
this->m_row_colors.resize( numRows );
thrust::fill( this->m_row_colors.begin(), this->m_row_colors.end(), -1 );
cudaCheckError();
std::cout << "start coloring kernel" << std::endl;
locally_downwind_kernels::traverse <<< num_blocks, threads_per_block, smem_size>>>( A.row_offsets.raw(),
A.col_indices.raw(),
A.values.raw(),
R_row_offsets.raw(),
R_col_indices.raw(),
aggregates.raw(),
this->m_row_colors.raw(),
numAggregates,
max_aggregate_size,
blocksize);
cudaDeviceSynchronize();
cudaCheckError();
std::cout << "uncolored nodes: " << thrust::count( this->m_row_colors.begin(), this->m_row_colors.end(), -1 ) << std::endl;
}
else
{
FatalError("Locally Downwind coloring algorithm can only do one ring coloring", AMGX_ERR_NOT_IMPLEMENTED);
}
A.setView(oldView);
#endif
}
// Block version
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void LocallyDownwindColoring<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::colorMatrixOneRing(Matrix_d &A)
{
FatalError("This method is no longer needed", AMGX_ERR_NOT_IMPLEMENTED);
/*
// One thread per row
const int num_rows = A.get_num_rows();
IndexType *row_colors_ptr = this->m_row_colors.raw();
const int threads_per_block = 64;
const int num_blocks = min( AMGX_GRID_MAX_SIZE, (int) (num_rows-1)/threads_per_block + 1);
locally_downwind_kernels::colorRowsKernel<IndexType> <<<num_blocks,threads_per_block>>>(row_colors_ptr, this->m_num_colors, num_rows);
cudaCheckError();
*/
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void LocallyDownwindColoring<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::colorMatrixOneRing(Matrix_h &A)
{
FatalError("Haven't implemented locally downwind coloring for host", AMGX_ERR_NOT_SUPPORTED_TARGET);
}
#define AMGX_CASE_LINE(CASE) template class LocallyDownwindColoringBase<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class LocallyDownwindColoring<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // end namespace amgx
/**
* \file
* cub::PersistentBlockHisto256 implements a stateful abstraction of CUDA thread blocks for histogramming multiple tiles as part of device-wide 256-bin histogram.
*/
#pragma once
#include <iterator>
#include "../../util_arch.cuh"
#include "../../block/block_load.cuh"
#include "../../block/block_histo_256.cuh"
#include "../../block/block_radix_sort.cuh"
#include "../../block/block_discontinuity.cuh"
#include "../../grid/grid_mapping.cuh"
#include "../../grid/grid_even_share.cuh"
#include "../../grid/grid_queue.cuh"
#include "../../util_vector.cuh"
#include "../../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/******************************************************************************
* Algorithmic variants
******************************************************************************/
/**
* \brief PersistentBlockHisto256Algorithm enumerates alternative algorithms for the parallel construction of 8b histograms.
*/
enum PersistentBlockHisto256Algorithm
{
/**
* \par Overview
* A two-kernel approach in which:
* -# Thread blocks in the first kernel aggregate their own privatized
* histograms using block-wide sorting (see BlockHisto256Algorithm::BLOCK_HISTO_256_SORT).
* -# A single thread block in the second kernel reduces them into the output histogram(s).
*
* \par Performance Considerations
* Delivers consistent throughput regardless of sample bin distribution.
*/
GRID_HISTO_256_SORT,
/**
* \par Overview
* A two-kernel approach in which:
* -# Thread blocks in the first kernel aggregate their own privatized
* histograms using shared-memory \p atomicAdd().
* -# A single thread block in the second kernel reduces them into the
* output histogram(s).
*
* \par Performance Considerations
* Performance is strongly tied to the hardware implementation of atomic
* addition, and may be significantly degraded for non-uniformly-random
* input distributions where many concurrent updates are likely to be
* made to the same bin counter.
*/
GRID_HISTO_256_SHARED_ATOMIC,
/**
* \par Overview
* A single-kernel approach in which thread blocks update the output histogram(s) directly
* using global-memory \p atomicAdd().
*
* \par Performance Considerations
* Performance is strongly tied to the hardware implementation of atomic
* addition, and may be significantly degraded for non-uniformly-random
* input distributions where many concurrent updates are likely to be
* made to the same bin counter.
*/
GRID_HISTO_256_GLOBAL_ATOMIC,
};
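/* Host-side analogue of GRID_HISTO_256_SORT (illustrative sketch, not part of CUB): once a
   tile of samples has been sorted, the count for bin b is simply the length of the run of
   equal values, i.e. run_end[b] - run_begin[b]. The helper below computes the same quantity
   sequentially for an already-sorted array. */
inline void Histo256FromSortedRuns(const unsigned char *sorted, int num_samples, unsigned int histogram[256])
{
for (int bin = 0; bin < 256; ++bin)
histogram[bin] = 0;
int run_begin = 0;
for (int i = 1; i <= num_samples; ++i)
{
if ((i == num_samples) || (sorted[i] != sorted[run_begin]))
{
histogram[sorted[run_begin]] += (unsigned int) (i - run_begin); // run length == bin count
run_begin = i;
}
}
}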
/******************************************************************************
* Tuning policy
******************************************************************************/
/**
* Tuning policy for PersistentBlockHisto256
*/
template <
int _BLOCK_THREADS,
int _ITEMS_PER_THREAD,
PersistentBlockHisto256Algorithm _GRID_ALGORITHM,
GridMappingStrategy _GRID_MAPPING,
int _SM_OCCUPANCY>
struct PersistentBlockHisto256Policy
{
enum
{
BLOCK_THREADS = _BLOCK_THREADS,
ITEMS_PER_THREAD = _ITEMS_PER_THREAD,
SM_OCCUPANCY = _SM_OCCUPANCY,
};
static const PersistentBlockHisto256Algorithm GRID_ALGORITHM = _GRID_ALGORITHM;
static const GridMappingStrategy GRID_MAPPING = _GRID_MAPPING;
};
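/* Example instantiation (illustrative only): a tuning policy for 128-thread blocks, 7 samples
 * per thread, the shared-memory-atomic strategy declared above, and an SM occupancy target of 8.
 * GRID_MAPPING_EVEN_SHARE is assumed to be one of the GridMappingStrategy enumerators provided
 * by grid_mapping.cuh; substitute whichever mapping strategy is actually available/desired. */
typedef PersistentBlockHisto256Policy<
128, // BLOCK_THREADS
7, // ITEMS_PER_THREAD
GRID_HISTO_256_SHARED_ATOMIC, // GRID_ALGORITHM
GRID_MAPPING_EVEN_SHARE, // GRID_MAPPING (assumed enumerator)
8> // SM_OCCUPANCY
ExamplePersistentBlockHisto256Policy;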
/******************************************************************************
* PersistentBlockHisto256
******************************************************************************/
/**
* \brief implements a stateful abstraction of CUDA thread blocks for histogramming multiple tiles as part of device-wide 256-bin histogram.
*/
template <
typename PersistentBlockHisto256Policy, ///< Tuning policy
int CHANNELS, ///< Number of channels interleaved in the input data (may be greater than the number of active channels being histogrammed)
int ACTIVE_CHANNELS, ///< Number of channels actively being histogrammed
typename InputIteratorRA, ///< The input iterator type (may be a simple pointer type). Must have a value type that is assignable to <tt>unsigned char</tt>
typename HistoCounter, ///< Integral type for counting sample occurrences per histogram bin
typename SizeT, ///< Integer type for offsets
PersistentBlockHisto256Algorithm GRID_ALGORITHM = PersistentBlockHisto256Policy::GRID_ALGORITHM>
struct PersistentBlockHisto256;
/**
* Specialized for GRID_HISTO_256_GLOBAL_ATOMIC
*/
template <
typename PersistentBlockHisto256Policy, ///< Tuning policy
int CHANNELS, ///< Number of channels interleaved in the input data (may be greater than the number of active channels being histogrammed)
int ACTIVE_CHANNELS, ///< Number of channels actively being histogrammed
typename InputIteratorRA, ///< The input iterator type (may be a simple pointer type). Must have a value type that is assignable to <tt>unsigned char</tt>
typename HistoCounter, ///< Integral type for counting sample occurrences per histogram bin
typename SizeT> ///< Integer type for offsets
struct PersistentBlockHisto256<PersistentBlockHisto256Policy, CHANNELS, ACTIVE_CHANNELS, InputIteratorRA, HistoCounter, SizeT, GRID_HISTO_256_GLOBAL_ATOMIC>
{
//---------------------------------------------------------------------
// Types and constants
//---------------------------------------------------------------------
// Constants
enum
{
BLOCK_THREADS = PersistentBlockHisto256Policy::BLOCK_THREADS,
ITEMS_PER_THREAD = PersistentBlockHisto256Policy::ITEMS_PER_THREAD,
TILE_CHANNEL_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD,
TILE_ITEMS = TILE_CHANNEL_ITEMS * CHANNELS,
};
// Shared memory type required by this thread block
struct SmemStorage {};
//---------------------------------------------------------------------
// Per-thread fields
//---------------------------------------------------------------------
/// Reference to smem_storage
SmemStorage &smem_storage;
/// Reference to output histograms
HistoCounter* (&d_out_histograms)[ACTIVE_CHANNELS];
/// Input data to reduce
InputIteratorRA d_in;
//---------------------------------------------------------------------
// Interface
//---------------------------------------------------------------------
/**
* Constructor
*/
__device__ __forceinline__ PersistentBlockHisto256(
SmemStorage &smem_storage, ///< Reference to smem_storage
InputIteratorRA d_in, ///< Input data to reduce
HistoCounter* (&d_out_histograms)[ACTIVE_CHANNELS]) : ///< Reference to output histograms
smem_storage(smem_storage),
d_in(d_in),
d_out_histograms(d_out_histograms)
{}
/**
* The number of items processed per "tile"
*/
__device__ __forceinline__ int TileItems()
{
return TILE_ITEMS;
}
/**
* Process a single tile.
*/
__device__ __forceinline__ void ConsumeTile(
bool &sync_after,
SizeT block_offset,
int num_valid)
{
if (num_valid < TILE_ITEMS)
{
// Only a partially-full tile of samples to read and composite
int bounds = num_valid - (threadIdx.x * CHANNELS);
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
#pragma unroll
for (int CHANNEL = 0; CHANNEL < CHANNELS; ++CHANNEL)
{
if (((ACTIVE_CHANNELS == CHANNELS) || (CHANNEL < ACTIVE_CHANNELS)) && ((ITEM * BLOCK_THREADS * CHANNELS) + CHANNEL < bounds))
{
unsigned char item = d_in[block_offset + (ITEM * BLOCK_THREADS * CHANNELS) + (threadIdx.x * CHANNELS) + CHANNEL];
atomicAdd(d_out_histograms[CHANNEL] + item, 1);
}
}
}
}
else
{
// Full tile of samples to read and composite
unsigned char items[ITEMS_PER_THREAD][CHANNELS];
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
#pragma unroll
for (int CHANNEL = 0; CHANNEL < CHANNELS; ++CHANNEL)
{
if (CHANNEL < ACTIVE_CHANNELS)
{
items[ITEM][CHANNEL] = d_in[block_offset + (ITEM * BLOCK_THREADS * CHANNELS) + (threadIdx.x * CHANNELS) + CHANNEL];
}
}
}
__threadfence_block();
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
#pragma unroll
for (int CHANNEL = 0; CHANNEL < CHANNELS; ++CHANNEL)
{
if (CHANNEL < ACTIVE_CHANNELS)
{
atomicAdd(d_out_histograms[CHANNEL] + items[ITEM][CHANNEL], 1);
}
}
}
}
// No need to sync after processing this tile to ensure smem coherence
sync_after = false;
}
/**
* Finalize the computation.
*/
__device__ __forceinline__ void Finalize(
int dummy_result)
{}
};
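/* Sketch of how a grid-mapping driver might exercise the interface above (illustrative only;
 * the real drivers live in CUB's grid mapping layer and handle even-share / queue-based
 * scheduling, which is not reproduced here). */
template <typename PersistentBlockHisto256T, typename SizeT>
__device__ __forceinline__ void ConsumeTilesExample(
PersistentBlockHisto256T &persistent_block, ///< Per-block histogramming abstraction
SizeT block_offset, ///< First sample index owned by this thread block
SizeT block_end, ///< One past the last sample index owned by this thread block
SizeT total_items) ///< Total number of samples in the input
{
bool sync_after = true;
while (block_offset < block_end)
{
SizeT remaining = total_items - block_offset;
int tile_items = persistent_block.TileItems();
int num_valid = (remaining < (SizeT) tile_items) ? (int) remaining : tile_items;
persistent_block.ConsumeTile(sync_after, block_offset, num_valid);
if (sync_after)
__syncthreads();
block_offset += tile_items;
}
persistent_block.Finalize(0);
}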
/**
* Specialized for GRID_HISTO_256_SHARED_ATOMIC
*/
template <
typename PersistentBlockHisto256Policy, ///< Tuning policy
int CHANNELS, ///< Number of channels interleaved in the input data (may be greater than the number of active channels being histogrammed)
int ACTIVE_CHANNELS, ///< Number of channels actively being histogrammed
typename InputIteratorRA, ///< The input iterator type (may be a simple pointer type). Must have a value type that is assignable to <tt>unsigned char</tt>
typename HistoCounter, ///< Integral type for counting sample occurrences per histogram bin
typename SizeT> ///< Integer type for offsets
struct PersistentBlockHisto256<PersistentBlockHisto256Policy, CHANNELS, ACTIVE_CHANNELS, InputIteratorRA, HistoCounter, SizeT, GRID_HISTO_256_SHARED_ATOMIC>
{
//---------------------------------------------------------------------
// Types and constants
//---------------------------------------------------------------------
// Constants
enum
{
BLOCK_THREADS = PersistentBlockHisto256Policy::BLOCK_THREADS,
ITEMS_PER_THREAD = PersistentBlockHisto256Policy::ITEMS_PER_THREAD,
TILE_CHANNEL_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD,
TILE_ITEMS = TILE_CHANNEL_ITEMS * CHANNELS,
};
// Shared memory type required by this thread block
struct SmemStorage
{
HistoCounter histograms[ACTIVE_CHANNELS][256];
};
//---------------------------------------------------------------------
// Per-thread fields
//---------------------------------------------------------------------
/// Reference to smem_storage
SmemStorage &smem_storage;
/// Reference to output histograms
HistoCounter* (&d_out_histograms)[ACTIVE_CHANNELS];
/// Input data to reduce
InputIteratorRA d_in;
//---------------------------------------------------------------------
// Interface
//---------------------------------------------------------------------
/**
* Constructor
*/
__device__ __forceinline__ PersistentBlockHisto256(
SmemStorage &smem_storage, ///< Reference to smem_storage
InputIteratorRA d_in, ///< Input data to reduce
HistoCounter* (&d_out_histograms)[ACTIVE_CHANNELS]) : ///< Reference to output histograms
smem_storage(smem_storage),
d_in(d_in),
d_out_histograms(d_out_histograms)
{
// Initialize histogram bin counts to zeros
#pragma unroll
for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL)
{
int histo_offset = 0;
#pragma unroll
for(; histo_offset + BLOCK_THREADS <= 256; histo_offset += BLOCK_THREADS)
{
smem_storage.histograms[CHANNEL][histo_offset + threadIdx.x] = 0;
}
// Finish up with guarded initialization if necessary
if ((histo_offset < BLOCK_THREADS) && (histo_offset + threadIdx.x < 256))
{
smem_storage.histograms[CHANNEL][histo_offset + threadIdx.x] = 0;
}
}
}
/**
* The number of items processed per "tile"
*/
__device__ __forceinline__ int TileItems()
{
return TILE_ITEMS;
}
/**
* Process a single tile.
*/
__device__ __forceinline__ void ConsumeTile(
bool &sync_after,
SizeT block_offset,
int num_valid)
{
if (num_valid < TILE_ITEMS)
{
// Only a partially-full tile of samples to read and composite
int bounds = num_valid - (threadIdx.x * CHANNELS);
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
#pragma unroll
for (int CHANNEL = 0; CHANNEL < CHANNELS; ++CHANNEL)
{
if (((ACTIVE_CHANNELS == CHANNELS) || (CHANNEL < ACTIVE_CHANNELS)) && ((ITEM * BLOCK_THREADS * CHANNELS) + CHANNEL < bounds))
{
unsigned char item = d_in[block_offset + (ITEM * BLOCK_THREADS * CHANNELS) + (threadIdx.x * CHANNELS) + CHANNEL];
atomicAdd(smem_storage.histograms[CHANNEL] + item, 1);
}
}
}
}
else
{
// Full tile of samples to read and composite
unsigned char items[ITEMS_PER_THREAD][CHANNELS];
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
#pragma unroll
for (int CHANNEL = 0; CHANNEL < CHANNELS; ++CHANNEL)
{
if (CHANNEL < ACTIVE_CHANNELS)
{
items[ITEM][CHANNEL] = d_in[block_offset + (ITEM * BLOCK_THREADS * CHANNELS) + (threadIdx.x * CHANNELS) + CHANNEL];
}
}
}
__threadfence_block();
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
#pragma unroll
for (int CHANNEL = 0; CHANNEL < CHANNELS; ++CHANNEL)
{
if (CHANNEL < ACTIVE_CHANNELS)
{
atomicAdd(smem_storage.histograms[CHANNEL] + items[ITEM][CHANNEL], 1);
}
}
}
}
// No need to sync after processing this tile to ensure smem coherence
sync_after = false;
}
/**
* Finalize the computation.
*/
__device__ __forceinline__ void Finalize(
int dummy_result)
{
// Barrier to ensure shared memory histograms are coherent
__syncthreads();
// Copy shared memory histograms to output
#pragma unroll
for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL)
{
int channel_offset = (blockIdx.x * 256);
int histo_offset = 0;
#pragma unroll
for(; histo_offset + BLOCK_THREADS <= 256; histo_offset += BLOCK_THREADS)
{
d_out_histograms[CHANNEL][channel_offset + histo_offset + threadIdx.x] = smem_storage.histograms[CHANNEL][histo_offset + threadIdx.x];
}
// Finish up with a guarded copy if necessary
if ((histo_offset < BLOCK_THREADS) && (histo_offset + threadIdx.x < 256))
{
d_out_histograms[CHANNEL][channel_offset + histo_offset + threadIdx.x] = smem_storage.histograms[CHANNEL][histo_offset + threadIdx.x];
}
}
}
};
/**
* Specialized for GRID_HISTO_256_SORT
*/
template <
typename PersistentBlockHisto256Policy, ///< Tuning policy
int CHANNELS, ///< Number of channels interleaved in the input data (may be greater than the number of active channels being histogrammed)
int ACTIVE_CHANNELS, ///< Number of channels actively being histogrammed
typename InputIteratorRA, ///< The input iterator type (may be a simple pointer type). Must have a value type that is assignable to <tt>unsigned char</tt>
typename HistoCounter, ///< Integral type for counting sample occurrences per histogram bin
typename SizeT> ///< Integer type for offsets
struct PersistentBlockHisto256<PersistentBlockHisto256Policy, CHANNELS, ACTIVE_CHANNELS, InputIteratorRA, HistoCounter, SizeT, GRID_HISTO_256_SORT>
{
//---------------------------------------------------------------------
// Types and constants
//---------------------------------------------------------------------
// Constants
enum
{
BLOCK_THREADS = PersistentBlockHisto256Policy::BLOCK_THREADS,
ITEMS_PER_THREAD = PersistentBlockHisto256Policy::ITEMS_PER_THREAD,
TILE_CHANNEL_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD,
TILE_ITEMS = TILE_CHANNEL_ITEMS * CHANNELS,
STRIPED_COUNTERS_PER_THREAD = (256 + BLOCK_THREADS - 1) / BLOCK_THREADS,
};
// Parameterize BlockRadixSort type for our thread block
typedef BlockRadixSort<unsigned char, BLOCK_THREADS, ITEMS_PER_THREAD> BlockRadixSortT;
// Parameterize BlockDiscontinuity type for our thread block
typedef BlockDiscontinuity<unsigned char, BLOCK_THREADS> BlockDiscontinuityT;
// Shared memory type required by this thread block
union SmemStorage
{
// Storage for sorting bin values
typename BlockRadixSortT::SmemStorage sort_storage;
struct
{
// Storage for detecting discontinuities in the tile of sorted bin values
typename BlockDiscontinuityT::SmemStorage discont_storage;
// Storage for noting begin/end offsets of bin runs in the tile of sorted bin values
unsigned int run_begin[BLOCK_THREADS * STRIPED_COUNTERS_PER_THREAD];
unsigned int run_end[BLOCK_THREADS * STRIPED_COUNTERS_PER_THREAD];
};
};
// Discontinuity functor
struct DiscontinuityOp
{
// Reference to smem_storage
SmemStorage &smem_storage;
// Constructor
__device__ __forceinline__ DiscontinuityOp(SmemStorage &smem_storage) : smem_storage(smem_storage) {}
// Discontinuity predicate
__device__ __forceinline__ bool operator()(const unsigned char &a, const unsigned char &b, unsigned int b_index)
{
if (a != b)
{
// Note the begin/end offsets in shared storage
smem_storage.run_begin[b] = b_index;
smem_storage.run_end[a] = b_index;
return true;
}
else
{
return false;
}
}
};
//---------------------------------------------------------------------
// Per-thread fields
//---------------------------------------------------------------------
/// Reference to smem_storage
SmemStorage &smem_storage;
/// Histogram counters striped across threads
HistoCounter thread_counters[ACTIVE_CHANNELS][STRIPED_COUNTERS_PER_THREAD];
/// Reference to output histograms
HistoCounter* (&d_out_histograms)[ACTIVE_CHANNELS];
/// Input data to reduce
InputIteratorRA d_in;
//---------------------------------------------------------------------
// Interface
//---------------------------------------------------------------------
/**
* Constructor
*/
__device__ __forceinline__ PersistentBlockHisto256(
SmemStorage &smem_storage, ///< Reference to smem_storage
InputIteratorRA d_in, ///< Input data to reduce
HistoCounter* (&d_out_histograms)[ACTIVE_CHANNELS]) : ///< Reference to output histograms
smem_storage(smem_storage),
d_in(d_in),
d_out_histograms(d_out_histograms)
{
// Initialize histogram counters striped across threads
#pragma unroll
for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL)
{
#pragma unroll
for (int COUNTER = 0; COUNTER < STRIPED_COUNTERS_PER_THREAD; ++COUNTER)
{
thread_counters[CHANNEL][COUNTER] = 0;
}
}
}
/**
* The number of items processed per "tile"
*/
__device__ __forceinline__ int TileItems()
{
return TILE_ITEMS;
}
/**
* Composite a tile of input items
*/
__device__ __forceinline__ void Composite(
unsigned char (&items)[ITEMS_PER_THREAD], ///< Tile of samples
HistoCounter thread_counters[STRIPED_COUNTERS_PER_THREAD]) ///< Histogram counters striped across threads
{
// Sort bytes in blocked arrangement
BlockRadixSortT::SortBlocked(smem_storage.sort_storage, items);
__syncthreads();
// Initialize the shared memory's run_begin and run_end for each bin
#pragma unroll
for (int COUNTER = 0; COUNTER < STRIPED_COUNTERS_PER_THREAD; ++COUNTER)
{
smem_storage.run_begin[(COUNTER * BLOCK_THREADS) + threadIdx.x] = TILE_CHANNEL_ITEMS;
smem_storage.run_end[(COUNTER * BLOCK_THREADS) + threadIdx.x] = TILE_CHANNEL_ITEMS;
}
__syncthreads();
// Note the begin/end run offsets of bin runs in the sorted tile
int flags[ITEMS_PER_THREAD]; // unused
DiscontinuityOp flag_op(smem_storage);
BlockDiscontinuityT::Flag(smem_storage.discont_storage, items, flag_op, flags);
// Update begin for first item
if (threadIdx.x == 0) smem_storage.run_begin[items[0]] = 0;
__syncthreads();
// Composite into histogram
        // Accumulate each bin's run length (run_end - run_begin) into this thread's striped counters
#pragma unroll
for (int COUNTER = 0; COUNTER < STRIPED_COUNTERS_PER_THREAD; ++COUNTER)
{
int bin = (COUNTER * BLOCK_THREADS) + threadIdx.x;
thread_counters[COUNTER] += smem_storage.run_end[bin] - smem_storage.run_begin[bin];
}
}
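        // Illustration (added note, not part of the original CUB source): with a
        // sorted tile of TILE_CHANNEL_ITEMS = 8 samples, say [0, 0, 0, 2, 2, 5, 5, 5],
        // the discontinuity pass records run_begin[0] = 0, run_end[0] = 3,
        // run_begin[2] = 3, run_end[2] = 5, run_begin[5] = 5, while run_end[5] keeps
        // its initial value of 8 (TILE_CHANNEL_ITEMS).  Every other bin keeps both
        // values at 8, so run_end[bin] - run_begin[bin] yields counts 3, 2, 3 for
        // bins 0, 2, 5 and 0 elsewhere, which is exactly what the final loop in
        // Composite() adds into the striped thread counters.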
/**
* Process one channel within a tile.
*/
__device__ __forceinline__ void ConsumeTileChannel(
int channel,
SizeT block_offset,
int num_valid)
{
// Load items in striped fashion
if (num_valid < TILE_ITEMS)
{
// Only a partially-full tile of samples to read and composite
unsigned char items[ITEMS_PER_THREAD];
            // Out-of-bounds items are assigned bin 0; the resulting overcount in the zero-bin is tracked and subtracted out after compositing
int bounds = (num_valid - (threadIdx.x * CHANNELS));
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
items[ITEM] = ((ITEM * BLOCK_THREADS * CHANNELS) < bounds) ?
d_in[channel + block_offset + (ITEM * BLOCK_THREADS * CHANNELS) + (threadIdx.x * CHANNELS)] :
0;
}
// Composite our histogram data
Composite(items, thread_counters[channel]);
__syncthreads();
// Correct the overcounting in the zero-bin from invalid (out-of-bounds) items
if (threadIdx.x == 0)
{
int extra = (TILE_ITEMS - num_valid) / CHANNELS;
thread_counters[channel][0] -= extra;
}
}
else
{
// Full tile of samples to read and composite
unsigned char items[ITEMS_PER_THREAD];
// Unguarded loads
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
{
items[ITEM] = d_in[channel + block_offset + (ITEM * BLOCK_THREADS * CHANNELS) + (threadIdx.x * CHANNELS)];
}
// Composite our histogram data
Composite(items, thread_counters[channel]);
}
}
/**
* Template iteration over channels (to silence not-unrolled warnings for SM10-13). Inductive step.
*/
template <int CHANNEL, int END>
struct IterateChannels
{
/**
* Process one channel within a tile.
*/
static __device__ __forceinline__ void ConsumeTileChannel(
PersistentBlockHisto256 *persistent_block_histo,
SizeT block_offset,
int num_valid)
{
__syncthreads();
persistent_block_histo->ConsumeTileChannel(CHANNEL, block_offset, num_valid);
IterateChannels<CHANNEL + 1, END>::ConsumeTileChannel(persistent_block_histo, block_offset, num_valid);
}
};
/**
* Template iteration over channels (to silence not-unrolled warnings for SM10-13). Base step.
*/
template <int END>
struct IterateChannels<END, END>
{
static __device__ __forceinline__ void ConsumeTileChannel(PersistentBlockHisto256 *persistent_block_histo, SizeT block_offset, int num_valid) {}
};
/**
* Process a single tile.
*
* We take several passes through the tile in this variant, extracting the samples for one channel at a time
*/
__device__ __forceinline__ void ConsumeTile(
bool &sync_after,
SizeT block_offset,
int num_valid)
{
// First channel
ConsumeTileChannel(0, block_offset, num_valid);
// Iterate through remaining channels
IterateChannels<1, ACTIVE_CHANNELS>::ConsumeTileChannel(this, block_offset, num_valid);
// Need to sync after processing this tile to ensure smem coherence
sync_after = true;
}
/**
* Finalize the computation.
*/
__device__ __forceinline__ void Finalize(
int dummy_result)
{
// Copy counters striped across threads into the histogram output
#pragma unroll
for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL)
{
int channel_offset = (blockIdx.x * 256);
#pragma unroll
for (int COUNTER = 0; COUNTER < STRIPED_COUNTERS_PER_THREAD; ++COUNTER)
{
int bin = (COUNTER * BLOCK_THREADS) + threadIdx.x;
if ((STRIPED_COUNTERS_PER_THREAD * BLOCK_THREADS == 256) || (bin < 256))
{
d_out_histograms[CHANNEL][channel_offset + bin] = thread_counters[CHANNEL][COUNTER];
}
}
}
}
};
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
#include "EventFilter/HcalRawToDigi/plugins/DecodeGPU.h"
#include <cooperative_groups.h>
using namespace cooperative_groups;
namespace hcal {
namespace raw {
__forceinline__ __device__ char const* get_subdet_str(DetId const& did) {
switch (did.subdetId()) {
case HcalEmpty:
return "HcalEmpty";
break;
case HcalBarrel:
return "HcalBarrel";
break;
case HcalEndcap:
return "HcalEndcap";
break;
case HcalOuter:
return "HcalOuter";
break;
case HcalForward:
return "HcalForward";
break;
case HcalTriggerTower:
return "HcalTriggerTower";
break;
case HcalOther:
return "HcalOther";
break;
default:
return "Unknown";
break;
}
return "Unknown";
}
__forceinline__ __device__ bool is_channel_header_word(uint16_t const* ptr) {
uint8_t bit = (*ptr >> 15) & 0x1;
return bit == 1;
}
template <typename T>
constexpr bool is_power_of_two(T x) {
return (x != 0) && ((x & (x - 1)) == 0);
}
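// Added illustrative compile-time checks (not in the original file): the decoding
// kernel below partitions each block into NTHREADS-wide cooperative-group tiles,
// which requires NTHREADS to be a power of two no larger than a warp.
static_assert(is_power_of_two(32) && is_power_of_two(8), "powers of two pass");
static_assert(!is_power_of_two(12) && !is_power_of_two(0), "non-powers of two fail");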
template <int NTHREADS>
__global__ void kernel_rawdecode_test(unsigned char const* data,
uint32_t const* offsets,
int const* feds,
uint32_t const* eid2did,
uint32_t const* eid2tid,
uint16_t* digisF01HE,
uint32_t* idsF01HE,
uint16_t* digisF5HB,
uint32_t* idsF5HB,
uint8_t* npresamplesF5HB,
uint16_t* digisF3HB,
uint32_t* idsF3HB,
uint32_t* pChannelsCounters,
uint32_t const nsamplesF01HE,
uint32_t const nsamplesF5HB,
uint32_t const nsamplesF3HB,
uint32_t const nBytesTotal) {
// in order to properly use cooperative groups
static_assert(is_power_of_two(NTHREADS) == true && NTHREADS <= 32);
thread_block_tile<NTHREADS> thread_group = tiled_partition<NTHREADS>(this_thread_block());
auto const iamc = threadIdx.x / NTHREADS;
auto const ifed = blockIdx.x;
auto const offset = offsets[ifed];
#ifdef HCAL_RAWDECODE_GPUDEBUG_CG
if (ifed > 0 || iamc > 0)
return;
printf("threadIdx.x = %d rank = %d iamc = %d\n", threadIdx.x, thread_group.thread_rank(), iamc);
#endif
#ifdef HCAL_RAWDECODE_GPUDEBUG
auto const fed = feds[ifed];
auto const size = ifed == gridDim.x - 1 ? nBytesTotal - offset : offsets[ifed + 1] - offset;
printf("ifed = %d fed = %d offset = %u size = %u\n", ifed, fed, offset, size);
#endif
// offset to the right raw buffer
uint64_t const* buffer = reinterpret_cast<uint64_t const*>(data + offset);
#ifdef HCAL_RAWDECODE_GPUDEBUG
//
// fed header
//
auto const fed_header = buffer[0];
uint32_t const fed_id = (fed_header >> 8) & 0xfff;
uint32_t const bx = (fed_header >> 20) & 0xfff;
uint32_t const lv1 = (fed_header >> 32) & 0xffffff;
uint8_t const trigger_type = (fed_header >> 56) & 0xf;
uint8_t const bid_fed_header = (fed_header >> 60) & 0xf;
printf("fed = %d fed_id = %u bx = %u lv1 = %u trigger_type = %u bid = %u\n",
fed,
fed_id,
bx,
lv1,
trigger_type,
bid_fed_header);
#endif
// amc 13 header
auto const amc13word = buffer[1];
uint8_t const namc = (amc13word >> 52) & 0xf;
if (iamc >= namc)
return;
#ifdef HCAL_RAWDECODE_GPUDEBUG
uint8_t const amc13version = (amc13word >> 60) & 0xf;
uint32_t const amc13OrbitNumber = (amc13word >> 4) & 0xffffffffu;
printf("fed = %d namc = %u amc13version = %u amc13OrbitNumber = %u\n", fed, namc, amc13version, amc13OrbitNumber);
#endif
  // compute the offset (in 64-bit words) to this AMC's payload
uint32_t amcoffset = 0;
for (uint8_t ii = 0u; ii < iamc; ii++) {
auto const word = buffer[2 + ii];
int const amcSize = (word >> 32) & 0xffffff;
amcoffset += amcSize;
}
auto const word = buffer[2 + iamc];
int const amcSize = (word >> 32) & 0xffffff;
#ifdef HCAL_RAWDECODE_GPUDEBUG
uint16_t const amcid = word & 0xffff;
int const slot = (word >> 16) & 0xf;
int const amcBlockNumber = (word >> 20) & 0xff;
printf("fed = %d amcid = %u slot = %d amcBlockNumber = %d\n", fed, amcid, slot, amcBlockNumber);
bool const amcmore = ((word >> 61) & 0x1) != 0;
bool const amcSegmented = ((word >> 60) & 0x1) != 0;
bool const amcLengthOk = ((word >> 62) & 0x1) != 0;
bool const amcCROk = ((word >> 56) & 0x1) != 0;
bool const amcDataPresent = ((word >> 58) & 0x1) != 0;
bool const amcDataValid = ((word >> 56) & 0x1) != 0;
bool const amcEnabled = ((word >> 59) & 0x1) != 0;
printf(
"fed = %d amcmore = %d amcSegmented = %d, amcLengthOk = %d amcCROk = %d\n>> amcDataPresent = %d amcDataValid "
"= %d amcEnabled = %d\n",
fed,
static_cast<int>(amcmore),
static_cast<int>(amcSegmented),
static_cast<int>(amcLengthOk),
static_cast<int>(amcCROk),
static_cast<int>(amcDataPresent),
static_cast<int>(amcDataValid),
static_cast<int>(amcEnabled));
#endif
// get to the payload
auto const* payload64 = buffer + 2 + namc + amcoffset;
#ifdef HCAL_RAWDECODE_GPUDEBUG
// uhtr header v1 1st 64 bits
auto const payload64_w0 = payload64[0];
#endif
// uhtr n bytes comes from amcSize, according to the cpu version!
uint32_t const data_length64 = amcSize;
#ifdef HCAL_RAWDECODE_GPUDEBUG
uint16_t bcn = (payload64_w0 >> 20) & 0xfff;
uint32_t evn = (payload64_w0 >> 32) & 0xffffff;
printf("fed = %d data_length64 = %u bcn = %u evn = %u\n", fed, data_length64, bcn, evn);
#endif
// uhtr header v1 2nd 64 bits
auto const payload64_w1 = payload64[1];
uint8_t const uhtrcrate = payload64_w1 & 0xff;
uint8_t const uhtrslot = (payload64_w1 >> 8) & 0xf;
uint8_t const presamples = (payload64_w1 >> 12) & 0xf;
uint8_t const payloadFormat = (payload64_w1 >> 44) & 0xf;
#ifdef HCAL_RAWDECODE_GPUDEBUG
uint16_t const orbitN = (payload64_w1 >> 16) & 0xffff;
uint8_t const firmFlavor = (payload64_w1 >> 32) & 0xff;
uint8_t const eventType = (payload64_w1 >> 40) & 0xf;
printf(
"fed = %d crate = %u slot = %u presamples = %u\n>>> orbitN = %u firmFlavor = %u eventType = %u payloadFormat "
"= %u\n",
fed,
uhtrcrate,
uhtrslot,
presamples,
orbitN,
firmFlavor,
eventType,
payloadFormat);
#endif
// this should be filtering out uMNio...
if (payloadFormat != 1)
return;
// skip uhtr header words
  auto const channelDataSize = data_length64 - 2;        // 2 uhtr header v1 words
  auto const* channelDataBuffer64Start = payload64 + 2;  // 2 uhtr header v1 words
auto const* ptr = reinterpret_cast<uint16_t const*>(channelDataBuffer64Start);
auto const* end = ptr + sizeof(uint64_t) / sizeof(uint16_t) * (channelDataSize - 1);
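  // Added note: ptr iterates over 16-bit payload words.  data_length64 counts the
  // 64-bit words of this uhtr block; two of them are the header, hence
  // channelDataSize = data_length64 - 2, and the end marker stops one 64-bit word
  // short of the block end (presumably the uhtr trailer), i.e.
  // (channelDataSize - 1) * sizeof(uint64_t) / sizeof(uint16_t)
  //   = 4 * (channelDataSize - 1) half-words past channelDataBuffer64Start.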
auto const t_rank = thread_group.thread_rank();
// iterate through the channel data
while (ptr != end) {
// this is the starting point for this thread group for this iteration
// with respect to this pointer every thread will move forward afterwards
auto const* const start_ptr = ptr;
#ifdef HCAL_RAWDECODE_GPUDEBUG_CG
thread_group.sync();
#endif
// skip to the header word of the right channel for this thread
int counter = 0;
while (counter < thread_group.thread_rank()) {
// just a check for threads that land beyond the end
if (ptr == end)
break;
// move ptr one forward past header
if (is_channel_header_word(ptr))
++ptr;
else {
// go to the next channel and do not consider this guy as a channel
while (ptr != end)
if (!is_channel_header_word(ptr))
++ptr;
else
break;
continue;
}
// skip
while (ptr != end)
if (!is_channel_header_word(ptr))
++ptr;
else
break;
counter++;
}
#ifdef HCAL_RAWDECODE_GPUDEBUG_CG
thread_group.sync();
printf("ptr - start_ptr = %d counter = %d rank = %d\n", static_cast<int>(ptr - start_ptr), counter, t_rank);
#endif
// when the end is near, channels will land outside of the [start_ptr, end) region
if (ptr != end) {
// for all of the flavors, these 2 guys have the same bit layout
uint8_t const flavor = (ptr[0] >> 12) & 0x7;
uint8_t const channelid = ptr[0] & 0xff;
auto const* const new_channel_start = ptr;
// flavor dependent stuff
switch (flavor) {
case 0:
case 1: {
// treat eid and did
uint8_t fiber = (channelid >> 3) & 0x1f;
uint8_t fchannel = channelid & 0x7;
HcalElectronicsId eid{uhtrcrate, uhtrslot, fiber, fchannel, false};
auto const did = HcalDetId{eid2did[eid.linearIndex()]};
#ifdef HCAL_RAWDECODE_GPUDEBUG
printf("erawId = %u linearIndex = %u drawid = %u subdet = %s\n",
eid.rawId(),
eid.linearIndex(),
did.rawId(),
get_subdet_str(did));
printf("flavor = %u crate = %u slot = %u channelid = %u fiber = %u fchannel = %u\n",
flavor,
uhtrcrate,
uhtrslot,
channelid,
fiber,
fchannel);
#endif
// remove digis not for HE
if (did.subdetId() != HcalEndcap)
break;
// count words
auto const* channel_header_word = ptr++;
while (!is_channel_header_word(ptr) && ptr != end)
++ptr;
          auto const* channel_end = ptr;  // ptr now points one past this channel's words
uint32_t const nwords = channel_end - channel_header_word;
// filter out this digi if nwords does not equal expected
auto const expected_words = compute_stride<Flavor1>(nsamplesF01HE);
if (nwords != expected_words)
break;
// inc the number of digis of this type
auto const pos = atomicAdd(&pChannelsCounters[OutputF01HE], 1);
#ifdef HCAL_RAWDECODE_GPUDEBUG_CG
printf("rank = %d pos = %d\n", thread_group.thread_rank(), pos);
#endif
// store to global mem words for this digi
idsF01HE[pos] = did.rawId();
for (uint32_t iword = 0; iword < expected_words; iword++)
digisF01HE[pos * expected_words + iword] = channel_header_word[iword];
#ifdef HCAL_RAWDECODE_GPUDEBUG
printf("nwords = %u\n", nwords);
#endif
break;
}
case 3: {
// treat eid and did
uint8_t fiber = (channelid >> 3) & 0x1f;
uint8_t fchannel = channelid & 0x7;
HcalElectronicsId eid{uhtrcrate, uhtrslot, fiber, fchannel, false};
auto const did = HcalDetId{eid2did[eid.linearIndex()]};
#ifdef HCAL_RAWDECODE_GPUDEBUG
printf("erawId = %u linearIndex = %u drawid = %u subdet = %s\n",
eid.rawId(),
eid.linearIndex(),
did.rawId(),
get_subdet_str(did));
printf("flavor = %u crate = %u slot = %u channelid = %u fiber = %u fchannel = %u\n",
flavor,
uhtrcrate,
uhtrslot,
channelid,
fiber,
fchannel);
#endif
          // remove digis not for HB
if (did.subdetId() != HcalBarrel)
break;
// count words
auto const* channel_header_word = ptr++;
while (!is_channel_header_word(ptr) && ptr != end)
++ptr;
          auto const* channel_end = ptr;  // ptr now points one past this channel's words
uint32_t const nwords = channel_end - channel_header_word;
// filter out this digi if nwords does not equal expected
auto const expected_words = compute_stride<Flavor3>(nsamplesF3HB);
if (nwords != expected_words)
break;
// inc the number of digis of this type
auto const pos = atomicAdd(&pChannelsCounters[OutputF3HB], 1);
// store to global mem words for this digi
idsF3HB[pos] = did.rawId();
for (uint32_t iword = 0; iword < expected_words; iword++)
digisF3HB[pos * expected_words + iword] = channel_header_word[iword];
#ifdef HCAL_RAWDECODE_GPUDEBUG
printf("nwords = %u\n", nwords);
#endif
break;
}
case 2: {
uint8_t fiber = (channelid >> 3) & 0x1f;
uint8_t fchannel = channelid & 0x7;
HcalElectronicsId eid{uhtrcrate, uhtrslot, fiber, fchannel, false};
auto const did = DetId{eid2did[eid.linearIndex()]};
#ifdef HCAL_RAWDECODE_GPUDEBUG
printf("erawId = %u linearIndex = %u drawid = %u subdet = %s\n",
eid.rawId(),
eid.linearIndex(),
did.rawId(),
get_subdet_str(did));
printf("flavor = %u crate = %u slot = %u channelid = %u fiber = %u fchannel = %u\n",
flavor,
uhtrcrate,
uhtrslot,
channelid,
fiber,
fchannel);
#endif
break;
}
case 4: {
uint8_t link = (channelid >> 4) & 0xf;
uint8_t tower = channelid & 0xf;
HcalElectronicsId eid{uhtrcrate, uhtrslot, link, tower, true};
auto const did = DetId{eid2tid[eid.linearIndex()]};
#ifdef HCAL_RAWDECODE_GPUDEBUG
printf("erawId = %u linearIndex = %u drawid = %u subdet = %s\n",
eid.rawId(),
eid.linearIndex(),
did.rawId(),
get_subdet_str(did));
printf("flavor = %u crate = %u slot = %u channelid = %u link = %u tower = %u\n",
flavor,
uhtrcrate,
uhtrslot,
channelid,
link,
tower);
#endif
break;
}
case 5: {
uint8_t fiber = (channelid >> 2) & 0x3f;
uint8_t fchannel = channelid & 0x3;
HcalElectronicsId eid{uhtrcrate, uhtrslot, fiber, fchannel, false};
auto const did = HcalDetId{eid2did[eid.linearIndex()]};
#ifdef HCAL_RAWDECODE_GPUDEBUG
printf("erawId = %u linearIndex = %u drawid = %u subdet = %s\n",
eid.rawId(),
eid.linearIndex(),
did.rawId(),
get_subdet_str(did));
printf("flavor = %u crate = %u slot = %u channelid = %u fiber = %u fchannel = %u\n",
flavor,
uhtrcrate,
uhtrslot,
channelid,
fiber,
fchannel);
#endif
// remove digis not for HB
if (did.subdetId() != HcalBarrel)
break;
// count words
auto const* channel_header_word = ptr++;
while (!is_channel_header_word(ptr) && ptr != end)
++ptr;
          auto const* channel_end = ptr;  // ptr now points one past this channel's words
uint32_t const nwords = channel_end - channel_header_word;
// filter out this digi if nwords does not equal expected
auto const expected_words = compute_stride<Flavor5>(nsamplesF5HB);
if (nwords != expected_words)
break;
// inc the number of digis of this type
auto const pos = atomicAdd(&pChannelsCounters[OutputF5HB], 1);
#ifdef HCAL_RAWDECODE_GPUDEBUG_CG
printf("rank = %d pos = %d\n", thread_group.thread_rank(), pos);
#endif
// store to global mem words for this digi
idsF5HB[pos] = did.rawId();
npresamplesF5HB[pos] = presamples;
for (uint32_t iword = 0; iword < expected_words; iword++)
digisF5HB[pos * expected_words + iword] = channel_header_word[iword];
break;
}
case 7: {
uint8_t const fiber = (channelid >> 2) & 0x3f;
uint8_t const fchannel = channelid & 0x3;
HcalElectronicsId eid{uhtrcrate, uhtrslot, fiber, fchannel, false};
auto const did = DetId{eid2did[eid.linearIndex()]};
/* uncomment to check the linear index validity
if (eid.rawId() >= HcalElectronicsId::maxLinearIndex) {
#ifdef HCAL_RAWDECODE_GPUDEBUG
printf("*** rawid = %u has no known det id***\n", eid.rawId());
#endif
break;
}
*/
#ifdef HCAL_RAWDECODE_GPUDEBUG
printf("erawId = %u linearIndex = %u drawid = %u\n", eid.rawId(), eid.linearIndex(), did.rawId());
printf("flavor = %u crate = %u slot = %u channelid = %u fiber = %u fchannel = %u\n",
flavor,
uhtrcrate,
uhtrslot,
channelid,
fiber,
fchannel);
#endif
break;
}
default:
#ifdef HCAL_RAWDECODE_GPUDEBUG
printf("flavor = %u crate = %u slot = %u channelid = %u\n", flavor, uhtrcrate, uhtrslot, channelid);
#endif
;
}
// skip to the next word in case
// 1) current channel was not treated
// 2) we are in the middle of the digi words and not at the end
      while (new_channel_start == ptr || (ptr != end && !is_channel_header_word(ptr)))
++ptr;
}
// thread with rank 31 of the group will have the ptr pointing to the
// header word of the next channel or the end
int const offset_to_shuffle = ptr - start_ptr;
// always receive from the last guy in the group
auto const offset_for_rank31 = thread_group.shfl(offset_to_shuffle, NTHREADS - 1);
#ifdef HCAL_RAWDECODE_GPUDEBUG_CG
printf("rank = %d offset_to_shuffle = %d offset_for_rank32 = %d\n",
thread_group.thread_rank(),
offset_to_shuffle,
offset_for_rank31);
#endif
// update the ptr for all threads of this group
// NOTE: relative to the start_ptr that is the same for all threads of
// this group
ptr = start_ptr + offset_for_rank31;
}
}
void entryPoint(InputDataCPU const& inputCPU,
InputDataGPU& inputGPU,
OutputDataGPU& outputGPU,
ScratchDataGPU& scratchGPU,
OutputDataCPU& outputCPU,
ConditionsProducts const& conditions,
ConfigurationParameters const& config,
cudaStream_t cudaStream,
uint32_t const nfedsWithData,
uint32_t const nbytesTotal) {
// transfer
cudaCheck(cudaMemcpyAsync(inputGPU.data.get(),
inputCPU.data.get(),
nbytesTotal * sizeof(unsigned char),
cudaMemcpyHostToDevice,
cudaStream));
cudaCheck(cudaMemcpyAsync(inputGPU.offsets.get(),
inputCPU.offsets.get(),
nfedsWithData * sizeof(uint32_t),
cudaMemcpyHostToDevice,
cudaStream));
cudaCheck(
cudaMemsetAsync(scratchGPU.pChannelsCounters.get(), 0, sizeof(uint32_t) * numOutputCollections, cudaStream));
cudaCheck(cudaMemcpyAsync(
inputGPU.feds.get(), inputCPU.feds.get(), nfedsWithData * sizeof(int), cudaMemcpyHostToDevice, cudaStream));
// 12 is the max number of modules per crate
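  // Added note on the launch geometry (derived from the call below): one block per
  // FED with data (gridDim.x = nfedsWithData) and 12 * 32 threads per block, so each
  // 32-thread tile services one AMC slot (iamc = threadIdx.x / 32, matching the
  // NTHREADS = 32 template argument) and up to 12 slots are decoded concurrently.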
kernel_rawdecode_test<32><<<nfedsWithData, 12 * 32, 0, cudaStream>>>(inputGPU.data.get(),
inputGPU.offsets.get(),
inputGPU.feds.get(),
conditions.eMappingProduct.eid2did,
conditions.eMappingProduct.eid2tid,
outputGPU.digisF01HE.data.get(),
outputGPU.digisF01HE.ids.get(),
outputGPU.digisF5HB.data.get(),
outputGPU.digisF5HB.ids.get(),
outputGPU.digisF5HB.npresamples.get(),
outputGPU.digisF3HB.data.get(),
outputGPU.digisF3HB.ids.get(),
scratchGPU.pChannelsCounters.get(),
config.nsamplesF01HE,
config.nsamplesF5HB,
config.nsamplesF3HB,
nbytesTotal);
cudaCheck(cudaGetLastError());
cudaCheck(cudaMemcpyAsync(outputCPU.nchannels.get(),
scratchGPU.pChannelsCounters.get(),
sizeof(uint32_t) * numOutputCollections,
cudaMemcpyDeviceToHost,
cudaStream));
}
} // namespace raw
} // namespace hcal
/*!
 * Copyright (c) 2015 by Contributors
* \file pad.cu
* \brief
* \author Sebastian Bodenstein
*/
#include <algorithm>
#include "./pad-inl.h"
#include "../common/cuda/utils.h"
namespace mshadow {
namespace cuda {
////////////////////////////////////////////////////////////////////////////////
// Special Case: 2d image (so only pad width + height)
// Case 1: Replication Padding
// single_image_2d_edge adapted from Torch
// https://github.com/torch/cunn/blob/master/lib/THCUNN/SpatialReplicationPadding.cu
template <int n_bits, typename DType>
__global__ void image_2d_pad_edge_kernel(Tensor<gpu, 4, DType> dst,
const Tensor<gpu, 4, DType> src,
const int padT, const int padL) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= dst.size(2) * dst.size(3)) {
return;
}
int outputPointX = outputPointId % dst.size(3);
int outputPointY = outputPointId / dst.size(3);
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int inputPointX =
min(max(padL, outputPointX), static_cast<int>(src.size(3)) + padL - 1) - oStartX + iStartX;
int inputPointY =
min(max(padT, outputPointY), static_cast<int>(src.size(2)) + padT - 1) - oStartY + iStartY;
DType valueToCopy = src[batch][plane][inputPointY][inputPointX];
dst[batch][plane][outputPointY][outputPointX] = valueToCopy;
}
template <typename DType>
inline void image_pad_edge(Tensor<gpu, 4, DType> dst,
const Tensor<gpu, 4, DType> &src,
const mxnet::TShape &pad) {
const int padT = pad[4];
const int padL = pad[6];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (dst.size(2) * dst.size(3) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, dst.size(1), dst.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_);
image_2d_pad_edge_kernel<kBaseThreadBits,
DType><<<dimGrid, dimBlock, 0, stream>>>(dst, src,
padT, padL);
MSHADOW_CUDA_POST_KERNEL_CHECK(image_2d_pad_edge_kernel);
}
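// Added note on the pad layout (as used throughout this file): for 4d NCHW input,
// `pad` stores before/after amounts per axis, i.e.
// (N_before, N_after, C_before, C_after, top, bottom, left, right), which is why the
// 2d kernels read pad[4] as padT and pad[6] as padL; the 5d (NCDHW) wrappers below
// likewise read pad[4] as padF, pad[6] as padT and pad[8] as padL.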
template <int n_bits, typename DType>
__global__ void image_2d_pad_edge_grad_kernel(
Tensor<gpu, 4, DType> grad_in, const Tensor<gpu, 4, DType> grad_out,
const int padT, const int padL) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= grad_out.size(2) * grad_out.size(3)) {
return;
}
int outputPointX = outputPointId % grad_out.size(3);
int outputPointY = outputPointId / grad_out.size(3);
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int inputPointX = min(max(padL, outputPointX), static_cast<int>(grad_in.size(3)) + padL - 1) -
oStartX + iStartX;
int inputPointY = min(max(padT, outputPointY), static_cast<int>(grad_in.size(2)) + padT - 1) -
oStartY + iStartY;
DType valueToCopy = grad_out[batch][plane][outputPointY][outputPointX];
atomicAdd(&grad_in[batch][plane][inputPointY][inputPointX], valueToCopy);
}
template <typename DType>
inline void image_pad_edge_grad(Tensor<gpu, 4, DType> grad_in,
const Tensor<gpu, 4, DType> &grad_out,
const mxnet::TShape &pad) {
const int padT = pad[4];
const int padL = pad[6];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (grad_out.size(2) * grad_out.size(3) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, grad_out.size(1), grad_out.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(grad_out.stream_);
image_2d_pad_edge_grad_kernel<kBaseThreadBits,
DType><<<dimGrid, dimBlock, 0, stream>>>(
grad_in, grad_out, padT, padL);
MSHADOW_CUDA_POST_KERNEL_CHECK(image_2d_pad_edge_grad_kernel);
}
// Case 2: Constant Padding
template <int n_bits, typename DType>
__global__ void image_2d_pad_constant_kernel(Tensor<gpu, 4, DType> dst,
const Tensor<gpu, 4, DType> src,
const int padT, const int padL,
const DType constant) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
if (outputPointId >= dst.size(2) * dst.size(3)) {
return;
}
// cast sizes to int to use in min/max
int Ny = src.size(2);
int Nx = src.size(3);
int plane = blockIdx.y;
int batch = blockIdx.z;
int outputPointX = outputPointId % dst.size(3);
int outputPointY = outputPointId / dst.size(3);
int checkT = max(0, outputPointY - padT + 1);
int checkB = max(0, padT + Ny - outputPointY);
int checkL = max(0, outputPointX - padL + 1);
int checkR = max(0, padL + Nx - outputPointX);
int inputPointX = min(max(outputPointX - padL, 0), Nx - 1);
int inputPointY = min(max(outputPointY - padT, 0), Ny - 1);
// 1 if need padding, 0 if not
int need_pad = !(checkT * checkB * checkL * checkR);
DType valueToCopy = src[batch][plane][inputPointY][inputPointX];
dst[batch][plane][outputPointY][outputPointX] =
valueToCopy * (!need_pad) + need_pad * constant;
}
template <typename DType>
inline void image_pad_constant(Tensor<gpu, 4, DType> dst,
const Tensor<gpu, 4, DType> &src,
const mxnet::TShape &pad, const DType constant) {
const int padT = pad[4];
const int padL = pad[6];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (dst.size(2) * dst.size(3) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, dst.size(1), dst.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_);
image_2d_pad_constant_kernel<kBaseThreadBits,
DType><<<dimGrid, dimBlock, 0, stream>>>(
dst, src, padT, padL, constant);
MSHADOW_CUDA_POST_KERNEL_CHECK(image_2d_pad_constant_kernel);
}
template <int n_bits, typename DType>
__global__ void image_2d_pad_constant_grad_kernel(
Tensor<gpu, 4, DType> grad_in, const Tensor<gpu, 4, DType> grad_out,
const int padT, const int padL) {
int inPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
int pixel_num = grad_in.size(2) * grad_in.size(3);
if (inPointId >= pixel_num) {
return;
}
int inPointX = inPointId % grad_in.size(3);
int inPointY = inPointId / grad_in.size(3);
int outPointX = inPointX + padL;
int outPointY = inPointY + padT;
grad_in[batch][plane][inPointY][inPointX] =
grad_out[batch][plane][outPointY][outPointX];
}
template <typename DType>
inline void image_pad_constant_grad(Tensor<gpu, 4, DType> grad_in,
const Tensor<gpu, 4, DType> &grad_out,
const mxnet::TShape &pad) {
const int padT = pad[4];
const int padL = pad[6];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (grad_in.size(2) * grad_in.size(3) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, grad_in.size(1), grad_in.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(grad_in.stream_);
image_2d_pad_constant_grad_kernel<kBaseThreadBits,
DType><<<dimGrid, dimBlock, 0, stream>>>(
grad_in, grad_out, padT, padL);
MSHADOW_CUDA_POST_KERNEL_CHECK(image_2d_pad_constant_grad_kernel);
}
// Case 3: Reflection Padding
// adapted from Torch
// https://github.com/torch/cunn/blob/master/lib/THCUNN/SpatialReflectionPadding.cu
template <int n_bits, typename DType>
__global__ void image_2d_pad_reflect_kernel(Tensor<gpu, 4, DType> dst,
const Tensor<gpu, 4, DType> src,
const int padT, const int padL) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= dst.size(2) * dst.size(3)) {
return;
}
int outputPointX = outputPointId % dst.size(3);
int outputPointY = outputPointId / dst.size(3);
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int inputPointX = __sad(outputPointX, padL, 0)
- __sad(outputPointX, src.size(3) + padL - 1, 0)
- outputPointX
+ 2 * padL + src.size(3) - 1
- oStartX + iStartX;
int inputPointY = __sad(outputPointY, padT, 0)
- __sad(outputPointY, src.size(2) + padT - 1, 0)
- outputPointY
+ 2 * padT + src.size(2) - 1
- oStartY + iStartY;
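  // Added note: __sad(a, b, 0) == |a - b|, so for padL, padT >= 0 the expressions
  // above reduce (shown for X, with W = src.size(3)) to
  //   inputPointX = x - padL               for padL <= x < padL + W   (interior)
  //   inputPointX = padL - x               for x < padL               (left reflection)
  //   inputPointX = 2 * (W - 1) + padL - x for x >= padL + W          (right reflection)
  // Example: W = 4, padL = 2 maps output columns 0..7 to input columns
  // 2, 1, 0, 1, 2, 3, 2, 1.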
DType valueToCopy = src[batch][plane][inputPointY][inputPointX];
dst[batch][plane][outputPointY][outputPointX] = valueToCopy;
}
template <typename DType>
inline void image_pad_reflect(Tensor<gpu, 4, DType> dst,
const Tensor<gpu, 4, DType> &src,
const mxnet::TShape &pad) {
const int padT = pad[4];
const int padL = pad[6];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (dst.size(2) * dst.size(3) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, dst.size(1), dst.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_);
image_2d_pad_reflect_kernel<kBaseThreadBits,
DType><<<dimGrid, dimBlock, 0, stream>>>(dst, src,
padT, padL);
MSHADOW_CUDA_POST_KERNEL_CHECK(image_2d_pad_reflect_kernel);
}
template <int n_bits, typename DType>
__global__ void image_2d_pad_reflect_grad_kernel(
Tensor<gpu, 4, DType> grad_in, const Tensor<gpu, 4, DType> grad_out,
const int padT, const int padL) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= grad_out.size(2) * grad_out.size(3)) {
return;
}
int outputPointX = outputPointId % grad_out.size(3);
int outputPointY = outputPointId / grad_out.size(3);
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int inputPointX = __sad(outputPointX, padL, 0)
- __sad(outputPointX, grad_in.size(3) + padL - 1, 0)
- outputPointX
+ 2 * padL + grad_in.size(3) - 1
- oStartX + iStartX;
int inputPointY = __sad(outputPointY, padT, 0)
- __sad(outputPointY, grad_in.size(2) + padT - 1, 0)
- outputPointY
+ 2 * padT + grad_in.size(2) - 1
- oStartY + iStartY;
DType valueToCopy = grad_out[batch][plane][outputPointY][outputPointX];
atomicAdd(&grad_in[batch][plane][inputPointY][inputPointX], valueToCopy);
}
template <typename DType>
inline void image_pad_reflect_grad(Tensor<gpu, 4, DType> grad_in,
const Tensor<gpu, 4, DType> &grad_out,
const mxnet::TShape &pad) {
const int padT = pad[4];
const int padL = pad[6];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (grad_out.size(2) * grad_out.size(3) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, grad_out.size(1), grad_out.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(grad_out.stream_);
image_2d_pad_reflect_grad_kernel<kBaseThreadBits,
DType><<<dimGrid, dimBlock, 0, stream>>>(
grad_in, grad_out, padT, padL);
MSHADOW_CUDA_POST_KERNEL_CHECK(image_2d_pad_reflect_grad_kernel);
}
////////////////////////////////////////////////////////////////////////////////
// Special Case: 3d image (pad depth + width + height)
// Case 1: Replication Padding
// single_image_3d_edge adapted from Torch
// https://github.com/torch/cunn/blob/master/lib/THCUNN/VolumetricReplicationPadding.cu
template <int n_bits, typename DType>
__global__ void image_3d_pad_edge_kernel(Tensor<gpu, 5, DType> dst,
const Tensor<gpu, 5, DType> src,
const int padF, const int padT,
const int padL) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= dst.size(2) * dst.size(3) * dst.size(4)) {
return;
}
int outputPointX = outputPointId % dst.size(4);
int outputPointY = (outputPointId / dst.size(4)) % dst.size(3);
int outputPointZ = outputPointId / (dst.size(3) * dst.size(4));
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int iStartZ = max(0, -padF);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int oStartZ = max(0, padF);
int inputPointX =
min(max(padL, outputPointX), static_cast<int>(src.size(4)) + padL - 1) - oStartX + iStartX;
int inputPointY =
min(max(padT, outputPointY), static_cast<int>(src.size(3)) + padT - 1) - oStartY + iStartY;
int inputPointZ =
min(max(padF, outputPointZ), static_cast<int>(src.size(2)) + padF - 1) - oStartZ + iStartZ;
DType valueToCopy = src[batch][plane][inputPointZ][inputPointY][inputPointX];
dst[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy;
}
template <typename DType>
inline void image_pad_edge(Tensor<gpu, 5, DType> dst,
const Tensor<gpu, 5, DType> &src,
const mxnet::TShape &pad) {
const int padF = pad[4];
const int padT = pad[6];
const int padL = pad[8];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (dst.size(2) * dst.size(3) * dst.size(4) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, dst.size(1), dst.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_);
image_3d_pad_edge_kernel<kBaseThreadBits,
DType><<<dimGrid, dimBlock, 0, stream>>>(
dst, src, padF, padT, padL);
MSHADOW_CUDA_POST_KERNEL_CHECK(image_3d_pad_edge_kernel);
}
template <int n_bits, typename DType>
__global__ void image_3d_pad_edge_grad_kernel(
Tensor<gpu, 5, DType> grad_in, const Tensor<gpu, 5, DType> grad_out,
const int padF, const int padT, const int padL) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= grad_out.size(2) * grad_out.size(3) * grad_out.size(4)) {
return;
}
int outputPointX = outputPointId % grad_out.size(4);
int outputPointY = (outputPointId / grad_out.size(4)) % grad_out.size(3);
int outputPointZ = outputPointId / (grad_out.size(3) * grad_out.size(4));
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int iStartZ = max(0, -padF);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int oStartZ = max(0, padF);
int inputPointX = min(max(padL, outputPointX), static_cast<int>(grad_in.size(4)) + padL - 1) -
oStartX + iStartX;
int inputPointY = min(max(padT, outputPointY), static_cast<int>(grad_in.size(3)) + padT - 1) -
oStartY + iStartY;
int inputPointZ = min(max(padF, outputPointZ), static_cast<int>(grad_in.size(2)) + padF - 1) -
oStartZ + iStartZ;
DType valueToCopy =
grad_out[batch][plane][outputPointZ][outputPointY][outputPointX];
atomicAdd(&grad_in[batch][plane][inputPointZ][inputPointY][inputPointX],
valueToCopy);
}
template <typename DType>
inline void image_pad_edge_grad(Tensor<gpu, 5, DType> grad_in,
const Tensor<gpu, 5, DType> &grad_out,
const mxnet::TShape &pad) {
const int padF = pad[4];
const int padT = pad[6];
const int padL = pad[8];
dim3 dimBlock(kBaseThreadNum);
int xGridSize =
(grad_out.size(2) * grad_out.size(3) * grad_out.size(4) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, grad_out.size(1), grad_out.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(grad_out.stream_);
image_3d_pad_edge_grad_kernel<kBaseThreadBits,
DType><<<dimGrid, dimBlock, 0, stream>>>(
grad_in, grad_out, padF, padT, padL);
MSHADOW_CUDA_POST_KERNEL_CHECK(image_3d_pad_edge_grad_kernel);
}
// Case 2: Constant Padding
template <int n_bits, typename DType>
__global__ void image_3d_pad_constant_kernel(Tensor<gpu, 5, DType> dst,
const Tensor<gpu, 5, DType> src,
const int padF, const int padT,
const int padL,
const DType constant) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
if (outputPointId >= dst.size(2) * dst.size(3) * dst.size(4)) {
return;
}
// cast sizes to int to use in min/max
int Nz = src.size(2);
int Ny = src.size(3);
int Nx = src.size(4);
int plane = blockIdx.y;
int batch = blockIdx.z;
int outputPointX = outputPointId % dst.size(4);
int outputPointY = (outputPointId / dst.size(4)) % dst.size(3);
int outputPointZ = outputPointId / (dst.size(3) * dst.size(4));
int checkFront = max(0, outputPointZ - padF + 1);
int checkBack = max(0, padF + Nz - outputPointZ);
int checkTop = max(0, outputPointY - padT + 1);
int checkBottom = max(0, padT + Ny - outputPointY);
int checkLeft = max(0, outputPointX - padL + 1);
int checkRight = max(0, padL + Nx - outputPointX);
int inputPointZ = min(max(outputPointZ - padF, 0), Nz - 1);
int inputPointX = min(max(outputPointX - padL, 0), Nx - 1);
int inputPointY = min(max(outputPointY - padT, 0), Ny - 1);
// 1 if need padding, 0 if not
int need_pad = !(checkFront * checkBack * checkTop * checkBottom * checkLeft *
checkRight);
DType valueToCopy = src[batch][plane][inputPointZ][inputPointY][inputPointX];
dst[batch][plane][outputPointZ][outputPointY][outputPointX] =
valueToCopy * (!need_pad) + need_pad * constant;
}
template <typename DType>
inline void image_pad_constant(Tensor<gpu, 5, DType> dst,
const Tensor<gpu, 5, DType> &src,
const mxnet::TShape &pad, const DType constant) {
const int padF = pad[4];
const int padT = pad[6];
const int padL = pad[8];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (dst.size(2) * dst.size(3) * dst.size(4) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, dst.size(1), dst.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_);
image_3d_pad_constant_kernel<kBaseThreadBits,
DType><<<dimGrid, dimBlock, 0, stream>>>(
dst, src, padF, padT, padL, constant);
MSHADOW_CUDA_POST_KERNEL_CHECK(image_3d_pad_constant_kernel);
}
template <int n_bits, typename DType>
__global__ void image_3d_pad_constant_grad_kernel(
Tensor<gpu, 5, DType> grad_in, const Tensor<gpu, 5, DType> grad_out,
const int padF, const int padT, const int padL) {
int inPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
int pixel_num = grad_in.size(2) * grad_in.size(3) * grad_in.size(4);
if (inPointId >= pixel_num) {
return;
}
int inPointX = inPointId % grad_in.size(4);
int inPointY = (inPointId / grad_in.size(4)) % grad_in.size(3);
int inPointZ = inPointId / (grad_in.size(3) * grad_in.size(4));
int outPointZ = inPointZ + padF;
int outPointX = inPointX + padL;
int outPointY = inPointY + padT;
grad_in[batch][plane][inPointZ][inPointY][inPointX] =
grad_out[batch][plane][outPointZ][outPointY][outPointX];
}
template <typename DType>
inline void image_pad_constant_grad(Tensor<gpu, 5, DType> grad_in,
const Tensor<gpu, 5, DType> &grad_out,
const mxnet::TShape &pad) {
const int padF = pad[4];
const int padT = pad[6];
const int padL = pad[8];
dim3 dimBlock(kBaseThreadNum);
int xGridSize =
(grad_in.size(2) * grad_in.size(3) * grad_in.size(4) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, grad_in.size(1), grad_in.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(grad_in.stream_);
image_3d_pad_constant_grad_kernel<kBaseThreadBits,
DType><<<dimGrid, dimBlock, 0, stream>>>(
grad_in, grad_out, padF, padT, padL);
MSHADOW_CUDA_POST_KERNEL_CHECK(image_3d_pad_constant_grad_kernel);
}
// Case 3: Reflection Padding
template <int n_bits, typename DType>
__global__ void image_3d_pad_reflect_kernel(Tensor<gpu, 5, DType> dst,
const Tensor<gpu, 5, DType> src,
const int padF, const int padT,
const int padL) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= dst.size(2) * dst.size(3) * dst.size(4)) {
return;
}
int outputPointX = outputPointId % dst.size(4);
int outputPointY = (outputPointId / dst.size(4)) % dst.size(3);
int outputPointZ = outputPointId / (dst.size(3) * dst.size(4));
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int iStartZ = max(0, -padF);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int oStartZ = max(0, padF);
int inputPointX = __sad(outputPointX, padL, 0)
- __sad(outputPointX, src.size(4) + padL - 1, 0)
- outputPointX
+ 2 * padL + src.size(4) - 1
- oStartX + iStartX;
int inputPointY = __sad(outputPointY, padT, 0)
- __sad(outputPointY, src.size(3) + padT - 1, 0)
- outputPointY
+ 2 * padT + src.size(3) - 1
- oStartY + iStartY;
int inputPointZ = __sad(outputPointZ, padF, 0)
- __sad(outputPointZ, src.size(2) + padF - 1, 0)
- outputPointZ
+ 2 * padF + src.size(2) - 1
- oStartZ + iStartZ;
DType valueToCopy = src[batch][plane][inputPointZ][inputPointY][inputPointX];
dst[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy;
}
template <typename DType>
inline void image_pad_reflect(Tensor<gpu, 5, DType> dst,
const Tensor<gpu, 5, DType> &src,
const mxnet::TShape &pad) {
const int padF = pad[4];
const int padT = pad[6];
const int padL = pad[8];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (dst.size(2) * dst.size(3) * dst.size(4) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, dst.size(1), dst.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_);
image_3d_pad_reflect_kernel<kBaseThreadBits,
DType><<<dimGrid, dimBlock, 0, stream>>>(
dst, src, padF, padT, padL);
MSHADOW_CUDA_POST_KERNEL_CHECK(image_3d_pad_reflect_kernel);
}
template <int n_bits, typename DType>
__global__ void image_3d_pad_reflect_grad_kernel(
Tensor<gpu, 5, DType> grad_in, const Tensor<gpu, 5, DType> grad_out,
const int padF, const int padT, const int padL) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= grad_out.size(2) * grad_out.size(3) * grad_out.size(4)) {
return;
}
int outputPointX = outputPointId % grad_out.size(4);
int outputPointY = (outputPointId / grad_out.size(4)) % grad_out.size(3);
int outputPointZ = outputPointId / (grad_out.size(3) * grad_out.size(4));
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int iStartZ = max(0, -padF);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int oStartZ = max(0, padF);
int inputPointX = __sad(outputPointX, padL, 0)
- __sad(outputPointX, grad_in.size(4) + padL - 1, 0)
- outputPointX
+ 2 * padL + grad_in.size(4) - 1
- oStartX + iStartX;
int inputPointY = __sad(outputPointY, padT, 0)
- __sad(outputPointY, grad_in.size(3) + padT - 1, 0)
- outputPointY
+ 2 * padT + grad_in.size(3) - 1
- oStartY + iStartY;
int inputPointZ = __sad(outputPointZ, padF, 0)
- __sad(outputPointZ, grad_in.size(2) + padF - 1, 0)
- outputPointZ
+ 2 * padF + grad_in.size(2) - 1
- oStartZ + iStartZ;
DType valueToCopy =
grad_out[batch][plane][outputPointZ][outputPointY][outputPointX];
atomicAdd(&grad_in[batch][plane][inputPointZ][inputPointY][inputPointX],
valueToCopy);
}
template <typename DType>
inline void image_pad_reflect_grad(Tensor<gpu, 5, DType> grad_in,
const Tensor<gpu, 5, DType> &grad_out,
const mxnet::TShape &pad) {
const int padF = pad[4];
const int padT = pad[6];
const int padL = pad[8];
dim3 dimBlock(kBaseThreadNum);
int xGridSize =
(grad_out.size(2) * grad_out.size(3) * grad_out.size(4) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, grad_out.size(1), grad_out.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(grad_out.stream_);
image_3d_pad_reflect_grad_kernel<kBaseThreadBits,
DType><<<dimGrid, dimBlock, 0, stream>>>(
grad_in, grad_out, padF, padT, padL);
MSHADOW_CUDA_POST_KERNEL_CHECK(image_3d_pad_reflect_grad_kernel);
}
////////////////////////////////////////////////////////////////////////////////
} // namespace cuda
template <int dim, typename DType>
void pad_image(Tensor<gpu, dim, DType> dst, const Tensor<gpu, dim, DType> src,
const mxnet::TShape pad, int mode, const DType constant_value) {
switch (mode) {
case mxnet::op::pad_enum::kEdge:
cuda::image_pad_edge(dst, src, pad);
break;
case mxnet::op::pad_enum::kConstant:
cuda::image_pad_constant(dst, src, pad, constant_value);
break;
case mxnet::op::pad_enum::kReflect:
cuda::image_pad_reflect(dst, src, pad);
break;
}
}
template <int dim, typename DType>
void pad_image_grad(Tensor<gpu, dim, DType> grad_in,
const Tensor<gpu, dim, DType> grad_out,
const mxnet::TShape pad, int mode) {
switch (mode) {
case mxnet::op::pad_enum::kEdge:
cuda::image_pad_edge_grad(grad_in, grad_out, pad);
break;
case mxnet::op::pad_enum::kConstant:
cuda::image_pad_constant_grad(grad_in, grad_out, pad);
break;
case mxnet::op::pad_enum::kReflect:
cuda::image_pad_reflect_grad(grad_in, grad_out, pad);
break;
}
}
} // namespace mshadow
////////////////////////////////////////////////////////////////////////////////
namespace mxnet {
namespace op {
template <>
Operator *CreateOp<gpu>(PadParam param, int dtype) {
Operator *op = nullptr;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new PadOp<gpu, DType>(param); })
return op;
}
} // namespace op
} // namespace mxnet
#include <algorithm>
#include <atomic>
#include <mutex>
// Note (added): this excerpt also relies on the project's own headers -- e.g.
// "gloo/cuda.h" and "gloo/cuda_private.h" for CudaStream, CudaDevicePointer,
// CudaHostPointer, CUDA_CHECK, GLOO_ENFORCE_LE and friends -- which are assumed
// to precede it.
#include <cuda.h>
// Disable strict aliasing errors for CUDA 9.
#if CUDA_VERSION >= 9000
#ifdef __GNUC__
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic push
#endif
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif // __GNUC__
#endif // CUDA_VERSION >= 9000
#include <cuda_fp16.h>
#if CUDA_VERSION >= 9000
#ifdef __GNUC__
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC diagnostic pop
#endif
#endif // __GNUC__
#endif // CUDA_VERSION >= 9000
namespace gloo {
const cudaStream_t kStreamNotSet = (cudaStream_t)(-1);
const int kInvalidDeviceId = -1;
// Default mutex to synchronize contentious CUDA and NCCL operations
static std::mutex defaultCudaMutex;
std::atomic<std::mutex*> CudaShared::mutex_(&defaultCudaMutex);
CudaStream::CudaStream(int deviceId, cudaStream_t stream)
: deviceId_(deviceId),
stream_(stream),
streamOwner_(false) {
CudaDeviceScope scope(deviceId_);
// Create new stream if it wasn't specified
if (stream_ == kStreamNotSet) {
#ifndef __HIP_PLATFORM_HCC__
int loPri, hiPri;
CUDA_CHECK(cudaDeviceGetStreamPriorityRange(&loPri, &hiPri));
CUDA_CHECK(cudaStreamCreateWithPriority(
&stream_, cudaStreamNonBlocking, hiPri));
#else
// hipStreamCreateWithPriority is a new API introduced in ROCm 2.0
// it hangs on some distributed runs
CUDA_CHECK(cudaStreamCreateWithFlags(&stream_, cudaStreamNonBlocking));
#endif
streamOwner_ = true;
}
// Create new event to synchronize operations against
CUDA_CHECK(cudaEventCreateWithFlags(&event_, cudaEventDisableTiming));
}
CudaStream::CudaStream(CudaStream&& other) noexcept
: deviceId_(other.deviceId_),
stream_(other.stream_),
event_(other.event_),
streamOwner_(other.streamOwner_) {
other.deviceId_ = kInvalidDeviceId;
other.stream_ = nullptr;
other.event_ = nullptr;
}
CudaStream::~CudaStream() noexcept(false) {
if (deviceId_ == kInvalidDeviceId) {
return;
}
if (event_ != nullptr) {
// Make sure outstanding operations are complete. If the event
// hasn't been queued this call will return immediately.
CUDA_CHECK(cudaEventSynchronize(event_));
CUDA_CHECK(cudaEventDestroy(event_));
}
if (streamOwner_ && stream_ != nullptr) {
CUDA_CHECK(cudaStreamDestroy(stream_));
}
}
template <typename T>
void CudaStream::copyAsync(
CudaHostPointer<T>& dst,
CudaDevicePointer<T>& src) {
CudaDeviceScope scope(deviceId_);
GLOO_ENFORCE_LE(dst.getCount(), src.getCount());
CUDA_CHECK(cudaMemcpyAsync(
*dst,
*src,
dst.getCount() * sizeof(T),
cudaMemcpyDeviceToHost,
stream_));
CUDA_CHECK(cudaEventRecord(event_, stream_));
}
template <typename T>
void CudaStream::copyAsync(
CudaHostPointer<T>& dst,
CudaHostPointer<T>& src) {
CudaDeviceScope scope(deviceId_);
GLOO_ENFORCE_LE(dst.getCount(), src.getCount());
CUDA_CHECK(cudaMemcpyAsync(
*dst,
*src,
dst.getCount() * sizeof(T),
cudaMemcpyHostToHost,
stream_));
CUDA_CHECK(cudaEventRecord(event_, stream_));
}
template <typename T>
void CudaStream::copyAsync(
CudaDevicePointer<T>& dst,
CudaDevicePointer<T>& src) {
CudaDeviceScope scope(deviceId_);
GLOO_ENFORCE_LE(dst.getCount(), src.getCount());
CUDA_CHECK(cudaMemcpyAsync(
*dst,
*src,
dst.getCount() * sizeof(T),
cudaMemcpyDeviceToDevice,
stream_));
CUDA_CHECK(cudaEventRecord(event_, stream_));
}
template <typename T>
void CudaStream::copyAsync(
CudaDevicePointer<T>& dst,
CudaHostPointer<T>& src) {
CudaDeviceScope scope(deviceId_);
GLOO_ENFORCE_LE(dst.getCount(), src.getCount());
CUDA_CHECK(cudaMemcpyAsync(
*dst,
*src,
dst.getCount() * sizeof(T),
cudaMemcpyHostToDevice,
stream_));
CUDA_CHECK(cudaEventRecord(event_, stream_));
}
void CudaStream::record() {
CUDA_CHECK(cudaEventRecord(event_, stream_));
}
void CudaStream::wait() {
CudaDeviceScope scope(deviceId_);
CUDA_CHECK(cudaEventSynchronize(event_));
}
template <typename T>
CudaDevicePointer<T> CudaDevicePointer<T>::alloc(
size_t count) {
T* ptr = nullptr;
size_t bytes = count * sizeof(T);
{
std::lock_guard<std::mutex> lock(CudaShared::getMutex());
CUDA_CHECK(cudaMalloc(&ptr, bytes));
}
auto p = create(ptr, count);
p.owner_ = true;
return p;
}
template<typename T>
CudaDevicePointer<T> CudaDevicePointer<T>::create(
T* ptr,
size_t count) {
CudaDevicePointer p(ptr, count, false);
return p;
}
template<typename T>
CudaDevicePointer<T>::CudaDevicePointer(T* ptr, size_t count, bool owner)
: device_(ptr),
count_(count),
owner_(owner),
deviceId_(getGPUIDForPointer(device_)) {
}
template<typename T>
CudaDevicePointer<T>::CudaDevicePointer(CudaDevicePointer<T>&& other) noexcept
: device_(other.device_),
count_(other.count_),
owner_(other.owner_),
deviceId_(other.deviceId_) {
// Nullify fields that would otherwise be destructed
other.device_ = nullptr;
other.owner_ = false;
other.deviceId_ = kInvalidDeviceId;
}
template<typename T>
CudaDevicePointer<T>& CudaDevicePointer<T>::operator=(
CudaDevicePointer<T>&& other) {
device_ = other.device_;
count_ = other.count_;
owner_ = other.owner_;
deviceId_ = other.deviceId_;
// Nullify fields that would otherwise be destructed
other.device_ = nullptr;
other.owner_ = false;
other.deviceId_ = kInvalidDeviceId;
return *this;
}
template<typename T>
CudaDevicePointer<T>::~CudaDevicePointer() noexcept(false) {
if (deviceId_ == kInvalidDeviceId) {
return;
}
CudaDeviceScope scope(deviceId_);
if (owner_ && device_ != nullptr) {
std::lock_guard<std::mutex> lock(CudaShared::getMutex());
CUDA_CHECK(cudaFree(device_));
}
}
template <typename T>
CudaHostPointer<T> CudaHostPointer<T>::alloc(size_t count) {
T* ptr = nullptr;
size_t bytes = count * sizeof(T);
{
std::lock_guard<std::mutex> lock(CudaShared::getMutex());
CUDA_CHECK(cudaMallocHost(&ptr, bytes));
}
return CudaHostPointer<T>(ptr, count, true);
}
template <typename T>
CudaHostPointer<T>::CudaHostPointer(T* ptr, size_t count, bool owner)
: host_(ptr),
count_(count),
owner_(owner) {}
template <typename T>
CudaHostPointer<T>::CudaHostPointer(CudaHostPointer&& other) noexcept
: host_(other.host_),
count_(other.count_),
owner_(other.owner_) {
other.host_ = nullptr;
other.count_ = 0;
other.owner_ = false;
}
template<typename T>
CudaHostPointer<T>& CudaHostPointer<T>::operator=(CudaHostPointer<T>&& other) {
host_ = other.host_;
count_ = other.count_;
owner_ = other.owner_;
other.host_ = nullptr;
other.count_ = 0;
other.owner_ = false;
return *this;
}
template<typename T>
CudaHostPointer<T>::~CudaHostPointer() noexcept(false) {
if (owner_) {
std::lock_guard<std::mutex> lock(CudaShared::getMutex());
CUDA_CHECK(cudaFreeHost(host_));
}
}
// Instantiate templates
#define INSTANTIATE_COPY_ASYNC(T) \
template class CudaDevicePointer<T>; \
template class CudaHostPointer<T>; \
\
template void CudaStream::copyAsync<T>( \
CudaHostPointer<T>& dst, \
CudaDevicePointer<T>& src); \
\
template void CudaStream::copyAsync<T>( \
CudaHostPointer<T>& dst, \
CudaHostPointer<T>& src); \
\
template void CudaStream::copyAsync<T>( \
CudaDevicePointer<T>& dst, \
CudaDevicePointer<T>& src); \
\
template void CudaStream::copyAsync<T>( \
CudaDevicePointer<T>& dst, \
CudaHostPointer<T>& src);
INSTANTIATE_COPY_ASYNC(int8_t);
INSTANTIATE_COPY_ASYNC(uint8_t);
INSTANTIATE_COPY_ASYNC(int32_t);
INSTANTIATE_COPY_ASYNC(int64_t);
INSTANTIATE_COPY_ASYNC(uint64_t);
INSTANTIATE_COPY_ASYNC(float16);
INSTANTIATE_COPY_ASYNC(float);
INSTANTIATE_COPY_ASYNC(double);
// Borrowed limits from Caffe2 code (see core/common_gpu.h)
constexpr static int kCudaNumThreads = 512;
constexpr static int kCudaMaximumNumBlocks = 4096;
static inline int cudaGetBlocks(const int N) {
return std::min((N + kCudaNumThreads - 1) / kCudaNumThreads,
kCudaMaximumNumBlocks);
}
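// Added example: with kCudaNumThreads = 512, cudaGetBlocks(100000) =
// min((100000 + 511) / 512, 4096) = 196 blocks; the grid-stride loops in the kernels
// below cover any N that would otherwise exceed the 4096-block cap.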
#define DELEGATE_SIMPLE_CUDA_BINARY_OPERATOR(T, Funcname, op) \
__global__ \
void _Kernel_##T##_##Funcname(T* dst, const T* src, const int n) { \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x) { \
dst[i] = dst[i] op src[i]; \
} \
} \
template <> \
void Funcname<T>( \
T* dst, \
const T* src, \
size_t n, \
const cudaStream_t stream) { \
_Kernel_##T##_##Funcname<<< \
cudaGetBlocks(n), \
kCudaNumThreads, \
0, \
stream>>>( \
dst, src, n); \
}
#define DELEGATE_HALF_PRECISION_CUDA_BINARY_OPERATOR(Funcname, op) \
__global__ void _Kernel_half_##Funcname( \
half* dst, const half* src, const int n) { \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x) { \
float r = __half2float(dst[i]) op __half2float(src[i]); \
dst[i] = __float2half(r); \
} \
} \
template <> \
void Funcname<float16>( \
float16* dst, \
const float16* src, \
size_t n, \
const cudaStream_t stream) { \
_Kernel_half_##Funcname<<<cudaGetBlocks(n), kCudaNumThreads, 0, stream>>>( \
(half*)dst, (half*)src, n); \
}
DELEGATE_SIMPLE_CUDA_BINARY_OPERATOR(int8_t, cudaSum, +);
DELEGATE_SIMPLE_CUDA_BINARY_OPERATOR(int8_t, cudaProduct, *);
DELEGATE_SIMPLE_CUDA_BINARY_OPERATOR(uint8_t, cudaSum, +);
DELEGATE_SIMPLE_CUDA_BINARY_OPERATOR(uint8_t, cudaProduct, *);
DELEGATE_SIMPLE_CUDA_BINARY_OPERATOR(int32_t, cudaSum, +);
DELEGATE_SIMPLE_CUDA_BINARY_OPERATOR(int32_t, cudaProduct, *);
DELEGATE_SIMPLE_CUDA_BINARY_OPERATOR(int64_t, cudaSum, +);
DELEGATE_SIMPLE_CUDA_BINARY_OPERATOR(int64_t, cudaProduct, *);
DELEGATE_SIMPLE_CUDA_BINARY_OPERATOR(uint64_t, cudaSum, +);
DELEGATE_SIMPLE_CUDA_BINARY_OPERATOR(uint64_t, cudaProduct, *);
DELEGATE_SIMPLE_CUDA_BINARY_OPERATOR(float, cudaSum, +);
DELEGATE_SIMPLE_CUDA_BINARY_OPERATOR(float, cudaProduct, *);
DELEGATE_SIMPLE_CUDA_BINARY_OPERATOR(double, cudaSum, +);
DELEGATE_SIMPLE_CUDA_BINARY_OPERATOR(double, cudaProduct, *);
DELEGATE_HALF_PRECISION_CUDA_BINARY_OPERATOR(cudaSum, +);
DELEGATE_HALF_PRECISION_CUDA_BINARY_OPERATOR(cudaProduct, *);
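// Added note: each DELEGATE_SIMPLE_CUDA_BINARY_OPERATOR(T, Funcname, op) line above
// expands to a grid-stride kernel that applies dst[i] = dst[i] op src[i] element-wise
// plus a host-side specialization Funcname<T>(dst, src, n, stream) that launches it;
// e.g. cudaSum<float> adds src into dst in place on the given stream, and the
// half-precision variant round-trips through float for the arithmetic.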
#define DELEGATE_SIMPLE_CUDA_BINARY_COMPARE(T, Funcname, op) \
__global__ \
void _Kernel_##T##_##Funcname(T* dst, const T* src, const int n) { \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x) { \
if (src[i] op dst[i]) { \
dst[i] = src[i]; \
} \
} \
} \
template <> \
void Funcname<T>( \
T* dst, \
const T* src, \
size_t n, \
const cudaStream_t stream) { \
_Kernel_##T##_##Funcname<<< \
cudaGetBlocks(n), \
kCudaNumThreads, \
0, \
stream>>>( \
dst, src, n); \
}
#define DELEGATE_HALF_PRECISION_CUDA_BINARY_COMPARE(Funcname, op) \
__global__ void _Kernel_half_##Funcname( \
half* dst, const half* src, const int n) { \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x) { \
if (__half2float(src[i]) op __half2float(dst[i])) { \
dst[i] = src[i]; \
} \
} \
} \
template <> \
void Funcname<float16>( \
float16* dst, \
const float16* src, \
size_t n, \
const cudaStream_t stream) { \
_Kernel_half_##Funcname<<<cudaGetBlocks(n), kCudaNumThreads, 0, stream>>>( \
(half*)dst, (half*)src, n); \
}
DELEGATE_SIMPLE_CUDA_BINARY_COMPARE(int8_t, cudaMin, <);
DELEGATE_SIMPLE_CUDA_BINARY_COMPARE(int8_t, cudaMax, >);
DELEGATE_SIMPLE_CUDA_BINARY_COMPARE(uint8_t, cudaMin, <);
DELEGATE_SIMPLE_CUDA_BINARY_COMPARE(uint8_t, cudaMax, >);
DELEGATE_SIMPLE_CUDA_BINARY_COMPARE(int32_t, cudaMin, <);
DELEGATE_SIMPLE_CUDA_BINARY_COMPARE(int32_t, cudaMax, >);
DELEGATE_SIMPLE_CUDA_BINARY_COMPARE(int64_t, cudaMin, <);
DELEGATE_SIMPLE_CUDA_BINARY_COMPARE(int64_t, cudaMax, >);
DELEGATE_SIMPLE_CUDA_BINARY_COMPARE(uint64_t, cudaMin, <);
DELEGATE_SIMPLE_CUDA_BINARY_COMPARE(uint64_t, cudaMax, >);
DELEGATE_SIMPLE_CUDA_BINARY_COMPARE(float, cudaMin, <);
DELEGATE_SIMPLE_CUDA_BINARY_COMPARE(float, cudaMax, >);
DELEGATE_SIMPLE_CUDA_BINARY_COMPARE(double, cudaMin, <);
DELEGATE_SIMPLE_CUDA_BINARY_COMPARE(double, cudaMax, >);
DELEGATE_HALF_PRECISION_CUDA_BINARY_COMPARE(cudaMin, <);
DELEGATE_HALF_PRECISION_CUDA_BINARY_COMPARE(cudaMax, >);
} // namespace gloo
#ifndef CPU_ONLY
__global__ void gpu_set_zero(int number_of_elements,
real* __restrict g_state_real,
real* __restrict g_state_imag)
{
int n = blockIdx.x * blockDim.x + threadIdx.x;
if (n < number_of_elements) {
g_state_real[n] = 0;
g_state_imag[n] = 0;
}
}
#else
void cpu_set_zero(int number_of_elements, real* g_state_real, real* g_state_imag)
{
for (int n = 0; n < number_of_elements; ++n) {
g_state_real[n] = 0;
g_state_imag[n] = 0;
}
}
#endif
#ifndef CPU_ONLY
void Vector::initialize_gpu(int n)
{
this->n = n;
array_size = n * sizeof(real);
CHECK(hipMalloc((void**)&real_part, array_size));
CHECK(hipMalloc((void**)&imag_part, array_size));
}
#else
void Vector::initialize_cpu(int n)
{
this->n = n;
array_size = n * sizeof(real);
real_part = new real[n];
imag_part = new real[n];
}
#endif
Vector::Vector(int n)
{
#ifndef CPU_ONLY
initialize_gpu(n);
hipLaunchKernelGGL(gpu_set_zero, dim3((n - 1) / BLOCK_SIZE + 1), dim3(BLOCK_SIZE), 0, 0, n, real_part, imag_part);
CHECK(hipGetLastError());
#else
initialize_cpu(n);
cpu_set_zero(n, real_part, imag_part);
#endif
}
#ifndef CPU_ONLY
__global__ void gpu_copy_state(
const int N,
const real* __restrict in_real,
const real* __restrict in_imag,
real* __restrict out_real,
real* __restrict out_imag)
{
int n = blockIdx.x * blockDim.x + threadIdx.x;
if (n < N) {
out_real[n] = in_real[n];
out_imag[n] = in_imag[n];
}
}
#else
void cpu_copy_state(int N, real* in_real, real* in_imag, real* out_real, real* out_imag)
{
for (int n = 0; n < N; ++n) {
out_real[n] = in_real[n];
out_imag[n] = in_imag[n];
}
}
#endif
Vector::Vector(Vector& original)
{
  // Note: a member function can access the private members of another
  // instance of the same class.
#ifndef CPU_ONLY
initialize_gpu(original.n);
hipLaunchKernelGGL(gpu_copy_state, dim3((n - 1) / BLOCK_SIZE + 1), dim3(BLOCK_SIZE), 0, 0,
n, original.real_part, original.imag_part, real_part, imag_part);
CHECK(hipGetLastError());
#else
initialize_cpu(original.n);
cpu_copy_state(n, original.real_part, original.imag_part, real_part, imag_part);
#endif
}
Vector::~Vector()
{
#ifndef CPU_ONLY
CHECK(hipFree(real_part));
CHECK(hipFree(imag_part));
#else
delete[] real_part;
delete[] imag_part;
#endif
}
#ifndef CPU_ONLY
__global__ void gpu_add_state(
const int n,
const real*__restrict in_real,
const real*__restrict in_imag,
real*__restrict out_real,
real*__restrict out_imag)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
out_real[i] += in_real[i];
out_imag[i] += in_imag[i];
}
}
#else
void cpu_add_state(int n, real* in_real, real* in_imag, real* out_real, real* out_imag)
{
for (int i = 0; i < n; ++i) {
out_real[i] += in_real[i];
out_imag[i] += in_imag[i];
}
}
#endif
void Vector::add(Vector& other)
{
#ifndef CPU_ONLY
hipLaunchKernelGGL(gpu_add_state, dim3((n - 1) / BLOCK_SIZE + 1), dim3(BLOCK_SIZE), 0, 0,
n, other.real_part, other.imag_part, real_part, imag_part);
CHECK(hipGetLastError());
#else
cpu_add_state(n, other.real_part, other.imag_part, real_part, imag_part);
#endif
}
void Vector::copy(Vector& other)
{
#ifndef CPU_ONLY
hipLaunchKernelGGL(gpu_copy_state, dim3((n - 1) / BLOCK_SIZE + 1), dim3(BLOCK_SIZE), 0, 0,
n, other.real_part, other.imag_part, real_part, imag_part);
CHECK(hipGetLastError());
#else
cpu_copy_state(n, other.real_part, other.imag_part, real_part, imag_part);
#endif
}
#ifndef CPU_ONLY
__global__ void gpu_apply_sz(
const int n,
const real* __restrict in_real,
const real* __restrict in_imag,
real* __restrict out_real,
real* __restrict out_imag)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
if (i % 2 == 0) {
out_real[i] = in_real[i];
out_imag[i] = in_imag[i];
} else {
out_real[i] = -in_real[i];
out_imag[i] = -in_imag[i];
}
}
}
#else
void cpu_apply_sz(int n, real* in_real, real* in_imag, real* out_real, real* out_imag)
{
for (int i = 0; i < n; ++i) {
if (i % 2 == 0) {
out_real[i] = in_real[i];
out_imag[i] = in_imag[i];
} else {
out_real[i] = -in_real[i];
out_imag[i] = -in_imag[i];
}
}
}
#endif
void Vector::apply_sz(Vector& other)
{
#ifndef CPU_ONLY
hipLaunchKernelGGL(gpu_apply_sz, dim3((n - 1) / BLOCK_SIZE + 1), dim3(BLOCK_SIZE), 0, 0,
n, other.real_part, other.imag_part, real_part, imag_part);
CHECK(hipGetLastError());
#else
cpu_apply_sz(n, other.real_part, other.imag_part, real_part, imag_part);
#endif
}
void Vector::copy_from_host(real* other_real, real* other_imag)
{
#ifndef CPU_ONLY
CHECK(hipMemcpy(real_part, other_real, array_size, hipMemcpyHostToDevice));
CHECK(hipMemcpy(imag_part, other_imag, array_size, hipMemcpyHostToDevice));
#else
memcpy(real_part, other_real, array_size);
memcpy(imag_part, other_imag, array_size);
#endif
}
void Vector::copy_to_host(real* target_real, real* target_imag)
{
#ifndef CPU_ONLY
CHECK(hipMemcpy(target_real, real_part, array_size, hipMemcpyDeviceToHost));
CHECK(hipMemcpy(target_imag, imag_part, array_size, hipMemcpyDeviceToHost));
#else
memcpy(target_real, real_part, array_size);
memcpy(target_imag, imag_part, array_size);
#endif
}
void Vector::swap(Vector& other)
{
real* tmp_real = real_part;
real* tmp_imag = imag_part;
  real_part = other.real_part;
  imag_part = other.imag_part;
other.real_part = tmp_real;
other.imag_part = tmp_imag;
}
#ifndef CPU_ONLY
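// Final tree reduction within a single warp. The shared-memory pointer is declared
// 'volatile' so each partial sum is re-read from shared memory by the other lanes
// without an explicit __syncthreads(); this is the classic warp-synchronous idiom,
// which assumes the 32 threads of a warp execute in lockstep (pre-Volta behavior).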
__device__ void warp_reduce(volatile real* s, int t)
{
s[t] += s[t + 32];
s[t] += s[t + 16];
s[t] += s[t + 8];
s[t] += s[t + 4];
s[t] += s[t + 2];
s[t] += s[t + 1];
}
#endif
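// The inner product <final|random> is computed in two stages: in stage 1 (below),
// every thread block reduces its slice of the two state vectors to a single partial
// complex sum, stored at position blockIdx.x + g_offset; stage 2 (further down)
// accumulates those per-block partial sums into one value per moment.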
#ifndef CPU_ONLY
__global__ void gpu_find_inner_product_1(
const int number_of_atoms,
const real* __restrict g_final_state_real,
const real* __restrict g_final_state_imag,
const real* __restrict g_random_state_real,
const real* __restrict g_random_state_imag,
real* __restrict g_inner_product_real,
real* __restrict g_inner_product_imag,
const int g_offset)
{
int tid = threadIdx.x;
int n = blockIdx.x * blockDim.x + tid;
int m;
real a, b, c, d;
__shared__ real s_data_real[BLOCK_SIZE];
__shared__ real s_data_imag[BLOCK_SIZE];
s_data_real[tid] = 0.0;
s_data_imag[tid] = 0.0;
if (n < number_of_atoms) {
a = g_final_state_real[n];
b = g_final_state_imag[n];
c = g_random_state_real[n];
d = g_random_state_imag[n];
s_data_real[tid] = (a * c + b * d);
s_data_imag[tid] = (b * c - a * d);
}
__syncthreads();
/*
if (tid < 256) {
m = tid + 256;
s_data_real[tid] += s_data_real[m];
s_data_imag[tid] += s_data_imag[m];
}
__syncthreads();
*/
if (tid < 128) {
m = tid + 128;
s_data_real[tid] += s_data_real[m];
s_data_imag[tid] += s_data_imag[m];
}
__syncthreads();
if (tid < 64) {
m = tid + 64;
s_data_real[tid] += s_data_real[m];
s_data_imag[tid] += s_data_imag[m];
}
__syncthreads();
if (tid < 32) {
warp_reduce(s_data_real, tid);
warp_reduce(s_data_imag, tid);
}
if (tid == 0) {
g_inner_product_real[blockIdx.x + g_offset] = s_data_real[0];
g_inner_product_imag[blockIdx.x + g_offset] = s_data_imag[0];
}
}
#else
void cpu_find_inner_product_1(
int grid_size,
int number_of_atoms,
real* g_final_state_real,
real* g_final_state_imag,
real* g_random_state_real,
real* g_random_state_imag,
real* g_inner_product_real,
real* g_inner_product_imag,
int g_offset)
{
for (int m = 0; m < grid_size; ++m) {
real s_data_real = 0.0;
real s_data_imag = 0.0;
for (int k = 0; k < BLOCK_SIZE; ++k) {
int n = m * BLOCK_SIZE + k;
if (n < number_of_atoms) {
real a = g_final_state_real[n];
real b = g_final_state_imag[n];
real c = g_random_state_real[n];
real d = g_random_state_imag[n];
s_data_real += (a * c + b * d);
s_data_imag += (b * c - a * d);
}
}
g_inner_product_real[m + g_offset] = s_data_real;
g_inner_product_imag[m + g_offset] = s_data_imag;
}
}
#endif
void Vector::inner_product_1(int number_of_atoms, Vector& other, Vector& target, int offset)
{
int grid_size = (number_of_atoms - 1) / BLOCK_SIZE + 1;
#ifndef CPU_ONLY
hipLaunchKernelGGL(gpu_find_inner_product_1, dim3(grid_size), dim3(BLOCK_SIZE), 0, 0,
number_of_atoms, real_part, imag_part, other.real_part, other.imag_part, target.real_part,
target.imag_part, offset);
CHECK(hipGetLastError());
#else
cpu_find_inner_product_1(
grid_size, number_of_atoms, real_part, imag_part, other.real_part, other.imag_part,
target.real_part, target.imag_part, offset);
#endif
}
#ifndef CPU_ONLY
__global__ void gpu_find_inner_product_2(
const int number_of_atoms,
const real* __restrict g_inner_product_1_real,
const real* __restrict g_inner_product_1_imag,
real* __restrict g_inner_product_2_real,
real* __restrict g_inner_product_2_imag)
{
  // launched as <<<number_of_moments, BLOCK_SIZE>>>
int tid = threadIdx.x;
int patch, n, m;
__shared__ real s_data_real[BLOCK_SIZE];
__shared__ real s_data_imag[BLOCK_SIZE];
s_data_real[tid] = 0.0;
s_data_imag[tid] = 0.0;
int number_of_blocks = (number_of_atoms - 1) / BLOCK_SIZE + 1;
int number_of_patches = (number_of_blocks - 1) / BLOCK_SIZE + 1;
for (patch = 0; patch < number_of_patches; ++patch) {
n = tid + patch * BLOCK_SIZE;
if (n < number_of_blocks) {
m = blockIdx.x * number_of_blocks + n;
s_data_real[tid] += g_inner_product_1_real[m];
s_data_imag[tid] += g_inner_product_1_imag[m];
}
}
__syncthreads();
/*
if (tid < 256) {
m = tid + 256;
s_data_real[tid] += s_data_real[m];
s_data_imag[tid] += s_data_imag[m];
}
__syncthreads();
*/
if (tid < 128) {
m = tid + 128;
s_data_real[tid] += s_data_real[m];
s_data_imag[tid] += s_data_imag[m];
}
__syncthreads();
if (tid < 64) {
m = tid + 64;
s_data_real[tid] += s_data_real[m];
s_data_imag[tid] += s_data_imag[m];
}
__syncthreads();
if (tid < 32) {
warp_reduce(s_data_real, tid);
warp_reduce(s_data_imag, tid);
}
if (tid == 0) {
g_inner_product_2_real[blockIdx.x] = s_data_real[0];
g_inner_product_2_imag[blockIdx.x] = s_data_imag[0];
}
}
#else
void cpu_find_inner_product_2(
int number_of_moments,
int grid_size,
real* g_inner_product_1_real,
real* g_inner_product_1_imag,
real* g_inner_product_2_real,
real* g_inner_product_2_imag)
{
for (int m = 0; m < number_of_moments; ++m) {
real s_data_real = 0.0;
real s_data_imag = 0.0;
for (int k = 0; k < grid_size; ++k) {
int n = m * grid_size + k;
s_data_real += g_inner_product_1_real[n];
s_data_imag += g_inner_product_1_imag[n];
}
g_inner_product_2_real[m] = s_data_real;
g_inner_product_2_imag[m] = s_data_imag;
}
}
#endif
void Vector::inner_product_2(int number_of_atoms, int number_of_moments, Vector& target)
{
#ifndef CPU_ONLY
hipLaunchKernelGGL(gpu_find_inner_product_2, dim3(number_of_moments), dim3(BLOCK_SIZE), 0, 0,
number_of_atoms, real_part, imag_part, target.real_part, target.imag_part);
CHECK(hipGetLastError());
#else
int grid_size = (number_of_atoms - 1) / BLOCK_SIZE + 1;
cpu_find_inner_product_2(
number_of_moments, grid_size, real_part, imag_part, target.real_part, target.imag_part);
#endif
}
#define COST_EPSILON 1e-2
using ::testing::Contains;
class TextEntityTest : public ModelTest<ModelTestWrapper<LSE>> {
protected:
virtual typename DataSourceType::BatchType* create_batch(
const size_t batch_size,
const size_t window_size) const {
return new typename DataSourceType::BatchType(batch_size, window_size);
}
};
class EntityEntityTest : public ModelTest<ModelTestWrapper<Model<EntityEntity::Objective>>> {
protected:
virtual typename DataSourceType::BatchType* create_batch(
const size_t batch_size,
const size_t window_size) const {
return new typename DataSourceType::BatchType(batch_size);
}
};
class TermTermTest : public ModelTest<ModelTestWrapper<Model<TermTerm::Objective>>> {
protected:
virtual typename DataSourceType::BatchType* create_batch(
const size_t batch_size,
const size_t window_size) const {
return new typename DataSourceType::BatchType(batch_size);
}
};
class TextEntityEntityEntityTest : public ModelTest<ModelTestWrapper<Model<TextEntityEntityEntity::Objective>>> {
protected:
virtual typename DataSourceType::BatchType* create_batch(
const size_t batch_size,
const size_t window_size) const {
return new typename DataSourceType::BatchType(
TextEntity::Batch(batch_size, window_size),
EntityEntity::Batch(batch_size));
}
};
class TextEntityTermTermTest : public ModelTest<ModelTestWrapper<Model<TextEntityTermTerm::Objective>>> {
protected:
virtual typename DataSourceType::BatchType* create_batch(
const size_t batch_size,
const size_t window_size) const {
return new typename DataSourceType::BatchType(
TextEntity::Batch(batch_size, window_size),
TermTerm::Batch(batch_size));
}
};
typedef TextEntityTest ConstantTextEntityTest;
typedef EntityEntityTest ConstantEntityEntityTest;
typedef TermTermTest ConstantTermTermTest;
typedef TextEntityEntityEntityTest ConstantTextEntityEntityEntityTest;
typedef TextEntityTermTermTest ConstantTextEntityTermTermTest;
// When the network is fed constant input, we run a different configuration than for
// the "average" or random input, because momentum-based update mechanisms quickly
// destabilize once they "figure out" that every batch contains the same patterns.
//
// Batch normalization also fails with a constant input source: within a single batch
// all rows are equal, so the normalized activations collapse to zero.
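// Concretely: if every row of a batch equals the same constant vector c, the batch
// mean is c itself, so after mean subtraction every normalized pre-activation is zero
// regardless of the weights, and no useful signal reaches the gradient check.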
INSTANTIATE_TEST_CASE_P(RandomSeedAndConfigs,
ConstantTextEntityTest,
::testing::Combine(
::testing::Range<RNG::result_type>(0 /* start, inclusive */,
6 /* end, exclusive */,
1 /* step */),
::testing::Values<std::string>(
// Different variants of Transform.
"transform_desc < "
" batch_normalization: false "
" nonlinearity: TANH "
">",
"transform_desc < "
" batch_normalization: false "
" nonlinearity: HARD_TANH "
">",
// Test bias_negative_samples.
"transform_desc < "
" batch_normalization: false "
" nonlinearity: TANH " // Remove unnecessary kinks!
"> "
"bias_negative_samples: true",
// Test l2_normalize_reprs.
"transform_desc < "
" batch_normalization: false "
" nonlinearity: TANH " // Remove unnecessary kinks!
"> "
"l2_normalize_phrase_reprs: true",
"transform_desc < "
" batch_normalization: false "
" nonlinearity: TANH " // Remove unnecessary kinks!
"> "
"l2_normalize_entity_reprs: true ",
"transform_desc < "
" batch_normalization: false "
" nonlinearity: TANH " // Remove unnecessary kinks!
"> "
"l2_normalize_phrase_reprs: true "
"l2_normalize_entity_reprs: true"
),
::testing::Values<std::string>(
"type: SGD")));
TEST_P(ConstantTextEntityTest, ConstantSource_GradientCheck) {
auto costs = train_dummy_source(new ConstantSource);
EXPECT_TRUE(!costs.empty());
}
INSTANTIATE_TEST_CASE_P(RandomSeedAndConfigs,
ConstantEntityEntityTest,
::testing::Combine(
::testing::Range<RNG::result_type>(0 /* start, inclusive */,
6 /* end, exclusive */,
1 /* step */),
::testing::Values<std::string>(
// Only the default, as it doesn't matter for this objective.
"transform_desc < "
" batch_normalization: false "
" nonlinearity: TANH "
">"
),
::testing::Values<std::string>(
"type: SGD",
"type: ADAGRAD")));
TEST_P(ConstantEntityEntityTest, ConstantSource_GradientCheck) {
RNG rng;
// We only have three entities in our model.
std::vector<EntityEntity::InstanceT>* const data =
new std::vector<EntityEntity::InstanceT>();
while (data->size() < 3 * (1 << 10)) {
data->push_back(std::make_tuple(0, 1, 1.0));
data->push_back(std::make_tuple(1, 2, 0.5));
data->push_back(std::make_tuple(2, 3, 1.0));
data->push_back(std::make_tuple(0, 2, 1.0));
data->push_back(std::make_tuple(1, 2, 1.0));
}
auto costs = train_dummy_source(new EntityEntity::DataSource(data, &rng));
EXPECT_TRUE(!costs.empty());
}
INSTANTIATE_TEST_CASE_P(RandomSeedAndConfigs,
ConstantTermTermTest,
::testing::Combine(
::testing::Range<RNG::result_type>(0 /* start, inclusive */,
6 /* end, exclusive */,
1 /* step */),
::testing::Values<std::string>(
// Only the default, as it doesn't matter for this objective.
"transform_desc < "
" batch_normalization: false "
" nonlinearity: TANH "
">"
),
::testing::Values<std::string>(
"type: SGD",
"type: ADAGRAD")));
TEST_P(ConstantTermTermTest, ConstantSource_GradientCheck) {
RNG rng;
// There are 20 terms within the model.
std::vector<TermTerm::InstanceT>* const data =
new std::vector<TermTerm::InstanceT>();
while (data->size() < 3 * (1 << 10)) {
data->push_back(std::make_tuple(0, 19, 1.0));
data->push_back(std::make_tuple(16, 2, 1.0));
data->push_back(std::make_tuple(2, 13, 1.0));
data->push_back(std::make_tuple(0, 2, 1.0));
data->push_back(std::make_tuple(11, 12, 1.0));
}
auto costs = train_dummy_source(new TermTerm::DataSource(data, &rng));
EXPECT_TRUE(!costs.empty());
}
INSTANTIATE_TEST_CASE_P(RandomSeedAndConfigs,
ConstantTextEntityEntityEntityTest,
::testing::Combine(
::testing::Range<RNG::result_type>(0 /* start, inclusive */,
6 /* end, exclusive */,
1 /* step */),
::testing::Values<std::string>(
// Only the default, as it doesn't matter for this objective.
"transform_desc < "
" batch_normalization: false "
" nonlinearity: TANH "
">"
),
::testing::Values<std::string>(
"type: SGD",
"type: ADAM "
"adam_conf: < mode: DENSE_UPDATE_DENSE_VARIANCE >")));
TEST_P(ConstantTextEntityEntityEntityTest, ConstantSource_GradientCheck) {
RNG rng;
// We only have three entities in our model.
std::vector<EntityEntity::InstanceT>* const data =
new std::vector<EntityEntity::InstanceT>();
while (data->size() < 3 * (1 << 10)) {
data->push_back(std::make_tuple(0, 1, 1.0));
data->push_back(std::make_tuple(1, 2, 1.0));
data->push_back(std::make_tuple(2, 3, 1.0));
data->push_back(std::make_tuple(0, 2, 1.0));
data->push_back(std::make_tuple(1, 2, 1.0));
}
auto source = new MultiSource<TextEntity::Batch, EntityEntity::Batch>(
std::make_tuple<DataSource<TextEntity::Batch>*,
DataSource<EntityEntity::Batch>*>(
new RandomSource(&rng),
new EntityEntity::DataSource(data, &rng)));
auto costs = train_dummy_source(source);
EXPECT_TRUE(!costs.empty());
}
INSTANTIATE_TEST_CASE_P(RandomSeedAndConfigs,
ConstantTextEntityTermTermTest,
::testing::Combine(
::testing::Range<RNG::result_type>(0 /* start, inclusive */,
6 /* end, exclusive */,
1 /* step */),
::testing::Values<std::string>(
// Only the default, as it doesn't matter for this objective.
"transform_desc < "
" batch_normalization: false "
" nonlinearity: TANH "
">"
),
::testing::Values<std::string>(
"type: SGD",
"type: ADAM "
"adam_conf: < mode: DENSE_UPDATE_DENSE_VARIANCE >")));
TEST_P(ConstantTextEntityTermTermTest, ConstantSource_GradientCheck) {
RNG rng;
// There are 20 terms within the model.
  std::vector<TermTerm::InstanceT>* const data =
      new std::vector<TermTerm::InstanceT>();
while (data->size() < 3 * (1 << 10)) {
data->push_back(std::make_tuple(0, 19, 1.0));
data->push_back(std::make_tuple(16, 2, 1.0));
data->push_back(std::make_tuple(2, 13, 1.0));
data->push_back(std::make_tuple(0, 2, 1.0));
data->push_back(std::make_tuple(11, 12, 1.0));
}
auto source = new MultiSource<TextEntity::Batch, TermTerm::Batch>(
std::make_tuple<DataSource<TextEntity::Batch>*,
DataSource<TermTerm::Batch>*>(
new RandomSource(&rng),
new TermTerm::DataSource(data, &rng)));
auto costs = train_dummy_source(source);
EXPECT_TRUE(!costs.empty());
}
INSTANTIATE_TEST_CASE_P(RandomSeedAndConfigs,
TextEntityTest,
::testing::Combine(
::testing::Range<RNG::result_type>(0 /* start, inclusive */,
6 /* end, exclusive */,
1 /* step */),
::testing::Values<std::string>(
// Different variants of Transform.
"transform_desc < "
" batch_normalization: false "
" nonlinearity: TANH "
">",
"transform_desc < "
" batch_normalization: true "
" nonlinearity: TANH "
">",
"transform_desc < "
" batch_normalization: false "
" nonlinearity: HARD_TANH "
">",
"transform_desc < "
" batch_normalization: true "
" nonlinearity: HARD_TANH "
">",
// Test bias_negative_samples.
"transform_desc < "
" batch_normalization: true "
" nonlinearity: TANH " // Remove unnecessary kinks!
"> "
"bias_negative_samples: true",
// Test l2_normalize_reprs.
"transform_desc < "
" batch_normalization: true "
" nonlinearity: TANH " // Remove unnecessary kinks!
"> "
"l2_normalize_phrase_reprs: true",
"transform_desc < "
" batch_normalization: true "
" nonlinearity: TANH " // Remove unnecessary kinks!
"> "
"l2_normalize_entity_reprs: true ",
"transform_desc < "
" batch_normalization: true "
" nonlinearity: TANH " // Remove unnecessary kinks!
"> "
"l2_normalize_phrase_reprs: true "
"l2_normalize_entity_reprs: true"
),
::testing::Values<std::string>(
"type: SGD",
"type: ADAGRAD",
"type: ADAM "
"adam_conf: < mode: SPARSE >",
"type: ADAM "
"adam_conf: < mode: DENSE_UPDATE >",
"type: ADAM "
"adam_conf: < mode: DENSE_UPDATE_DENSE_VARIANCE >")));
TEST_P(TextEntityTest, RandomSource_GradientCheck) {
train_dummy_source(new RandomSource(&rng_));
}
int main(int argc, char* argv[]) {
google::InitGoogleLogging(argv[0]);
testing::InitGoogleTest(&argc, argv);
google::ParseCommandLineFlags(&argc, &argv, true);
return RUN_ALL_TESTS();
}
////////////////////////////////////////////////////////////////////////////////
// //
// Description: //
// The Runge-Kutta-Fehlberg method is an adaptive procedure for approxi- //
// mating the solution of the differential equation y'(x) = f(x,y) with //
// initial condition y(x0) = c. This implementation evaluates f(x,y) //
//     thirteen times per step using embedded seventh- and eighth-order      //
//     Runge-Kutta estimates to estimate not only the solution but also      //
//     the error.                                                            //
// The next step size is then calculated using the preassigned tolerance //
// and error estimate. //
// For step i+1, //
// y[i+1] = y[i] + h * (41/840 * k1 + 34/105 * finavalu_temp[5] + 9/35 * finavalu_temp[6] //
// + 9/35 * finavalu_temp[7] + 9/280 * finavalu_temp[8] + 9/280 finavalu_temp[9] + 41/840 finavalu_temp[10] ) //
// where //
// k1 = f( x[i],y[i] ), //
// finavalu_temp[1] = f( x[i]+2h/27, y[i] + 2h*k1/27), //
// finavalu_temp[2] = f( x[i]+h/9, y[i]+h/36*( k1 + 3 finavalu_temp[1]) ), //
// finavalu_temp[3] = f( x[i]+h/6, y[i]+h/24*( k1 + 3 finavalu_temp[2]) ), //
// finavalu_temp[4] = f( x[i]+5h/12, y[i]+h/48*(20 k1 - 75 finavalu_temp[2] + 75 finavalu_temp[3])), //
// finavalu_temp[5] = f( x[i]+h/2, y[i]+h/20*( k1 + 5 finavalu_temp[3] + 4 finavalu_temp[4] ) ), //
// finavalu_temp[6] = f( x[i]+5h/6, y[i]+h/108*( -25 k1 + 125 finavalu_temp[3] - 260 finavalu_temp[4] + 250 finavalu_temp[5] ) ), //
// finavalu_temp[7] = f( x[i]+h/6, y[i]+h*( 31/300 k1 + 61/225 finavalu_temp[4] - 2/9 finavalu_temp[5] //
// + 13/900 finavalu_temp[6]) ) //
// finavalu_temp[8] = f( x[i]+2h/3, y[i]+h*( 2 k1 - 53/6 finavalu_temp[3] + 704/45 finavalu_temp[4] - 107/9 finavalu_temp[5] //
// + 67/90 finavalu_temp[6] + 3 finavalu_temp[7]) ), //
// finavalu_temp[9] = f( x[i]+h/3, y[i]+h*( -91/108 k1 + 23/108 finavalu_temp[3] - 976/135 finavalu_temp[4] //
// + 311/54 finavalu_temp[5] - 19/60 finavalu_temp[6] + 17/6 finavalu_temp[7] - 1/12 finavalu_temp[8]) ), //
// finavalu_temp[10] = f( x[i]+h, y[i]+h*( 2383/4100 k1 - 341/164 finavalu_temp[3] + 4496/1025 finavalu_temp[4] //
// - 301/82 finavalu_temp[5] + 2133/4100 finavalu_temp[6] + 45/82 finavalu_temp[7] + 45/164 finavalu_temp[8] + 18/41 finavalu_temp[9]) ) //
// finavalu_temp[11] = f( x[i], y[i]+h*( 3/205 k1 - 6/41 finavalu_temp[5] - 3/205 finavalu_temp[6] - 3/41 finavalu_temp[7] //
// + 3/41 finavalu_temp[8] + 6/41 finavalu_temp[9]) ) //
// finavalu_temp[12] = f( x[i]+h, y[i]+h*( -1777/4100 k1 - 341/164 finavalu_temp[3] + 4496/1025 finavalu_temp[4] //
// - 289/82 finavalu_temp[5] + 2193/4100 finavalu_temp[6] + 51/82 finavalu_temp[7] + 33/164 finavalu_temp[8] + //
// 12/41 finavalu_temp[9] + finavalu_temp[11]) ) //
// x[i+1] = x[i] + h. //
// //
// The error is estimated to be //
// err = -41/840 * h * ( k1 + finavalu_temp[10] - finavalu_temp[11] - finavalu_temp[12]) //
// The step size h is then scaled by the scale factor //
// scale = 0.8 * | epsilon * y[i] / [err * (xmax - x[0])] | ^ 1/7 //
//     The scale factor is further constrained to 0.125 < scale < 4.0.       //
// The new step size is h := scale * h. //
////////////////////////////////////////////////////////////////////////////////
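/* A minimal illustrative sketch (not called by the solver below) of the step-size
   control described above. 'epsilon', 'xmax' and 'x0' are assumed to be supplied by
   the caller of the solver; 'fp' is the floating-point typedef used in this file.
   A production version would also guard against err == 0. */
static fp next_step_size(fp h, fp err, fp y, fp epsilon, fp xmax, fp x0)
{
  fp scale = 0.8 * pow(fabs(epsilon * y / (err * (xmax - x0))), 1.0 / 7.0);
  if (scale < 0.125) scale = 0.125; /* do not shrink the step too aggressively */
  if (scale > 4.0)   scale = 4.0;   /* ... nor grow it too aggressively        */
  return scale * h;
}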
////////////////////////////////////////////////////////////////////////////////
// static fp Runge_Kutta(fp (*f)(fp,fp), fp *y, //
// fp x0, fp h) //
// //
// Description: //
// This routine uses Fehlberg's embedded 7th and 8th order methods to //
// approximate the solution of the differential equation y'=f(x,y) with //
// the initial condition y = y[0] at x = x0. The value at x + h is //
// returned in y[1]. The function returns err / h ( the absolute error //
// per step size ). //
// //
// Arguments: //
// fp *f Pointer to the function which returns the slope at (x,y) of //
// integral curve of the differential equation y' = f(x,y) //
// which passes through the point (x0,y[0]). //
// fp y[] On input y[0] is the initial value of y at x, on output //
// y[1] is the solution at x + h. //
// fp x Initial value of x. //
// fp h Step size //
// //
// Return Values: //
// This routine returns the err / h. The solution of y(x) at x + h is //
// returned in y[1]. //
// //
////////////////////////////////////////////////////////////////////////////////
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
// PARTICULAR SOLVER FUNCTION
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
void embedded_fehlberg_7_8( fp timeinst,
fp h,
fp* initvalu,
fp* finavalu,
fp* error,
fp* parameter,
fp* com,
fp* d_initvalu,
fp* d_finavalu,
fp* d_params,
fp* d_com) {
//======================================================================================================================================================
// VARIABLES
//======================================================================================================================================================
static const fp c_1_11 = 41.0 / 840.0;
static const fp c6 = 34.0 / 105.0;
static const fp c_7_8= 9.0 / 35.0;
static const fp c_9_10 = 9.0 / 280.0;
static const fp a2 = 2.0 / 27.0;
static const fp a3 = 1.0 / 9.0;
static const fp a4 = 1.0 / 6.0;
static const fp a5 = 5.0 / 12.0;
static const fp a6 = 1.0 / 2.0;
static const fp a7 = 5.0 / 6.0;
static const fp a8 = 1.0 / 6.0;
static const fp a9 = 2.0 / 3.0;
static const fp a10 = 1.0 / 3.0;
static const fp b31 = 1.0 / 36.0;
static const fp b32 = 3.0 / 36.0;
static const fp b41 = 1.0 / 24.0;
static const fp b43 = 3.0 / 24.0;
static const fp b51 = 20.0 / 48.0;
static const fp b53 = -75.0 / 48.0;
static const fp b54 = 75.0 / 48.0;
static const fp b61 = 1.0 / 20.0;
static const fp b64 = 5.0 / 20.0;
static const fp b65 = 4.0 / 20.0;
static const fp b71 = -25.0 / 108.0;
static const fp b74 = 125.0 / 108.0;
static const fp b75 = -260.0 / 108.0;
static const fp b76 = 250.0 / 108.0;
static const fp b81 = 31.0/300.0;
static const fp b85 = 61.0/225.0;
static const fp b86 = -2.0/9.0;
static const fp b87 = 13.0/900.0;
static const fp b91 = 2.0;
static const fp b94 = -53.0/6.0;
static const fp b95 = 704.0 / 45.0;
static const fp b96 = -107.0 / 9.0;
static const fp b97 = 67.0 / 90.0;
static const fp b98 = 3.0;
static const fp b10_1 = -91.0 / 108.0;
static const fp b10_4 = 23.0 / 108.0;
static const fp b10_5 = -976.0 / 135.0;
static const fp b10_6 = 311.0 / 54.0;
static const fp b10_7 = -19.0 / 60.0;
static const fp b10_8 = 17.0 / 6.0;
static const fp b10_9 = -1.0 / 12.0;
static const fp b11_1 = 2383.0 / 4100.0;
static const fp b11_4 = -341.0 / 164.0;
static const fp b11_5 = 4496.0 / 1025.0;
static const fp b11_6 = -301.0 / 82.0;
static const fp b11_7 = 2133.0 / 4100.0;
static const fp b11_8 = 45.0 / 82.0;
static const fp b11_9 = 45.0 / 164.0;
static const fp b11_10 = 18.0 / 41.0;
static const fp b12_1 = 3.0 / 205.0;
static const fp b12_6 = - 6.0 / 41.0;
static const fp b12_7 = - 3.0 / 205.0;
static const fp b12_8 = - 3.0 / 41.0;
static const fp b12_9 = 3.0 / 41.0;
static const fp b12_10 = 6.0 / 41.0;
static const fp b13_1 = -1777.0 / 4100.0;
static const fp b13_4 = -341.0 / 164.0;
static const fp b13_5 = 4496.0 / 1025.0;
static const fp b13_6 = -289.0 / 82.0;
static const fp b13_7 = 2193.0 / 4100.0;
static const fp b13_8 = 51.0 / 82.0;
static const fp b13_9 = 33.0 / 164.0;
static const fp b13_10 = 12.0 / 41.0;
static const fp err_factor = -41.0 / 840.0;
fp h2_7 = a2 * h;
fp timeinst_temp;
fp* initvalu_temp;
fp** finavalu_temp;
int i;
//======================================================================================================================================================
// TEMPORARY STORAGE ALLOCATION
//======================================================================================================================================================
initvalu_temp= (fp *) malloc(EQUATIONS* sizeof(fp));
finavalu_temp= (fp **) malloc(13* sizeof(fp *));
for (i= 0; i<13; i++){
finavalu_temp[i]= (fp *) malloc(EQUATIONS* sizeof(fp));
}
//======================================================================================================================================================
// EVALUATIONS
//======================================================================================================================================================
//===================================================================================================
// 1
//===================================================================================================
timeinst_temp = timeinst;
for(i=0; i<EQUATIONS; i++){
initvalu_temp[i] = initvalu[i] ;
// printf("initvalu[%d] = %f\n", i, initvalu[i]);
}
master( timeinst_temp,
initvalu_temp,
parameter,
finavalu_temp[0],
com,
d_initvalu,
d_finavalu,
d_params,
d_com);
//===================================================================================================
// 2
//===================================================================================================
timeinst_temp = timeinst+h2_7;
for(i=0; i<EQUATIONS; i++){
initvalu_temp[i] = initvalu[i] + h2_7 * (finavalu_temp[0][i]);
}
master( timeinst_temp,
initvalu_temp,
parameter,
finavalu_temp[1],
com,
d_initvalu,
d_finavalu,
d_params,
d_com);
//===================================================================================================
// 3
//===================================================================================================
timeinst_temp = timeinst+a3*h;
for(i=0; i<EQUATIONS; i++){
initvalu_temp[i] = initvalu[i] + h * ( b31*finavalu_temp[0][i] + b32*finavalu_temp[1][i]);
}
master( timeinst_temp,
initvalu_temp,
parameter,
finavalu_temp[2],
com,
d_initvalu,
d_finavalu,
d_params,
d_com);
//===================================================================================================
// 4
//===================================================================================================
timeinst_temp = timeinst+a4*h;
for(i=0; i<EQUATIONS; i++){
initvalu_temp[i] = initvalu[i] + h * ( b41*finavalu_temp[0][i] + b43*finavalu_temp[2][i]) ;
}
master( timeinst_temp,
initvalu_temp,
parameter,
finavalu_temp[3],
com,
d_initvalu,
d_finavalu,
d_params,
d_com);
//===================================================================================================
// 5
//===================================================================================================
timeinst_temp = timeinst+a5*h;
for(i=0; i<EQUATIONS; i++){
initvalu_temp[i] = initvalu[i] + h * ( b51*finavalu_temp[0][i] + b53*finavalu_temp[2][i] + b54*finavalu_temp[3][i]) ;
}
master( timeinst_temp,
initvalu_temp,
parameter,
finavalu_temp[4],
com,
d_initvalu,
d_finavalu,
d_params,
d_com);
//===================================================================================================
// 6
//===================================================================================================
timeinst_temp = timeinst+a6*h;
for(i=0; i<EQUATIONS; i++){
initvalu_temp[i] = initvalu[i] + h * ( b61*finavalu_temp[0][i] + b64*finavalu_temp[3][i] + b65*finavalu_temp[4][i]) ;
}
master( timeinst_temp,
initvalu_temp,
parameter,
finavalu_temp[5],
com,
d_initvalu,
d_finavalu,
d_params,
d_com);
//===================================================================================================
// 7
//===================================================================================================
timeinst_temp = timeinst+a7*h;
for(i=0; i<EQUATIONS; i++){
initvalu_temp[i] = initvalu[i] + h * ( b71*finavalu_temp[0][i] + b74*finavalu_temp[3][i] + b75*finavalu_temp[4][i] + b76*finavalu_temp[5][i]);
}
master( timeinst_temp,
initvalu_temp,
parameter,
finavalu_temp[6],
com,
d_initvalu,
d_finavalu,
d_params,
d_com);
//===================================================================================================
// 8
//===================================================================================================
timeinst_temp = timeinst+a8*h;
for(i=0; i<EQUATIONS; i++){
initvalu_temp[i] = initvalu[i] + h * ( b81*finavalu_temp[0][i] + b85*finavalu_temp[4][i] + b86*finavalu_temp[5][i] + b87*finavalu_temp[6][i]);
}
master( timeinst_temp,
initvalu_temp,
parameter,
finavalu_temp[7],
com,
d_initvalu,
d_finavalu,
d_params,
d_com);
//===================================================================================================
// 9
//===================================================================================================
timeinst_temp = timeinst+a9*h;
for(i=0; i<EQUATIONS; i++){
initvalu_temp[i] = initvalu[i] + h * ( b91*finavalu_temp[0][i] + b94*finavalu_temp[3][i] + b95*finavalu_temp[4][i] + b96*finavalu_temp[5][i] + b97*finavalu_temp[6][i]+ b98*finavalu_temp[7][i]) ;
}
master( timeinst_temp,
initvalu_temp,
parameter,
finavalu_temp[8],
com,
d_initvalu,
d_finavalu,
d_params,
d_com);
//===================================================================================================
// 10
//===================================================================================================
timeinst_temp = timeinst+a10*h;
for(i=0; i<EQUATIONS; i++){
initvalu_temp[i] = initvalu[i] + h * ( b10_1*finavalu_temp[0][i] + b10_4*finavalu_temp[3][i] + b10_5*finavalu_temp[4][i] + b10_6*finavalu_temp[5][i] + b10_7*finavalu_temp[6][i] + b10_8*finavalu_temp[7][i] + b10_9*finavalu_temp[8] [i]) ;
}
master( timeinst_temp,
initvalu_temp,
parameter,
finavalu_temp[9],
com,
d_initvalu,
d_finavalu,
d_params,
d_com);
//===================================================================================================
// 11
//===================================================================================================
timeinst_temp = timeinst+h;
for(i=0; i<EQUATIONS; i++){
initvalu_temp[i] = initvalu[i] + h * ( b11_1*finavalu_temp[0][i] + b11_4*finavalu_temp[3][i] + b11_5*finavalu_temp[4][i] + b11_6*finavalu_temp[5][i] + b11_7*finavalu_temp[6][i] + b11_8*finavalu_temp[7][i] + b11_9*finavalu_temp[8][i]+ b11_10 * finavalu_temp[9][i]);
}
master( timeinst_temp,
initvalu_temp,
parameter,
finavalu_temp[10],
com,
d_initvalu,
d_finavalu,
d_params,
d_com);
//===================================================================================================
// 12
//===================================================================================================
timeinst_temp = timeinst;
for(i=0; i<EQUATIONS; i++){
initvalu_temp[i] = initvalu[i] + h * ( b12_1*finavalu_temp[0][i] + b12_6*finavalu_temp[5][i] + b12_7*finavalu_temp[6][i] + b12_8*finavalu_temp[7][i] + b12_9*finavalu_temp[8][i] + b12_10 * finavalu_temp[9][i]) ;
}
master( timeinst_temp,
initvalu_temp,
parameter,
finavalu_temp[11],
com,
d_initvalu,
d_finavalu,
d_params,
d_com);
//===================================================================================================
// 13
//===================================================================================================
timeinst_temp = timeinst+h;
for(i=0; i<EQUATIONS; i++){
initvalu_temp[i] = initvalu[i] + h * ( b13_1*finavalu_temp[0][i] + b13_4*finavalu_temp[3][i] + b13_5*finavalu_temp[4][i] + b13_6*finavalu_temp[5][i] + b13_7*finavalu_temp[6][i] + b13_8*finavalu_temp[7][i] + b13_9*finavalu_temp[8][i] + b13_10*finavalu_temp[9][i] + finavalu_temp[11][i]) ;
}
master( timeinst_temp,
initvalu_temp,
parameter,
finavalu_temp[12],
com,
d_initvalu,
d_finavalu,
d_params,
d_com);
//======================================================================================================================================================
// FINAL VALUE
//======================================================================================================================================================
for(i=0; i<EQUATIONS; i++){
finavalu[i]= initvalu[i] + h * (c_1_11 * (finavalu_temp[0][i] + finavalu_temp[10][i]) + c6 * finavalu_temp[5][i] + c_7_8 * (finavalu_temp[6][i] + finavalu_temp[7][i]) + c_9_10 * (finavalu_temp[8][i] + finavalu_temp[9][i]) );
}
//======================================================================================================================================================
// RETURN
//======================================================================================================================================================
for(i=0; i<EQUATIONS; i++){
error[i] = fabs(err_factor * (finavalu_temp[0][i] + finavalu_temp[10][i] - finavalu_temp[11][i] - finavalu_temp[12][i]));
}
//======================================================================================================================================================
// DEALLOCATION
//======================================================================================================================================================
  free(initvalu_temp);
  for (i = 0; i < 13; i++) {
    free(finavalu_temp[i]);
  }
  free(finavalu_temp);
}
#include "data.hpp"
#include <cassert>
#include <cstdlib>
#include <cmath>
#include <time.h>
#ifdef _MSC_VER
#include "windows.h"
#else
#include <sys/time.h>
#endif
#ifndef NDEBUG
#include <iostream>
#endif
#if ENABLE_GPU
#include "datacu.hpp"
#endif
using namespace vl ;
/* -------------------------------------------------------------------
* Helpers
* ---------------------------------------------------------------- */
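// Extended Euclidean algorithm: gcd(a, b, u, v) returns gcd(a, b) and sets u and v
// such that a*u + b*v == gcd(a, b). For example, gcd(12, 18, u, v) returns 6 with
// u = -1 and v = 1.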
int vl::gcd(int a, int b, int& u, int& v)
{
assert(a >= 0) ;
assert(b >= 0) ;
int u_ = 0 ;
int v_ = 1 ;
u = 1 ;
v = 0 ;
while (b > 0) {
int tmp ;
int q = a / b ;
tmp = b ;
b = a - q*b ;
a = tmp ;
tmp = u_ ;
u_ = u - q*u_ ;
u = tmp ;
tmp = v_ ;
v_ = v - q*v_ ;
v = tmp ;
}
return a ;
}
size_t vl::getTime()
{
#ifdef _MSC_VER
  LARGE_INTEGER t, freq ;
  QueryPerformanceCounter(&t) ;
  QueryPerformanceFrequency(&freq) ;
  return (size_t)(t.QuadPart * 1000000ULL / (unsigned long long)freq.QuadPart) ;
#else
struct timeval time ;
int error = gettimeofday(&time, NULL) ;
assert(error == 0) ;
return (size_t)time.tv_sec * 1000000 + (size_t)time.tv_usec ;
#endif
}
const char *
vl::getErrorMessage(ErrorCode error)
{
static char const * messages[] = {
"success",
"unsupported feature error",
"CUDA error",
"cuDNN error",
"cuBLAS error",
"out of memory error",
"out of GPU memory error",
"unknown error",
"timeout",
"no data",
"illegal message",
"interrupted"
} ;
if (error < VLE_Success || error > VLE_Unknown) {
error = VLE_Unknown ;
}
return messages[error] ;
}
static int
getTypeSize(DataType dataType)
{
switch (dataType) {
case VLDT_Char : return sizeof(char) ;
case VLDT_Float : return sizeof(float) ;
case VLDT_Double : return sizeof(double) ;
default: abort() ;
}
return 0 ;
}
namespace vl { namespace impl {
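// Table-driven rejection sampler for the standard normal distribution (a
// Ziggurat-style scheme): K precomputed layers with abscissae tx[] and ordinates
// ty[]; most samples are accepted immediately from a layer's core rectangle, the
// remainder via the wedge test or, for the base layer, the exponential tail test.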
class Randn
{
public:
Randn()
{
tx[1] = 3.655420419026953 ;
ty[1] = f(tx[1]) ;
double A = ty[1] * (tx[1] + 1./tx[1]) ;
for (int k = 1 ; k < K ; ++k) {
ty[k] = f(tx[k]) ;
tx[k+1] = invf(A/tx[k] + ty[k]) ;
}
tx[0] = A / ty[1] ;
}
double sample() {
while (true) {
int k = rand() % K ;
double u = (double)rand() / RAND_MAX ;
double x = tx[k] * (2.*u - 1.) ;
double absx = fabs(x) ;
if (absx <= tx[k+1]) { return x ; }
double v = (double)rand() / RAND_MAX ;
if (k > 1) {
double y = v * (ty[k+1] - ty[k]) + ty[k] ;
if (y <= f(absx)) { return x ; }
} else {
double w = (double)rand() / RAND_MAX ;
double dx = - log(w) / tx[1] ;
if (- 2.0 * log(v) > dx*dx) {
double s = (rand() & 0x1) ? +1 : -1 ;
return s * (tx[1] + dx) ;
}
}
}
}
private:
enum { K = 256, } ;
double tx [K+1] ;
double ty [K+1] ;
double f(double x) {
// 1/sqrt(2*pi)
return 0.398942280401433 * exp(-0.5 * x * x) ;
}
double invf(double y) {
// sqrt(2*pi)
return sqrt(-2.0 * log(2.506628274631000 * y));
}
} ;
} } // namespaces
double vl::randn()
{
static vl::impl::Randn rnd ;
return rnd.sample() ;
}
/* -------------------------------------------------------------------
* Buffer
* ---------------------------------------------------------------- */
vl::impl::Buffer::Buffer()
:
deviceType(vl::VLDT_CPU), dataType(VLDT_Char),
size(0), memory(NULL), numReallocations(0)
{ }
void*
vl::impl::Buffer::getMemory()
{
return memory ;
}
int
vl::impl::Buffer::getNumReallocations() const
{
return numReallocations ;
}
vl::ErrorCode
vl::impl::Buffer::init(DeviceType deviceType_, DataType dataType_, size_t size_)
{
bool ok =
  (deviceType == deviceType_) &&
  (dataType == dataType_) &&
  (size >= size_) ;
if (ok) { return vl::VLE_Success ; }
clear() ;
void * memory_ = NULL ;
size_t sizeInBytes = getTypeSize(dataType_) * size_ ;
switch (deviceType_) {
case vl::VLDT_CPU:
memory_ = malloc(sizeInBytes) ;
if (memory_ == NULL) { return vl::VLE_OutOfMemory ; }
break ;
case vl::VLDT_GPU:
#if ENABLE_GPU
cudaError_t error = cudaMalloc(&memory_, sizeInBytes) ;
if (error != cudaSuccess) { return vl::VLE_OutOfMemory ; }
break ;
#else
abort() ;
#endif
}
deviceType = deviceType_ ;
dataType = dataType_ ;
size = size_ ;
memory = memory_ ;
numReallocations ++ ;
return vl::VLE_Success ;
}
void
vl::impl::Buffer::clear()
{
if (memory != NULL) {
switch (deviceType) {
case vl::VLDT_CPU:
free(memory) ;
break ;
case vl::VLDT_GPU:
#if ENABLE_GPU
cudaFree(memory) ;
break ;
#else
abort() ;
#endif
}
}
deviceType = vl::VLDT_CPU ;
dataType= VLDT_Char ;
size = 0 ;
memory = NULL ;
}
void
vl::impl::Buffer::invalidateGpu()
{
if (deviceType == vl::VLDT_GPU) {
memory = NULL ;
clear() ;
}
}
/* -------------------------------------------------------------------
* Context
* ---------------------------------------------------------------- */
vl::Context::Context()
:
lastError(vl::VLE_Success), lastErrorMessage(), cudaHelper(NULL)
{ }
vl::CudaHelper &
vl::Context::getCudaHelper()
{
#ifdef ENABLE_GPU
if (!cudaHelper) {
cudaHelper = new CudaHelper() ;
}
#else
abort() ;
#endif
return *cudaHelper ;
}
void vl::Context::clear()
{
#ifndef NDEBUG
std::cout<<"Context::clear()"<<std::endl ;
#endif
clearWorkspace(VLDT_CPU) ;
clearAllOnes(VLDT_CPU) ;
#if ENABLE_GPU
clearWorkspace(VLDT_GPU) ;
clearAllOnes(VLDT_GPU) ;
if (cudaHelper) {
delete cudaHelper ;
cudaHelper = NULL ;
}
#endif
}
void
vl::Context::invalidateGpu()
{
#if ENABLE_GPU
workspace[vl::VLDT_GPU].invalidateGpu() ;
allOnes[vl::VLDT_GPU].invalidateGpu() ;
getCudaHelper().invalidateGpu() ;
#endif
}
vl::Context::~Context()
{
clear() ;
#ifndef NDEBUG
std::cout<<"Context::~Context()"<<std::endl ;
#endif
}
/* -------------------------------------------------------------------
* Context errors
* ---------------------------------------------------------------- */
void
vl::Context::resetLastError()
{
lastError = vl::VLE_Success ;
lastErrorMessage = std::string() ;
}
vl::ErrorCode
vl::Context::passError(vl::ErrorCode error, char const* description)
{
if (error != vl::VLE_Success) {
if (description) {
lastErrorMessage = std::string(description) + ": " + lastErrorMessage ;
}
}
return error ;
}
vl::ErrorCode
vl::Context::setError(vl::ErrorCode error, char const* description)
{
if (error != vl::VLE_Success ) {
lastError = error ;
std::string message = getErrorMessage(error) ;
if (description) {
message = std::string(description) + " [" + message + "]" ;
}
#if ENABLE_GPU
if (error == vl::VLE_Cuda) {
std::string cudaMessage = getCudaHelper().getLastCudaErrorMessage() ;
if (cudaMessage.length() > 0) {
message += " [cuda: " + cudaMessage + "]" ;
}
}
if (error == vl::VLE_Cublas) {
std::string cublasMessage = getCudaHelper().getLastCublasErrorMessage() ;
if (cublasMessage.length() > 0) {
message += " [cublas:" + cublasMessage + "]" ;
}
}
#endif
#if ENABLE_CUDNN
if (error == vl::VLE_Cudnn) {
std::string cudnnMessage = getCudaHelper().getLastCudnnErrorMessage() ;
if (cudnnMessage.length() > 0) {
message += " [cudnn: " + cudnnMessage + "]" ;
}
}
#endif
lastErrorMessage = message ;
}
return error ;
}
vl::ErrorCode
vl::Context::getLastError() const
{
return lastError ;
}
std::string const&
vl::Context::getLastErrorMessage() const
{
return lastErrorMessage ;
}
/* -------------------------------------------------------------------
* Context workspace
* ---------------------------------------------------------------- */
void *
vl::Context::getWorkspace(DeviceType deviceType, size_t size)
{
vl::ErrorCode error = workspace[deviceType].init(deviceType, VLDT_Char, size) ;
if (error != VLE_Success) {
setError(error, "getWorkspace") ;
return NULL ;
}
return workspace[deviceType].getMemory() ;
}
void
vl::Context::clearWorkspace(DeviceType deviceType)
{
workspace[deviceType].clear() ;
}
/* -------------------------------------------------------------------
* Context allOnes
* ---------------------------------------------------------------- */
#if ENABLE_GPU
template<typename type> __global__ void
setToOnes (type * data, int size)
{
int index = threadIdx.x + blockIdx.x * blockDim.x ;
if (index < size) data[index] = type(1.0) ;
}
#endif
void *
vl::Context::getAllOnes(DeviceType deviceType, DataType dataType, size_t size)
{
int n = allOnes[deviceType].getNumReallocations() ;
void * data = NULL ;
// make sure that there is enough space for the buffer
vl::ErrorCode error = allOnes[deviceType].init(deviceType, dataType, size) ;
if (error != VLE_Success) { goto done ; }
data = allOnes[deviceType].getMemory() ;
// detect if a new buffer has been allocated and if so initialise it
if (n < allOnes[deviceType].getNumReallocations()) {
switch (deviceType) {
case vl::VLDT_CPU:
for (int i = 0 ; i < size ; ++i) {
if (dataType == VLDT_Float) {
((float*)data)[i] = 1.0f ;
} else {
((double*)data)[i] = 1.0 ;
}
}
break ;
case vl::VLDT_GPU:
#if ENABLE_GPU
if (dataType == VLDT_Float) {
setToOnes<float>
<<<divideAndRoundUp(size, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS>>>
((float*)data, size) ;
} else {
setToOnes<double>
<<<divideAndRoundUp(size, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS>>>
((double*)data, size) ;
}
error = getCudaHelper().catchCudaError() ;
break ;
#else
abort() ;
return NULL ;
#endif
}
}
done:
if (setError(error, "getAllOnes: ") == vl::VLE_Success) {
return data ;
} else {
return NULL ;
}
}
void
vl::Context::clearAllOnes(DeviceType deviceType)
{
allOnes[deviceType].clear() ;
}
/* -------------------------------------------------------------------
* TensorShape
* ---------------------------------------------------------------- */
vl::TensorShape::TensorShape()
: numDimensions(0)
{ }
vl::TensorShape::TensorShape(TensorShape const & t)
: numDimensions(t.numDimensions)
{
for (unsigned k = 0 ; k < numDimensions ; ++k) {
dimensions[k] = t.dimensions[k] ;
}
}
vl::TensorShape::TensorShape(size_t height, size_t width, size_t depth, size_t size)
: numDimensions(4)
{
dimensions[0] = height ;
dimensions[1] = width ;
dimensions[2] = depth ;
dimensions[3] = size ;
}
void vl::TensorShape::clear()
{
numDimensions = 0 ;
}
void vl::TensorShape::setDimensions(size_t const * newDimensions, size_t newNumDimensions)
{
assert(newNumDimensions <= VL_TENSOR_SHAPE_MAX_NUM_DIMENSIONS) ;
for (int k = 0 ; k < newNumDimensions ; ++k) {
dimensions[k] = newDimensions[k] ;
}
numDimensions = newNumDimensions ;
}
void vl::TensorShape::setDimension(size_t num, size_t dimension)
{
assert(num + 1 <= VL_TENSOR_SHAPE_MAX_NUM_DIMENSIONS) ;
if (num + 1 > numDimensions) {
size_t x = (getNumElements() > 0) ;
for (size_t k = numDimensions ; k < num ; ++k) {
dimensions[k] = x ;
}
numDimensions = num + 1 ;
}
dimensions[num] = dimension ;
}
size_t vl::TensorShape::getDimension(size_t num) const
{
if (num + 1 > numDimensions) {
return 1 ;
}
return dimensions[num] ;
}
size_t vl::TensorShape::getNumDimensions() const
{
return numDimensions ;
}
size_t const * vl::TensorShape::getDimensions() const
{
return dimensions ;
}
size_t vl::TensorShape::getNumElements() const
{
if (numDimensions == 0) {
return 0 ;
}
size_t n = 1 ;
for (unsigned k = 0 ; k < numDimensions ; ++k) { n *= dimensions[k] ; }
return n ;
}
size_t vl::TensorShape::getHeight() const { return getDimension(0) ; }
size_t vl::TensorShape::getWidth() const { return getDimension(1) ; }
size_t vl::TensorShape::getDepth() const { return getDimension(2) ; }
size_t vl::TensorShape::getSize() const { return getDimension(3) ; }
void vl::TensorShape::setHeight(size_t x) { setDimension(0,x) ; }
void vl::TensorShape::setWidth(size_t x) { setDimension(1,x) ; }
void vl::TensorShape::setDepth(size_t x) { setDimension(2,x) ; }
void vl::TensorShape::setSize(size_t x) { setDimension(3,x) ; }
bool vl::TensorShape::isEmpty() const { return getNumElements() == 0 ; }
bool vl::operator== (vl::TensorShape const & a, vl::TensorShape const & b)
{
size_t n = a.getNumDimensions() ;
if (b.getNumDimensions() != n) { return false ; }
size_t const * adims = a.getDimensions() ;
size_t const * bdims = b.getDimensions() ;
for (unsigned k =0 ; k < a.getNumDimensions() ; ++k) {
if (adims[k] != bdims[k]) { return false ; }
}
return true ;
}
void vl::TensorShape::reshape(size_t newNumDimensions)
{
assert(newNumDimensions <= VL_TENSOR_SHAPE_MAX_NUM_DIMENSIONS) ;
size_t n = getNumElements() ;
if (newNumDimensions > 0) {
setDimension(newNumDimensions - 1, 1) ;
numDimensions = newNumDimensions ;
size_t m = getNumElements() ;
if (m) {
dimensions[newNumDimensions - 1] *= (n / m) ;
} else if (n == 0) {
dimensions[newNumDimensions - 1] = 0 ;
}
} else {
numDimensions = newNumDimensions ;
}
}
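// For example, reshaping a 3 x 4 x 5 x 2 shape to 2 dimensions yields 3 x 40: the
// last retained dimension absorbs the ratio of the old to the new element count,
// so getNumElements() is preserved.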
void vl::TensorShape::reshape(TensorShape const & newShape)
{
operator=(newShape) ;
}
/* -------------------------------------------------------------------
* Tensor
* ---------------------------------------------------------------- */
vl::Tensor::Tensor()
: TensorShape(), dataType(VLDT_Float),
deviceType(VLDT_CPU), memory(NULL), memorySize(0)
{ }
vl::Tensor::Tensor(TensorShape const & shape, DataType dataType,
DeviceType deviceType, void * memory, size_t memorySize)
: TensorShape(shape),
dataType(dataType),
deviceType(deviceType),
memory(memory), memorySize(memorySize)
{ }
TensorShape vl::Tensor::getShape() const
{
return TensorShape(*this) ;
}
vl::DataType vl::Tensor::getDataType() const { return dataType ; }
void * vl::Tensor::getMemory() { return memory ; }
void vl::Tensor::setMemory(void * x) { memory = x ; }
vl::DeviceType vl::Tensor::getDeviceType() const { return deviceType ; }
bool vl::Tensor::isNull() const { return memory == NULL ; }
vl::Tensor::operator bool() const { return !isNull() ; }
#include "roipooling.hpp"
#include "../datacu.hpp"
#include <assert.h>
#include <cfloat>
#include <algorithm>
#include <sm_20_atomic_functions.h>
/* ---------------------------------------------------------------- */
/* Helpers */
/* ---------------------------------------------------------------- */
template<typename T>
struct Geom {
int subdivisions[2] ;
T transform[6] ;
Geom(int const subdivisions[2], double const transform[6])
{
this->subdivisions[0] = subdivisions[0] ;
this->subdivisions[1] = subdivisions[1] ;
this->transform[0] = transform[0] ;
this->transform[1] = transform[1] ;
this->transform[2] = transform[2] ;
this->transform[3] = transform[3] ;
this->transform[4] = transform[4] ;
this->transform[5] = transform[5] ;
}
} ;
struct Bounds {
int image, offset, hstart, hend, wstart, wend ;
bool isEmpty ;
} ;
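// Decode a flat output index into (bin row ph, bin column pw, channel pc, ROI pr)
// -- ph varies fastest -- and compute the corresponding input window
// [hstart, hend) x [wstart, wend) plus the offset of that ROI's image/channel plane.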
template<typename T>
__device__ __forceinline__ static Bounds
getBounds(int outputIndex,
int height, int width, int numChannels, int size,
const T* rois, int numROIs,
Geom<T> geom)
{
Bounds b ;
int ph = outputIndex ;
int pw = ph / geom.subdivisions[0] ;
int pc = pw / geom.subdivisions[1] ;
int pr = pc / numChannels ;
ph %= geom.subdivisions[0] ;
pw %= geom.subdivisions[1] ;
pc %= numChannels ;
rois += 5 * pr ;
  // Apply scale and offset to each ROI coordinate.
T u1_ = rois[1] ;
T v1_ = rois[2] ;
T u2_ = rois[3] ;
T v2_ = rois[4] ;
T u1 = geom.transform[0] * u1_ + geom.transform[2] * v1_ + geom.transform[4] ;
T v1 = geom.transform[1] * u1_ + geom.transform[3] * v1_ + geom.transform[5] ;
T u2 = geom.transform[0] * u2_ + geom.transform[2] * v2_ + geom.transform[4] ;
T v2 = geom.transform[1] * u2_ + geom.transform[3] * v2_ + geom.transform[5] ;
// First and last pixel of each ROI (rounded
// for compatibility with the Caffe definition).
int roi_image = (int)rois[0];
int roi_start_h = (int)round(v1) - 1 ;
int roi_start_w = (int)round(u1) - 1 ;
int roi_end_h = (int)round(v2) - 1 ;
int roi_end_w = (int)round(u2) - 1 ;
int roi_height = max(roi_end_h - roi_start_h + 1, 1) ;
int roi_width = max(roi_end_w - roi_start_w + 1, 1) ;
T bin_size_h = (T)roi_height / geom.subdivisions[0] ;
T bin_size_w = (T)roi_width / geom.subdivisions[1] ;
roi_image = min(max(roi_image - 1,0), (int)size - 1) ;
b.offset = (roi_image * numChannels + pc) * (width*height) ;
b.wstart = (int)floor(((T)pw) * bin_size_w) ;
b.wend = (int)ceil(((T)(pw + 1)) * bin_size_w) ;
b.wstart = min(max(b.wstart + roi_start_w, 0), (int)width) ;
b.wend = min(max(b.wend + roi_start_w, 0), (int)width) ;
b.hstart = (int)floor(((T)ph) * bin_size_h) ;
b.hend = (int)ceil(((T)(ph + 1)) * bin_size_h) ;
b.hstart = min(max(b.hstart + roi_start_h, 0), (int)height) ;
b.hend = min(max(b.hend + roi_start_h, 0), (int)height) ;
b.isEmpty = (b.hend <= b.hstart) || (b.wend <= b.wstart) ;
return b ;
}
/* ---------------------------------------------------------------- */
/* roipooling_average_forward */
/* ---------------------------------------------------------------- */
template<typename T> __global__ void
roipooling_average_kernel
(T* output,
const T* data, int height, int width, int numChannels, int size,
const T* rois, int numROIs,
Geom<T> geom)
{
int outputIndex = threadIdx.x + blockIdx.x * blockDim.x;
int outputVolume = geom.subdivisions[0] * geom.subdivisions[1] * numChannels * numROIs;
if (outputIndex < outputVolume) {
Bounds b = getBounds<T>(outputIndex,
height,width,numChannels,size,
rois,numROIs,
geom) ;
data += b.offset ;
T bestValue = 0;
const T coeff = ((T)1.) / (T)((b.wend-b.wstart) * (b.hend-b.hstart));
for (int w = b.wstart; w < b.wend; ++w) {
for (int h = b.hstart; h < b.hend; ++h) {
int index = w * height + h ;
bestValue += data[index] * coeff ;
}
}
output[outputIndex] = bestValue ;
}
}
/* ---------------------------------------------------------------- */
/* roipooling_max_forward */
/* ---------------------------------------------------------------- */
template<typename T> __global__ void
roipooling_max_kernel
(T* output,
const T* data, int height, int width, int numChannels, int size,
const T* rois, int numROIs,
Geom<T> geom)
{
int outputIndex = threadIdx.x + blockIdx.x * blockDim.x ;
int outputVolume = geom.subdivisions[0] * geom.subdivisions[1] * numChannels * numROIs ;
if (outputIndex < outputVolume) {
Bounds b = getBounds<T>(outputIndex,
height,width,numChannels,size,
rois,numROIs,
geom) ;
data += b.offset ;
if (! b.isEmpty) {
T bestValue = -FLT_MAX;
for (int w = b.wstart; w < b.wend; ++w) {
for (int h = b.hstart; h < b.hend; ++h) {
int index = w * height + h ;
bestValue = max(bestValue, data[index]) ;
}
}
output[outputIndex] = bestValue ;
} else {
output[outputIndex] = 0 ;
}
}
}
/* ---------------------------------------------------------------- */
/* atomicAdd */
/* ---------------------------------------------------------------- */
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
// an implementation of atomicAdd() for double (really slow)
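// It emulates the addition with a compare-and-swap loop on the 64-bit representation:
// re-read the current value and retry until no other thread has modified the address
// between the read and the swap.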
static __device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
/* ---------------------------------------------------------------- */
/* roipooling_average_backward */
/* ---------------------------------------------------------------- */
template<typename T> __global__ void
roipooling_average_backward_kernel
(T* derData,
const T* data, int height, int width, int numChannels, int size,
const T* rois, int numROIs,
const T* derOutput,
Geom<T> geom)
{
int outputIndex = threadIdx.x + blockIdx.x * blockDim.x;
int outputVolume = geom.subdivisions[0] * geom.subdivisions[1] * numChannels * numROIs ;
if (outputIndex < outputVolume) {
Bounds b = getBounds<T>(outputIndex,
height,width,numChannels,size,
rois,numROIs,
geom) ;
data += b.offset ;
derData += b.offset ;
const T coeff = ((T)1.) / (T)((b.wend-b.wstart)*(b.hend-b.hstart)) ;
for (int h = b.hstart; h < b.hend; ++h) {
for (int w = b.wstart; w < b.wend; ++w) {
int index = w * height + h ;
atomicAdd(derData + index, derOutput[outputIndex] * coeff) ;
}
}
}
}
/* ---------------------------------------------------------------- */
/* roipooling_max_backward */
/* ---------------------------------------------------------------- */
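// Backward pass of max pooling: recompute the argmax of each non-empty bin and
// route the entire output derivative to that single input location.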
template<typename T> __global__ void
roipooling_max_backward_kernel
(T* derData,
const T* data, int height, int width, int numChannels, int size,
const T* rois, int numROIs,
const T* derOutput,
Geom<T> geom)
{
int outputIndex = threadIdx.x + blockIdx.x * blockDim.x;
int outputVolume = geom.subdivisions[0] * geom.subdivisions[1] * numChannels * numROIs;
if (outputIndex < outputVolume) {
Bounds b = getBounds<T>(outputIndex,
height,width,numChannels,size,
rois,numROIs,
geom) ;
if (! b.isEmpty) {
data += b.offset ;
derData += b.offset ;
int bestIndex = min(b.wstart,width-1) * height + min(b.hstart,height-1);
T bestValue = -FLT_MAX;
for (int h = b.hstart; h < b.hend; ++h) {
for (int w = b.wstart; w < b.wend; ++w) {
int index = w * height + h ;
T value = data[index] ;
if (value > bestValue) {
bestValue = value ;
bestIndex = index ;
}
}
}
atomicAdd(derData + bestIndex, derOutput[outputIndex]) ;
}
}
}
/* ---------------------------------------------------------------- */
/* Interface */
/* ---------------------------------------------------------------- */
namespace vl { namespace impl {
template <typename type>
struct roipooling_max<vl::VLDT_GPU, type>
{
static vl::ErrorCode
forward(type* output,
type const* data,
size_t height, size_t width, size_t numChannels, size_t size,
type const* rois,
size_t numROIs,
int const subdivisions[2],
double const transform[6])
{
int outputVolume = subdivisions[0] * subdivisions[1] * numChannels * numROIs ;
roipooling_max_kernel<type>
<<< divideAndRoundUp(outputVolume, VL_CUDA_NUM_THREADS),VL_CUDA_NUM_THREADS >>>
(output,
data, height, width, numChannels, size,
rois, numROIs,
Geom<type>(subdivisions,transform)) ;
cudaError_t status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
static vl::ErrorCode
backward(type* derData,
type const* data,
size_t height, size_t width, size_t numChannels, size_t size,
type const* rois,
size_t numROIs,
type const* derOutput,
int const subdivisions[2],
double const transform[6])
{
int outputVolume = subdivisions[0] * subdivisions[1] * numChannels * numROIs ;
roipooling_max_backward_kernel<type>
<<< divideAndRoundUp(outputVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(derData, data,
height, width, numChannels, size,
rois, numROIs,
derOutput,
Geom<type>(subdivisions,transform)) ;
cudaError_t status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
} ; // roipooling_max
template <typename type>
struct roipooling_average<vl::VLDT_GPU, type>
{
static vl::ErrorCode
forward(type* output,
type const* data,
size_t height, size_t width, size_t numChannels, size_t size,
type const* rois,
size_t numROIs,
int const subdivisions[2],
double const transform[6])
{
int outputVolume = subdivisions[0] * subdivisions[1] * numChannels * numROIs ;
roipooling_average_kernel<type>
<<< divideAndRoundUp(outputVolume, VL_CUDA_NUM_THREADS),VL_CUDA_NUM_THREADS >>>
(output, data,
height, width, numChannels, size,
rois, numROIs,
Geom<type>(subdivisions,transform)) ;
cudaError_t status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
static vl::ErrorCode
backward(type* derData,
type const* data, // <- this is not needed for avg pooling
size_t height, size_t width, size_t numChannels, size_t size,
type const* rois,
size_t numROIs,
type const* derOutput,
int const subdivisions[2],
double const transform[6])
{
int outputVolume = subdivisions[0] * subdivisions[1] * numChannels * numROIs ;
roipooling_average_backward_kernel<type>
<<< divideAndRoundUp(outputVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(derData, data,
height, width, numChannels, size,
rois, numROIs,
derOutput,
Geom<type>(subdivisions,transform)) ;
cudaError_t status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
} ; // roipooling_average
} } ; // namespace vl::impl
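// Host-side usage sketch (illustrative only): all pointers are device buffers
// prepared by the caller, and the subdivision / transform values below are
// placeholders, not values prescribed by this file.
//
//   int subdivisions[2] = {7, 7} ;
//   double transform[6] = {1, 0, 0, 1, 0, 0} ;
//   vl::ErrorCode err = vl::impl::roipooling_max<vl::VLDT_GPU, float>::forward(
//       output, data, height, width, numChannels, size,
//       rois, numROIs, subdivisions, transform) ;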
// Instantiations
template struct vl::impl::roipooling_max<vl::VLDT_GPU, float> ;
template struct vl::impl::roipooling_average<vl::VLDT_GPU, float> ;
#ifdef ENABLE_DOUBLE
template struct vl::impl::roipooling_max<vl::VLDT_GPU, double> ;
template struct vl::impl::roipooling_average<vl::VLDT_GPU, double> ;
#endif
#include <iostream>
#include <chrono>
#include <algorithm>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "bbcu/bbcu.h"
#include "bbcu/bbcu_util.h"
#include "Common.cuh"
#include "StochasticLut.cuh"
// -------------------------------------------------
// Forward
// -------------------------------------------------
// real type
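// Forward training kernel: each block row handles blockDim.y nodes while threads
// along x stride over frames. Per (node, frame) the kernel evaluates a stochastic
// 2^N-entry LUT (StochasticLut::NodeForward, with the weights staged in shared
// memory), accumulates the batch mean/variance of the LUT output, then re-runs the
// forward pass to batch-normalize with gamma/beta and either binarizes
// (binary_mode) or clamps the result to [0, 1] with a hard-tanh.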
template<int N=6, typename T=float, int MAX_FRAME_UNIT=32, int MAX_NODE_UNIT=32>
__global__ void kernal_DifferentiableLut_ForwardTraining
(
T const *x_buf,
T *y_buf,
int const *input_index,
T const *W_buf,
T *mean_buf,
T *rstd_buf,
T *running_mean_buf,
T *running_var_buf,
T gamma,
T beta,
T momentum,
T unbinarize_bias,
T reciprocal_frame_size,
int node_size,
int frame_size,
int frame_stride,
int lut_binarize,
int binary_mode
)
{
int node_id = threadIdx.y;
int node = blockIdx.y * blockDim.y + threadIdx.y;
int id = threadIdx.x;
int id_step = blockDim.x;
__shared__ T sbuf[MAX_NODE_UNIT][MAX_FRAME_UNIT];
__shared__ T W[(1 << N)][MAX_NODE_UNIT];
T const *x_ptr[N];
T *y_ptr;
if ( node < node_size ) {
// read W
for ( int i = id; i < (1 << N); i += id_step ) {
W[i][node_id] = W_buf[node * (1 << N) + i];
if ( lut_binarize ) {
W[i][node_id] = W[i][node_id] > (T)0.5 ? (T)1.0 : (T)0.0;
}
}
// read input index
for ( int i = 0; i < N; ++i ) {
x_ptr[i] = &x_buf[frame_stride * input_index[N*node + i]];
}
y_ptr = &y_buf[node * frame_stride];
}
__syncthreads();
// measure mean and variance
T s1 = 0, c1 = 0, y1, t1;
T s2 = 0, c2 = 0, y2, t2;
for (int frame = id; frame < frame_size; frame += id_step) {
if ( node < node_size ) {
// forward computation
T x[N];
if ( binary_mode ) {
for ( int i = 0; i < N; ++i) {
x[i] = (T)0.5 + ((x_ptr[i][frame] > (T)0.5) ? +unbinarize_bias : -unbinarize_bias);
}
}
else {
for ( int i = 0; i < N; ++i) {
x[i] = max((T)0.0, min((T)1.0, x_ptr[i][frame]));
}
}
T y = StochasticLut<N, T, MAX_NODE_UNIT>::NodeForward(node_id, x, W);
// accumulate sums of y and y^2 (Kahan-compensated)
y1 = y - c1;
t1 = s1 + y1;
c1 = (t1 - s1) - y1;
s1 = t1;
y2 = (y * y) - c2;
t2 = s2 + y2;
c2 = (t2 - s2) - y2;
s2 = t2;
}
}
s1 = device_fp32_LocalSum(s1, sbuf[node_id]);
s2 = device_fp32_LocalSum(s2, sbuf[node_id]);
T mean = s1 * reciprocal_frame_size;
T var = max(1.0e-5f, (s2 * reciprocal_frame_size) - (mean * mean));
T rstd = rsqrt(var);
// write back the batch statistics
if (id == 0) {
if ( node < node_size ) {
running_mean_buf[node] = running_mean_buf[node] * momentum + mean * ((T)1.0 - momentum);
running_var_buf[node] = running_var_buf[node] * momentum + var * ((T)1.0 - momentum);
mean_buf[node] = mean;
rstd_buf[node] = rstd;
}
}
// normalize
for ( int frame = id; frame < frame_size; frame += id_step) {
if ( node < node_size ) {
// forward computation
T x[N];
if ( binary_mode ) {
for ( int i = 0; i < N; ++i) {
x[i] = (T)0.5 + ((x_ptr[i][frame] > (T)0.5) ? +unbinarize_bias : -unbinarize_bias);
}
}
else {
for ( int i = 0; i < N; ++i) {
x[i] = max((T)0.0, min((T)1.0, x_ptr[i][frame]));
}
}
T y = StochasticLut<N, T, MAX_NODE_UNIT>::NodeForward(node_id, x, W);
y = (y - mean) * rstd;
y = y * gamma + beta;
if ( binary_mode ) {
// binarize
y = (y > (T)0.5) ? (T)1.0 : (T)0.0;
}
else {
// hard-tanh
y = min(y, (T)1.0);
y = max(y, (T)0.0);
}
y_ptr[frame] = y;
}
}
}
template <int N>
BBCU_DLL_EXPORT int bbcu_fp32_DifferentiableLutN_ForwardTraining
(
float const *dev_x_buf,
float *dev_y_buf,
int const *dev_input_index,
float const *dev_W,
float *dev_mean_buf,
float *dev_rstd_buf,
float *dev_running_mean_buf,
float *dev_running_var_buf,
float gamma,
float beta,
float momentum,
float unbinarize_bias,
int node_size,
int frame_size,
int frame_stride,
int lut_binarize,
int binary_mode,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
unsigned int const THREAD_SIZE = 256;
unsigned int const MAX_FRAME_UNIT = 256;
unsigned int const MAX_NODE_UNIT = 16;
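// Choose a 2D thread block of THREAD_SIZE threads (frames along x, nodes along y):
// shrink whichever dimension exceeds the actual problem size, then clamp to
// MAX_FRAME_UNIT / MAX_NODE_UNIT.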
#if 0
dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT);
while ( (int)block.x / 2 >= frame_size && block.x > 32 ) { block.x /= 2; block.y *= 2; }
while ( (int)block.y / 2 >= node_size ) { block.y /= 2; }
#else
dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT);
while ( (int)block.y / 2 >= node_size ) { block.y /= 2; block.x *= 2;}
while ( (int)block.x / 2 >= frame_size && block.x > 32) { block.x /= 2; }
#endif
block.x = std::min(block.x, MAX_FRAME_UNIT);
block.y = std::min(block.y, MAX_NODE_UNIT);
dim3 grid(1, (node_size + (block.y - 1)) / block.y);
kernal_DifferentiableLut_ForwardTraining<N, float, MAX_FRAME_UNIT, MAX_NODE_UNIT><<<grid, block, 0, streamId>>>(
dev_x_buf,
dev_y_buf,
dev_input_index,
dev_W,
dev_mean_buf,
dev_rstd_buf,
dev_running_mean_buf,
dev_running_var_buf,
gamma,
beta,
momentum,
unbinarize_bias,
1.0f / (float)frame_size,
node_size,
frame_size,
frame_stride,
lut_binarize,
binary_mode
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
// bit packing binary
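// Bit-packed variant: x_buf / y_buf store 32 frames per int word. For frame f the
// word index is (f >> 5) and the bit mask is (1 << (f & 0x1f)); input bits are
// unpacked to 0.5 +/- unbinarize_bias before evaluating the LUT, and the output
// bits of a warp are merged with device_int_ShuffleOr before a single thread
// writes each word.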
template<int N=6, typename T=float, int MAX_FRAME_UNIT=32, int MAX_NODE_UNIT=32>
__global__ void kernal_bit_DifferentiableLut_ForwardTraining
(
int const *x_buf,
int *y_buf,
int const *input_index,
T const *W_buf,
T *mean_buf,
T *rstd_buf,
T *running_mean_buf,
T *running_var_buf,
T gamma,
T beta,
T momentum,
T unbinarize_bias,
T reciprocal_frame_size,
int node_size,
int frame_size,
int frame_stride,
int lut_binarize
)
{
int node_id = threadIdx.y;
int node = blockIdx.y * blockDim.y + threadIdx.y;
int id = threadIdx.x;
int id_step = blockDim.x;
__shared__ T sbuf[MAX_NODE_UNIT][MAX_FRAME_UNIT];
__shared__ T W[(1 << N)][MAX_NODE_UNIT];
int const *x_ptr[N];
int *y_ptr;
if ( node < node_size ) {
// read W
for ( int i = id; i < (1 << N); i += id_step ) {
W[i][node_id] = W_buf[node * (1 << N) + i];
if ( lut_binarize ) {
W[i][node_id] = W[i][node_id] > (T)0.5 ? (T)1.0 : (T)0.0;
}
}
// read input index
for ( int i = 0; i < N; ++i ) {
x_ptr[i] = &x_buf[frame_stride * input_index[N*node + i]];
}
y_ptr = &y_buf[node * frame_stride];
}
__syncthreads();
// measure mean and variance
T s1 = 0, c1 = 0, y1, t1;
T s2 = 0, c2 = 0, y2, t2;
for (int frame = id; frame < frame_size; frame += id_step) {
if ( node < node_size ) {
// forward computation
int bit = (1 << (frame & 0x1f));
int unit = (frame >> 5);
T x[N];
for ( int i = 0; i < N; ++i) {
x[i] = (T)0.5 + ((x_ptr[i][unit] & bit) ? +unbinarize_bias : -unbinarize_bias);
}
T y = StochasticLut<N, T, MAX_NODE_UNIT>::NodeForward(node_id, x, W);
// printf("[0] n=%3d f=%3d y=%10f\n", node, frame, y);
// accumulate sums of y and y^2 (Kahan-compensated)
y1 = y - c1;
t1 = s1 + y1;
c1 = (t1 - s1) - y1;
s1 = t1;
y2 = (y * y) - c2;
t2 = s2 + y2;
c2 = (t2 - s2) - y2;
s2 = t2;
}
}
s1 = device_LocalSumX<float>(s1, sbuf[node_id]);
s2 = device_LocalSumX<float>(s2, sbuf[node_id]);
T mean = s1 * reciprocal_frame_size;
T var = max(1.0e-5f, (s2 * reciprocal_frame_size) - (mean * mean));
T rstd = rsqrt(var);
// if ( node < node_size && id == 0 ) {
//// printf("[0] n=%3d s1=%10f s2=%10f mean=%10f var=%10f rstd=%10f\n", node, s1, s2, mean, var, rstd);
// printf("0\t%3d\t%.20e\t%.20e\t%.20e\t%.20e\t%.20e\n", node, s1, s2, mean, var, rstd);
// }
// write back the batch statistics
if (id == 0) {
if ( node < node_size ) {
running_mean_buf[node] = running_mean_buf[node] * momentum + mean * ((T)1.0 - momentum);
running_var_buf[node] = running_var_buf[node] * momentum + var * ((T)1.0 - momentum);
mean_buf[node] = mean;
rstd_buf[node] = rstd;
}
}
// normalize
int loop_size = ((frame_size + blockDim.x - 1) & ~(blockDim.x - 1));
for ( int frame = id; frame < loop_size; frame += id_step) {
int unit = (frame >> 5);
int bit = (frame & 0x1f);
int bit_mask = (1 << bit);
int y_mask = 0;
if ( node < node_size && frame < frame_size) {
// forward computation
T x[N];
for ( int i = 0; i < N; ++i) {
x[i] = (T)0.5 + ((x_ptr[i][unit] & bit_mask) ? +unbinarize_bias : -unbinarize_bias);
}
T y = StochasticLut<N, T, MAX_NODE_UNIT>::NodeForward(node_id, x, W);
y = (y - mean) * rstd;
y = y * gamma + beta;
if ( y > (T)0.5 ) {
y_mask = bit_mask;
}
}
y_mask = device_int_ShuffleOr(y_mask);
if ( bit == 0 ) {
if ( node < node_size && frame < frame_size ) {
y_ptr[unit] = y_mask;
}
}
}
}
template <int N>
BBCU_DLL_EXPORT int bbcu_bit_fp32_DifferentiableLutN_ForwardTraining
(
int const *dev_x_buf,
int *dev_y_buf,
int const *dev_input_index,
float const *dev_W,
float *dev_mean_buf,
float *dev_rstd_buf,
float *dev_running_mean_buf,
float *dev_running_var_buf,
float gamma,
float beta,
float momentum,
float unbinarize_bias,
int node_size,
int frame_size,
int frame_stride,
int lut_binarize,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
unsigned int const THREAD_SIZE = 256;
unsigned int const MAX_FRAME_UNIT = 256;
unsigned int const MAX_NODE_UNIT = 8; // keep this no larger than THREAD_SIZE/32
#if 0
dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT);
while ( (int)block.x / 2 >= frame_size && block.x > 32 ) { block.x /= 2; block.y *= 2; }
while ( (int)block.y / 2 >= node_size ) { block.y /= 2; }
#else
dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT);
while ( (int)block.y / 2 >= node_size ) { block.y /= 2; block.x *= 2;}
while ( (int)block.x / 2 >= frame_size && block.x > 32) { block.x /= 2; }
#endif
block.x = std::min(block.x, MAX_FRAME_UNIT);
block.y = std::min(block.y, MAX_NODE_UNIT);
dim3 grid(1, (node_size + (block.y - 1)) / block.y);
kernal_bit_DifferentiableLut_ForwardTraining<N, float, MAX_FRAME_UNIT, MAX_NODE_UNIT><<<grid, block, 0, streamId>>>(
dev_x_buf,
dev_y_buf,
dev_input_index,
dev_W,
dev_mean_buf,
dev_rstd_buf,
dev_running_mean_buf,
dev_running_var_buf,
gamma,
beta,
momentum,
unbinarize_bias,
1.0f / (float)frame_size,
node_size,
frame_size,
frame_stride,
lut_binarize
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
// -------------------------------------------------
// Forward Inference
// -------------------------------------------------
// real type
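// Inference variant: instead of batch statistics computed on the fly, the stored
// running mean/variance are used, with rstd = 1 / (sqrt(var) + 1e-7).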
template<int N=6, typename T=float, int MAX_FRAME_UNIT=32, int MAX_NODE_UNIT=32>
__global__ void kernal_DifferentiableLut_ForwardInference
(
T const *x_buf,
T *y_buf,
int const *input_index,
T const *W_buf,
T const *running_mean_buf,
T const *running_var_buf,
T gamma,
T beta,
T unbinarize_bias,
int node_size,
int frame_size,
int frame_stride,
int lut_binarize,
int binary_mode
)
{
int node_id = threadIdx.y;
int node = blockIdx.y * blockDim.y + threadIdx.y;
int id = threadIdx.x;
int id_step = blockDim.x;
__shared__ T W[(1 << N)][MAX_NODE_UNIT];
T const *x_ptr[N];
T *y_ptr;
if ( node < node_size ) {
// read W
for ( int i = id; i < (1 << N); i += id_step ) {
W[i][node_id] = W_buf[node * (1 << N) + i];
if ( lut_binarize ) {
W[i][node_id] = W[i][node_id] > (T)0.5 ? (T)1.0 : (T)0.0;
}
}
// read input index
for ( int i = 0; i < N; ++i ) {
x_ptr[i] = &x_buf[frame_stride * input_index[N * node + i]];
}
y_ptr = &y_buf[node * frame_stride];
}
__syncthreads();
if ( node < node_size ) {
T mean = running_mean_buf[node];
T var = running_var_buf[node];
T rstd = (T)1.0 / (sqrt(var) + (T)1.0e-7);
for ( int frame = id; frame < frame_size; frame += id_step) {
// forward computation
T x[N];
if ( binary_mode ) {
for ( int i = 0; i < N; ++i) {
x[i] = (T)0.5 + ((x_ptr[i][frame] > (T)0.5) ? +unbinarize_bias : -unbinarize_bias);
}
}
else {
for ( int i = 0; i < N; ++i) {
x[i] = max((T)0.0, min((T)1.0, x_ptr[i][frame]));
}
}
T y = StochasticLut<N, T, MAX_NODE_UNIT>::NodeForward(node_id, x, W);
y = ((y - mean) * rstd) * gamma + beta;
if ( binary_mode ) {
y = (y > (T)0.5) ? (T)1.0 : (T)0.0;
}
y_ptr[frame] = y;
}
}
}
template <int N>
BBCU_DLL_EXPORT int bbcu_fp32_DifferentiableLutN_ForwardInference
(
float const *dev_x_buf,
float *dev_y_buf,
int const *dev_input_index,
float const *dev_W,
float const *running_mean_buf,
float const *running_var_buf,
float gamma,
float beta,
float unbinarize_bias,
int node_size,
int frame_size,
int frame_stride,
int lut_binarize,
int binary_mode,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
unsigned int const THREAD_SIZE = 256;
unsigned int const MAX_FRAME_UNIT = 256;
unsigned int const MAX_NODE_UNIT = 8; // keep this no larger than THREAD_SIZE/32
#if 0
dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT);
while ( (int)block.x / 2 >= frame_size && block.x > 32 ) { block.x /= 2; block.y *= 2; }
while ( (int)block.y / 2 >= node_size ) { block.y /= 2; }
#else
dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT);
while ( (int)block.y / 2 >= node_size ) { block.y /= 2; block.x *= 2;}
while ( (int)block.x / 2 >= frame_size && block.x > 32) { block.x /= 2; }
#endif
block.x = std::min(block.x, MAX_FRAME_UNIT);
block.y = std::min(block.y, MAX_NODE_UNIT);
dim3 grid(1, (node_size + (block.y - 1)) / block.y);
kernal_DifferentiableLut_ForwardInference<N, float, MAX_FRAME_UNIT, MAX_NODE_UNIT><<<grid, block, 0, streamId>>>(
dev_x_buf,
dev_y_buf,
dev_input_index,
dev_W,
running_mean_buf,
running_var_buf,
gamma,
beta,
unbinarize_bias,
node_size,
frame_size,
frame_stride,
lut_binarize,
binary_mode
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
// bit packing binary
template<int N=6, typename T=float, int MAX_FRAME_UNIT=32, int MAX_NODE_UNIT=32>
__global__ void kernal_bit_DifferentiableLut_ForwardInference
(
int const *x_buf,
int *y_buf,
int const *input_index,
T const *W_buf,
T const *running_mean_buf,
T const *running_var_buf,
T gamma,
T beta,
T unbinarize_bias,
int node_size,
int frame_size,
int frame_stride,
int lut_binarize
)
{
int node_id = threadIdx.y;
int node = blockIdx.y * blockDim.y + threadIdx.y;
int id = threadIdx.x;
int id_step = blockDim.x;
__shared__ T W[(1 << N)][MAX_NODE_UNIT];
int const *x_ptr[N];
int *y_ptr;
if ( node < node_size ) {
// read W
for ( int i = id; i < (1 << N); i += id_step ) {
W[i][node_id] = W_buf[node * (1 << N) + i];
if ( lut_binarize ) {
W[i][node_id] = W[i][node_id] > (T)0.5 ? (T)1.0 : (T)0.0;
}
}
// read input index
for ( int i = 0; i < N; ++i ) {
x_ptr[i] = &x_buf[frame_stride * input_index[N * node + i]];
}
y_ptr = &y_buf[node * frame_stride];
}
__syncthreads();
if ( node < node_size ) {
T mean = running_mean_buf[node];
T var = running_var_buf[node];
T rstd = (T)1.0 / (sqrt(var) + (T)1.0e-7);
int loop_size = ((frame_size + blockDim.x - 1) & ~(blockDim.x - 1));
for ( int frame = id; frame < loop_size; frame += id_step) {
int unit = (frame >> 5);
int bit = (frame & 0x1f);
int bit_mask = (1 << bit);
int y_mask = 0;
if ( node < node_size && frame < frame_size) {
// forward computation
T x[N];
for ( int i = 0; i < N; ++i) {
x[i] = (T)0.5 + ((x_ptr[i][unit] & bit_mask) ? +unbinarize_bias : -unbinarize_bias);
}
T y = StochasticLut<N, T, MAX_NODE_UNIT>::NodeForward(node_id, x, W);
y = ((y - mean) * rstd) * gamma + beta;
if ( y > (T)0.5 ) {
y_mask = bit_mask;
}
}
y_mask = device_int_ShuffleOr(y_mask);
if ( bit == 0 ) {
if ( node < node_size && frame < frame_size ) {
y_ptr[unit] = y_mask;
}
}
}
}
}
template <int N>
BBCU_DLL_EXPORT int bbcu_bit_fp32_DifferentiableLutN_ForwardInference
(
int const *dev_x_buf,
int *dev_y_buf,
int const *dev_input_index,
float const *dev_W,
float const *running_mean_buf,
float const *running_var_buf,
float gamma,
float beta,
float unbinarize_bias,
int node_size,
int frame_size,
int frame_stride,
int lut_binarize,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
unsigned int const THREAD_SIZE = 256;
unsigned int const MAX_FRAME_UNIT = 256;
unsigned int const MAX_NODE_UNIT = 8; // keep this no larger than THREAD_SIZE/32
#if 0
dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT);
while ( (int)block.x / 2 >= frame_size && block.x > 32 ) { block.x /= 2; block.y *= 2; }
while ( (int)block.y / 2 >= node_size ) { block.y /= 2; }
#else
dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT);
while ( (int)block.y / 2 >= node_size ) { block.y /= 2; block.x *= 2;}
while ( (int)block.x / 2 >= frame_size && block.x > 32) { block.x /= 2; }
#endif
block.x = std::min(block.x, MAX_FRAME_UNIT);
block.y = std::min(block.y, MAX_NODE_UNIT);
dim3 grid(1, (node_size + (block.y - 1)) / block.y);
kernal_bit_DifferentiableLut_ForwardInference<N, float, MAX_FRAME_UNIT, MAX_NODE_UNIT><<<grid, block, 0, streamId>>>(
dev_x_buf,
dev_y_buf,
dev_input_index,
dev_W,
running_mean_buf,
running_var_buf,
gamma,
beta,
unbinarize_bias,
node_size,
frame_size,
frame_stride,
lut_binarize
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
// -------------------------------------------------
// Backward
// -------------------------------------------------
// real type
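// The backward pass is split into two kernels:
//   Phase0 recomputes the forward LUT output, gates dy with the hard-tanh mask,
//          and reduces the batch-norm terms dmean/dvar per node.
//   Phase1 recomputes the forward pass again, forms dx from dy, dmean and dvar,
//          back-propagates it through the stochastic LUT (NodeBackward) and
//          accumulates the per-node weight gradients dW.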
template<int N=6, typename T=float, int MAX_FRAME_UNIT=256, int MAX_NODE_UNIT=16>
__global__ void kernal_DifferentiableLut_BackwardPhase0
(
T const *x_buf,
T const *dy_buf,
int const *input_index,
T const *W_buf,
T *dW_buf,
T const *mean_buf,
T const *rstd_buf,
T *dmean_buf,
T *dvar_buf,
T gamma,
T beta,
T unbinarize_bias,
T reciprocal_frame_size,
int node_size,
int frame_size,
int frame_stride,
int lut_binarize,
int binary_mode
)
{
int node_id = threadIdx.y;
int node = blockIdx.y * blockDim.y + threadIdx.y;
int id = threadIdx.x;
int id_step = blockDim.x;
__shared__ T sbuf[MAX_NODE_UNIT][MAX_FRAME_UNIT];
__shared__ T W[(1 << N)][MAX_NODE_UNIT];
T const *x_ptr[N];
T const *dy_ptr;
// initialize dW
if ( node < node_size ) {
// read W
for ( int i = id; i < (1 << N); i += id_step ) {
W[i][node_id] = W_buf[node * (1 << N) + i];
if ( lut_binarize ) {
W[i][node_id] = W[i][node_id] > (T)0.5 ? (T)1.0 : (T)0.0;
}
}
// init pointer
for ( int i = 0; i < N; ++i ) {
int input_node = input_index[N*node + i];
x_ptr[i] = &x_buf[input_node * frame_stride];
}
dy_ptr = &dy_buf[node * frame_stride];
}
__syncthreads();
T mean;
T rstd;
if ( node < node_size ) {
mean = mean_buf[node];
rstd = rstd_buf[node];
}
T rstd2 = rstd * rstd;
T dmeanx = 0;
T dstd = 0;
for ( int frame = id; frame < frame_size; frame += id_step ) {
if ( node < node_size ) {
// recompute x
T x_vec[N];
if ( binary_mode ) {
for ( int i = 0; i < N; ++i) {
x_vec[i] = (T)0.5 +((x_ptr[i][frame] > (T)0.5) ? +unbinarize_bias : -unbinarize_bias);
}
}
else {
for ( int i = 0; i < N; ++i) {
x_vec[i] = max((T)0.0, min((T)1.0, x_ptr[i][frame]));
}
}
T x = StochasticLut<N, T, MAX_NODE_UNIT>::NodeForward(node_id, x_vec, W);
T tanh_x = ((x - mean) * rstd) * gamma + beta;
// hard-tanh
T dy = dy_ptr[frame];
if (tanh_x <= (T)0.0) { dy = (T)0.0; }
if (tanh_x >= (T)1.0) { dy = (T)0.0; }
// BatchNorm
T xc = x - mean;
// T xn = xc * rstd;
T dxn = gamma * dy;
dstd += -(dxn * xc * rstd2);
dmeanx += -(dxn * rstd);
}
}
// reduce along the x (frame) dimension within the block
dstd = device_LocalSumX<T>(dstd, sbuf[node_id]);
dmeanx = device_LocalSumX<T>(dmeanx, sbuf[node_id]);
T dvar = dstd * rstd;
T dmean = (dmeanx - (mean * dvar)) * reciprocal_frame_size;
if ( node < node_size ) {
if ( id == 0 ) {
dvar_buf[node] = dvar;
dmean_buf[node] = dmean;
}
}
}
template<int N=6, typename T=float, int MAX_FRAME_UNIT=256, int MAX_NODE_UNIT=16>
__global__ void kernal_DifferentiableLut_BackwardPhase1
(
T const *x_buf,
T const *dy_buf,
T *dx_buf,
int const *input_index,
T const *W_buf,
T *dW_buf,
T const *mean_buf,
T const *rstd_buf,
T const *dmean_buf,
T const *dvar_buf,
T gamma,
T beta,
T unbinarize_bias,
T reciprocal_frame_size,
int node_size,
int frame_size,
int frame_stride,
int dx_frame_stride,
int lut_binarize,
int binary_mode
)
{
int node_id = threadIdx.y;
int node = blockIdx.y * blockDim.y + threadIdx.y;
int id = threadIdx.x;
int id_step = blockDim.x;
__shared__ T sbuf[MAX_NODE_UNIT][MAX_FRAME_UNIT];
__shared__ T dW_prev[(1 << N)][MAX_NODE_UNIT];
__shared__ T W[(1 << N)][MAX_NODE_UNIT];
T dW[(1 << N)];
T const *x_ptr[N];
T const *dy_ptr;
// initialize dW
if ( node < node_size ) {
for ( int i = 0; i < (1 << N); ++i) {
dW[i] = 0;
}
for ( int i = id; i < (1 << N); i += id_step ) {
dW_prev[i][node_id] = dW_buf[node * (1 << N) + i];
}
// read W
for ( int i = id; i < (1 << N); i += id_step ) {
W[i][node_id] = W_buf[node * (1 << N) + i];
if ( lut_binarize ) {
W[i][node_id] = W[i][node_id] > (T)0.5 ? (T)1.0 : (T)0.0;
}
}
// init pointer
for ( int i = 0; i < N; ++i ) {
int input_node = input_index[N*node + i];
x_ptr[i] = &x_buf[input_node * frame_stride];
}
dy_ptr = &dy_buf[node * frame_stride];
}
T mean;
T rstd;
T dmean;
T dvar;
if ( node < node_size ) {
mean = mean_buf[node];
rstd = rstd_buf[node];
dmean = dmean_buf[node];
dvar = dvar_buf[node];
}
for ( int frame = id; frame < frame_size; frame += id_step ) {
if ( node < node_size ) {
// recompute x
T x_vec[N];
if ( binary_mode ) {
for ( int i = 0; i < N; ++i) {
x_vec[i] = (T)0.5 +((x_ptr[i][frame] > (T)0.5) ? +unbinarize_bias : -unbinarize_bias);
}
}
else {
for ( int i = 0; i < N; ++i) {
x_vec[i] = max((T)0.0, min((T)1.0, x_ptr[i][frame]));
}
}
T x = StochasticLut<N, T, MAX_NODE_UNIT>::NodeForward(node_id, x_vec, W);
T tanh_x = ((x - mean) * rstd) * gamma + beta;
// hard-tanh
T dy = dy_ptr[frame];
if (tanh_x <= (T)0.0) { dy = (T)0.0; }
if (tanh_x >= (T)1.0) { dy = (T)0.0; }
T dxn = dy * gamma;
T dxc = dxn * rstd;
T dx = dxc + dmean + (x * dvar * reciprocal_frame_size);
StochasticLut<N, T, MAX_NODE_UNIT>::NodeBackward(node_id, x_vec, dx, &dx_buf[node*N*dx_frame_stride + frame], W, dW, dx_frame_stride);
}
}
for ( int i = 0; i < (1 << N); ++i ) {
dW[i] = device_LocalSumX<T>(dW[i], sbuf[node_id]);
}
if ( node < node_size ) {
if ( id == 0 ) {
for ( int i = 0; i < (1 << N); ++i) {
dW_buf[node*(1 << N) + i] = dW[i] + dW_prev[i][node_id];
}
}
}
}
template <int N>
BBCU_DLL_EXPORT int bbcu_fp32_DifferentiableLutN_Backward
(
float const *dev_x_buf,
float const *dev_dy_buf,
float *dev_dx_buf,
float *dev_dx_tmp,
int const *dev_input_index,
int const *dev_reverse_index,
float const *dev_W,
float *dev_dW,
float const *dev_mean_buf,
float const *dev_rstd_buf,
float *dev_dmean_tmp,
float *dev_dvar_tmp,
float gamma,
float beta,
float unbinarize_bias,
int reverse_index_stride,
int input_node_size,
int output_node_size,
int frame_size,
int frame_stride,
int tmp_frame_size,
int tmp_frame_stride,
int lut_binarize,
int binary_mode,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
{
unsigned int const THREAD_SIZE = 256;
unsigned int const MAX_FRAME_UNIT = 256;
unsigned int const MAX_NODE_UNIT = 16;
#if 0
dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT);
while ( (int)block.x / 2 >= frame_size && frame_size > 32 ) { block.x /= 2; block.y *= 2; }
while ( (int)block.y / 2 >= output_node_size ) { block.y /= 2; }
#else
dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT);
while ( (int)block.y / 2 >= output_node_size ) { block.y /= 2; block.x *= 2;}
while ( (int)block.x / 2 >= frame_size && frame_size > 32 ) { block.x /= 2; }
#endif
block.x = std::min(block.x, MAX_FRAME_UNIT);
block.y = std::min(block.y, MAX_NODE_UNIT);
dim3 grid(1, (output_node_size + (block.y - 1)) / block.y);
kernal_DifferentiableLut_BackwardPhase0<N, float, MAX_FRAME_UNIT, MAX_NODE_UNIT><<<grid, block, 0, streamId>>>
(
dev_x_buf,
dev_dy_buf,
dev_input_index,
dev_W,
dev_dW,
dev_mean_buf,
dev_rstd_buf,
dev_dmean_tmp,
dev_dvar_tmp,
gamma,
beta,
unbinarize_bias,
1.0f / frame_size,
output_node_size,
frame_size,
frame_stride,
lut_binarize,
binary_mode
);
BB_CUDA_CHECK_LAST_ERROR();
}
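// dev_dx_tmp only holds tmp_frame_size frames, so Phase1 and the integration
// kernel below are run in chunks over the frame dimension.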
int frame_offset = 0;
do {
int unit_frame_size = frame_size - frame_offset;
if (unit_frame_size > tmp_frame_size) {
unit_frame_size = tmp_frame_size;
}
{
unsigned int const THREAD_SIZE = 256;
unsigned int const MAX_FRAME_UNIT = 256;
unsigned int const MAX_NODE_UNIT = 16;
#if 0
dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT);
while ( (int)block.x / 2 >= unit_frame_size && unit_frame_size > 32 ) { block.x /= 2; block.y *= 2; }
while ( (int)block.y / 2 >= output_node_size ) { block.y /= 2; }
#else
dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT);
while ( (int)block.y / 2 >= output_node_size ) { block.y /= 2; block.x *= 2;}
while ( (int)block.x / 2 >= unit_frame_size && unit_frame_size > 32 ) { block.x /= 2; }
#endif
block.x = std::min(block.x, MAX_FRAME_UNIT);
block.y = std::min(block.y, MAX_NODE_UNIT);
dim3 grid(1, (output_node_size + (block.y - 1)) / block.y);
kernal_DifferentiableLut_BackwardPhase1<N, float, MAX_FRAME_UNIT, MAX_NODE_UNIT><<<grid, block, 0, streamId>>>
(
dev_x_buf + frame_offset,
dev_dy_buf + frame_offset,
dev_dx_tmp,
dev_input_index,
dev_W,
dev_dW,
dev_mean_buf,
dev_rstd_buf,
dev_dmean_tmp,
dev_dvar_tmp,
gamma,
beta,
unbinarize_bias,
1.0f / frame_size,
output_node_size,
unit_frame_size,
frame_stride,
tmp_frame_stride,
lut_binarize,
binary_mode
);
BB_CUDA_CHECK_LAST_ERROR();
}
{
unsigned int const THREAD_SIZE = 1024;
unsigned int const MAX_FRAME_UNIT = 1024;
unsigned int const MAX_NODE_UNIT = 1024;
#if 1
dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT);
while ( (int)block.x / 2 >= unit_frame_size ) { block.x /= 2; block.y *= 2; }
while ( (int)block.y / 2 >= input_node_size ) { block.y /= 2; }
#else
dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT);
while ( (int)block.y / 2 >= input_node_size ) { block.y /= 2; block.x *= 2;}
while ( (int)block.x / 2 >= unit_frame_size ) { block.x /= 2; }
#endif
block.x = std::min(block.x, MAX_FRAME_UNIT);
block.y = std::min(block.y, MAX_NODE_UNIT);
dim3 grid((unit_frame_size + (block.x - 1)) / block.x, (input_node_size + (block.y - 1)) / block.y);
kernal_NodeIntegrateWithTable<float><<<grid, block>>>
(
dev_dx_tmp,
dev_dx_buf + frame_offset,
dev_reverse_index,
reverse_index_stride,
input_node_size,
unit_frame_size,
tmp_frame_stride,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
}
frame_offset += unit_frame_size;
} while ( frame_offset < frame_size );
return 0;
}
// bit packing binary
template<int N=6, typename T=float, int MAX_FRAME_UNIT=256, int MAX_NODE_UNIT=16>
__global__ void kernal_bit_DifferentiableLut_BackwardPhase0
(
int const *x_buf,
T const *dy_buf,
int const *input_index,
T const *W_buf,
T *dW_buf,
T const *mean_buf,
T const *rstd_buf,
T *dmean_buf,
T *dvar_buf,
T gamma,
T beta,
T unbinarize_bias,
T reciprocal_frame_size,
int node_size,
int frame_size,
int frame_stride,
int bin_frame_stride,
int lut_binarize
)
{
int node_id = threadIdx.y;
int node = blockIdx.y * blockDim.y + threadIdx.y;
int id = threadIdx.x;
int id_step = blockDim.x;
__shared__ T sbuf[MAX_NODE_UNIT][MAX_FRAME_UNIT];
__shared__ T W[(1 << N)][MAX_NODE_UNIT];
int const *x_ptr[N];
T const *dy_ptr;
// initialize dW
if ( node < node_size ) {
// read W
for ( int i = id; i < (1 << N); i += id_step ) {
W[i][node_id] = W_buf[node * (1 << N) + i];
if ( lut_binarize ) {
W[i][node_id] = W[i][node_id] > (T)0.5 ? (T)1.0 : (T)0.0;
}
}
// init pointer
for ( int i = 0; i < N; ++i ) {
int input_node = input_index[N*node + i];
x_ptr[i] = &x_buf[input_node * bin_frame_stride];
}
dy_ptr = &dy_buf[node * frame_stride];
}
__syncthreads();
T mean;
T rstd;
if ( node < node_size ) {
mean = mean_buf[node];
rstd = rstd_buf[node];
}
T rstd2 = rstd * rstd;
T dmeanx = 0;
T dstd = 0;
int loop_size = ((frame_size + blockDim.x - 1) & ~(blockDim.x - 1));
for ( int frame = id; frame < loop_size; frame += id_step ) {
if ( node < node_size && frame < frame_size ) {
int bit = (1 << (frame & 0x1f));
int unit = (frame >> 5);
// recompute x
T x_vec[N];
for ( int i = 0; i < N; ++i) {
x_vec[i] = (T)0.5 +((x_ptr[i][unit] & bit) ? +unbinarize_bias : -unbinarize_bias);
}
T x = StochasticLut<N, T, MAX_NODE_UNIT>::NodeForward(node_id, x_vec, W);
T tanh_x = ((x - mean) * rstd) * gamma + beta;
// hard-tanh
T dy = dy_ptr[frame];
if (tanh_x <= (T)0.0) { dy = (T)0.0; }
if (tanh_x >= (T)1.0) { dy = (T)0.0; }
// BatchNorm
T xc = x - mean;
// T xn = xc * rstd;
T dxn = gamma * dy;
dstd += -(dxn * xc * rstd2);
dmeanx += -(dxn * rstd);
}
}
dstd = device_fp32_LocalSum(dstd, sbuf[node_id]);
dmeanx = device_fp32_LocalSum(dmeanx, sbuf[node_id]);
T dvar = dstd * rstd;
T dmean = (dmeanx - (mean * dvar)) * reciprocal_frame_size;
if ( node < node_size ) {
if ( id == 0 ) {
dvar_buf[node] = dvar;
dmean_buf[node] = dmean;
}
}
}
template<int N=6, typename T=float, int MAX_FRAME_UNIT=256, int MAX_NODE_UNIT=16>
__global__ void kernal_bit_DifferentiableLut_BackwardPhase1
(
int const *x_buf,
T const *dy_buf,
T *dx_buf,
int const *input_index,
T const *W_buf,
T *dW_buf,
T const *mean_buf,
T const *rstd_buf,
T const *dmean_buf,
T const *dvar_buf,
T gamma,
T beta,
T unbinarize_bias,
T reciprocal_frame_size,
int node_size,
int frame_size,
int x_frame_stride,
int dy_frame_stride,
int dx_frame_stride,
int lut_binarize
)
{
int node_id = threadIdx.y;
int node = blockIdx.y * blockDim.y + threadIdx.y;
int id = threadIdx.x;
int id_step = blockDim.x;
__shared__ T sbuf[MAX_NODE_UNIT][MAX_FRAME_UNIT];
__shared__ T dW_prev[(1 << N)][MAX_NODE_UNIT];
__shared__ T W[(1 << N)][MAX_NODE_UNIT];
T dW[(1 << N)];
int const *x_ptr[N];
T const *dy_ptr;
// initialize dW
if ( node < node_size ) {
for ( int i = 0; i < (1 << N); ++i) {
dW[i] = 0;
}
for ( int i = id; i < (1 << N); i += id_step ) {
dW_prev[i][node_id] = dW_buf[node * (1 << N) + i];
}
// read W
for ( int i = id; i < (1 << N); i += id_step ) {
W[i][node_id] = W_buf[node * (1 << N) + i];
if ( lut_binarize ) {
W[i][node_id] = W[i][node_id] > (T)0.5 ? (T)1.0 : (T)0.0;
}
}
// init pointer
for ( int i = 0; i < N; ++i ) {
int input_node = input_index[N*node + i];
x_ptr[i] = &x_buf[input_node * x_frame_stride];
}
dy_ptr = &dy_buf[node * dy_frame_stride];
}
T mean;
T rstd;
T dmean;
T dvar;
if ( node < node_size ) {
mean = mean_buf[node];
rstd = rstd_buf[node];
dmean = dmean_buf[node];
dvar = dvar_buf[node];
}
int loop_size = ((frame_size + blockDim.x - 1) & ~(blockDim.x - 1));
for ( int frame = id; frame < loop_size; frame += id_step ) {
if ( node < node_size && frame < frame_size ) {
int bit = (1 << (frame & 0x1f));
int unit = (frame >> 5);
// recompute x
T x_vec[N];
for ( int i = 0; i < N; ++i) {
x_vec[i] = (T)0.5 + ((x_ptr[i][unit] & bit) ? +unbinarize_bias : -unbinarize_bias);
}
T x = StochasticLut<N, T, MAX_NODE_UNIT>::NodeForward(node_id, x_vec, W);
T tanh_x = ((x - mean) * rstd) * gamma + beta;
// hard-tanh
T dy = dy_ptr[frame];
if (tanh_x <= (T)0.0) { dy = (T)0.0; }
if (tanh_x >= (T)1.0) { dy = (T)0.0; }
T dxn = dy * gamma;
T dxc = dxn * rstd;
T dx = dxc + dmean + (x * dvar * reciprocal_frame_size);
StochasticLut<N, T, MAX_NODE_UNIT>::NodeBackward(node_id, x_vec, dx, &dx_buf[node*N*dx_frame_stride + frame], W, dW, dx_frame_stride);
}
}
for ( int i = 0; i < (1 << N); ++i ) {
dW[i] = device_LocalSumX<T>(dW[i], sbuf[node_id]);
}
if ( node < node_size ) {
if ( id == 0 ) {
for ( int i = 0; i < (1 << N); ++i) {
dW_buf[node*(1 << N) + i] = dW[i] + dW_prev[i][node_id];
}
}
}
}
template <int N>
BBCU_DLL_EXPORT int bbcu_bit_fp32_DifferentiableLutN_Backward
(
int const *dev_x_buf,
float const *dev_dy_buf,
float *dev_dx_buf,
float *dev_dx_tmp,
int const *dev_input_index,
int const *dev_reverse_index,
float const *dev_W,
float *dev_dW,
float const *dev_mean_buf,
float const *dev_rstd_buf,
float *dev_dmean_tmp,
float *dev_dvar_tmp,
float gamma,
float beta,
float unbinarize_bias,
int reverse_index_stride,
int input_node_size,
int output_node_size,
int frame_size,
int frame_stride,
int x_frame_stride,
int tmp_frame_size,
int tmp_frame_stride,
int lut_binarize,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
{
unsigned int const THREAD_SIZE = 256;
unsigned int const MAX_FRAME_UNIT = 256;
unsigned int const MAX_NODE_UNIT = 16;
#if 0
dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT);
while ( (int)block.x / 2 >= frame_size && frame_size > 32 ) { block.x /= 2; block.y *= 2; }
while ( (int)block.y / 2 >= output_node_size ) { block.y /= 2; }
#else
dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT);
while ( (int)block.y / 2 >= output_node_size ) { block.y /= 2; block.x *= 2;}
while ( (int)block.x / 2 >= frame_size && frame_size > 32 ) { block.x /= 2; }
#endif
block.x = std::min(block.x, MAX_FRAME_UNIT);
block.y = std::min(block.y, MAX_NODE_UNIT);
dim3 grid(1, (output_node_size + (block.y - 1)) / block.y);
kernal_bit_DifferentiableLut_BackwardPhase0<N, float, MAX_FRAME_UNIT, MAX_NODE_UNIT><<<grid, block, 0, streamId>>>
(
dev_x_buf,
dev_dy_buf,
dev_input_index,
dev_W,
dev_dW,
dev_mean_buf,
dev_rstd_buf,
dev_dmean_tmp,
dev_dvar_tmp,
gamma,
beta,
unbinarize_bias,
1.0f / frame_size,
output_node_size,
frame_size,
frame_stride,
x_frame_stride,
lut_binarize
);
BB_CUDA_CHECK_LAST_ERROR();
}
int frame_offset = 0;
do {
int unit_frame_size = frame_size - frame_offset;
if (unit_frame_size > tmp_frame_size) {
unit_frame_size = tmp_frame_size;
}
{
unsigned int const THREAD_SIZE = 256;
unsigned int const MAX_FRAME_UNIT = 256;
unsigned int const MAX_NODE_UNIT = 16;
#if 0
dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT);
while ( (int)block.x / 2 >= unit_frame_size && unit_frame_size > 32 ) { block.x /= 2; block.y *= 2; }
while ( (int)block.y / 2 >= output_node_size ) { block.y /= 2; }
#else
dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT);
while ( (int)block.y / 2 >= output_node_size ) { block.y /= 2; block.x *= 2;}
while ( (int)block.x / 2 >= unit_frame_size && unit_frame_size > 32 ) { block.x /= 2; }
#endif
block.x = std::min(block.x, MAX_FRAME_UNIT);
block.y = std::min(block.y, MAX_NODE_UNIT);
dim3 grid(1, (output_node_size + (block.y - 1)) / block.y);
kernal_bit_DifferentiableLut_BackwardPhase1<N, float, MAX_FRAME_UNIT, MAX_NODE_UNIT><<<grid, block, 0, streamId>>>
(
dev_x_buf + (frame_offset / 32),
dev_dy_buf + frame_offset,
dev_dx_tmp,
dev_input_index,
dev_W,
dev_dW,
dev_mean_buf,
dev_rstd_buf,
dev_dmean_tmp,
dev_dvar_tmp,
gamma,
beta,
unbinarize_bias,
1.0f / frame_size,
output_node_size,
unit_frame_size,
x_frame_stride,
frame_stride,
tmp_frame_stride,
lut_binarize
);
BB_CUDA_CHECK_LAST_ERROR();
}
{
unsigned int const THREAD_SIZE = 1024;
unsigned int const MAX_FRAME_UNIT = 1024;
unsigned int const MAX_NODE_UNIT = 1024;
#if 1
dim3 block(MAX_FRAME_UNIT, THREAD_SIZE / MAX_FRAME_UNIT);
while ( (int)block.x / 2 >= unit_frame_size ) { block.x /= 2; block.y *= 2; }
while ( (int)block.y / 2 >= input_node_size ) { block.y /= 2; }
#else
dim3 block(THREAD_SIZE / MAX_NODE_UNIT, MAX_NODE_UNIT);
while ( (int)block.y / 2 >= input_node_size ) { block.y /= 2; block.x *= 2;}
while ( (int)block.x / 2 >= unit_frame_size ) { block.x /= 2; }
#endif
block.x = std::min(block.x, MAX_FRAME_UNIT);
block.y = std::min(block.y, MAX_NODE_UNIT);
dim3 grid((unit_frame_size + (block.x - 1)) / block.x, (input_node_size + (block.y - 1)) / block.y);
kernal_NodeIntegrateWithTable<float><<<grid, block>>>
(
dev_dx_tmp,
dev_dx_buf + frame_offset,
dev_reverse_index,
reverse_index_stride,
input_node_size,
unit_frame_size,
tmp_frame_stride,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
}
frame_offset += unit_frame_size;
} while ( frame_offset < frame_size );
return 0;
}
// explicit instantiations
template BBCU_DLL_EXPORT int bbcu_fp32_DifferentiableLutN_ForwardTraining<6>(float const *, float *, int const *, float const *, float *, float *, float *, float *, float, float, float, float, int, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_fp32_DifferentiableLutN_ForwardTraining<5>(float const *, float *, int const *, float const *, float *, float *, float *, float *, float, float, float, float, int, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_fp32_DifferentiableLutN_ForwardTraining<4>(float const *, float *, int const *, float const *, float *, float *, float *, float *, float, float, float, float, int, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_fp32_DifferentiableLutN_ForwardTraining<3>(float const *, float *, int const *, float const *, float *, float *, float *, float *, float, float, float, float, int, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_fp32_DifferentiableLutN_ForwardTraining<2>(float const *, float *, int const *, float const *, float *, float *, float *, float *, float, float, float, float, int, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_bit_fp32_DifferentiableLutN_ForwardTraining<6>(int const *, int *, int const *, float const *, float *, float *, float *, float *, float, float, float, float, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_bit_fp32_DifferentiableLutN_ForwardTraining<5>(int const *, int *, int const *, float const *, float *, float *, float *, float *, float, float, float, float, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_bit_fp32_DifferentiableLutN_ForwardTraining<4>(int const *, int *, int const *, float const *, float *, float *, float *, float *, float, float, float, float, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_bit_fp32_DifferentiableLutN_ForwardTraining<3>(int const *, int *, int const *, float const *, float *, float *, float *, float *, float, float, float, float, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_bit_fp32_DifferentiableLutN_ForwardTraining<2>(int const *, int *, int const *, float const *, float *, float *, float *, float *, float, float, float, float, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_fp32_DifferentiableLutN_ForwardInference<6>(float const *, float *, int const *, float const *, float const *, float const *, float, float, float, int, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_fp32_DifferentiableLutN_ForwardInference<5>(float const *, float *, int const *, float const *, float const *, float const *, float, float, float, int, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_fp32_DifferentiableLutN_ForwardInference<4>(float const *, float *, int const *, float const *, float const *, float const *, float, float, float, int, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_fp32_DifferentiableLutN_ForwardInference<3>(float const *, float *, int const *, float const *, float const *, float const *, float, float, float, int, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_fp32_DifferentiableLutN_ForwardInference<2>(float const *, float *, int const *, float const *, float const *, float const *, float, float, float, int, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_bit_fp32_DifferentiableLutN_ForwardInference<6>(int const *, int *, int const *, float const *, float const *, float const *, float, float, float, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_bit_fp32_DifferentiableLutN_ForwardInference<5>(int const *, int *, int const *, float const *, float const *, float const *, float, float, float, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_bit_fp32_DifferentiableLutN_ForwardInference<4>(int const *, int *, int const *, float const *, float const *, float const *, float, float, float, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_bit_fp32_DifferentiableLutN_ForwardInference<3>(int const *, int *, int const *, float const *, float const *, float const *, float, float, float, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_bit_fp32_DifferentiableLutN_ForwardInference<2>(int const *, int *, int const *, float const *, float const *, float const *, float, float, float, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_fp32_DifferentiableLutN_Backward<6>(float const *, float const *, float *, float *, int const *, int const *, float const *, float *, float const *, float const *, float *, float *, float, float, float, int, int, int, int, int, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_fp32_DifferentiableLutN_Backward<5>(float const *, float const *, float *, float *, int const *, int const *, float const *, float *, float const *, float const *, float *, float *, float, float, float, int, int, int, int, int, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_fp32_DifferentiableLutN_Backward<4>(float const *, float const *, float *, float *, int const *, int const *, float const *, float *, float const *, float const *, float *, float *, float, float, float, int, int, int, int, int, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_fp32_DifferentiableLutN_Backward<3>(float const *, float const *, float *, float *, int const *, int const *, float const *, float *, float const *, float const *, float *, float *, float, float, float, int, int, int, int, int, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_fp32_DifferentiableLutN_Backward<2>(float const *, float const *, float *, float *, int const *, int const *, float const *, float *, float const *, float const *, float *, float *, float, float, float, int, int, int, int, int, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_bit_fp32_DifferentiableLutN_Backward<6>(int const *, float const *, float *, float *, int const *, int const *, float const *, float *, float const *, float const *, float *, float *, float, float, float, int, int, int, int, int, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_bit_fp32_DifferentiableLutN_Backward<5>(int const *, float const *, float *, float *, int const *, int const *, float const *, float *, float const *, float const *, float *, float *, float, float, float, int, int, int, int, int, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_bit_fp32_DifferentiableLutN_Backward<4>(int const *, float const *, float *, float *, int const *, int const *, float const *, float *, float const *, float const *, float *, float *, float, float, float, int, int, int, int, int, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_bit_fp32_DifferentiableLutN_Backward<3>(int const *, float const *, float *, float *, int const *, int const *, float const *, float *, float const *, float const *, float *, float *, float, float, float, int, int, int, int, int, int, int, int, int, cudaStream_t);
template BBCU_DLL_EXPORT int bbcu_bit_fp32_DifferentiableLutN_Backward<2>(int const *, float const *, float *, float *, int const *, int const *, float const *, float *, float const *, float const *, float *, float *, float, float, float, int, int, int, int, int, int, int, int, int, cudaStream_t);
// end of file
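// The following fragment assumes helpers from its surrounding framework that are
// not shown here (BlockReduce built on cub, CUDA_1D_KERNEL_LOOP, CAFFE_GET_BLOCKS,
// CAFFE_CUDA_NUM_THREADS, CAFFE_MAXIMUM_NUM_BLOCKS, SimpleArray, FixedDivisor,
// DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1). The moments kernels compute the
// variance as E[x^2] - E[x]^2, reducing each row/column/slice with one block.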
template <typename T>
__global__ void RowwiseMomentsCUDAKernel(
const int rows,
const int cols,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
const T scale = T(1) / static_cast<T>(cols);
for (int i = blockIdx.x; i < rows; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < cols; j += blockDim.x) {
const int X_index = i * cols + j;
#if __CUDA_ARCH__ >= 350
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Sum(m_val);
v_val = BlockReduce<T>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const T mu = m_val * scale;
mean[i] = mu;
variance[i] = v_val * scale - mu * mu;
}
__syncthreads();
}
}
template <typename T>
__global__ void ColwiseMomentsCUDAKernel(
const int rows,
const int cols,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
const T scale = T(1) / static_cast<T>(rows);
for (int i = blockIdx.x; i < cols; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < rows; j += blockDim.x) {
const int X_index = j * cols + i;
#if __CUDA_ARCH__ >= 350
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Sum(m_val);
v_val = BlockReduce<T>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const T mu = m_val * scale;
mean[i] = mu;
variance[i] = v_val * scale - mu * mu;
}
__syncthreads();
}
}
template <typename T, int D>
__global__ void MomentsCUDAKernel(
const int outer_size,
const int inner_size,
SimpleArray<int, D> X_strides,
SimpleArray<FixedDivisor<int>, D> Y_dims,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
const T scale = T(1) / static_cast<T>(inner_size);
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int X_index = 0;
int Y_index = i * inner_size + j;
#pragma unroll
for (int d = D - 1; d >= 0; --d) {
int r;
Y_dims.data[d].DivMod(Y_index, &Y_index, &r);
X_index += r * X_strides.data[d];
}
#if __CUDA_ARCH__ >= 350
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Sum(m_val);
v_val = BlockReduce<T>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const T mu = m_val * scale;
mean[i] = mu;
variance[i] = v_val * scale - mu * mu;
}
__syncthreads();
}
}
template <typename T>
__global__ void SetKernel(const int N, const T alpha, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = alpha;
}
}
#define CAFFE2_SPECIALIZED_CUDA_SET(T) \
template <> \
void Set<T>( \
const size_t N, const T alpha, T* Y, cudaStream_t context) { \
if (N == 0) { \
return; \
} \
if (alpha == T(0)) { \
cudaMemsetAsync(Y, 0, sizeof(T) * N, context); \
} else { \
SetKernel<T> \
<<<CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context>>>(N, alpha, Y); \
} \
}
CAFFE2_SPECIALIZED_CUDA_SET(float);
#undef CAFFE2_SPECIALIZED_CUDA_SET
void ComputeTransposeAxesForReduceOp(
const int num_dims,
const int num_reduce_axes,
const int* reduce_axes,
int* transpose_axes) {
const int d = num_dims - num_reduce_axes;
std::copy_n(reduce_axes, num_reduce_axes, transpose_axes + d);
std::sort(transpose_axes + d, transpose_axes + num_dims);
int p = 0;
int q = d;
for (int i = 0; i < num_dims; ++i) {
if (q < num_dims && i == transpose_axes[q]) {
++q;
} else {
transpose_axes[p++] = i;
}
}
}
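// Example: num_dims = 4, reduce_axes = {1, 3}  ->  transpose_axes = {0, 2, 1, 3}
// (kept axes first, in their original order, followed by the sorted reduce axes).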
void ComputeTransposedStrides(
const int ndim,
const int* dims,
const int* axes,
int* strides) {
std::vector<int> buff(ndim);
int cur_stride = 1;
for (int i = ndim - 1; i >= 0; --i) {
buff[i] = cur_stride;
cur_stride *= dims[i];
}
for (int i = 0; i < ndim; ++i) {
strides[i] = buff[axes[i]];
}
}
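// Example: dims = {2, 3, 4} (row-major strides {12, 4, 1}) and axes = {0, 2, 1}
// give strides = {12, 1, 4}.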
bool IsRowwiseReduce(
const int ndim,
const int* A_dims,
const int* B_dims,
int* rows,
int* cols) {
*cols = 1;
int pivot = ndim - 1;
for (; pivot >= 0 && B_dims[pivot] == 1; --pivot) {
*cols *= A_dims[pivot];
}
*rows = 1;
for (int i = pivot; i >= 0; --i) {
if (A_dims[i] != B_dims[i]) {
return false;
}
*rows *= A_dims[i];
}
return true;
}
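// Example: A_dims = {2, 3, 4}, B_dims = {2, 1, 1}  ->  rows = 2, cols = 12.
// IsColwiseReduce below is the mirror image, collapsing leading dimensions.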
bool IsColwiseReduce(
const int ndim,
const int* A_dims,
const int* B_dims,
int* rows,
int* cols) {
*rows = 1;
int pivot = 0;
for (; pivot < ndim && B_dims[pivot] == 1; ++pivot) {
*rows *= A_dims[pivot];
}
*cols = 1;
for (int i = pivot; i < ndim; ++i) {
if (A_dims[i] != B_dims[i]) {
return false;
}
*cols *= A_dims[i];
}
return true;
}
template <typename T>
__global__ void
InvStdCUDAKernel(const int N, const T epsilon, const T* var, T* inv_std);
#define DELEGATE_INV_STD_KERNEL_FUNCTION(T, Func) \
template <> \
__global__ void InvStdCUDAKernel<T>( \
const int N, const T epsilon, const T* var, T* inv_std) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
inv_std[i] = Func(var[i] + epsilon); \
} \
}
DELEGATE_INV_STD_KERNEL_FUNCTION(float, rsqrtf)
#undef DELEGATE_INV_STD_KERNEL_FUNCTION
#define CAFFE2_SPECIALIZED_CUDA_INV_STD(T) \
template <> \
void InvStd<T>( \
const int N, \
const T epsilon, \
const T* var, \
T* inv_std, \
cudaStream_t context) { \
InvStdCUDAKernel<T> \
<<<CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context>>>(N, epsilon, var, inv_std); \
}
CAFFE2_SPECIALIZED_CUDA_INV_STD(float)
#undef CAFFE2_SPECIALIZED_CUDA_INV_STD
template <typename T, int D>
void MomentsCUDAImpl(
const int outer_size,
const int inner_size,
const int* dims,
const int* axes,
const T* X,
T* mean,
T* variance,
cudaStream_t context) {
SimpleArray<int, D> X_strides;
SimpleArray<FixedDivisor<int>, D> Y_dims;
ComputeTransposedStrides(D, dims, axes, X_strides.data);
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = FixedDivisor<int>(dims[axes[i]]);
}
MomentsCUDAKernel<T, D>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context>>>(
outer_size, inner_size, X_strides, Y_dims, X, mean, variance);
}
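// MomentsCUDA picks the cheapest applicable path: a plain copy when the reduction
// leaves the shape unchanged, the rowwise/colwise kernels when the reduced axes
// collapse into a trailing/leading contiguous block, and otherwise the generic
// strided kernel driven by the transposed axes.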
template <typename T>
void MomentsCUDA(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const T* X,
T* mean,
T* variance,
cudaStream_t context) {
CHECK_LE(num_axes, num_dims);
std::vector<int> Y_dims_vector(dims, dims + num_dims);
for (int i = 0; i < num_axes; ++i) {
Y_dims_vector[axes[i]] = 1;
}
const int* X_dims = dims;
const int* Y_dims = Y_dims_vector.data();
const int X_size = std::accumulate(X_dims, X_dims + num_dims, 1, std::multiplies<int>());
const int Y_size = std::accumulate(Y_dims, Y_dims + num_dims, 1, std::multiplies<int>());
if (X_size == 0) {
Set<T>(Y_size, T(0), mean, context);
Set<T>(Y_size, T(0), variance, context);
return;
}
if (std::equal(X_dims, X_dims + num_dims, Y_dims)) {
cudaMemcpyAsync(
mean,
X,
sizeof(T) * X_size,
cudaMemcpyDeviceToDevice,
context);
Set<T>(Y_size, T(0), variance, context);
return;
}
int rows;
int cols;
if (IsRowwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) {
RowwiseMomentsCUDAKernel<T>
<<<std::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context>>>(rows, cols, X, mean, variance);
return;
}
if (IsColwiseReduce(num_dims, X_dims, Y_dims, &rows, &cols)) {
ColwiseMomentsCUDAKernel<T>
<<<std::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context>>>(rows, cols, X, mean, variance);
return;
}
std::vector<int> transpose_axes(num_dims);
ComputeTransposeAxesForReduceOp(
num_dims, num_axes, axes, transpose_axes.data());
const int pivot = num_dims - num_axes;
int outer_size = 1;
for (int i = 0; i < pivot; ++i) {
outer_size *= dims[transpose_axes[i]];
}
int inner_size = 1;
for (int i = pivot; i < num_dims; ++i) {
inner_size *= dims[transpose_axes[i]];
}
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
num_dims,
MomentsCUDAImpl,
T,
outer_size,
inner_size,
dims,
transpose_axes.data(),
X,
mean,
variance,
context);
}
#define CAFFE2_SPECIALIZED_CUDA_MOMENTS(T) \
template <> \
void Moments<T>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* mean, \
T* variance, \
cudaStream_t context) { \
MomentsCUDA<T>( \
num_dims, dims, num_axes, axes, X, mean, variance, context); \
}
CAFFE2_SPECIALIZED_CUDA_MOMENTS(float)
#undef CAFFE2_SPECIALIZED_CUDA_MOMENTS
#include "CUFLU.h"
#if ( MODEL == HYDRO )
// external functions
#ifdef __CUDACC__
#include "CUFLU_Shared_FluUtility.cu"
#else // #ifdef __CUDACC__
void Hydro_Rotate3D( real InOut[], const int XYZ, const bool Forward, const int Mag_Offset );
void Hydro_Con2Flux( const int XYZ, real Flux[], const real In[], const real MinPres,
const EoS_DE2P_t EoS_DensEint2Pres, const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[],
const real *const EoS_Table[EOS_NTABLE_MAX], const real* const PresIn );
#endif // #ifdef __CUDACC__ ... else ...
//-------------------------------------------------------------------------------------------------------
// Function : Hydro_RiemannSolver_HLLC
// Description : Approximate Riemann solver of Harten, Lax, and van Leer extended to include the contact wave
//
// Note : 1. Input data should be conserved variables
// 2. Ref : a. Riemann Solvers and Numerical Methods for Fluid Dynamics - A Practical Introduction
// ~ by Eleuterio F. Toro (1999)
// b. Batten, P., Clarke, N., Lambert, C., & Causon, D. M. 1997, SIAM J. Sci. Comput., 18, 1553
// c. Coleman, M. S. B. 2020, ApJS, 248, 7
// 3. Wave-speed estimator is set by HLLC_WAVESPEED in CUFLU.h
// 4. Support general EoS
// 5. Shared by the MHM, MHM_RP, and CTU schemes
//
// Parameter : XYZ : Target spatial direction : (0/1/2) --> (x/y/z)
// Flux_Out : Array to store the output flux
// L/R_In : Input left/right states (conserved variables)
// MinDens/Pres : Density and pressure floors
// EoS_DensEint2Pres : EoS routine to compute the gas pressure
// EoS_DensPres2CSqr : EoS routine to compute the sound speed squared
// EoS_AuxArray_* : Auxiliary arrays for the EoS routines
// EoS_Table : EoS tables
//-------------------------------------------------------------------------------------------------------
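// Overall flow: rotate both input states so that the target direction maps to x,
// estimate the left/right (and contact) wave speeds, and assemble the HLLC flux
// from the corresponding star-region state.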
GPU_DEVICE
void Hydro_RiemannSolver_HLLC( const int XYZ, real Flux_Out[], const real L_In[], const real R_In[],
const real MinDens, const real MinPres, const EoS_DE2P_t EoS_DensEint2Pres,
const EoS_DP2C_t EoS_DensPres2CSqr, const double EoS_AuxArray_Flt[],
const int EoS_AuxArray_Int[], const real* const EoS_Table[EOS_NTABLE_MAX] )
{
// 1. reorder the input variables for different spatial directions
real L[NCOMP_TOTAL], R[NCOMP_TOTAL];
for (int v=0; v<NCOMP_TOTAL; v++)
{
L[v] = L_In[v];
R[v] = R_In[v];
}
Hydro_Rotate3D( L, XYZ, true, MAG_OFFSET );
Hydro_Rotate3D( R, XYZ, true, MAG_OFFSET );
# ifdef CHECK_NEGATIVE_IN_FLUID
if ( Hydro_CheckNegative(L[0]) )
printf( "ERROR : invalid density (%14.7e) at file <%s>, line <%d>, function <%s>\n",
L[0], __FILE__, __LINE__, __FUNCTION__ );
if ( Hydro_CheckNegative(R[0]) )
printf( "ERROR : invalid density (%14.7e) at file <%s>, line <%d>, function <%s>\n",
R[0], __FILE__, __LINE__, __FUNCTION__ );
# endif
// 2. estimate the maximum wave speeds
// 2-1. compute the left/right states
const real ZERO = (real)0.0;
const real ONE = (real)1.0;
const real _TWO = (real)0.5;
const bool CheckMinPres_Yes = true;
const real Emag = NULL_REAL;
real _RhoL, _RhoR, u_L, u_R, P_L, P_R, Cs_L, Cs_R, W_L, W_R;
_RhoL = ONE / L[0];
_RhoR = ONE / R[0];
u_L = _RhoL*L[1];
u_R = _RhoR*R[1];
P_L = Hydro_Con2Pres( L[0], L[1], L[2], L[3], L[4], L+NCOMP_FLUID, CheckMinPres_Yes, MinPres, Emag,
EoS_DensEint2Pres, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table, NULL );
P_R = Hydro_Con2Pres( R[0], R[1], R[2], R[3], R[4], R+NCOMP_FLUID, CheckMinPres_Yes, MinPres, Emag,
EoS_DensEint2Pres, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table, NULL );
Cs_L = SQRT( EoS_DensPres2CSqr( L[0], P_L, L+NCOMP_FLUID, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table ) );
Cs_R = SQRT( EoS_DensPres2CSqr( R[0], P_R, R+NCOMP_FLUID, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table ) );
# ifdef CHECK_NEGATIVE_IN_FLUID
if ( Hydro_CheckNegative(P_L) )
printf( "ERROR : invalid pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n",
P_L, __FILE__, __LINE__, __FUNCTION__ );
if ( Hydro_CheckNegative(P_R) )
printf( "ERROR : invalid pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n",
P_R, __FILE__, __LINE__, __FUNCTION__ );
# endif
// 2-2a. use the Roe average eigenvalues
# if ( HLLC_WAVESPEED == HLL_WAVESPEED_ROE )
real H_L, H_R, RhoL_sqrt, RhoR_sqrt, _RhoL_sqrt, _RhoR_sqrt, _RhoLR_sqrt_sum;
real u_Roe, v_Roe, w_Roe, H_Roe;
// Roe averages
H_L = ( L[4] + P_L )*_RhoL;
H_R = ( R[4] + P_R )*_RhoR;
RhoL_sqrt = SQRT( L[0] );
RhoR_sqrt = SQRT( R[0] );
_RhoL_sqrt = ONE / RhoL_sqrt;
_RhoR_sqrt = ONE / RhoR_sqrt;
_RhoLR_sqrt_sum = ONE / ( RhoL_sqrt + RhoR_sqrt );
u_Roe = _RhoLR_sqrt_sum*( _RhoL_sqrt*L[1] + _RhoR_sqrt*R[1] );
v_Roe = _RhoLR_sqrt_sum*( _RhoL_sqrt*L[2] + _RhoR_sqrt*R[2] );
w_Roe = _RhoLR_sqrt_sum*( _RhoL_sqrt*L[3] + _RhoR_sqrt*R[3] );
H_Roe = _RhoLR_sqrt_sum*( RhoL_sqrt*H_L + RhoR_sqrt*H_R );
// sound speed
//###NOTE: we have assumed a constant-gamma EoS here
// --> otherwise, one needs to specify how to convert (H-0.5*V2, Rho) to Cs^2
// --> see Eq. [A4] in Coleman 2020
# if ( EOS != EOS_GAMMA )
# error : ERROR : HLL_WAVESPEED_ROE only works with EOS_GAMMA !!
# endif
const real Gamma = (real)EoS_AuxArray_Flt[0];
const real Gamma_m1 = (real)EoS_AuxArray_Flt[1];
const real _Gamma = (real)EoS_AuxArray_Flt[3];
real V2_Roe, Cs2_Roe, Cs_Roe, TempRho, TempPres;
V2_Roe = SQR( u_Roe ) + SQR( v_Roe ) + SQR( w_Roe );
Cs2_Roe = Gamma_m1*( H_Roe - _TWO*V2_Roe );
TempRho = _TWO*( L[0] + R[0] ); // differs from the Roe-average density; used only for applying the pressure floor
TempPres = Cs2_Roe*TempRho*_Gamma;
TempPres = Hydro_CheckMinPres( TempPres, MinPres );
Cs2_Roe = Gamma*TempPres/TempRho;
Cs_Roe = SQRT( Cs2_Roe );
# ifdef CHECK_NEGATIVE_IN_FLUID
if ( Hydro_CheckNegative(Cs2_Roe) )
printf( "ERROR : invalid Cs2_Roe (%14.7e) at file <%s>, line <%d>, function <%s>\n",
Cs2_Roe, __FILE__, __LINE__, __FUNCTION__ );
# endif
// maximum and minimum eigenvalues
const real EVal_min = u_Roe - Cs_Roe;
const real EVal_max = u_Roe + Cs_Roe;
// left/right maximum wave speeds
W_L = FMIN( EVal_min, u_L-Cs_L );
W_R = FMAX( EVal_max, u_R+Cs_R );
// 2-2b. use the primitive variable Riemann solver (PVRS)
# elif ( HLLC_WAVESPEED == HLL_WAVESPEED_PVRS )
real Rho_PVRS, Cs_PVRS, RhoCs_PVRS, P_PVRS, Gamma_SL, Gamma_SR, q_L, q_R;
Rho_PVRS = _TWO*( L[0] + R[0] );
Cs_PVRS = _TWO*( Cs_L + Cs_R );
RhoCs_PVRS = Rho_PVRS * Cs_PVRS;
P_PVRS = _TWO*( ( P_L + P_R ) + ( u_L - u_R )*RhoCs_PVRS );
P_PVRS = Hydro_CheckMinPres( P_PVRS, MinPres );
// for EOS_GAMMA/EOS_ISOTHERMAL, the calculations of Gamma_SL/R can be greatly simplified
// --> results should be exactly the same except for round-off errors
# if ( EOS == EOS_GAMMA )
Gamma_SL = (real)EoS_AuxArray_Flt[0];
Gamma_SR = (real)EoS_AuxArray_Flt[0];
# elif ( EOS == EOS_ISOTHERMAL )
Gamma_SL = ONE;
Gamma_SR = ONE;
# else
real u_PVRS, Rho_Cs_PVRS, Rho_SL, Rho_SR, _P;
u_PVRS = _TWO*( ( u_L + u_R ) + ( P_L - P_R )/RhoCs_PVRS );
Rho_Cs_PVRS = Rho_PVRS / Cs_PVRS;
Rho_SL = L[0] + ( u_L - u_PVRS )*Rho_Cs_PVRS;
Rho_SR = R[0] + ( u_PVRS - u_R )*Rho_Cs_PVRS;
Rho_SL = FMAX( Rho_SL, MinDens );
Rho_SR = FMAX( Rho_SR, MinDens );
_P = ONE / P_PVRS;
// see Eq. [9.8] in Toro 1999 for passive scalars
Gamma_SL = EoS_DensPres2CSqr( Rho_SL, P_PVRS, L+NCOMP_FLUID, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table )*Rho_SL*_P;
Gamma_SR = EoS_DensPres2CSqr( Rho_SR, P_PVRS, R+NCOMP_FLUID, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table )*Rho_SR*_P;
# endif // EOS
q_L = ( P_PVRS <= P_L ) ? ONE : SQRT( ONE + _TWO*( Gamma_SL + ONE )/Gamma_SL*( P_PVRS/P_L - ONE ) );
q_R = ( P_PVRS <= P_R ) ? ONE : SQRT( ONE + _TWO*( Gamma_SR + ONE )/Gamma_SR*( P_PVRS/P_R - ONE ) );
W_L = u_L - Cs_L*q_L;
W_R = u_R + Cs_R*q_R;
# ifdef CHECK_NEGATIVE_IN_FLUID
if ( Hydro_CheckNegative(q_L) )
printf( "ERROR : invalid q_L (%14.7e) at file <%s>, line <%d>, function <%s>\n",
q_L, __FILE__, __LINE__, __FUNCTION__ );
if ( Hydro_CheckNegative(q_R) )
printf( "ERROR : invalid q_R (%14.7e) at file <%s>, line <%d>, function <%s>\n",
q_R, __FILE__, __LINE__, __FUNCTION__ );
# endif
// 2-2c. use the min/max of the left and right eigenvalues
# elif ( HLLC_WAVESPEED == HLL_WAVESPEED_DAVIS )
const real W_L1 = u_L - Cs_L;
const real W_L2 = u_R - Cs_R;
const real W_R1 = u_L + Cs_L;
const real W_R2 = u_R + Cs_R;
W_L = FMIN( W_L1, W_L2 );
W_R = FMAX( W_R1, W_R2 );
# else
# error : ERROR : unsupported HLLC_WAVESPEED !!
# endif // HLLC_WAVESPEED
// 3. evaluate the star-region velocity (V_S) and pressure (P_S)
real temp1_L, temp1_R, temp2, V_S, P_S;
// do not compute u_L-W_L and u_R-W_R directly, to avoid large round-off errors when Cs << u ~ W
// ==> otherwise temp1_L ~ temp1_R ~ 0.0 ==> temp2 = inf
# if ( HLLC_WAVESPEED == HLL_WAVESPEED_ROE )
temp1_L = L[0]*( (EVal_min<u_L-Cs_L) ? (u_L-EVal_min) : (+Cs_L) );
temp1_R = R[0]*( (EVal_max>u_R+Cs_R) ? (u_R-EVal_max) : (-Cs_R) );
# elif ( HLLC_WAVESPEED == HLL_WAVESPEED_PVRS )
temp1_L = +L[0]*( Cs_L*q_L );
temp1_R = -R[0]*( Cs_R*q_R );
# elif ( HLLC_WAVESPEED == HLL_WAVESPEED_DAVIS )
temp1_L = +L[0]*( ( W_L1 < W_L2 ) ? Cs_L : (u_L-u_R)+Cs_R );
temp1_R = -R[0]*( ( W_R2 > W_R1 ) ? Cs_R : (u_L-u_R)+Cs_L );
# else
# error : ERROR : unsupported HLLC_WAVESPEED !!
# endif
temp2 = ONE / ( temp1_L - temp1_R );
V_S = temp2*( P_L - P_R + temp1_L*u_L - temp1_R*u_R );
P_S = temp2*( temp1_L*( P_R + temp1_R*u_R ) - temp1_R*( P_L + temp1_L*u_L ) );
P_S = Hydro_CheckMinPres( P_S, MinPres );
// 4. evaluate the weightings of the left/right fluxes and contact wave
real Flux_LR[NCOMP_TOTAL], temp4, Coeff_LR, Coeff_S; // use NCOMP_TOTAL for Flux_LR since it will be passed to Hydro_Con2Flux()
if ( V_S >= ZERO )
{
const real MaxV_L = FMIN( W_L, ZERO );
Hydro_Con2Flux( 0, Flux_LR, L, MinPres, NULL, NULL, NULL, NULL, &P_L );
for (int v=0; v<NCOMP_FLUID; v++) Flux_LR[v] -= MaxV_L*L[v]; // fluxes along the maximum wave speed
// deal with the special case of V_S=MaxV_L=0
//###REVISE: should it return zero flux due to symmetry?
if ( V_S == ZERO && MaxV_L == ZERO )
{
Coeff_LR = ONE;
Coeff_S = ZERO;
}
else
{
temp4 = ONE / ( V_S - MaxV_L );
Coeff_LR = temp4*V_S;
Coeff_S = -temp4*MaxV_L*P_S;
}
} // if ( V_S >= ZERO )
else // V_S < 0.0
{
const real MaxV_R = FMAX( W_R, ZERO );
Hydro_Con2Flux( 0, Flux_LR, R, MinPres, NULL, NULL, NULL, NULL, &P_R );
for (int v=0; v<NCOMP_FLUID; v++) Flux_LR[v] -= MaxV_R*R[v]; // fluxes along the maximum wave speed
temp4 = ONE / ( V_S - MaxV_R );
Coeff_LR = temp4*V_S;
Coeff_S = -temp4*MaxV_R*P_S;
} // if ( V_S >= ZERO ) ... else ...
// 5. evaluate the HLLC fluxes
for (int v=0; v<NCOMP_FLUID; v++) Flux_Out[v] = Coeff_LR*Flux_LR[v];
Flux_Out[1] += Coeff_S;
Flux_Out[4] += Coeff_S*V_S;
// 6. evaluate the fluxes of passive scalars
# if ( NCOMP_PASSIVE > 0 )
if ( Flux_Out[FLUX_DENS] >= ZERO )
{
const real vx = Flux_Out[FLUX_DENS]*_RhoL;
for (int v=NCOMP_FLUID; v<NCOMP_TOTAL; v++) Flux_Out[v] = L[v]*vx;
}
else
{
const real vx = Flux_Out[FLUX_DENS]*_RhoR;
for (int v=NCOMP_FLUID; v<NCOMP_TOTAL; v++) Flux_Out[v] = R[v]*vx;
}
# endif
// 7. restore the correct order
Hydro_Rotate3D( Flux_Out, XYZ, false, MAG_OFFSET );
} // FUNCTION : Hydro_RiemannSolver_HLLC
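//-------------------------------------------------------------------------------------------------------
// Illustrative sketch (added for clarity; not part of GAMER) : the Davis-type wave-speed estimate and the
// HLLC star-region speed/pressure evaluated above, written out for one interface with primitive inputs
// (cf. Toro 1999, Chap. 10, and Batten et al. 1997). The struct and function names are assumptions made
// for this example
//-------------------------------------------------------------------------------------------------------
struct HLLC_State1D_Sketch { real Rho, u, P, Cs; };
GPU_DEVICE static
void Hydro_HLLC_StarState_Sketch( const HLLC_State1D_Sketch L, const HLLC_State1D_Sketch R,
                                  real *S_L, real *S_R, real *S_star, real *P_star )
{
   *S_L = FMIN( L.u - L.Cs, R.u - R.Cs );   // leftmost  wave speed (Davis estimate)
   *S_R = FMAX( L.u + L.Cs, R.u + R.Cs );   // rightmost wave speed (Davis estimate)
   const real mL = L.Rho*( *S_L - L.u );    // rho_L*(S_L - u_L)
   const real mR = R.Rho*( *S_R - R.u );    // rho_R*(S_R - u_R)
// contact-wave (star-region) speed
   *S_star = ( R.P - L.P + L.u*mL - R.u*mR ) / ( mL - mR );
// star-region pressure; the left- and right-based forms are identical
   *P_star = L.P + mL*( *S_star - L.u );
}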
#endif // #if ( MODEL == HYDRO )
#endif // #ifndef __CUFLU_RIEMANNSOLVER_HLLC__
// host
void attenuate_segment( Input * __restrict I, Source * __restrict S,
int QSR_id, int FAI_id, float * __restrict state_flux,
SIMD_Vectors * __restrict simd_vecs)
{
// Unpack local vectors
float * __restrict q0 = simd_vecs->q0;
float * __restrict q1 = simd_vecs->q1;
float * __restrict q2 = simd_vecs->q2;
float * __restrict sigT = simd_vecs->sigT;
float * __restrict tau = simd_vecs->tau;
float * __restrict sigT2 = simd_vecs->sigT2;
float * __restrict expVal = simd_vecs->expVal;
float * __restrict reuse = simd_vecs->reuse;
float * __restrict flux_integral = simd_vecs->flux_integral;
float * __restrict tally = simd_vecs->tally;
float * __restrict t1 = simd_vecs->t1;
float * __restrict t2 = simd_vecs->t2;
float * __restrict t3 = simd_vecs->t3;
float * __restrict t4 = simd_vecs->t4;
// Some placeholder constants - in the full app some of these are
// calculated based on the position in geometry. This treatment
// shaves off a few FLOPs but is not significant compared to the
// rest of the function.
const float dz = 0.1f;
const float zin = 0.3f;
const float weight = 0.5f;
const float mu = 0.9f;
const float mu2 = 0.3f;
const float ds = 0.7f;
const int egroups = I->egroups;
// load fine source region flux vector
float * FSR_flux = &S[QSR_id].fine_flux[FAI_id * egroups];
if( FAI_id == 0 )
{
float * f2 = &S[QSR_id].fine_source[FAI_id*egroups];
float * f3 = &S[QSR_id].fine_source[(FAI_id+1)*egroups];
// cycle over energy groups
#ifdef INTEL
#pragma vector
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
// load neighboring sources
const float y2 = f2[g];
const float y3 = f3[g];
// do linear "fitting"
const float c0 = y2;
const float c1 = (y3 - y2) / dz;
// calculate q0, q1, q2
q0[g] = c0 + c1*zin;
q1[g] = c1;
q2[g] = 0;
}
}
else if ( FAI_id == I->fine_axial_intervals - 1 )
{
float * f1 = &S[QSR_id].fine_source[(FAI_id-1)*egroups];
float * f2 = &S[QSR_id].fine_source[FAI_id*egroups];
// cycle over energy groups
#ifdef INTEL
#pragma vector
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
// load neighboring sources
const float y1 = f1[g];
const float y2 = f2[g];
// do linear "fitting"
const float c0 = y2;
const float c1 = (y2 - y1) / dz;
// calculate q0, q1, q2
q0[g] = c0 + c1*zin;
q1[g] = c1;
q2[g] = 0;
}
}
else
{
float * f1 = &S[QSR_id].fine_source[(FAI_id-1)*egroups];
float * f2 = &S[QSR_id].fine_source[FAI_id*egroups];
float * f3 = &S[QSR_id].fine_source[(FAI_id+1)*egroups];
// cycle over energy groups
#ifdef INTEL
#pragma vector
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
// load neighboring sources
const float y1 = f1[g];
const float y2 = f2[g];
const float y3 = f3[g];
// do quadratic "fitting"
const float c0 = y2;
const float c1 = (y1 - y3) / (2.f*dz);
const float c2 = (y1 - 2.f*y2 + y3) / (2.f*dz*dz);
// calculate q0, q1, q2
q0[g] = c0 + c1*zin + c2*zin*zin;
q1[g] = c1 + 2.f*c2*zin;
q2[g] = c2;
}
}
// cycle over energy groups
#ifdef INTEL
#pragma vector
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
// load total cross section
sigT[g] = S[QSR_id].sigT[g];
// calculate common values for efficiency
tau[g] = sigT[g] * ds;
sigT2[g] = sigT[g] * sigT[g];
}
// cycle over energy groups
#ifdef INTEL
#pragma vector aligned
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
expVal[g] = 1.f - expf( -tau[g] ); // exp is faster on many architectures
}
// Flux Integral
// Re-used Term
#ifdef INTEL
#pragma vector aligned
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
reuse[g] = tau[g] * (tau[g] - 2.f) + 2.f * expVal[g]
/ (sigT[g] * sigT2[g]);
}
//#pragma vector alignednontemporal
#ifdef INTEL
#pragma vector aligned
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
// add contribution to new source flux
flux_integral[g] = (q0[g] * tau[g] + (sigT[g] * state_flux[g] - q0[g])
* expVal[g]) / sigT2[g] + q1[g] * mu * reuse[g] + q2[g] * mu2
* (tau[g] * (tau[g] * (tau[g] - 3.f) + 6.f) - 6.f * expVal[g])
/ (3.f * sigT2[g] * sigT2[g]);
}
#ifdef INTEL
#pragma vector aligned
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
// Prepare tally
tally[g] = weight * flux_integral[g];
}
#ifdef INTEL
#pragma vector
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
FSR_flux[g] += tally[g];
}
// Term 1
#ifdef INTEL
#pragma vector aligned
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
t1[g] = q0[g] * expVal[g] / sigT[g];
}
// Term 2
#ifdef INTEL
#pragma vector aligned
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
t2[g] = q1[g] * mu * (tau[g] - expVal[g]) / sigT2[g];
}
// Term 3
#ifdef INTEL
#pragma vector aligned
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
t3[g] = q2[g] * mu2 * reuse[g];
}
// Term 4
#ifdef INTEL
#pragma vector aligned
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
t4[g] = state_flux[g] * (1.f - expVal[g]);
}
#ifdef VERIFY
for( int g = 0; g < egroups; g++) {
printf("q0[%d] = %f\n", g, q0[g]);
printf("q1[%d] = %f\n", g, q1[g]);
printf("q2[%d] = %f\n", g, q2[g]);
printf("sigT[%d] = %f\n", g, sigT[g]);
printf("tau[%d] = %f\n", g, tau[g]);
printf("sigT2[%d] = %f\n", g, sigT2[g]);
printf("expVal[%d] = %f\n", g, expVal[g]);
printf("reuse[%d] = %f\n", g, reuse[g]);
printf("flux_integral[%d] = %f\n", g, flux_integral[g]);
printf("tally[%d] = %f\n", g, tally[g]);
printf("t1[%d] = %f\n", g, t1[g]);
printf("t2[%d] = %f\n", g, t2[g]);
printf("t3[%d] = %f\n", g, t3[g]);
printf("t4[%d] = %f\n", g, t4[g]);
}
#endif
// Total psi
#ifdef INTEL
#pragma vector aligned
#elif defined IBM
#pragma vector_level(10)
#endif
for( int g = 0; g < egroups; g++)
{
state_flux[g] = t1[g] + t2[g] + t3[g] + t4[g];
}
}
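// ---------------------------------------------------------------------------
// Illustrative sketch (added for clarity; not part of the benchmark): with a
// flat source (q1 = q2 = 0) the per-group update above reduces to the classic
// characteristic solution psi_out = psi_in*exp(-tau) + (q0/sigT)*(1 - exp(-tau)),
// i.e. the sum of terms t4 and t1 computed in the loops above, with tau = sigT*ds.
// The helper name is an assumption made for this example.
// ---------------------------------------------------------------------------
static inline float attenuate_flat_source_sketch( float psi_in, float q0,
                                                  float sigT, float ds )
{
  const float tau    = sigT * ds;
  const float expVal = 1.f - expf( -tau ); // same helper quantity as above
  return psi_in * (1.f - expVal) + q0 * expVal / sigT;
}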
__global__ void
att ( const int* QSR_id_acc,
const int* FAI_id_acc,
float* fine_flux_acc,
float* fine_source_acc,
float* sigT_acc,
float* state_flux_acc,
float* v_acc,
const int fine_axial_intervals,
const int egroups,
const int segments )
{
int gid = blockIdx.x*blockDim.x+threadIdx.x;
if (gid >= segments) return;
const float dz = 0.1f;
const float zin = 0.3f;
const float weight = 0.5f;
const float mu = 0.9f;
const float mu2 = 0.3f;
const float ds = 0.7f;
int QSR_id = QSR_id_acc[gid];
int FAI_id = FAI_id_acc[gid];
// load fine source region flux vector
int offset = QSR_id * fine_axial_intervals * egroups;
float *FSR_flux = fine_flux_acc + offset + FAI_id * egroups;
float* q0 = v_acc;
float* q1 = v_acc + egroups;
float* q2 = v_acc + egroups * 2;
float* sigT = v_acc + egroups * 3;
float* tau = v_acc + egroups * 4;
float* sigT2 = v_acc + egroups * 5;
float* expVal = v_acc + egroups * 6;
float* reuse = v_acc + egroups * 7;
float* flux_integral = v_acc + egroups * 8;
float* tally = v_acc + egroups * 9;
float* t1 = v_acc + egroups * 10;
float* t2 = v_acc + egroups * 11;
float* t3 = v_acc + egroups * 12;
float* t4 = v_acc + egroups * 13;
if( FAI_id == 0 )
{
float * f2 = fine_source_acc + offset + FAI_id*egroups;
float * f3 = fine_source_acc + offset + (FAI_id+1)*egroups;
// cycle over energy groups
for( int g = 0; g < egroups; g++)
{
// load neighboring sources
const float y2 = f2[g];
const float y3 = f3[g];
// do linear "fitting"
const float c0 = y2;
const float c1 = (y3 - y2) / dz;
// calculate q0, q1, q2
q0[g] = c0 + c1*zin;
q1[g] = c1;
q2[g] = 0;
}
}
else if ( FAI_id == fine_axial_intervals - 1 )
{
float * f1 = fine_source_acc + offset + (FAI_id-1)*egroups;
float * f2 = fine_source_acc + offset + FAI_id*egroups;
for( int g = 0; g < egroups; g++)
{
// load neighboring sources
const float y1 = f1[g];
const float y2 = f2[g];
// do linear "fitting"
const float c0 = y2;
const float c1 = (y2 - y1) / dz;
// calculate q0, q1, q2
q0[g] = c0 + c1*zin;
q1[g] = c1;
q2[g] = 0;
}
}
else
{
float * f1 = fine_source_acc + offset + (FAI_id-1)*egroups;
float * f2 = fine_source_acc + offset + FAI_id*egroups;
float * f3 = fine_source_acc + offset + (FAI_id+1)*egroups;
// cycle over energy groups
for( int g = 0; g < egroups; g++)
{
// load neighboring sources
const float y1 = f1[g];
const float y2 = f2[g];
const float y3 = f3[g];
// do quadratic "fitting"
const float c0 = y2;
const float c1 = (y1 - y3) / (2.f*dz);
const float c2 = (y1 - 2.f*y2 + y3) / (2.f*dz*dz);
// calculate q0, q1, q2
q0[g] = c0 + c1*zin + c2*zin*zin;
q1[g] = c1 + 2.f*c2*zin;
q2[g] = c2;
}
}
// cycle over energy groups
offset = QSR_id * egroups;
for( int g = 0; g < egroups; g++)
{
// load total cross section
sigT[g] = sigT_acc[offset + g];
// calculate common values for efficiency
tau[g] = sigT[g] * ds;
sigT2[g] = sigT[g] * sigT[g];
expVal[g] = 1.f - expf( -tau[g] ); // single-precision expf, matching the host routine
reuse[g] = tau[g] * (tau[g] - 2.f) + 2.f * expVal[g] / (sigT[g] * sigT2[g]);
// add contribution to new source flux
flux_integral[g] = (q0[g] * tau[g] + (sigT[g] * state_flux_acc[g] - q0[g])
* expVal[g]) / sigT2[g] + q1[g] * mu * reuse[g] + q2[g] * mu2
* (tau[g] * (tau[g] * (tau[g] - 3.f) + 6.f) - 6.f * expVal[g])
/ (3.f * sigT2[g] * sigT2[g]);
tally[g] = weight * flux_integral[g];
FSR_flux[g] += tally[g];
t1[g] = q0[g] * expVal[g] / sigT[g];
t2[g] = q1[g] * mu * (tau[g] - expVal[g]) / sigT2[g];
t3[g] = q2[g] * mu2 * reuse[g];
t4[g] = state_flux_acc[g] * (1.f - expVal[g]);
state_flux_acc[g] = t1[g]+t2[g]+t3[g]+t4[g];
}
}
int main( int argc, char * argv[] )
{
unsigned int seed = 2;
srand(seed);
// Get Inputs
Input * I = set_default_input();
read_CLI( argc, argv, I );
// Calculate Number of 3D Source Regions
I->source_3D_regions = (int) ceil((double)I->source_2D_regions *
I->coarse_axial_intervals / I->decomp_assemblies_ax);
logo(4); // Based on the 4th version
// Build Source data (needed when verification is disabled)
Source *S = initialize_sources(I);
// Build Device data from Source data
Source *S2 = copy_sources(I, S);
print_input_summary(I);
center_print("SIMULATION", 79);
border_print();
printf("Attentuating fluxes across segments...\n");
// Run Simulation Kernel Loop
// Host allocation
SIMD_Vectors simd_vecs = allocate_simd_vectors(I);
float * state_flux = (float *) malloc(I->egroups * sizeof(float));
// Aligned host staging buffers (copied to device memory later)
float * state_flux_device = NULL;
posix_memalign( (void**)&state_flux_device, 1024, I->egroups * sizeof(float));
int* QSR_id_arr = NULL;
int* FAI_id_arr = NULL;
posix_memalign( (void**)&QSR_id_arr, 1024, sizeof(int) * I->segments );
posix_memalign( (void**)&FAI_id_arr, 1024, sizeof(int) * I->segments );
// initialize the state flux
for( int i = 0; i < I->egroups; i++ ) {
state_flux_device[i] = rand_r(&seed) / (float) RAND_MAX;
state_flux[i] = state_flux_device[i];
}
// When VERIFY is defined, the CPU reference (attenuate_segment) is run for each segment;
// otherwise it is skipped on the CPU to reduce simulation time
for( long i = 0; i < I->segments; i++ )
{
// Pick Random QSR
int QSR_id = rand_r(&seed) % I->source_3D_regions;
// for device
QSR_id_arr[i] = QSR_id;
// Pick Random Fine Axial Interval
int FAI_id = rand_r(&seed) % I->fine_axial_intervals;
// for device
FAI_id_arr[i] = FAI_id;
// Attenuate Segment for one segment
#ifdef VERIFY
attenuate_segment( I, S, QSR_id, FAI_id, state_flux, &simd_vecs);
#endif
}
#ifdef VERIFY
float* simd_vecs_debug = (float*) malloc (sizeof(float)*I->egroups*14);
#endif
double start = get_time();
int fine_axial_intervals = I->fine_axial_intervals;
int egroups = I->egroups;
int segments = I->segments;
int *d_QSR_id;
int *d_FAI_id;
float *d_fine_flux;
float *d_fine_source;
float *d_sigT;
float* d_state_flux;
float* d_simd_vecs;
cudaMalloc((void**)&d_QSR_id, sizeof(int)*I->segments);
cudaMemcpy(d_QSR_id, QSR_id_arr, sizeof(int)*I->segments, cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_FAI_id, sizeof(int)*I->segments);
cudaMemcpy(d_FAI_id, FAI_id_arr, sizeof(int)*I->segments, cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_fine_flux,
sizeof(float) * I->source_3D_regions * I->fine_axial_intervals * I->egroups);
cudaMemcpy(d_fine_flux, S2->fine_flux,
sizeof(float) * I->source_3D_regions * I->fine_axial_intervals * I->egroups,
cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_fine_source, sizeof(float)*
I->source_3D_regions * I->fine_axial_intervals * I->egroups);
cudaMemcpy(d_fine_source, S2->fine_source,
sizeof(float) * I->source_3D_regions * I->fine_axial_intervals * I->egroups,
cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_sigT, sizeof(float)*I->source_3D_regions * I->egroups);
cudaMemcpy(d_sigT, S2->sigT,
sizeof(float) * I->source_3D_regions * I->egroups,
cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_state_flux, sizeof(float)*I->egroups);
cudaMemcpy(d_state_flux, state_flux_device,
sizeof(float) * I->egroups,
cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_simd_vecs, sizeof(float)*I->egroups*14);
dim3 grids ((segments+127)/128); // enough 128-thread blocks to cover all segments
dim3 threads (128);
for (int n = 0; n < I->repeat; n++)
att<<<grids, threads>>>(
d_QSR_id,
d_FAI_id,
d_fine_flux,
d_fine_source,
d_sigT,
d_state_flux,
d_simd_vecs,
fine_axial_intervals,
egroups,
segments );
cudaMemcpy(state_flux_device, d_state_flux,
sizeof(float) * I->egroups,
cudaMemcpyDeviceToHost);
cudaMemcpy(S2->fine_flux, d_fine_flux,
sizeof(float) * I->source_3D_regions * I->fine_axial_intervals * I->egroups,
cudaMemcpyDeviceToHost);
#ifdef VERIFY
cudaMemcpy(simd_vecs_debug, d_simd_vecs,
sizeof(float) * I->egroups * 14,
cudaMemcpyDeviceToHost);
#endif
cudaFree(d_QSR_id);
cudaFree(d_FAI_id);
cudaFree(d_fine_flux);
cudaFree(d_fine_source);
cudaFree(d_sigT);
cudaFree(d_state_flux);
cudaFree(d_simd_vecs);
printf("Simulation Complete.\n");
double stop = get_time();
#ifdef VERIFY
const float* q0 = simd_vecs_debug;
const float* q1 = simd_vecs_debug + egroups;
const float* q2 = simd_vecs_debug + egroups * 2;
const float* sigT = simd_vecs_debug + egroups * 3;
const float* tau = simd_vecs_debug + egroups * 4;
const float* sigT2 = simd_vecs_debug + egroups * 5;
const float* expVal = simd_vecs_debug + egroups * 6;
const float* reuse = simd_vecs_debug + egroups * 7;
const float* flux_integral = simd_vecs_debug + egroups * 8;
const float* tally = simd_vecs_debug + egroups * 9;
const float* t1 = simd_vecs_debug + egroups * 10;
const float* t2 = simd_vecs_debug + egroups * 11;
const float* t3 = simd_vecs_debug + egroups * 12;
const float* t4 = simd_vecs_debug + egroups * 13;
for (int g = 0; g < I->egroups; g++) {
printf("q0[%d] = %f\n", g, q0[g]);
printf("q1[%d] = %f\n", g, q1[g]);
printf("q2[%d] = %f\n", g, q2[g]);
printf("sigT[%d] = %f\n", g, sigT[g]);
printf("tau[%d] = %f\n", g, tau[g]);
printf("sigT2[%d] = %f\n", g, sigT2[g]);
printf("expVal[%d] = %f\n", g, expVal[g]);
printf("reuse[%d] = %f\n", g, reuse[g]);
printf("flux_integral[%d] = %f\n", g, flux_integral[g]);
printf("tally[%d] = %f\n", g, tally[g]);
printf("t1[%d] = %f\n", g, t1[g]);
printf("t2[%d] = %f\n", g, t2[g]);
printf("t3[%d] = %f\n", g, t3[g]);
printf("t4[%d] = %f\n", g, t4[g]);
}
bool error = false;
for (int i = 0; i < I->egroups; i++) {
if ( fabs(state_flux_device[i] - state_flux[i]) > 1e-1 ) {
printf("%f %f\n", state_flux_device[i], state_flux[i]);
error = true;
break;
}
}
if (error)
printf("Fail\n");
else
printf("Success\n");
#endif
border_print();
center_print("RESULTS SUMMARY", 79);
border_print();
double tpi = ((double) (stop - start) /
(double)I->segments / (double) I->egroups) * 1.0e9;
printf("%-25s%.3lf seconds\n", "Runtime:", stop-start);
printf("%-25s%.3lf ns\n", "Time per Intersection:", tpi);
border_print();
free(simd_vecs.q0);
free(state_flux);
free(QSR_id_arr);
free(FAI_id_arr);
free(state_flux_device);
free(S2->fine_source);
free(S2->fine_flux);
free(S2->sigT);
free(S2);
free(S[0].fine_source);
free(S[0].fine_flux);
free(S[0].sigT);
free(S);
free(I);
return 0;
}
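// ---------------------------------------------------------------------------
// Illustrative sketch (added; not part of the benchmark): main() above omits
// CUDA error checking for brevity. A minimal check that could be called after
// the kernel loop and the final copies is shown below; the helper name is an
// assumption made for this example.
// ---------------------------------------------------------------------------
static void check_cuda_errors_sketch( const char * where )
{
  cudaError_t err = cudaGetLastError();    // launch/configuration errors
  if (err == cudaSuccess)
    err = cudaDeviceSynchronize();         // errors from asynchronous work
  if (err != cudaSuccess)
    fprintf(stderr, "CUDA error at %s: %s\n", where, cudaGetErrorString(err));
}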
#include "common/omptarget.h"
#include "target_impl.h"
// Return true if this is the master thread.
INLINE static bool IsMasterThread(bool isSPMDExecutionMode) {
return !isSPMDExecutionMode && GetMasterThreadID() == GetThreadIdInBlock();
}
////////////////////////////////////////////////////////////////////////////////
// Runtime functions for trunk data sharing scheme.
////////////////////////////////////////////////////////////////////////////////
INLINE static void data_sharing_init_stack_common() {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized.");
omptarget_nvptx_TeamDescr *teamDescr =
&omptarget_nvptx_threadPrivateContext->TeamContext();
for (int WID = 0; WID < DS_Max_Warp_Number; WID++) {
__kmpc_data_sharing_slot *RootS = teamDescr->GetPreallocatedSlotAddr(WID);
DataSharingState.SlotPtr[WID] = RootS;
DataSharingState.StackPtr[WID] = (void *)&RootS->Data[0];
}
}
// Initialize data sharing data structure. This function needs to be called
// once at the beginning of a data sharing context (coincides with the kernel
// initialization). This function is called only by the MASTER thread of each
// team in non-SPMD mode.
EXTERN void __kmpc_data_sharing_init_stack() {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized.");
// This function initializes the stack pointer with the pointer to the
// statically allocated shared memory slots. The size of a shared memory
// slot is pre-determined to be 256 bytes.
data_sharing_init_stack_common();
omptarget_nvptx_globalArgs.Init();
}
// Initialize data sharing data structure. This function needs to be called
// once at the beginning of a data sharing context (coincides with the kernel
// initialization). This function is called in SPMD mode only.
EXTERN void __kmpc_data_sharing_init_stack_spmd() {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized.");
// This function initializes the stack pointer with the pointer to the
// statically allocated shared memory slots. The size of a shared memory
// slot is pre-determined to be 256 bytes.
if (GetThreadIdInBlock() == 0)
data_sharing_init_stack_common();
__kmpc_impl_threadfence_block();
}
INLINE static void* data_sharing_push_stack_common(size_t PushSize) {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Expected initialized runtime.");
// Only each warp's active master thread manages the stack.
bool IsWarpMaster = (GetThreadIdInBlock() % WARPSIZE) == 0;
// Round the requested size (PushSize) up to the alignment so that future
// stack allocations remain correctly aligned.
const size_t Alignment = 8;
PushSize = (PushSize + (Alignment - 1)) / Alignment * Alignment;
// Frame pointer must be visible to all workers in the same warp.
const unsigned WID = GetWarpId();
void *FrameP = 0;
__kmpc_impl_lanemask_t CurActive = __kmpc_impl_activemask();
if (IsWarpMaster) {
// SlotP will point to either the shared memory slot or an existing
// global memory slot.
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
void *&StackP = DataSharingState.StackPtr[WID];
// Check if we have room for the data in the current slot.
const uintptr_t StartAddress = (uintptr_t)StackP;
const uintptr_t EndAddress = (uintptr_t)SlotP->DataEnd;
const uintptr_t RequestedEndAddress = StartAddress + (uintptr_t)PushSize;
// If we requested more data than there is room for in the rest
// of the slot then we need to either re-use the next slot, if one exists,
// or create a new slot.
if (EndAddress < RequestedEndAddress) {
__kmpc_data_sharing_slot *NewSlot = 0;
size_t NewSize = PushSize;
// Allocate at least the default size for each type of slot.
// Master is a special case and even though there is only one thread,
// it can share more things with the workers. For uniformity, it uses
// the full size of a worker warp slot.
size_t DefaultSlotSize = DS_Worker_Warp_Slot_Size;
if (DefaultSlotSize > NewSize)
NewSize = DefaultSlotSize;
NewSlot = (__kmpc_data_sharing_slot *) SafeMalloc(
sizeof(__kmpc_data_sharing_slot) + NewSize,
"Global memory slot allocation.");
NewSlot->Next = 0;
NewSlot->Prev = SlotP;
NewSlot->PrevSlotStackPtr = StackP;
NewSlot->DataEnd = &NewSlot->Data[0] + NewSize;
// Make previous slot point to the newly allocated slot.
SlotP->Next = NewSlot;
// The current slot becomes the new slot.
SlotP = NewSlot;
// The stack pointer always points to the next free stack frame.
StackP = &NewSlot->Data[0] + PushSize;
// The frame pointer always points to the beginning of the frame.
FrameP = DataSharingState.FramePtr[WID] = &NewSlot->Data[0];
} else {
// Add the data chunk to the current slot. The frame pointer is set to
// point to the start of the new frame held in StackP.
FrameP = DataSharingState.FramePtr[WID] = StackP;
// Reset stack pointer to the requested address.
StackP = (void *)RequestedEndAddress;
}
}
// Get address from lane 0.
int *FP = (int *)&FrameP;
FP[0] = __kmpc_impl_shfl_sync(CurActive, FP[0], 0);
if (sizeof(FrameP) == 8)
FP[1] = __kmpc_impl_shfl_sync(CurActive, FP[1], 0);
return FrameP;
}
EXTERN void *__kmpc_data_sharing_coalesced_push_stack(size_t DataSize,
int16_t UseSharedMemory) {
return data_sharing_push_stack_common(DataSize);
}
// Called at the time of the kernel initialization. This is used to initialize
// the list of references to shared variables and to pre-allocate global storage
// for holding the globalized variables.
//
// By default the globalized variables are stored in global memory. If the
// UseSharedMemory is set to true, the runtime will attempt to use shared memory
// as long as the size requested fits the pre-allocated size.
EXTERN void *__kmpc_data_sharing_push_stack(size_t DataSize,
int16_t UseSharedMemory) {
// Compute the total memory footprint of the requested data.
// The master thread requires a stack only for itself. A worker
// thread (which at this point is a warp master) will require
// space for the variables of each thread in the warp,
// i.e. one DataSize chunk per warp lane.
// TODO: change WARPSIZE to the number of active threads in the warp.
size_t PushSize = (isRuntimeUninitialized() || IsMasterThread(isSPMDMode()))
? DataSize
: WARPSIZE * DataSize;
// Compute the start address of the frame of each thread in the warp.
uintptr_t FrameStartAddress =
(uintptr_t) data_sharing_push_stack_common(PushSize);
FrameStartAddress += (uintptr_t) (GetLaneId() * DataSize);
return (void *)FrameStartAddress;
}
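// Illustrative sketch (added for clarity; not part of the runtime): in the
// worker case above the warp master reserves WARPSIZE * DataSize bytes and
// each lane takes the slice that starts at LaneId * DataSize, which is what
// the FrameStartAddress adjustment computes. The helper name is an assumption
// made for this example.
INLINE static void *LaneFrameAddressSketch(void *WarpBase, unsigned LaneId,
                                           size_t DataSize) {
  // Each lane's frame begins LaneId * DataSize bytes into the warp's block.
  return (void *)((uintptr_t)WarpBase + (uintptr_t)LaneId * DataSize);
}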
// Pop the stack and free any memory which can be reclaimed.
//
// When the pop operation removes the last global memory slot,
// reclaim all outstanding global memory slots since it is
// likely we have reached the end of the kernel.
EXTERN void __kmpc_data_sharing_pop_stack(void *FrameStart) {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Expected initialized runtime.");
__kmpc_impl_threadfence_block();
if (GetThreadIdInBlock() % WARPSIZE == 0) {
unsigned WID = GetWarpId();
// Current slot
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
// Pointer to next available stack.
void *&StackP = DataSharingState.StackPtr[WID];
// Pop the frame.
StackP = FrameStart;
// If the current slot is empty, we need to free the slot after the
// pop.
bool SlotEmpty = (StackP == &SlotP->Data[0]);
if (SlotEmpty && SlotP->Prev) {
// Before removing the slot we need to reset StackP.
StackP = SlotP->PrevSlotStackPtr;
// Remove the slot.
SlotP = SlotP->Prev;
SafeFree(SlotP->Next, "Free slot.");
SlotP->Next = 0;
}
}
}
// Begin a data sharing context. Maintain a list of references to shared
// variables. This list of references to shared variables will be passed
// to one or more threads.
// In L0 data sharing this is called by master thread.
// In L1 data sharing this is called by active warp master thread.
EXTERN void __kmpc_begin_sharing_variables(void ***GlobalArgs, size_t nArgs) {
omptarget_nvptx_globalArgs.EnsureSize(nArgs);
*GlobalArgs = omptarget_nvptx_globalArgs.GetArgs();
}
// End a data sharing context. There is no need to have a list of refs
// to shared variables because the context in which those variables were
// shared has now ended. This should clean-up the list of references only
// without affecting the actual global storage of the variables.
// In L0 data sharing this is called by master thread.
// In L1 data sharing this is called by active warp master thread.
EXTERN void __kmpc_end_sharing_variables() {
omptarget_nvptx_globalArgs.DeInit();
}
// This function will return a list of references to global variables. This
// is how the workers will get a reference to the globalized variable. The
// members of this list will be passed to the outlined parallel function
// preserving the order.
// Called by all workers.
EXTERN void __kmpc_get_shared_variables(void ***GlobalArgs) {
*GlobalArgs = omptarget_nvptx_globalArgs.GetArgs();
}
// This function is used to initialize the static memory manager. This manager is used to
// manage statically allocated global memory. This memory is allocated by the
// compiler and used to correctly implement globalization of the variables in
// target, teams and distribute regions.
EXTERN void __kmpc_get_team_static_memory(int16_t isSPMDExecutionMode,
const void *buf, size_t size,
int16_t is_shared,
const void **frame) {
if (is_shared) {
*frame = buf;
return;
}
if (isSPMDExecutionMode) {
if (GetThreadIdInBlock() == 0) {
*frame = omptarget_nvptx_simpleMemoryManager.Acquire(buf, size);
}
__kmpc_impl_syncthreads();
return;
}
ASSERT0(LT_FUSSY, GetThreadIdInBlock() == GetMasterThreadID(),
"Must be called only in the target master thread.");
*frame = omptarget_nvptx_simpleMemoryManager.Acquire(buf, size);
__kmpc_impl_threadfence();
}
EXTERN void __kmpc_restore_team_static_memory(int16_t isSPMDExecutionMode,
int16_t is_shared) {
if (is_shared)
return;
if (isSPMDExecutionMode) {
__kmpc_impl_syncthreads();
if (GetThreadIdInBlock() == 0) {
omptarget_nvptx_simpleMemoryManager.Release();
}
return;
}
__kmpc_impl_threadfence();
ASSERT0(LT_FUSSY, GetThreadIdInBlock() == GetMasterThreadID(),
"Must be called only in the target master thread.");
omptarget_nvptx_simpleMemoryManager.Release();
}
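// Illustrative sketch (added for clarity; not part of the runtime): the
// expected pairing of the two entry points above for a single target region
// in SPMD mode, roughly as compiler-generated code would bracket the use of
// the pre-allocated team storage. The wrapper name and the buf/size arguments
// are assumptions made for this example.
INLINE static void TeamStaticMemoryUsageSketch(const void *buf, size_t size) {
  const void *frame = nullptr;
  __kmpc_get_team_static_memory(/*isSPMDExecutionMode=*/1, buf, size,
                                /*is_shared=*/0, &frame);
  // ... globalized variables are laid out inside *frame here ...
  __kmpc_restore_team_static_memory(/*isSPMDExecutionMode=*/1, /*is_shared=*/0);
}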
#include "miner.h"
#include "cuda_helper.h"
#include "cuda_vectors.h"
#include "skein_header.h"
#include <openssl/sha.h>
/* try 1024 for 970+ */
#define TPB52 1024
#define TPB50 768
#define maxResults 16
/* 16 adapters max */
static uint32_t *d_resNonce[MAX_GPUS];
static uint32_t *h_resNonce[MAX_GPUS];
extern "C" void skeincoinhash(void *output, const void *input){
sph_skein512_context ctx_skein;
SHA256_CTX sha256;
uint32_t hash[16];
sph_skein512_init(&ctx_skein);
sph_skein512(&ctx_skein, input, 80);
sph_skein512_close(&ctx_skein, hash);
SHA256_Init(&sha256);
SHA256_Update(&sha256, (unsigned char *)hash, 64);
SHA256_Final((unsigned char *)hash, &sha256);
memcpy(output, hash, 32);
}
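// ---------------------------------------------------------------------------
// Illustrative sketch (added; not part of the miner): host-side check of a
// single nonce against the target with the CPU reference hash above, mirroring
// the verification loop in scanhash_skeincoin() further below. The function
// name is an assumption made for this example.
// ---------------------------------------------------------------------------
static bool skeincoin_check_nonce_sketch(const uint32_t *pdata, // 20-word block header
                                         uint32_t *ptarget,     // 8-word target
                                         uint32_t nonce)
{
	uint32_t _ALIGN(64) endiandata[20];
	uint32_t _ALIGN(64) vhash[8];
	for (int k = 0; k < 19; k++)
		be32enc(&endiandata[k], pdata[k]);
	be32enc(&endiandata[19], nonce);
	skeincoinhash(vhash, endiandata);
	// quick reject on the most significant word, then the full 256-bit test
	return (vhash[7] <= ptarget[7]) && fulltest(vhash, ptarget);
}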
__constant__ uint2 c_message16[2];
__constant__ uint2 _ALIGN(16) c_buffer[56];
__constant__ const uint2 c_t[ 5] = {{8,0},{0,0xFF000000},{8,0xFF000000},{0x50,0},{0,0xB0000000}};
__constant__ const uint2 c_add[18] = {{1,0},{2,0},{3,0},{4,0},{5,0},{6,0},{7,0},{8,0},{9,0},{10,0},{11,0},{12,0},{13,0},{14,0},{15,0},{16,0},{17,0},{18,0}};
// precomputed tables for SHA256
__constant__ const uint32_t sha256_hashTable[8] = {
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
};
__constant__ uint32_t _ALIGN(16) sha256_endingTable[64] = {
0xc28a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf374,
0x649b69c1, 0xf0fe4786, 0x0fe1edc6, 0x240cf254, 0x4fe9346f, 0x6cc984be, 0x61b9411e, 0x16f988fa,
0xf2c65152, 0xa88e5a6d, 0xb019fc65, 0xb9d99ec7, 0x9a1231c3, 0xe70eeaa0, 0xfdb1232b, 0xc7353eb0,
0x3069bad5, 0xcb976d5f, 0x5a0f118f, 0xdc1eeefd, 0x0a35b689, 0xde0b7a04, 0x58f4ca9d, 0xe15d5b16,
0x007f3e86, 0x37088980, 0xa507ea32, 0x6fab9537, 0x17406110, 0x0d8cd6f1, 0xcdaa3b6d, 0xc0bbbe37,
0x83613bda, 0xdb48a363, 0x0b02e931, 0x6fd15ca7, 0x521afaca, 0x31338431, 0x6ed41a95, 0x6d437890,
0xc39c91f2, 0x9eccabbd, 0xb5c9a0e6, 0x532fb63c, 0xd2c741c6, 0x07237ea3, 0xa4954b68, 0x4c191d76
};
__constant__ uint32_t _ALIGN(16) sha256_constantTable[64] = {
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};
/* Elementary defines for SHA256 */
#define xor3b(a,b,c) ((a ^ b) ^ c)
#define R(x, n) ((x) >> (n))
//#define Maj(x, y, z) ((x & (y | z)) | (y & z)) //((b) & (c)) | (((b) | (c)) & (a)); //andor32(a,b,c);
__device__ __forceinline__
uint32_t Maj(const uint32_t a,const uint32_t b,const uint32_t c){ //Sha256 - Maj - andor
uint32_t result;
#if __CUDA_ARCH__ >= 500 && CUDA_VERSION >= 7050
asm ("lop3.b32 %0, %1, %2, %3, 0xE8;" : "=r"(result) : "r"(a), "r"(b),"r"(c)); // 0xE8 = ((0xF0 & (0xCC | 0xAA)) | (0xCC & 0xAA))
#else
result = ((a & (b | c)) | (b & c));
#endif
return result;
}
#define Ch(x, y, z) ((x & (y ^ z)) ^ z)
__device__ __forceinline__ uint32_t bsg2_0(const uint32_t x)
{
return xor3b(ROTR32(x,2),ROTR32(x,13),ROTR32(x,22));
}
__device__ __forceinline__ uint32_t bsg2_1(const uint32_t x)
{
return xor3b(ROTR32(x,6),ROTR32(x,11),ROTR32(x,25));
}
__device__ __forceinline__ uint32_t ssg2_0(const uint32_t x)
{
return xor3b(ROTR32(x,7),ROTR32(x,18),(x>>3));
}
__device__ __forceinline__ uint32_t ssg2_1(const uint32_t x)
{
return xor3b(ROTR32(x,17),ROTR32(x,19),(x>>10));
}
#if __CUDA_ARCH__ <= 500
__global__ __launch_bounds__(TPB50)
#else
__global__ __launch_bounds__(TPB52, 1)
#endif
void skeincoin_gpu_hash(uint32_t threads, uint32_t startNonce, uint32_t* resNonce, uint32_t target7)
{
uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
if (thread < threads)
{
uint32_t nonce = startNonce + thread;
uint2 nonce2 = make_uint2(c_message16[1].x, cuda_swab32(nonce));
uint2 h[ 9];
uint2 p[8];
*(uint2x4*)&h[ 0] = *(uint2x4*)&c_buffer[ 0];
*(uint2x4*)&h[ 4] = *(uint2x4*)&c_buffer[ 4];
h[ 8] = c_buffer[ 8];
int i = 9;
p[ 1] = nonce2+h[ 1];
p[ 0] = c_buffer[i++]+p[ 1];
p[ 1] = ROL2(p[ 1],46) ^ p[ 0];
p[ 2] = c_buffer[i++] + p[ 1];
p[ 3] = c_buffer[i++];
p[ 4] = c_buffer[i++];
p[ 5] = c_buffer[i++];
p[ 6] = c_buffer[i++];
p[ 7] = c_buffer[i++];
p[ 1] = ROL2(p[ 1],33) ^ p[ 2];
p[ 0]+= p[ 3];
p[ 3] = c_buffer[i++] ^ p[ 0];
p[ 4]+=p[ 1];p[ 6]+=p[ 3];p[ 0]+=p[ 5];p[ 2]+=p[ 7];p[ 1]=ROL2(p[ 1],17) ^ p[ 4];p[ 3]=ROL2(p[ 3],49) ^ p[ 6];p[ 5]=c_buffer[i++] ^ p[ 0];p[ 7]=c_buffer[i++]^ p[ 2];
p[ 6]+=p[ 1];p[ 0]+=p[ 7];p[ 2]+=p[ 5];p[ 4]+=p[ 3];p[ 1]=ROL2(p[ 1],44) ^ p[ 6];p[ 7]=ROL2(p[ 7], 9) ^ p[ 0];p[ 5]=ROL2(p[ 5],54) ^ p[ 2];p[ 3]=ROR8(p[ 3]) ^ p[ 4];
addwBuff(1,2,3,4,5);TFBIGMIX8o();
addwBuff(2,3,4,5,6);TFBIGMIX8e();
addwBuff(3,4,5,6,7);TFBIGMIX8o();
addwBuff(4,5,6,7,8);TFBIGMIX8e();
addwBuff(5,6,7,8,0);TFBIGMIX8o();
addwBuff(6,7,8,0,1);TFBIGMIX8e();
addwBuff(7,8,0,1,2);TFBIGMIX8o();
addwBuff(8,0,1,2,3);TFBIGMIX8e();
addwBuff(0,1,2,3,4);TFBIGMIX8o();
addwBuff(1,2,3,4,5);TFBIGMIX8e();
addwBuff(2,3,4,5,6);TFBIGMIX8o();
addwBuff(3,4,5,6,7);TFBIGMIX8e();
addwBuff(4,5,6,7,8);TFBIGMIX8o();
addwBuff(5,6,7,8,0);TFBIGMIX8e();
addwBuff(6,7,8,0,1);TFBIGMIX8o();
addwBuff(7,8,0,1,2);TFBIGMIX8e();
addwBuff(8,0,1,2,3);TFBIGMIX8o();
h[ 0] = c_message16[0] ^ (p[0]+h[ 0]);
h[ 1] = nonce2 ^ (p[1]+h[ 1]);
h[ 2]+= p[2];
h[ 3]+= p[3];
h[ 4]+= p[4];
h[ 5]+= p[5] + c_t[ 3];//make_uint2(0x50,0);// SPH_T64(bcount << 6) + (sph_u64)(extra);
h[ 6]+= p[6] + c_t[ 4];//make_uint2(0,0xB0000000);// (bcount >> 58) + ((sph_u64)(etype) << 55);
h[ 7]+= p[7] + vectorize(18);
h[ 8] = h[ 0] ^ h[ 1] ^ h[ 2] ^ h[ 3] ^ h[ 4] ^ h[ 5] ^ h[ 6] ^ h[ 7] ^ vectorize(0x1BD11BDAA9FC1A22);
p[ 0] = h[ 0]; p[ 1] = h[ 1]; p[ 2] = h[ 2]; p[ 3] = h[ 3]; p[ 4] = h[ 4]; p[ 5] = h[ 5] + c_t[ 0]; p[ 6] = h[ 6] + c_t[ 1];p[ 7] = h[ 7];
TFBIGMIX8e();
addwCon(1,2,3,4,5,6,7,8, 1,2, 0);TFBIGMIX8o();
addwCon(2,3,4,5,6,7,8,0, 2,0, 1);TFBIGMIX8e();
addwCon(3,4,5,6,7,8,0,1, 0,1, 2);TFBIGMIX8o();
addwCon(4,5,6,7,8,0,1,2, 1,2, 3);TFBIGMIX8e();
addwCon(5,6,7,8,0,1,2,3, 2,0, 4);TFBIGMIX8o();
addwCon(6,7,8,0,1,2,3,4, 0,1, 5);TFBIGMIX8e();
addwCon(7,8,0,1,2,3,4,5, 1,2, 6);TFBIGMIX8o();
addwCon(8,0,1,2,3,4,5,6, 2,0, 7);TFBIGMIX8e();
addwCon(0,1,2,3,4,5,6,7, 0,1, 8);TFBIGMIX8o();
addwCon(1,2,3,4,5,6,7,8, 1,2, 9);TFBIGMIX8e();
addwCon(2,3,4,5,6,7,8,0, 2,0,10);TFBIGMIX8o();
addwCon(3,4,5,6,7,8,0,1, 0,1,11);TFBIGMIX8e();
addwCon(4,5,6,7,8,0,1,2, 1,2,12);TFBIGMIX8o();
addwCon(5,6,7,8,0,1,2,3, 2,0,13);TFBIGMIX8e();
addwCon(6,7,8,0,1,2,3,4, 0,1,14);TFBIGMIX8o();
addwCon(7,8,0,1,2,3,4,5, 1,2,15);TFBIGMIX8e();
addwCon(8,0,1,2,3,4,5,6, 2,0,16);TFBIGMIX8o();
addwCon(0,1,2,3,4,5,6,7, 0,1,17);
uint32_t W1[16];
uint32_t W2[16];
uint32_t regs[8];
uint32_t hash[8];
uint32_t T1;
#pragma unroll 16
for (int k = 0; k<16; k++)
W1[k] = cuda_swab32(((uint32_t *)&p)[k]);
// Init with Hash-Table
#pragma unroll 8
for (int k = 0; k < 8; k++) {
hash[k] = regs[k] = sha256_hashTable[k];
}
// Progress W1
#pragma unroll 16
for (int j = 0; j<16; j++)
{
T1 = regs[7] + sha256_constantTable[j] + bsg2_1(regs[4]) + Ch(regs[4], regs[5], regs[6]) + W1[j];
#pragma unroll
for (int k = 6; k >= 0; k--) regs[k + 1] = regs[k];
regs[0] = T1 + bsg2_0(regs[1]) + Maj(regs[1], regs[2], regs[3]);
regs[4] += T1;
}
// Progress W2...W3
////// PART 1
#pragma unroll 2
for (int j = 0; j<2; j++)
W2[j] = ssg2_1(W1[14 + j]) + W1[9 + j] + ssg2_0(W1[1 + j]) + W1[j];
#pragma unroll 5
for (int j = 2; j<7; j++)
W2[j] = ssg2_1(W2[j - 2]) + W1[9 + j] + ssg2_0(W1[1 + j]) + W1[j];
#pragma unroll 8
for (int j = 7; j<15; j++)
W2[j] = ssg2_1(W2[j - 2]) + W2[j - 7] + ssg2_0(W1[1 + j]) + W1[j];
W2[15] = ssg2_1(W2[13]) + W2[8] + ssg2_0(W2[0]) + W1[15];
// Round function
#pragma unroll
for (int j = 0; j<16; j++)
{
T1 = regs[7] + sha256_constantTable[j + 16] + bsg2_1(regs[4]) + Ch(regs[4], regs[5], regs[6]) + W2[j];
#pragma unroll
for (int l = 6; l >= 0; l--) regs[l + 1] = regs[l];
regs[0] = T1 + bsg2_0(regs[1]) + Maj(regs[1], regs[2], regs[3]);
regs[4] += T1;
}
////// PART 2
#pragma unroll
for (int j = 0; j<2; j++)
W1[j] = ssg2_1(W2[14 + j]) + W2[9 + j] + ssg2_0(W2[1 + j]) + W2[j];
#pragma unroll
for (int j = 2; j<7; j++)
W1[j] = ssg2_1(W1[j - 2]) + W2[9 + j] + ssg2_0(W2[1 + j]) + W2[j];
#pragma unroll
for (int j = 7; j<15; j++)
W1[j] = ssg2_1(W1[j - 2]) + W1[j - 7] + ssg2_0(W2[1 + j]) + W2[j];
W1[15] = ssg2_1(W1[13]) + W1[8] + ssg2_0(W1[0]) + W2[15];
// Round function
#pragma unroll
for (int j = 0; j<16; j++)
{
T1 = regs[7] + sha256_constantTable[j + 32] + bsg2_1(regs[4]) + Ch(regs[4], regs[5], regs[6]) + W1[j];
#pragma unroll
for (int l = 6; l >= 0; l--) regs[l + 1] = regs[l];
regs[0] = T1 + bsg2_0(regs[1]) + Maj(regs[1], regs[2], regs[3]);
regs[4] += T1;
}
////// PART 3
#pragma unroll
for (int j = 0; j<2; j++)
W2[j] = ssg2_1(W1[14 + j]) + W1[9 + j] + ssg2_0(W1[1 + j]) + W1[j];
#pragma unroll
for (int j = 2; j<7; j++)
W2[j] = ssg2_1(W2[j - 2]) + W1[9 + j] + ssg2_0(W1[1 + j]) + W1[j];
#pragma unroll
for (int j = 7; j<15; j++)
W2[j] = ssg2_1(W2[j - 2]) + W2[j - 7] + ssg2_0(W1[1 + j]) + W1[j];
W2[15] = ssg2_1(W2[13]) + W2[8] + ssg2_0(W2[0]) + W1[15];
// Round function
#pragma unroll
for (int j = 0; j<16; j++)
{
T1 = regs[7] + sha256_constantTable[j + 48] + bsg2_1(regs[4]) + Ch(regs[4], regs[5], regs[6]) + W2[j];
#pragma unroll
for (int l = 6; l >= 0; l--) regs[l + 1] = regs[l];
regs[0] = T1 + bsg2_0(regs[1]) + Maj(regs[1], regs[2], regs[3]);
regs[4] += T1;
}
#pragma unroll
for (int k = 0; k<8; k++)
regs[k] = (hash[k] += regs[k]);
/////
///// Second Pass (ending)
/////
// Progress W1
#pragma unroll
for (int j = 0; j<56; j++)//62
{
T1 = regs[7] + sha256_endingTable[j] + bsg2_1(regs[4]) + Ch(regs[4], regs[5], regs[6]);
#pragma unroll
for (int k = 6; k >= 0; k--)
regs[k + 1] = regs[k];
regs[0] = T1 + bsg2_0(regs[1]) + Maj(regs[1], regs[2], regs[3]);
regs[4] += T1;
}
T1 = regs[7] + sha256_endingTable[56] + bsg2_1(regs[4]) + Ch(regs[4], regs[5], regs[6]);
regs[7] = T1 + bsg2_0(regs[0]) + Maj(regs[0], regs[1], regs[2]);
regs[3]+= T1;
T1 = regs[6] + sha256_endingTable[57] + bsg2_1(regs[3]) + Ch(regs[3], regs[4], regs[5]);
regs[2]+= T1;
//************
regs[1]+= regs[5] + sha256_endingTable[58] + bsg2_1(regs[2]) + Ch(regs[2], regs[3], regs[4]);
regs[0]+= regs[4] + sha256_endingTable[59] + bsg2_1(regs[1]) + Ch(regs[1], regs[2], regs[3]);
uint32_t test = cuda_swab32(hash[7] + sha256_endingTable[60] + regs[7] + regs[3] + bsg2_1(regs[0]) + Ch(regs[0], regs[1], regs[2]));
if (test <= target7){
uint32_t pos = atomicInc(&resNonce[0],0xffffffff)+1;
if(pos<maxResults)
resNonce[pos]=nonce;
}
}
}
__host__
void skeincoin_setBlock_80(int thr_id, void *pdata)
{
uint64_t message[16];
memcpy(&message[0], pdata, 80);
cudaMemcpyToSymbol(c_message16, &message[8], 16, 0, cudaMemcpyHostToDevice);
uint64_t h0, h1, h2, h3, h4, h5, h6, h7, h8;
uint64_t t0, t1, t2;
h0 = 0x4903ADFF749C51CEull;
h1 = 0x0D95DE399746DF03ull;
h2 = 0x8FD1934127C79BCEull;
h3 = 0x9A255629FF352CB1ull;
h4 = 0x5DB62599DF6CA7B0ull;
h5 = 0xEABE394CA9D5C3F4ull;
h6 = 0x991112C71A75B523ull;
h7 = 0xAE18A40B660FCC33ull;
//h8 = h0 ^ h1 ^ h2 ^ h3 ^ h4 ^ h5 ^ h6 ^ h7 ^ SPH_C64(0x1BD11BDAA9FC1A22);
h8 = 0xcab2076d98173ec4ULL;
t0 = 64; // ptr
t1 = 0x7000000000000000ull;
t2 = 0x7000000000000040ull;
uint64_t p[8];
for (int i = 0; i<8; i++)
p[i] = message[i];
TFBIG_4e_PRE(0);
TFBIG_4o_PRE(1);
TFBIG_4e_PRE(2);
TFBIG_4o_PRE(3);
TFBIG_4e_PRE(4);
TFBIG_4o_PRE(5);
TFBIG_4e_PRE(6);
TFBIG_4o_PRE(7);
TFBIG_4e_PRE(8);
TFBIG_4o_PRE(9);
TFBIG_4e_PRE(10);
TFBIG_4o_PRE(11);
TFBIG_4e_PRE(12);
TFBIG_4o_PRE(13);
TFBIG_4e_PRE(14);
TFBIG_4o_PRE(15);
TFBIG_4e_PRE(16);
TFBIG_4o_PRE(17);
TFBIG_ADDKEY_PRE(p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], h, t, 18);
h0 = p[ 0] ^ message[ 0];
h1 = p[ 1] ^ message[ 1];
h2 = p[ 2] ^ message[ 2];
h3 = p[ 3] ^ message[ 3];
h4 = p[ 4] ^ message[ 4];
h5 = p[ 5] ^ message[ 5];
h6 = p[ 6] ^ message[ 6];
h7 = p[ 7] ^ message[ 7];
h8 = ((h0 ^ h1) ^ (h2 ^ h3)) ^ ((h4 ^ h5) ^ (h6 ^ h7)) ^ SPH_C64(0x1BD11BDAA9FC1A22);
t0 = 0x50ull; // SPH_T64(bcount << 6) + (sph_u64)(extra);
t1 = 0xB000000000000000ul; // (bcount >> 58) + ((sph_u64)(etype) << 55);
t2 = 0xB000000000000050ull;
p[ 0] = message[ 8] + h0;
p[ 2] = h2;
p[ 3] = h3;
p[ 4] = h4;
p[ 5] = h5 + t0;
p[ 6] = h6 + t1;
p[ 7] = h7;
p[ 2] += p[ 3]; p[ 3] = ROTL64(p[ 3],36) ^ p[ 2];
p[ 4] += p[ 5]; p[ 5] = ROTL64(p[ 5],19) ^ p[ 4];
p[ 6] += p[ 7]; p[ 7] = ROTL64(p[ 7],37) ^ p[ 6];
p[ 4]+= p[ 7]; p[ 7] = ROTL64(p[ 7],27) ^ p[ 4];
p[ 6]+= p[ 5]; p[ 5] = ROTL64(p[ 5],14) ^ p[ 6];
uint64_t sk_buf[56];
int i = 0;
sk_buf[i++] = h0;
sk_buf[i++] = h1;
sk_buf[i++] = h2;
sk_buf[i++] = h3;
sk_buf[i++] = h4;
sk_buf[i++] = h5;
sk_buf[i++] = h6;
sk_buf[i++] = h7;
sk_buf[i++] = h8;
sk_buf[i++] = p[ 0];//10
sk_buf[i++] = p[ 2];
sk_buf[i++] = p[ 3];
sk_buf[i++] = p[ 4];
sk_buf[i++] = p[ 5];
sk_buf[i++] = p[ 6];
sk_buf[i++] = p[ 7];
sk_buf[i++] = ROTL64(p[ 3],42);
sk_buf[i++] = ROTL64(p[ 5],36);
sk_buf[i++] = ROTL64(p[ 7],39);
sk_buf[i++] = h6 + t1;//20
sk_buf[i++] = h8 + 1;
sk_buf[i++] = h7 + t2;
sk_buf[i++] = h0 + 2;
sk_buf[i++] = h8 + t0;
sk_buf[i++] = h1 + 3;
sk_buf[i++] = h0 + t1;
sk_buf[i++] = h2 + 4;
sk_buf[i++] = h1 + t2;
sk_buf[i++] = h3 + 5;
sk_buf[i++] = h2 + t0;
sk_buf[i++] = h4 + 6;
sk_buf[i++] = h3 + t1;
sk_buf[i++] = h5 + 7;
sk_buf[i++] = h4 + t2;
sk_buf[i++] = h6 + 8;
sk_buf[i++] = h5 + t0;
sk_buf[i++] = h7 + 9;
sk_buf[i++] = h6 + t1;
sk_buf[i++] = h8 + 10;
sk_buf[i++] = h7 + t2;
sk_buf[i++] = h0 + 11;
sk_buf[i++] = h8 + t0;
sk_buf[i++] = h1 + 12;
sk_buf[i++] = h0 + t1;
sk_buf[i++] = h2 + 13;
sk_buf[i++] = h1 + t2;
sk_buf[i++] = h3 + 14;
sk_buf[i++] = h2 + t0;
sk_buf[i++] = h4 + 15;
sk_buf[i++] = h3 + t1;
sk_buf[i++] = h5 + 16;
sk_buf[i++] = h4 + t2;
sk_buf[i++] = h6 + 17;
sk_buf[i++] = h5 + t0;
CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_buffer, sk_buf, sizeof(sk_buf), 0, cudaMemcpyHostToDevice));
}
static bool init[MAX_GPUS] = { 0 };
extern "C" int scanhash_skeincoin(int thr_id, struct work* work, uint32_t max_nonce, unsigned long *hashes_done){
int dev_id = device_map[thr_id];
uint32_t *pdata = work->data;
uint32_t *ptarget = work->target;
const uint32_t first_nonce = pdata[19];
int intensity = (device_sm[dev_id] > 500) ? 27 : 23;
uint32_t throughput = cuda_default_throughput(dev_id, 1UL << intensity);
if (init[thr_id]) throughput = min(throughput, (max_nonce - first_nonce));
uint32_t target7 = ptarget[7];
if (opt_benchmark)
((uint64_t*)ptarget)[3] = 0;
if (!init[thr_id]){
cudaSetDevice(dev_id);
if (opt_cudaschedule == -1 && gpu_threads == 1) {
cudaDeviceReset();
// reduce cpu usage
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
CUDA_LOG_ERROR();
}
gpulog(LOG_INFO,thr_id, "Intensity set to %g, %u cuda threads", throughput2intensity(throughput), throughput);
CUDA_SAFE_CALL(cudaMalloc(&d_resNonce[thr_id], maxResults * sizeof(uint32_t)));
h_resNonce[thr_id] = (uint32_t*) malloc(maxResults * sizeof(uint32_t));
if(h_resNonce[thr_id] == NULL){
gpulog(LOG_ERR,thr_id,"Host memory allocation failed");
exit(EXIT_FAILURE);
}
CUDA_LOG_ERROR();
init[thr_id] = true;
}
uint32_t tpb = TPB52;
if (device_sm[dev_id] <= 500) tpb = TPB50;
const dim3 grid((throughput + tpb - 1) / tpb);
const dim3 block(tpb);
uint32_t _ALIGN(64) endiandata[20];
for (int k=0; k < 19; k++)
be32enc(&endiandata[k], pdata[k]);
skeincoin_setBlock_80(thr_id, (void*)endiandata);
int rc=0;
cudaMemset(d_resNonce[thr_id], 0x00, maxResults*sizeof(uint32_t));
do {
// GPU HASH
skeincoin_gpu_hash <<< grid, block >>> (throughput, pdata[19], d_resNonce[thr_id], target7);
cudaMemcpy(h_resNonce[thr_id], d_resNonce[thr_id], sizeof(uint32_t), cudaMemcpyDeviceToHost);
if (h_resNonce[thr_id][0] != 0){
cudaMemcpy(h_resNonce[thr_id], d_resNonce[thr_id], maxResults*sizeof(uint32_t), cudaMemcpyDeviceToHost);
cudaMemset(d_resNonce[thr_id], 0x00, sizeof(uint32_t));
if(h_resNonce[thr_id][0]>(maxResults-1)){
gpulog(LOG_WARNING,dev_id,"Candidate flood: %u",h_resNonce[thr_id][0]);
h_resNonce[thr_id][0]=maxResults-1;
}
uint32_t i;
for(i=1;i<h_resNonce[thr_id][0]+1;i++){
uint32_t _ALIGN(64) vhash[8];
be32enc(&endiandata[19],h_resNonce[thr_id][i]);
skeincoinhash(vhash, endiandata);
if (vhash[7] <= ptarget[7] && fulltest(vhash, ptarget)) {
*hashes_done = pdata[19] - first_nonce + throughput + 1;
work_set_target_ratio(work, vhash);
pdata[19] = h_resNonce[thr_id][i];
rc = 1;
//check Extranonce
for(uint32_t j=i+1;j<h_resNonce[thr_id][0]+1;j++){
be32enc(&endiandata[19],h_resNonce[thr_id][j]);
skeincoinhash(vhash, endiandata);
if (vhash[7] <= ptarget[7] && fulltest(vhash, ptarget)) {
pdata[21] = h_resNonce[thr_id][j];
// if(!opt_quiet)
// gpulog(LOG_BLUE,dev_id,"Found 2nd nonce: %08X",pdata[21]);
if (bn_hash_target_ratio(vhash, ptarget) > work->shareratio[0]){
work_set_target_ratio(work, vhash);
xchg(pdata[19],pdata[21]);
}
rc=2;
break;
}
}
return rc;
}
}
}
pdata[19] += throughput;
} while (!work_restart[thr_id].restart && (uint64_t)max_nonce > (uint64_t)throughput + (uint64_t)pdata[19]);
*hashes_done = pdata[19] - first_nonce + 1;
// MyStreamSynchronize(NULL, 0, device_map[thr_id]);
return rc;
}
// cleanup
extern "C" void free_skeincoin(int thr_id)
{
if (!init[thr_id])
return;
cudaDeviceSynchronize();
free(h_resNonce[thr_id]);
cudaFree(d_resNonce[thr_id]);
init[thr_id] = false;
cudaDeviceSynchronize();
}
#include <ops/declarable/helpers/transforms.h>
#include <helpers/ShapeUtils.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void clipByNormCuda(const void* vClipNorm, const void* vNorm, const Nd4jLong* normShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int* dimensions, const int dimsLen, const bool useAverage) {
const T clipNorm = *reinterpret_cast<const T*>(vClipNorm);
const T* norm = reinterpret_cast<const T*>(vNorm);
T* z = reinterpret_cast<T*>(vz);
__shared__ Nd4jLong zLen, tadLen, totalThreads;
if (threadIdx.x == 0) {
zLen = shape::length(zShapeInfo);
tadLen = zLen / shape::length(normShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
int zCoords[MAX_RANK], normCoords[MAX_RANK];
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, zCoords);
// deduce norm coords
for (int j = 0; j < dimsLen; ++j)
normCoords[j] = zCoords[dimensions[j]];
const T actualNorm = useAverage ? norm[shape::getOffset(normShapeInfo, normCoords)] / tadLen : norm[shape::getOffset(normShapeInfo, normCoords)];
if(actualNorm > clipNorm)
z[shape::getOffset(zShapeInfo, zCoords)] *= clipNorm / actualNorm;
}
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
__host__ static void clipByNormCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream,
const void* vClipNorm, const void* vNorm, const Nd4jLong* normShapeInfo, void* vz, const Nd4jLong* zShapeInfo,
const int* dimensions, const int dimsLen, const bool useAverage) {
clipByNormCuda<T><<<blocksPerGrid, threadsPerBlock, 512, *stream>>>(vClipNorm, vNorm, normShapeInfo, vz, zShapeInfo, dimensions, dimsLen, useAverage);
}
//////////////////////////////////////////////////////////////////////////
void clipByNorm(sd::LaunchContext* context, NDArray& input, NDArray& output, const std::vector<int>& dims, const NDArray& clipNorm, const bool isInplace, const bool useAverage) {
NDArray* z = nullptr;
if(isInplace) {
z = &input;
}
else {
output.assign(input);
z = &output;
}
if(dims.empty()) {
const NDArray actualNorm = useAverage ? z->reduceAlongDimension(reduce::Norm2, {}) / z->lengthOf() : z->reduceAlongDimension(reduce::Norm2, {});
if(actualNorm.e<float>(0) > clipNorm.e<float>(0))
*z *= clipNorm / actualNorm;
}
else {
const NDArray actualNorms = z->reduceAlongDimension(reduce::Norm2, dims);
std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(z->rankOf(), dims);
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (z->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
PointersManager manager(context, "clipByNorm");
const int* dimensions = reinterpret_cast<const int*>(manager.replicatePointer(dimsToExclude.data(), dimsToExclude.size() * sizeof(int)));
NDArray::prepareSpecialUse({z}, {z, &actualNorms, &clipNorm});
BUILD_SINGLE_SELECTOR(z->dataType(), clipByNormCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), clipNorm.specialBuffer(), actualNorms.specialBuffer(), actualNorms.specialShapeInfo(), z->specialBuffer(), z->specialShapeInfo(), dimensions, (int)dimsToExclude.size(), useAverage), FLOAT_TYPES);
NDArray::registerSpecialUse({z}, {z, &actualNorms, &clipNorm});
manager.synchronize();
}
}
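//////////////////////////////////////////////////////////////////////////
// Illustrative sketch (added for clarity; not part of libnd4j): the scalar rule that clipByNormCuda
// applies to each tensor-along-dimension, written for a plain float buffer. The function name is an
// assumption made for this example; <cmath> is assumed to be available through the headers above.
static void clipByNormReferenceSketch(float* x, const int len, const float clipNorm, const bool useAverage) {
    float norm = 0.f;
    for (int i = 0; i < len; ++i)
        norm += x[i] * x[i];
    norm = std::sqrt(norm);
    const float actual = useAverage ? norm / len : norm; // average norm divides the L2 norm by the TAD length
    if (actual > clipNorm) {
        const float scale = clipNorm / actual;           // same scaling factor as in the kernel
        for (int i = 0; i < len; ++i)
            x[i] *= scale;
    }
}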
//////////////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void clipByNormBpCuda(const void* vClipNorm,
const void* vx, const Nd4jLong* xShapeInfo, // input
const void* vy, const Nd4jLong* yShapeInfo, // gradO
const void* vNorm, const Nd4jLong* normShapeInfo,
const void* vSum, const Nd4jLong* sumShapeInfo,
void* vz, const Nd4jLong* zShapeInfo, // gradI
const int* dimensions, const int dimsLen, const bool useAverage) {
const T clipNorm = *reinterpret_cast<const T*>(vClipNorm);
const T* norm = reinterpret_cast<const T*>(vNorm);
const T* sum = reinterpret_cast<const T*>(vSum);
const T* x = reinterpret_cast<const T*>(vx);
const T* y = reinterpret_cast<const T*>(vy);
T* z = reinterpret_cast<T*>(vz);
__shared__ Nd4jLong zLen, tadLen, totalThreads;
__shared__ bool sameOffsets;
if (threadIdx.x == 0) {
zLen = shape::length(zShapeInfo);
tadLen = zLen / shape::length(normShapeInfo);
totalThreads = gridDim.x * blockDim.x;
sameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, yShapeInfo, zShapeInfo);
}
__syncthreads();
int zCoords[MAX_RANK], normCoords[MAX_RANK];
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
shape::index2coords(i, zShapeInfo, zCoords);
const auto zOffset = shape::getOffset(zShapeInfo, zCoords);
const auto yOffset = sameOffsets ? zOffset : shape::getOffset(yShapeInfo, zCoords);
// deduce norm coords
for (int j = 0; j < dimsLen; ++j)
normCoords[j] = zCoords[dimensions[j]];
const T actualNorm = useAverage ? norm[shape::getOffset(normShapeInfo, normCoords)] / tadLen : norm[shape::getOffset(normShapeInfo, normCoords)];
if(actualNorm > clipNorm) {
const T sumVal = sum[shape::getOffset(sumShapeInfo, normCoords)];
const auto xOffset = sameOffsets ? zOffset : shape::getOffset(xShapeInfo, zCoords);
z[zOffset] = (clipNorm / actualNorm) * y[yOffset] * (static_cast<T>(1.f) - (x[xOffset] * sumVal) / (actualNorm * actualNorm));
}
else
z[zOffset] = y[yOffset];
}
}
//////////////////////////////////////////////////////////////////////////
template<typename T>
void clipByNormBp_(sd::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI, const std::vector<int>& dims, const NDArray& clipNorm, const bool useAverage) {
const int rank = input.rankOf();
auto actualNorms = input.reduceAlongDimension(reduce::Norm2, dims);
if(actualNorms.lengthOf() == 1) {
const T norm = useAverage ? actualNorms.e<T>(0) / static_cast<T>(input.lengthOf()) : actualNorms.e<T>(0);
auto clipVal = clipNorm.e<T>(0);
if(norm > clipVal) {
const T sum = input.reduceNumber(reduce::Sum).e<T>(0); // reduce to scalar
const T factor1 = clipVal / norm;
const T factor2 = static_cast<T>(1.f) / (norm * norm); // 1 / (norm*norm)
auto lambda = LAMBDA_TT(x, y, sum, factor1, factor2) {
return factor1 * y * (static_cast<T>(1.f) - factor2 * x * sum);
};
const_cast<NDArray&>(input).applyPairwiseLambda(const_cast<NDArray&>(gradO), lambda, gradI);
}
else
gradI.assign(gradO);
}
else {
const NDArray actualNorms = input.reduceAlongDimension(reduce::Norm2, dims);
const NDArray sums = input.reduceAlongDimension(reduce::Sum, dims);
std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(gradI.rankOf(), dims);
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (gradI.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
PointersManager manager(context, "clipByNormBp");
const int* dimensions = reinterpret_cast<const int*>(manager.replicatePointer(dimsToExclude.data(), dimsToExclude.size() * sizeof(int)));
NDArray::prepareSpecialUse({&gradI}, {&actualNorms, &sums, &clipNorm, &input, &gradO});
clipByNormBpCuda<T><<<blocksPerGrid, threadsPerBlock, 512, *context->getCudaStream()>>>(clipNorm.specialBuffer(), input.specialBuffer(), input.specialShapeInfo(), gradO.specialBuffer(), gradO.specialShapeInfo(), actualNorms.specialBuffer(), actualNorms.specialShapeInfo(), sums.specialBuffer(), sums.specialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), dimensions, (int)dimsToExclude.size(), useAverage);
NDArray::registerSpecialUse({&gradI}, {&actualNorms, &sums, &clipNorm, &input, &gradO});
manager.synchronize();
}
}
BUILD_SINGLE_TEMPLATE(template void clipByNormBp_, (sd::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool useAverage), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
void clipByNormBp(sd::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool useAverage) {
const NDArray& castedInput = gradI.dataType() == input.dataType() ? input : input.cast(gradI.dataType());
BUILD_SINGLE_SELECTOR(gradI.dataType(), clipByNormBp_, (context, castedInput, gradO, gradI, dimensions, clipNorm, useAverage), FLOAT_TYPES);
}
template <typename T>
void clipByGlobalNorm_(sd::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, sd::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace) {
NDArray globalNorm = NDArrayFactory::create<T>(0, inputs[0]->getContext()); //sqrt(sum([l2norm(t)**2 for t in t_list]))
for (auto i = 0; i < inputs.size(); i++) {
auto input = inputs[i];
auto l2norm = input->reduceNumber(reduce::Norm2);
globalNorm += l2norm * l2norm;
}
globalNorm.applyTransform(transform::Sqrt, globalNorm); // = sd::math::nd4j_sqrt(globalNorm);
outputs[inputs.size()]->p(0, globalNorm);
globalNorm.syncToHost();
const T factor = static_cast<T>(clipNorm) / globalNorm.e<T>(0);
for (size_t e = 0; e < inputs.size(); e++) {
// all-reduce
auto input = inputs[e];
auto output = outputs[e];
if (globalNorm.e<double>(0) <= clipNorm) {
output->assign(input);
}
else {
auto lambda = LAMBDA_T(_x, factor) { return _x * factor; };
input->applyLambda(lambda, *output);
}
}
}
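// Descriptive note (not from the original source): the helper above implements the usual
// global-norm clipping rule, globalNorm = sqrt(sum_i ||t_i||^2), scaling every tensor by
// clipNorm / globalNorm only when globalNorm > clipNorm, which is equivalent to
// t_i * clipNorm / max(globalNorm, clipNorm). Quick arithmetic check: tensors with norms 3 and 4
// give globalNorm = sqrt(9 + 16) = 5; with clipNorm = 2.5 every element is scaled by 0.5.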
void clipByGlobalNorm(sd::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, sd::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace) {
BUILD_SINGLE_SELECTOR(outputs[0]->dataType(), clipByGlobalNorm_, (context, inputs, clipNorm, workspace, outputs, isInplace), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByGlobalNorm_, (sd::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, sd::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace), FLOAT_TYPES);
template <typename T>
static void __global__ clipByValueKernel(void* input, const Nd4jLong* inputShape, void* output, const Nd4jLong* outputShape, double leftBound, double rightBound) {
__shared__ T* outputBuf;
__shared__ T* inputBuf;
__shared__ Nd4jLong length;
__shared__ bool linearBuffers;
if (threadIdx.x == 0) {
outputBuf = reinterpret_cast<T *>(output);
inputBuf = reinterpret_cast<T *>(input);
length = shape::length(inputShape);
linearBuffers = shape::elementWiseStride(inputShape) == shape::elementWiseStride(outputShape) && shape::elementWiseStride(inputShape) == 1;
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (Nd4jLong e = tid; e < length; e += step) {
if (linearBuffers) {
if (inputBuf[e] > rightBound) outputBuf[e] = (T) rightBound;
else if (inputBuf[e] < leftBound) outputBuf[e] = (T) leftBound;
else outputBuf[e] = inputBuf[e];
}
else {
auto inputOffset = shape::getIndexOffset(e, inputShape);
auto outputOffset = shape::getIndexOffset(e, outputShape);
if (inputBuf[inputOffset] > rightBound) outputBuf[outputOffset] = (T) rightBound;
else if (inputBuf[inputOffset] < leftBound) outputBuf[outputOffset] = (T) leftBound;
else outputBuf[outputOffset] = inputBuf[outputOffset];
}
}
}
template <typename T>
static void clipByValue_(sd::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output) {
auto stream = context->getCudaStream();
if (!input.isActualOnDeviceSide())
input.syncToDevice();
NDArray::prepareSpecialUse({&output}, {&input});
clipByValueKernel<T><<<256, 512, 8192, *stream>>>(input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), leftBound, rightBound);
NDArray::registerSpecialUse({&output}, {&input});
}
void clipByValue(sd::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output) {
BUILD_SINGLE_SELECTOR(input.dataType(), clipByValue_, (context, input, leftBound, rightBound, output), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void clipByValue_, (sd::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output), FLOAT_TYPES);
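// In scalar terms the kernel above computes output[i] = min(max(input[i], leftBound), rightBound),
// i.e. an element-wise clamp; the linearBuffers fast path merely skips the shape-offset arithmetic
// when both buffers have an element-wise stride of 1.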
}
}
}
// TODO: Run some/all tests for half-precision floating-point values, e.g. __half from:
// #include <cuda_fp16.h>
// TODO: Also test behavior with warps with some inactive/exited lanes
#include <kat/detail/execution_space_specifiers.hpp>
namespace kernels {
template <typename T, std::size_t N>
KAT_FHD kat::array<T, N>& operator++(::kat::array<T, N>& x)
{
for(auto& e : x) { e++; }
return x;
}
template <typename T, std::size_t N>
KAT_FHD kat::array<T, N> operator++(::kat::array<T, N>& x, int)
{
kat::array<T, N> copy = x; // take a copy of the old value; postfix increment must return it
for(auto& e : x) { e++; }
return copy;
}
// TODO: Add __restrict__ to these kernels ... but that triggers a bug, for some reason, with CUDA 9.2
template <typename T>
__global__ void test_shuffle_up(
const T* unshuffled,
T* shuffled,
unsigned delta)
{
assert(gridDim.y == 1 and blockDim.y == 1);
auto global_thread_index = threadIdx.x + blockIdx.x * blockDim.x;
T datum { kat::shuffle_up(unshuffled[global_thread_index], delta) };
shuffled[global_thread_index] = datum;
}
template <typename T>
__global__ void test_shuffle_down(
const T* unshuffled,
T* shuffled,
unsigned delta)
{
assert(gridDim.y == 1 and blockDim.y == 1);
auto global_thread_index = threadIdx.x + blockIdx.x * blockDim.x;
T datum { kat::shuffle_down(unshuffled[global_thread_index], delta) };
shuffled[global_thread_index] = datum;
}
template <typename T>
__global__ void test_shuffle_xor(
const T* unshuffled,
T* shuffled,
const int mask)
{
assert(gridDim.y == 1 and blockDim.y == 1);
auto global_thread_index = threadIdx.x + blockIdx.x * blockDim.x;
// thread_printf("__shfl_xor_sync(%X, %d, %X, %d)", kat::full_warp_mask, 123, mask, kat::warp_size);
T datum {
// unshuffled[global_thread_index]
kat::shuffle_xor(unshuffled[global_thread_index], mask)
// kat::builtins::warp::shuffle::xor_(unshuffled[global_thread_index], mask)
// shfl_xor_sync(kat::full_warp_mask, unshuffled[global_thread_index], mask, kat::warp_size)
// 1000 + unshuffled[global_thread_index]
//123
};
shuffled[global_thread_index] = datum;
}
template <typename T, typename F>
__global__ void test_arbitrary_shuffle(
const T* unshuffled,
T* shuffled,
F get_source_lane_for)
{
assert(gridDim.y == 1 and blockDim.y == 1);
auto global_thread_index = threadIdx.x + blockIdx.x * blockDim.x;
auto lane_index = threadIdx.x % kat::warp_size;
auto shuffle_source_lane = get_source_lane_for(lane_index);
T datum { kat::shuffle_arbitrary(unshuffled[global_thread_index], shuffle_source_lane) };
shuffled[global_thread_index] = datum;
}
} // namespace kernels
constexpr const auto num_full_warps { 2 }; // this is arbitrary; we just didn't want to have only 1.
constexpr const auto block_size { num_full_warps * kat::warp_size };
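// Descriptive note (assumes kat::warp_size == 32, the usual CUDA warp size): block_size then
// evaluates to 2 * 32 = 64 threads, i.e. exactly two full warps per block.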
TEST_SUITE("shuffle") {
TEST_CASE_TEMPLATE("up", I, INTEGER_TYPES, FLOAT_TYPES ) //, ARRAY_TYPES_BY_SIZE)
{
cuda::device_t device { cuda::device::current::get() };
// TODO: Test shuffles with non-full warps.
auto num_grid_blocks { 1 };
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
auto device_side_unshuffled { cuda::memory::device::make_unique<I[]>(device, block_size) };
auto device_side_shuffled { cuda::memory::device::make_unique<I[]>(device, block_size) };
std::array<I, block_size> host_side_unshuffled;
std::array<I, block_size> host_side_shuffled;
std::iota(host_side_unshuffled.begin(),host_side_unshuffled.end(), 0);
std::array<I, block_size> host_side_expected_shuffled;
for(int delta = 0; delta < kat::warp_size; delta++) {
for(std::size_t pos { 0 }; pos < host_side_expected_shuffled.size(); pos ++) {
// Note: I wonder if it's a good idea not to use a typedef for lane indices.
unsigned lane_index = pos % kat::warp_size;
auto shuffle_origin_pos = (lane_index >= delta) ? (pos - delta) : pos;
host_side_expected_shuffled[pos] = host_side_unshuffled[shuffle_origin_pos];
}
cuda::memory::copy(device_side_unshuffled.get(), host_side_unshuffled.data(), sizeof(host_side_unshuffled));
cuda::launch(
::kernels::test_shuffle_up<I>,
launch_config,
device_side_unshuffled.get(),
device_side_shuffled.get(),
delta);
cuda::memory::copy(host_side_shuffled.data(), device_side_shuffled.get(), sizeof(host_side_shuffled));
constexpr const auto print_results { false };
auto found_discrepancy { false };
for(auto i { 0 }; i < block_size; i++) {
CHECK(host_side_shuffled[i] == host_side_expected_shuffled[i]);
if (host_side_shuffled[i] != host_side_expected_shuffled[i]) {
found_discrepancy = true;
MESSAGE("index of discrepancy was: " << i);
}
}
if (print_results) {
if (found_discrepancy) {
std::cout << "Unshuffled input:\n" << host_side_unshuffled << '\n';
std::cout << "Input shuffled up with delta = " << delta << ":\n" << host_side_unshuffled << '\n';
std::cout << "Expected shuffled up output : \n" << host_side_expected_shuffled << '\n';
}
else {
std::cout << "No discrepancies for type = " << util::type_name<I>() << ", delta = " << delta << ".\n";
}
}
}
}
TEST_CASE_TEMPLATE("down", I, INTEGER_TYPES, FLOAT_TYPES ) //, ARRAY_TYPES_BY_SIZE)
{
cuda::device_t device { cuda::device::current::get() };
// TODO: Test shuffles with non-full warps.
auto num_grid_blocks { 1 };
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
auto device_side_unshuffled { cuda::memory::device::make_unique<I[]>(device, block_size) };
auto device_side_shuffled { cuda::memory::device::make_unique<I[]>(device, block_size) };
std::array<I, block_size> host_side_unshuffled;
std::array<I, block_size> host_side_shuffled;
std::iota(host_side_unshuffled.begin(),host_side_unshuffled.end(), 0);
std::array<I, block_size> host_side_expected_shuffled;
for(int delta = 0; delta < kat::warp_size; delta++) {
for(std::size_t pos { 0 }; pos < host_side_expected_shuffled.size(); pos ++) {
// Note: I wonder if it's a good idea not to use a typedef for lane indices.
unsigned lane_index = pos % kat::warp_size;
auto shuffle_origin_pos = (lane_index < kat::warp_size - delta) ? (pos + delta) : pos;
host_side_expected_shuffled[pos] = host_side_unshuffled[shuffle_origin_pos];
}
cuda::memory::copy(device_side_unshuffled.get(), host_side_unshuffled.data(), sizeof(host_side_unshuffled));
cuda::launch(
::kernels::test_shuffle_down<I>,
launch_config,
device_side_unshuffled.get(),
device_side_shuffled.get(),
delta);
cuda::memory::copy(host_side_shuffled.data(), device_side_shuffled.get(), sizeof(host_side_shuffled));
constexpr const auto print_results { false };
auto found_discrepancy { false };
for(auto i { 0 }; i < block_size; i++) {
CHECK(host_side_shuffled[i] == host_side_expected_shuffled[i]);
if (host_side_shuffled[i] != host_side_expected_shuffled[i]) {
found_discrepancy = true;
MESSAGE("index of discrepancy was: " << i);
}
}
if (print_results) {
if (found_discrepancy) {
std::cout << "Unshuffled input:\n" << host_side_unshuffled << '\n';
std::cout << "Input shuffled up with delta = " << delta << ":\n" << host_side_unshuffled << '\n';
std::cout << "Expected shuffled up output : \n" << host_side_expected_shuffled << '\n';
}
else {
std::cout << "No discrepancies for type = " << util::type_name<I>() << ", delta = " << delta << ".\n";
}
}
}
}
TEST_CASE_TEMPLATE("xor", I, INTEGER_TYPES, FLOAT_TYPES ) //, ARRAY_TYPES_BY_SIZE)
{
cuda::device_t device { cuda::device::current::get() };
// TODO: Test shuffles with non-full warps.
auto num_grid_blocks { 1 };
auto launch_config { cuda::make_launch_config(num_grid_blocks, block_size) };
auto device_side_unshuffled { cuda::memory::device::make_unique<I[]>(device, block_size) };
auto device_side_shuffled { cuda::memory::device::make_unique<I[]>(device, block_size) };
std::array<I, block_size> host_side_unshuffled;
std::array<I, block_size> host_side_shuffled;
std::iota(host_side_unshuffled.begin(),host_side_unshuffled.end(), 0);
std::array<I, block_size> host_side_expected_shuffled;
for(size_t mask_index { 0 }; mask_index < kat::warp_size; mask_index++) {
// Note the mask can't have bits that aren't present in actual lane indices,
// so the mask does not exceed warp_size - 1
// std::uniform_int_distribution<kat::lane_mask_t> distribution(kat::empty_warp_mask, kat::full_warp_mask);
// // util::random::seed(std::time(0)); // seed with the current time
// auto mask = util::random::sample_from(distribution);
int mask = mask_index; // yes, just like that
// std::cout << "Using mask " << std::hex << (unsigned) mask << std::dec << std::endl;
for(std::size_t pos { 0 }; pos < host_side_expected_shuffled.size(); pos ++) {
// Note: I wonder if it's a good idea not to use a typedef for lane indices.
unsigned lane_index = pos % kat::warp_size;
auto shuffle_origin_pos = (pos - lane_index) ^ (lane_index xor mask);
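// Worked example (added note; assumes kat::warp_size == 32): the warp base (pos - lane_index) is a
// multiple of 32, so its low 5 bits are zero and XOR-ing in (lane_index ^ mask) equals adding it.
// E.g. pos = 37 (base 32, lane 5) with mask = 3 gives source lane 5 ^ 3 = 6, so
// shuffle_origin_pos = 32 ^ 6 = 38.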
host_side_expected_shuffled[pos] = host_side_unshuffled[shuffle_origin_pos];
// std::cout << "pos = " << std::setw(2) << pos << ", host_side_expected_shuffled[" << std::setw(2) << pos << "] = " << std::setw(2) << host_side_expected_shuffled[pos] << std::endl;
}
cuda::memory::copy(device_side_unshuffled.get(), host_side_unshuffled.data(), sizeof(host_side_unshuffled));
cuda::launch(
::kernels::test_shuffle_xor<I>,
launch_config,
device_side_unshuffled.get(),
device_side_shuffled.get(),
mask);
cuda::memory::copy(host_side_shuffled.data(), device_side_shuffled.get(), sizeof(host_side_shuffled));
constexpr const auto print_results { false };
auto found_discrepancy { false };
for(auto i { 0 }; i < block_size; i++) {
CHECK(host_side_shuffled[i] == host_side_expected_shuffled[i]);
if (host_side_shuffled[i] != host_side_expected_shuffled[i]) {
found_discrepancy = true;
MESSAGE("index of discrepancy was: " << i);
}
}
if (print_results) {
if (found_discrepancy) {
std::cout << "Unshuffled input:\n" << host_side_unshuffled << '\n';
std::cout << "Input shuffled up with mask = " << std::hex << mask << std::dec << ":\n" << host_side_unshuffled << '\n';
std::cout << "Expected shuffled up output : \n" << host_side_expected_shuffled << '\n';
}
else {
std::cout << "No discrepancies for type = " << util::type_name<I>() << ", mask = " << std::hex << mask << std::dec << ".\n";
}
}
}
}
} // TEST_SUITE("shuffle")
#include "cupoch/geometry/pointcloud.h"
#include "cupoch/geometry/trianglemesh.h"
#include "cupoch/geometry/voxelgrid.h"
#include "cupoch/io/class_io/pointcloud_io.h"
#include "cupoch/io/class_io/trianglemesh_io.h"
#include "cupoch/io/class_io/voxelgrid_io.h"
#include "cupoch/utility/console.h"
namespace cupoch {
namespace {
using namespace io;
namespace ply_pointcloud_reader {
struct PLYReaderState {
utility::ConsoleProgressBar *progress_bar;
HostPointCloud *pointcloud_ptr;
long vertex_index;
long vertex_num;
long normal_index;
long normal_num;
long color_index;
long color_num;
};
int ReadVertexCallback(p_ply_argument argument) {
PLYReaderState *state_ptr;
long index;
ply_get_argument_user_data(argument, reinterpret_cast<void **>(&state_ptr),
&index);
if (state_ptr->vertex_index >= state_ptr->vertex_num) {
return 0; // some sanity check
}
float value = ply_get_argument_value(argument);
state_ptr->pointcloud_ptr->points_[state_ptr->vertex_index](index) = value;
if (index == 2) { // reading 'z'
state_ptr->vertex_index++;
++(*state_ptr->progress_bar);
}
return 1;
}
int ReadNormalCallback(p_ply_argument argument) {
PLYReaderState *state_ptr;
long index;
ply_get_argument_user_data(argument, reinterpret_cast<void **>(&state_ptr),
&index);
if (state_ptr->normal_index >= state_ptr->normal_num) {
return 0;
}
float value = ply_get_argument_value(argument);
state_ptr->pointcloud_ptr->normals_[state_ptr->normal_index](index) = value;
if (index == 2) { // reading 'nz'
state_ptr->normal_index++;
}
return 1;
}
int ReadColorCallback(p_ply_argument argument) {
PLYReaderState *state_ptr;
long index;
ply_get_argument_user_data(argument, reinterpret_cast<void **>(&state_ptr),
&index);
if (state_ptr->color_index >= state_ptr->color_num) {
return 0;
}
float value = ply_get_argument_value(argument);
state_ptr->pointcloud_ptr->colors_[state_ptr->color_index](index) =
value / 255.0;
if (index == 2) { // reading 'blue'
state_ptr->color_index++;
}
return 1;
}
} // namespace ply_pointcloud_reader
namespace ply_trianglemesh_reader {
struct PLYReaderState {
utility::ConsoleProgressBar *progress_bar;
HostTriangleMesh *mesh_ptr;
long vertex_index;
long vertex_num;
long normal_index;
long normal_num;
long color_index;
long color_num;
std::vector<unsigned int> face;
long face_index;
long face_num;
};
int ReadVertexCallback(p_ply_argument argument) {
PLYReaderState *state_ptr;
long index;
ply_get_argument_user_data(argument, reinterpret_cast<void **>(&state_ptr),
&index);
if (state_ptr->vertex_index >= state_ptr->vertex_num) {
return 0;
}
float value = ply_get_argument_value(argument);
state_ptr->mesh_ptr->vertices_[state_ptr->vertex_index](index) = value;
if (index == 2) { // reading 'z'
state_ptr->vertex_index++;
++(*state_ptr->progress_bar);
}
return 1;
}
int ReadNormalCallback(p_ply_argument argument) {
PLYReaderState *state_ptr;
long index;
ply_get_argument_user_data(argument, reinterpret_cast<void **>(&state_ptr),
&index);
if (state_ptr->normal_index >= state_ptr->normal_num) {
return 0;
}
float value = ply_get_argument_value(argument);
state_ptr->mesh_ptr->vertex_normals_[state_ptr->normal_index](index) =
value;
if (index == 2) { // reading 'nz'
state_ptr->normal_index++;
}
return 1;
}
int ReadColorCallback(p_ply_argument argument) {
PLYReaderState *state_ptr;
long index;
ply_get_argument_user_data(argument, reinterpret_cast<void **>(&state_ptr),
&index);
if (state_ptr->color_index >= state_ptr->color_num) {
return 0;
}
float value = ply_get_argument_value(argument);
state_ptr->mesh_ptr->vertex_colors_[state_ptr->color_index](index) =
value / 255.0;
if (index == 2) { // reading 'blue'
state_ptr->color_index++;
}
return 1;
}
int ReadFaceCallBack(p_ply_argument argument) {
PLYReaderState *state_ptr;
long dummy, length, index;
ply_get_argument_user_data(argument, reinterpret_cast<void **>(&state_ptr),
&dummy);
float value = ply_get_argument_value(argument);
if (state_ptr->face_index >= state_ptr->face_num) {
return 0;
}
ply_get_argument_property(argument, NULL, &length, &index);
if (index == -1) {
state_ptr->face.clear();
} else {
state_ptr->face.push_back(int(value));
}
if (long(state_ptr->face.size()) == length) {
if (!AddTrianglesByEarClipping(*state_ptr->mesh_ptr, state_ptr->face)) {
utility::LogWarning(
"Read PLY failed: A polygon in the mesh could not be "
"decomposed into triangles.");
return 0;
}
state_ptr->face_index++;
++(*state_ptr->progress_bar);
}
return 1;
}
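// Background note on the callback above (general description, not specific to this library): rply
// delivers list properties one value at a time, so the indices are buffered until face.size() == length
// and only then handed to the triangulator. For a convex quad (a, b, c, d) ear clipping typically
// yields two triangles, e.g. (a, b, c) and (a, c, d); concave polygons may be split differently.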
} // namespace ply_trianglemesh_reader
namespace ply_voxelgrid_reader {
struct PLYReaderState {
utility::ConsoleProgressBar *progress_bar;
thrust::host_vector<geometry::Voxel> *voxelgrid_ptr;
Eigen::Vector3f origin;
double voxel_size;
long voxel_index;
long voxel_num;
long color_index;
long color_num;
};
int ReadOriginCallback(p_ply_argument argument) {
PLYReaderState *state_ptr;
long index;
ply_get_argument_user_data(argument, reinterpret_cast<void **>(&state_ptr),
&index);
double value = ply_get_argument_value(argument);
state_ptr->origin(index) = value;
return 1;
}
int ReadScaleCallback(p_ply_argument argument) {
PLYReaderState *state_ptr;
long index;
ply_get_argument_user_data(argument, reinterpret_cast<void **>(&state_ptr),
&index);
double value = ply_get_argument_value(argument);
state_ptr->voxel_size = value;
return 1;
}
int ReadVoxelCallback(p_ply_argument argument) {
PLYReaderState *state_ptr;
long index;
ply_get_argument_user_data(argument, reinterpret_cast<void **>(&state_ptr),
&index);
if (state_ptr->voxel_index >= state_ptr->voxel_num) {
return 0; // some sanity check
}
double value = ply_get_argument_value(argument);
auto &ptr = *(state_ptr->voxelgrid_ptr);
ptr[state_ptr->voxel_index].grid_index_(index) = int(value);
if (index == 2) { // reading 'z'
state_ptr->voxel_index++;
++(*state_ptr->progress_bar);
}
return 1;
}
int ReadColorCallback(p_ply_argument argument) {
PLYReaderState *state_ptr;
long index;
ply_get_argument_user_data(argument, reinterpret_cast<void **>(&state_ptr),
&index);
if (state_ptr->color_index >= state_ptr->color_num) {
return 0;
}
double value = ply_get_argument_value(argument);
auto &ptr = *(state_ptr->voxelgrid_ptr);
ptr[state_ptr->color_index].color_(index) = value / 255.0;
if (index == 2) { // reading 'blue'
state_ptr->color_index++;
++(*state_ptr->progress_bar);
}
return 1;
}
} // namespace ply_voxelgrid_reader
} // namespace
namespace io {
bool ReadPointCloudFromPLY(const std::string &filename,
geometry::PointCloud &pointcloud,
bool print_progress) {
using namespace ply_pointcloud_reader;
p_ply ply_file = ply_open(filename.c_str(), NULL, 0, NULL);
if (!ply_file) {
utility::LogWarning("Read PLY failed: unable to open file: %s",
filename.c_str());
return false;
}
if (!ply_read_header(ply_file)) {
utility::LogWarning("Read PLY failed: unable to parse header.");
ply_close(ply_file);
return false;
}
PLYReaderState state;
HostPointCloud host_pc;
state.pointcloud_ptr = &host_pc;
state.vertex_num = ply_set_read_cb(ply_file, "vertex", "x",
ReadVertexCallback, &state, 0);
ply_set_read_cb(ply_file, "vertex", "y", ReadVertexCallback, &state, 1);
ply_set_read_cb(ply_file, "vertex", "z", ReadVertexCallback, &state, 2);
state.normal_num = ply_set_read_cb(ply_file, "vertex", "nx",
ReadNormalCallback, &state, 0);
ply_set_read_cb(ply_file, "vertex", "ny", ReadNormalCallback, &state, 1);
ply_set_read_cb(ply_file, "vertex", "nz", ReadNormalCallback, &state, 2);
state.color_num = ply_set_read_cb(ply_file, "vertex", "red",
ReadColorCallback, &state, 0);
ply_set_read_cb(ply_file, "vertex", "green", ReadColorCallback, &state, 1);
ply_set_read_cb(ply_file, "vertex", "blue", ReadColorCallback, &state, 2);
if (state.vertex_num <= 0) {
utility::LogWarning("Read PLY failed: number of vertex <= 0.");
ply_close(ply_file);
return false;
}
state.vertex_index = 0;
state.normal_index = 0;
state.color_index = 0;
host_pc.Clear();
host_pc.points_.resize(state.vertex_num);
host_pc.normals_.resize(state.normal_num);
host_pc.colors_.resize(state.color_num);
utility::ConsoleProgressBar progress_bar(state.vertex_num + 1,
"Reading PLY: ", print_progress);
state.progress_bar = &progress_bar;
if (!ply_read(ply_file)) {
utility::LogWarning("Read PLY failed: unable to read file: {}",
filename);
ply_close(ply_file);
return false;
}
ply_close(ply_file);
++progress_bar;
host_pc.ToDevice(pointcloud);
return true;
}
bool WritePointCloudToPLY(const std::string &filename,
const geometry::PointCloud &pointcloud,
bool write_ascii /* = false*/,
bool compressed /* = false*/,
bool print_progress) {
if (pointcloud.IsEmpty()) {
utility::LogWarning("Write PLY failed: point cloud has 0 points.");
return false;
}
p_ply ply_file = ply_create(filename.c_str(),
write_ascii ? PLY_ASCII : PLY_LITTLE_ENDIAN,
NULL, 0, NULL);
if (!ply_file) {
utility::LogWarning("Write PLY failed: unable to open file: {}",
filename);
return false;
}
ply_add_comment(ply_file, "Created by Cupoch");
ply_add_element(ply_file, "vertex",
static_cast<long>(pointcloud.points_.size()));
ply_add_property(ply_file, "x", PLY_DOUBLE, PLY_DOUBLE, PLY_DOUBLE);
ply_add_property(ply_file, "y", PLY_DOUBLE, PLY_DOUBLE, PLY_DOUBLE);
ply_add_property(ply_file, "z", PLY_DOUBLE, PLY_DOUBLE, PLY_DOUBLE);
if (pointcloud.HasNormals()) {
ply_add_property(ply_file, "nx", PLY_DOUBLE, PLY_DOUBLE, PLY_DOUBLE);
ply_add_property(ply_file, "ny", PLY_DOUBLE, PLY_DOUBLE, PLY_DOUBLE);
ply_add_property(ply_file, "nz", PLY_DOUBLE, PLY_DOUBLE, PLY_DOUBLE);
}
if (pointcloud.HasColors()) {
ply_add_property(ply_file, "red", PLY_UCHAR, PLY_UCHAR, PLY_UCHAR);
ply_add_property(ply_file, "green", PLY_UCHAR, PLY_UCHAR, PLY_UCHAR);
ply_add_property(ply_file, "blue", PLY_UCHAR, PLY_UCHAR, PLY_UCHAR);
}
if (!ply_write_header(ply_file)) {
utility::LogWarning("Write PLY failed: unable to write header.");
ply_close(ply_file);
return false;
}
utility::ConsoleProgressBar progress_bar(
static_cast<size_t>(pointcloud.points_.size()),
"Writing PLY: ", print_progress);
bool printed_color_warning = false;
HostPointCloud host_pc;
host_pc.FromDevice(pointcloud);
for (size_t i = 0; i < pointcloud.points_.size(); i++) {
const Eigen::Vector3f &point = host_pc.points_[i];
ply_write(ply_file, point(0));
ply_write(ply_file, point(1));
ply_write(ply_file, point(2));
if (pointcloud.HasNormals()) {
const Eigen::Vector3f &normal = host_pc.normals_[i];
ply_write(ply_file, normal(0));
ply_write(ply_file, normal(1));
ply_write(ply_file, normal(2));
}
if (pointcloud.HasColors()) {
const Eigen::Vector3f &color = host_pc.colors_[i];
if (!printed_color_warning &&
(color(0) < 0 || color(0) > 1 || color(1) < 0 || color(1) > 1 ||
color(2) < 0 || color(2) > 1)) {
utility::LogWarning(
"Write Ply clamped color value to valid range");
printed_color_warning = true;
}
ply_write(ply_file,
std::min(255.0, std::max(0.0, color(0) * 255.0)));
ply_write(ply_file,
std::min(255.0, std::max(0.0, color(1) * 255.0)));
ply_write(ply_file,
std::min(255.0, std::max(0.0, color(2) * 255.0)));
}
++progress_bar;
}
ply_close(ply_file);
return true;
}
bool ReadTriangleMeshFromPLY(const std::string &filename,
geometry::TriangleMesh &mesh,
bool print_progress) {
using namespace ply_trianglemesh_reader;
p_ply ply_file = ply_open(filename.c_str(), NULL, 0, NULL);
if (!ply_file) {
utility::LogWarning("Read PLY failed: unable to open file: {}",
filename);
return false;
}
if (!ply_read_header(ply_file)) {
utility::LogWarning("Read PLY failed: unable to parse header.");
ply_close(ply_file);
return false;
}
HostTriangleMesh host_mesh;
PLYReaderState state;
state.mesh_ptr = &host_mesh;
state.vertex_num = ply_set_read_cb(ply_file, "vertex", "x",
ReadVertexCallback, &state, 0);
ply_set_read_cb(ply_file, "vertex", "y", ReadVertexCallback, &state, 1);
ply_set_read_cb(ply_file, "vertex", "z", ReadVertexCallback, &state, 2);
state.normal_num = ply_set_read_cb(ply_file, "vertex", "nx",
ReadNormalCallback, &state, 0);
ply_set_read_cb(ply_file, "vertex", "ny", ReadNormalCallback, &state, 1);
ply_set_read_cb(ply_file, "vertex", "nz", ReadNormalCallback, &state, 2);
state.color_num = ply_set_read_cb(ply_file, "vertex", "red",
ReadColorCallback, &state, 0);
ply_set_read_cb(ply_file, "vertex", "green", ReadColorCallback, &state, 1);
ply_set_read_cb(ply_file, "vertex", "blue", ReadColorCallback, &state, 2);
if (state.vertex_num <= 0) {
utility::LogWarning("Read PLY failed: number of vertex <= 0.");
ply_close(ply_file);
return false;
}
state.face_num = ply_set_read_cb(ply_file, "face", "vertex_indices",
ReadFaceCallBack, &state, 0);
if (state.face_num == 0) {
state.face_num = ply_set_read_cb(ply_file, "face", "vertex_index",
ReadFaceCallBack, &state, 0);
}
state.vertex_index = 0;
state.normal_index = 0;
state.color_index = 0;
state.face_index = 0;
host_mesh.Clear();
host_mesh.vertices_.resize(state.vertex_num);
host_mesh.vertex_normals_.resize(state.normal_num);
host_mesh.vertex_colors_.resize(state.color_num);
utility::ConsoleProgressBar progress_bar(state.vertex_num + state.face_num,
"Reading PLY: ", print_progress);
state.progress_bar = &progress_bar;
if (!ply_read(ply_file)) {
utility::LogWarning("Read PLY failed: unable to read file: {}",
filename);
ply_close(ply_file);
return false;
}
ply_close(ply_file);
host_mesh.ToDevice(mesh);
return true;
}
bool WriteTriangleMeshToPLY(const std::string &filename,
const geometry::TriangleMesh &mesh,
bool write_ascii /* = false*/,
bool compressed /* = false*/,
bool write_vertex_normals /* = true*/,
bool write_vertex_colors /* = true*/,
bool write_triangle_uvs /* = true*/,
bool print_progress) {
if (write_triangle_uvs && mesh.HasTriangleUvs()) {
utility::LogWarning(
"This file format currently does not support writing textures "
"and uv coordinates. Consider using .obj");
}
if (mesh.IsEmpty()) {
utility::LogWarning("Write PLY failed: mesh has 0 vertices.");
return false;
}
p_ply ply_file = ply_create(filename.c_str(),
write_ascii ? PLY_ASCII : PLY_LITTLE_ENDIAN,
NULL, 0, NULL);
if (!ply_file) {
utility::LogWarning("Write PLY failed: unable to open file: {}",
filename);
return false;
}
write_vertex_normals = write_vertex_normals && mesh.HasVertexNormals();
write_vertex_colors = write_vertex_colors && mesh.HasVertexColors();
ply_add_comment(ply_file, "Created by Open3D");
ply_add_element(ply_file, "vertex",
static_cast<long>(mesh.vertices_.size()));
ply_add_property(ply_file, "x", PLY_DOUBLE, PLY_DOUBLE, PLY_DOUBLE);
ply_add_property(ply_file, "y", PLY_DOUBLE, PLY_DOUBLE, PLY_DOUBLE);
ply_add_property(ply_file, "z", PLY_DOUBLE, PLY_DOUBLE, PLY_DOUBLE);
if (write_vertex_normals) {
ply_add_property(ply_file, "nx", PLY_DOUBLE, PLY_DOUBLE, PLY_DOUBLE);
ply_add_property(ply_file, "ny", PLY_DOUBLE, PLY_DOUBLE, PLY_DOUBLE);
ply_add_property(ply_file, "nz", PLY_DOUBLE, PLY_DOUBLE, PLY_DOUBLE);
}
if (write_vertex_colors) {
ply_add_property(ply_file, "red", PLY_UCHAR, PLY_UCHAR, PLY_UCHAR);
ply_add_property(ply_file, "green", PLY_UCHAR, PLY_UCHAR, PLY_UCHAR);
ply_add_property(ply_file, "blue", PLY_UCHAR, PLY_UCHAR, PLY_UCHAR);
}
ply_add_element(ply_file, "face",
static_cast<long>(mesh.triangles_.size()));
ply_add_property(ply_file, "vertex_indices", PLY_LIST, PLY_UCHAR, PLY_UINT);
if (!ply_write_header(ply_file)) {
utility::LogWarning("Write PLY failed: unable to write header.");
ply_close(ply_file);
return false;
}
utility::ConsoleProgressBar progress_bar(
static_cast<size_t>(mesh.vertices_.size() + mesh.triangles_.size()),
"Writing PLY: ", print_progress);
bool printed_color_warning = false;
HostTriangleMesh host_mesh;
host_mesh.FromDevice(mesh);
for (size_t i = 0; i < mesh.vertices_.size(); i++) {
const auto &vertex = host_mesh.vertices_[i];
ply_write(ply_file, vertex(0));
ply_write(ply_file, vertex(1));
ply_write(ply_file, vertex(2));
if (write_vertex_normals) {
const auto &normal = host_mesh.vertex_normals_[i];
ply_write(ply_file, normal(0));
ply_write(ply_file, normal(1));
ply_write(ply_file, normal(2));
}
if (write_vertex_colors) {
const auto &color = host_mesh.vertex_colors_[i];
if (!printed_color_warning &&
(color(0) < 0 || color(0) > 1 || color(1) < 0 || color(1) > 1 ||
color(2) < 0 || color(2) > 1)) {
utility::LogWarning(
"Write Ply clamped color value to valid range");
printed_color_warning = true;
}
ply_write(ply_file,
std::min(255.0, std::max(0.0, color(0) * 255.0)));
ply_write(ply_file,
std::min(255.0, std::max(0.0, color(1) * 255.0)));
ply_write(ply_file,
std::min(255.0, std::max(0.0, color(2) * 255.0)));
}
++progress_bar;
}
for (size_t i = 0; i < mesh.triangles_.size(); i++) {
const auto &triangle = host_mesh.triangles_[i];
ply_write(ply_file, 3);
ply_write(ply_file, triangle(0));
ply_write(ply_file, triangle(1));
ply_write(ply_file, triangle(2));
++progress_bar;
}
ply_close(ply_file);
return true;
}
bool ReadVoxelGridFromPLY(const std::string &filename,
geometry::VoxelGrid &voxelgrid,
bool print_progress) {
using namespace ply_voxelgrid_reader;
p_ply ply_file = ply_open(filename.c_str(), NULL, 0, NULL);
if (!ply_file) {
utility::LogWarning("Read PLY failed: unable to open file: {}",
filename);
return false;
}
if (!ply_read_header(ply_file)) {
utility::LogWarning("Read PLY failed: unable to parse header.");
ply_close(ply_file);
return false;
}
PLYReaderState state;
thrust::host_vector<geometry::Voxel> voxelgrid_ptr;
state.voxelgrid_ptr = &voxelgrid_ptr;
state.voxel_num = ply_set_read_cb(ply_file, "vertex", "x",
ReadVoxelCallback, &state, 0);
ply_set_read_cb(ply_file, "vertex", "y", ReadVoxelCallback, &state, 1);
ply_set_read_cb(ply_file, "vertex", "z", ReadVoxelCallback, &state, 2);
if (state.voxel_num <= 0) {
utility::LogWarning("Read PLY failed: number of vertex <= 0.");
ply_close(ply_file);
return false;
}
state.color_num = ply_set_read_cb(ply_file, "vertex", "red",
ReadColorCallback, &state, 0);
ply_set_read_cb(ply_file, "vertex", "green", ReadColorCallback, &state, 1);
ply_set_read_cb(ply_file, "vertex", "blue", ReadColorCallback, &state, 2);
ply_set_read_cb(ply_file, "origin", "x", ReadOriginCallback, &state, 0);
ply_set_read_cb(ply_file, "origin", "y", ReadOriginCallback, &state, 1);
ply_set_read_cb(ply_file, "origin", "z", ReadOriginCallback, &state, 2);
ply_set_read_cb(ply_file, "voxel_size", "val", ReadScaleCallback, &state,
0);
state.voxel_index = 0;
state.color_index = 0;
voxelgrid_ptr.clear();
voxelgrid_ptr.resize(state.voxel_num);
utility::ConsoleProgressBar progress_bar(state.voxel_num + state.color_num,
"Reading PLY: ", print_progress);
state.progress_bar = &progress_bar;
if (!ply_read(ply_file)) {
utility::LogWarning("Read PLY failed: unable to read file: {}",
filename);
ply_close(ply_file);
return false;
}
voxelgrid.Clear();
voxelgrid.AddVoxels(voxelgrid_ptr);
voxelgrid.origin_ = state.origin;
voxelgrid.voxel_size_ = state.voxel_size;
ply_close(ply_file);
return true;
}
bool WriteVoxelGridToPLY(const std::string &filename,
const geometry::VoxelGrid &voxelgrid,
bool write_ascii /* = false*/,
bool compressed /* = false*/,
bool print_progress) {
if (voxelgrid.IsEmpty()) {
utility::LogWarning("Write PLY failed: voxelgrid has 0 voxels.");
return false;
}
p_ply ply_file = ply_create(filename.c_str(),
write_ascii ? PLY_ASCII : PLY_LITTLE_ENDIAN,
NULL, 0, NULL);
if (!ply_file) {
utility::LogWarning("Write PLY failed: unable to open file: {}",
filename);
return false;
}
ply_add_comment(ply_file, "Created by Open3D");
ply_add_element(ply_file, "origin", 1);
ply_add_property(ply_file, "x", PLY_DOUBLE, PLY_DOUBLE, PLY_DOUBLE);
ply_add_property(ply_file, "y", PLY_DOUBLE, PLY_DOUBLE, PLY_DOUBLE);
ply_add_property(ply_file, "z", PLY_DOUBLE, PLY_DOUBLE, PLY_DOUBLE);
ply_add_element(ply_file, "voxel_size", 1);
ply_add_property(ply_file, "val", PLY_DOUBLE, PLY_DOUBLE, PLY_DOUBLE);
ply_add_element(ply_file, "vertex",
static_cast<long>(voxelgrid.voxels_keys_.size()));
// PLY_UINT could be used for x, y, z but PLY_DOUBLE is used instead due to a
// compatibility issue.
ply_add_property(ply_file, "x", PLY_DOUBLE, PLY_DOUBLE, PLY_DOUBLE);
ply_add_property(ply_file, "y", PLY_DOUBLE, PLY_DOUBLE, PLY_DOUBLE);
ply_add_property(ply_file, "z", PLY_DOUBLE, PLY_DOUBLE, PLY_DOUBLE);
if (voxelgrid.HasColors()) {
ply_add_property(ply_file, "red", PLY_UCHAR, PLY_UCHAR, PLY_UCHAR);
ply_add_property(ply_file, "green", PLY_UCHAR, PLY_UCHAR, PLY_UCHAR);
ply_add_property(ply_file, "blue", PLY_UCHAR, PLY_UCHAR, PLY_UCHAR);
}
if (!ply_write_header(ply_file)) {
utility::LogWarning("Write PLY failed: unable to write header.");
ply_close(ply_file);
return false;
}
utility::ConsoleProgressBar progress_bar(
static_cast<size_t>(voxelgrid.voxels_keys_.size()),
"Writing PLY: ", print_progress);
const Eigen::Vector3f &origin = voxelgrid.origin_;
ply_write(ply_file, origin(0));
ply_write(ply_file, origin(1));
ply_write(ply_file, origin(2));
ply_write(ply_file, voxelgrid.voxel_size_);
HostVoxelGrid host_vg;
host_vg.FromDevice(voxelgrid);
for (auto &it : host_vg.voxels_values_) {
const geometry::Voxel &voxel = it;
ply_write(ply_file, voxel.grid_index_(0));
ply_write(ply_file, voxel.grid_index_(1));
ply_write(ply_file, voxel.grid_index_(2));
const Eigen::Vector3f &color = voxel.color_;
ply_write(ply_file, std::min(255.0, std::max(0.0, color(0) * 255.0)));
ply_write(ply_file, std::min(255.0, std::max(0.0, color(1) * 255.0)));
ply_write(ply_file, std::min(255.0, std::max(0.0, color(2) * 255.0)));
++progress_bar;
}
ply_close(ply_file);
return true;
}
} // namespace io
} // namespace cupoch
/**
 * \file
* cub::WarpScanSmem provides smem-based variants of parallel prefix scan across CUDA warps.
*/
#pragma once
#include "../../thread/thread_operators.cuh"
#include "../../thread/thread_load.cuh"
#include "../../thread/thread_store.cuh"
#include "../../util_type.cuh"
#include "../../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/**
* \brief WarpScanSmem provides smem-based variants of parallel prefix scan across CUDA warps.
*/
template <
typename T, ///< Data type being scanned
int LOGICAL_WARPS, ///< Number of logical warps entrant
int LOGICAL_WARP_THREADS> ///< Number of threads per logical warp
struct WarpScanSmem
{
/******************************************************************************
* Constants and type definitions
******************************************************************************/
enum
{
/// The number of warp scan steps
STEPS = Log2<LOGICAL_WARP_THREADS>::VALUE,
/// The number of threads in half a warp
HALF_WARP_THREADS = 1 << (STEPS - 1),
/// The number of shared memory elements per warp
WARP_SMEM_ELEMENTS = LOGICAL_WARP_THREADS + HALF_WARP_THREADS,
};
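// Worked example of the constants above (a sketch, assuming the common case
// LOGICAL_WARP_THREADS == 32): STEPS = 5, HALF_WARP_THREADS = 16, WARP_SMEM_ELEMENTS = 48.
// The extra half-warp of storage in front serves as padding so ScanStep can read
// lane_id - OFFSET without a bounds check when an identity value is available.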
/// Shared memory storage layout type (1.5 warps-worth of elements for each warp)
typedef T _TempStorage[LOGICAL_WARPS][WARP_SMEM_ELEMENTS];
// Alias wrapper allowing storage to be unioned
struct TempStorage : Uninitialized<_TempStorage> {};
/******************************************************************************
* Thread fields
******************************************************************************/
_TempStorage &temp_storage;
unsigned int warp_id;
unsigned int lane_id;
/******************************************************************************
* Construction
******************************************************************************/
/// Constructor
__device__ __forceinline__ WarpScanSmem(
TempStorage &temp_storage,
int warp_id,
int lane_id)
:
temp_storage(temp_storage.Alias()),
warp_id(warp_id),
lane_id(lane_id)
{}
/******************************************************************************
* Operation
******************************************************************************/
/// Initialize identity padding (specialized for operations that have identity)
__device__ __forceinline__ void InitIdentity(Int2Type<true> has_identity)
{
T identity = ZeroInitialize<T>();
ThreadStore<STORE_VOLATILE>(&temp_storage[warp_id][lane_id], identity);
}
/// Initialize identity padding (specialized for operations without identity)
__device__ __forceinline__ void InitIdentity(Int2Type<false> has_identity)
{}
/// Basic inclusive scan iteration (template unrolled, base-case specialization)
template <
bool HAS_IDENTITY,
typename ScanOp>
__device__ __forceinline__ void ScanStep(
T &partial,
ScanOp scan_op,
Int2Type<STEPS> step)
{}
/// Basic inclusive scan iteration (template unrolled, inductive-case specialization)
template <
bool HAS_IDENTITY,
int STEP,
typename ScanOp>
__device__ __forceinline__ void ScanStep(
T &partial,
ScanOp scan_op,
Int2Type<STEP> step)
{
const int OFFSET = 1 << STEP;
// Share partial into buffer
ThreadStore<STORE_VOLATILE>(&temp_storage[warp_id][HALF_WARP_THREADS + lane_id], partial);
// Update partial if addend is in range
if (HAS_IDENTITY || (lane_id >= OFFSET))
{
T addend = ThreadLoad<LOAD_VOLATILE>(&temp_storage[warp_id][HALF_WARP_THREADS + lane_id - OFFSET]);
partial = scan_op(addend, partial);
}
ScanStep<HAS_IDENTITY>(partial, scan_op, Int2Type<STEP + 1>());
}
/// Broadcast
__device__ __forceinline__ T Broadcast(
T input, ///< [in] The value to broadcast
unsigned int src_lane) ///< [in] Which warp lane is to do the broadcasting
{
if (lane_id == src_lane)
{
ThreadStore<STORE_VOLATILE>(temp_storage[warp_id], input);
}
return ThreadLoad<LOAD_VOLATILE>(temp_storage[warp_id]);
}
/// Basic inclusive scan
template <
bool HAS_IDENTITY,
bool SHARE_FINAL,
typename ScanOp>
__device__ __forceinline__ T BasicScan(
T partial, ///< Calling thread's input partial reduction
ScanOp scan_op) ///< Binary associative scan functor
{
// Iterate scan steps
ScanStep<HAS_IDENTITY>(partial, scan_op, Int2Type<0>());
if (SHARE_FINAL)
{
// Share partial into buffer
ThreadStore<STORE_VOLATILE>(&temp_storage[warp_id][HALF_WARP_THREADS + lane_id], partial);
}
return partial;
}
/// Inclusive prefix sum
__device__ __forceinline__ void InclusiveSum(
T input, ///< [in] Calling thread's input item.
T &output) ///< [out] Calling thread's output item. May be aliased with \p input.
{
const bool HAS_IDENTITY = Traits<T>::PRIMITIVE;
// Initialize identity region
InitIdentity(Int2Type<HAS_IDENTITY>());
// Compute inclusive warp scan (has identity, don't share final)
output = BasicScan<HAS_IDENTITY, false>(input, Sum());
}
/// Inclusive prefix sum with aggregate
__device__ __forceinline__ void InclusiveSum(
T input, ///< [in] Calling thread's input item.
T &output, ///< [out] Calling thread's output item. May be aliased with \p input.
T &warp_aggregate) ///< [out] Warp-wide aggregate reduction of input items.
{
const bool HAS_IDENTITY = Traits<T>::PRIMITIVE;
// Initialize identity region
InitIdentity(Int2Type<HAS_IDENTITY>());
// Compute inclusive warp scan (has identity, share final)
output = BasicScan<HAS_IDENTITY, true>(input, Sum());
// Retrieve aggregate in <em>warp-lane</em><sub>0</sub>
warp_aggregate = ThreadLoad<LOAD_VOLATILE>(&temp_storage[warp_id][WARP_SMEM_ELEMENTS - 1]);
}
/// Inclusive scan
template <typename ScanOp>
__device__ __forceinline__ void InclusiveScan(
T input, ///< [in] Calling thread's input item.
T &output, ///< [out] Calling thread's output item. May be aliased with \p input.
ScanOp scan_op) ///< [in] Binary scan operator
{
// Compute inclusive warp scan (no identity, don't share final)
output = BasicScan<false, false>(input, scan_op);
}
/// Inclusive scan with aggregate
template <typename ScanOp>
__device__ __forceinline__ void InclusiveScan(
T input, ///< [in] Calling thread's input item.
T &output, ///< [out] Calling thread's output item. May be aliased with \p input.
ScanOp scan_op, ///< [in] Binary scan operator
T &warp_aggregate) ///< [out] Warp-wide aggregate reduction of input items.
{
// Compute inclusive warp scan (no identity, share final)
output = BasicScan<false, true>(input, scan_op);
// Retrieve aggregate
warp_aggregate = ThreadLoad<LOAD_VOLATILE>(&temp_storage[warp_id][WARP_SMEM_ELEMENTS - 1]);
}
/// Exclusive scan
template <typename ScanOp>
__device__ __forceinline__ void ExclusiveScan(
T input, ///< [in] Calling thread's input item.
T &output, ///< [out] Calling thread's output item. May be aliased with \p input.
T identity, ///< [in] Identity value
ScanOp scan_op) ///< [in] Binary scan operator
{
// Initialize identity region
ThreadStore<STORE_VOLATILE>(&temp_storage[warp_id][lane_id], identity);
// Compute inclusive warp scan (identity, share final)
T inclusive = BasicScan<true, true>(input, scan_op);
// Retrieve exclusive scan
output = ThreadLoad<LOAD_VOLATILE>(&temp_storage[warp_id][HALF_WARP_THREADS + lane_id - 1]);
}
/// Exclusive scan with aggregate
template <typename ScanOp>
__device__ __forceinline__ void ExclusiveScan(
T input, ///< [in] Calling thread's input item.
T &output, ///< [out] Calling thread's output item. May be aliased with \p input.
T identity, ///< [in] Identity value
ScanOp scan_op, ///< [in] Binary scan operator
T &warp_aggregate) ///< [out] Warp-wide aggregate reduction of input items.
{
// Exclusive warp scan (which does share final)
ExclusiveScan(input, output, identity, scan_op);
// Retrieve aggregate
warp_aggregate = ThreadLoad<LOAD_VOLATILE>(&temp_storage[warp_id][WARP_SMEM_ELEMENTS - 1]);
}
/// Exclusive scan without identity
template <typename ScanOp>
__device__ __forceinline__ void ExclusiveScan(
T input, ///< [in] Calling thread's input item.
T &output, ///< [out] Calling thread's output item. May be aliased with \p input.
ScanOp scan_op) ///< [in] Binary scan operator
{
// Compute inclusive warp scan (no identity, share final)
T inclusive = BasicScan<false, true>(input, scan_op);
// Retrieve exclusive scan
output = ThreadLoad<LOAD_VOLATILE>(&temp_storage[warp_id][HALF_WARP_THREADS + lane_id - 1]);
}
/// Exclusive scan with aggregate, without identity
template <typename ScanOp>
__device__ __forceinline__ void ExclusiveScan(
T input, ///< [in] Calling thread's input item.
T &output, ///< [out] Calling thread's output item. May be aliased with \p input.
ScanOp scan_op, ///< [in] Binary scan operator
T &warp_aggregate) ///< [out] Warp-wide aggregate reduction of input items.
{
// Exclusive warp scan (which does share final)
ExclusiveScan(input, output, scan_op);
// Retrieve aggregate
warp_aggregate = ThreadLoad<LOAD_VOLATILE>(&temp_storage[warp_id][WARP_SMEM_ELEMENTS - 1]);
}
};
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
namespace vilib {
#define BLOCKDIM_X 32
#define BLOCKDIM_Y 4
#define RESULT_STEPS 8
#define HALO_STEPS 1
#define INSTANTIATE_1D_COL(I, O) \
template __host__ void conv_filter_col_gpu<I, O>(const I * d_image_in, \
const int input_pitch, \
O * d_image_out, \
const int output_pitch, \
const int width_px, \
const int height_px, \
const conv_filter_type_t filter_type, \
const conv_filter_border_type_t border_type, \
const bool skip_first_and_last_col, \
const float scale, \
cudaStream_t stream)
template<typename I, typename O, int RADIUS, conv_filter_border_type BORDER>
__global__ void conv_filter_col_gpu_shm_kernel(O * __restrict__ output,
const int output_pitch,
const I * __restrict__ input,
const int input_pitch,
const int output_width,
const int input_height,
const filter1x3_t filter,
const float scale) {
__shared__ float s_Data[BLOCKDIM_X][(RESULT_STEPS + 2 * HALO_STEPS) * BLOCKDIM_Y + 1];
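// Size of the tile above (added arithmetic note): with BLOCKDIM_X = 32, BLOCKDIM_Y = 4,
// RESULT_STEPS = 8 and HALO_STEPS = 1 each block stages 32 * ((8 + 2) * 4 + 1) = 1312 floats,
// i.e. 5248 bytes of static shared memory; the "+ 1" on the row length is presumably there to
// avoid shared-memory bank conflicts.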
// Offset to the upper halo edge
const int baseX = blockIdx.x * BLOCKDIM_X + threadIdx.x;
const int baseY = (blockIdx.y * RESULT_STEPS - HALO_STEPS) * BLOCKDIM_Y + threadIdx.y;
if(baseX >= output_width) return;
input += baseX;
output += baseY * output_pitch + baseX;
// Main data
#pragma unroll
for (int i = HALO_STEPS, i_y_offset = (i*BLOCKDIM_Y + baseY) * input_pitch; i < HALO_STEPS + RESULT_STEPS; i++, i_y_offset+= BLOCKDIM_Y * input_pitch) {
s_Data[threadIdx.x][threadIdx.y + i * BLOCKDIM_Y] = input[i_y_offset];
}
// Upper halo
#pragma unroll
for (int i = 0, i_y_offset = baseY * input_pitch; i < HALO_STEPS; i++, i_y_offset += BLOCKDIM_Y * input_pitch) {
const int i_y = baseY + i * BLOCKDIM_Y;
switch(BORDER) {
case conv_filter_border_type::BORDER_SKIP:
// fall-through
case conv_filter_border_type::BORDER_ZERO: // 000000|abcdefgh|0000000
s_Data[threadIdx.x][threadIdx.y + i * BLOCKDIM_Y] = (i_y >= 0) ? input[i_y_offset] : 0;
break;
case conv_filter_border_type::BORDER_REPLICATE: // aaaaaa|abcdefgh|hhhhhhh
s_Data[threadIdx.x][threadIdx.y + i * BLOCKDIM_Y] = (i_y >= 0) ? input[i_y_offset] : input[0];
break;
case conv_filter_border_type::BORDER_REFLECT: // fedcba|abcdefgh|hgfedcb
s_Data[threadIdx.x][threadIdx.y + i * BLOCKDIM_Y] = (i_y >= 0) ? input[i_y_offset] : input[(-i_y-1) * input_pitch];
break;
case conv_filter_border_type::BORDER_WRAP: // cdefgh|abcdefgh|abcdefg
s_Data[threadIdx.x][threadIdx.y + i * BLOCKDIM_Y] = (i_y >= 0) ? input[i_y_offset] : input[(i_y + input_height) * input_pitch];
break;
case conv_filter_border_type::BORDER_REFLECT_101: // gfedcb|abcdefgh|gfedcba
s_Data[threadIdx.x][threadIdx.y + i * BLOCKDIM_Y] = (i_y >= 0) ? input[i_y_offset] : input[-i_y_offset];
break;
}
}
// Lower halo
#pragma unroll
for (int i = HALO_STEPS + RESULT_STEPS, i_y_offset = (i*BLOCKDIM_Y + baseY) * input_pitch; i < HALO_STEPS + RESULT_STEPS + HALO_STEPS; i++, i_y_offset += BLOCKDIM_Y * input_pitch) {
const int i_y = baseY + i * BLOCKDIM_Y;
switch(BORDER) {
case conv_filter_border_type::BORDER_SKIP:
// fall-through
case conv_filter_border_type::BORDER_ZERO: // 000000|abcdefgh|0000000
s_Data[threadIdx.x][threadIdx.y + i * BLOCKDIM_Y]= (input_height > i_y) ? input[i_y_offset] : 0;
break;
case conv_filter_border_type::BORDER_REPLICATE: // aaaaaa|abcdefgh|hhhhhhh
s_Data[threadIdx.x][threadIdx.y + i * BLOCKDIM_Y]= (input_height > i_y) ? input[i_y_offset] : input[(input_height-1) * input_pitch];
break;
case conv_filter_border_type::BORDER_REFLECT: // fedcba|abcdefgh|hgfedcb
s_Data[threadIdx.x][threadIdx.y + i * BLOCKDIM_Y]= (input_height > i_y) ? input[i_y_offset] : input[((input_height<<1) - 1 - i_y) * input_pitch];
break;
case conv_filter_border_type::BORDER_WRAP: // cdefgh|abcdefgh|abcdefg
s_Data[threadIdx.x][threadIdx.y + i * BLOCKDIM_Y]= (input_height > i_y) ? input[i_y_offset] : input[(i_y - input_height) * input_pitch];
break;
case conv_filter_border_type::BORDER_REFLECT_101: // gfedcb|abcdefgh|gfedcba
s_Data[threadIdx.x][threadIdx.y + i * BLOCKDIM_Y]= (input_height > i_y) ? input[i_y_offset] : input[((input_height<<1) - 2 - i_y) * input_pitch];
break;
}
}
// Compute and store results
__syncthreads();
#pragma unroll
for (int i = HALO_STEPS; i < HALO_STEPS + RESULT_STEPS; i++) {
float sum = 0;
#pragma unroll
for (int j = -RADIUS; j <= RADIUS; j++) {
sum += filter.d[RADIUS + j] * s_Data[threadIdx.x][threadIdx.y + i * BLOCKDIM_Y + j];
}
sum *= scale;
// Saturate if non-float
if(sizeof(O) < sizeof(float)) {
sum = max(min(sum,255.0f),0.f);
}
output[i * BLOCKDIM_Y * output_pitch] = sum;
}
}
template <typename I, typename O>
__host__ void conv_filter_col_gpu(const I * d_image_in,
const int input_pitch,
O * d_image_out,
const int output_pitch,
const int width_px,
const int height_px,
const conv_filter_type_t filter_type,
const conv_filter_border_type_t border_type,
const bool skip_first_and_last_col,
const float scale,
cudaStream_t stream) {
const filter1x3_t & filter = conv_filter_get1x3(filter_type);
int width_px_out = width_px - (skip_first_and_last_col?2:0);
dim3 threads_per_block(BLOCKDIM_X, BLOCKDIM_Y);
dim3 blocks_per_grid((width_px_out + BLOCKDIM_X -1)/ BLOCKDIM_X,
(height_px + RESULT_STEPS * BLOCKDIM_Y -1) / (RESULT_STEPS * BLOCKDIM_Y));
// Note: we actually support radii up to BLOCKDIM_Y * HALO_STEPS, but the filter itself
// is not defined beyond radius 1
decltype (&conv_filter_col_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_ZERO>) kernel;
switch(border_type) {
case conv_filter_border_type::BORDER_SKIP:
case conv_filter_border_type::BORDER_ZERO: // 000000|abcdefgh|0000000
kernel = conv_filter_col_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_ZERO>;
break;
case conv_filter_border_type::BORDER_REPLICATE: // aaaaaa|abcdefgh|hhhhhhh
kernel = conv_filter_col_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_REPLICATE>;
break;
case conv_filter_border_type::BORDER_REFLECT: // fedcba|abcdefgh|hgfedcb
kernel = conv_filter_col_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_REFLECT>;
break;
case conv_filter_border_type::BORDER_WRAP: // cdefgh|abcdefgh|abcdefg
kernel = conv_filter_col_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_WRAP>;
break;
case conv_filter_border_type::BORDER_REFLECT_101: // gfedcb|abcdefgh|gfedcba
kernel = conv_filter_col_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_REFLECT_101>;
break;
default:
assert(0);
kernel = conv_filter_col_gpu_shm_kernel<I,O,1,conv_filter_border_type::BORDER_ZERO>;
break;
}
kernel<<<blocks_per_grid,threads_per_block,0,stream>>>(
d_image_out + (skip_first_and_last_col?1:0),
output_pitch,
d_image_in + (skip_first_and_last_col?1:0),
input_pitch,
width_px_out,
height_px,
filter,
scale);
CUDA_KERNEL_CHECK();
}
__host__ void conv_filter_col_cpu(const unsigned char * h_image_in,
const int input_pitch,
unsigned char * h_image_out,
const int output_pitch,
const int width_px,
const int height_px,
const conv_filter_type_t filter_type,
const conv_filter_border_type_t border_type,
const bool skip_first_and_last_col,
const float scale) {
const filter1x3_t & filter = conv_filter_get1x3(filter_type);
const int x_min = 0 + (skip_first_and_last_col?1:0);
const int x_max = (width_px-1) - (skip_first_and_last_col?1:0);
const int y_min = 0 + (border_type==conv_filter_border_type::BORDER_SKIP?1:0);
const int y_max = (height_px-1) - (border_type==conv_filter_border_type::BORDER_SKIP?1:0);
for(int y=y_min;y<=y_max;++y) {
for(int x=x_min;x<=x_max;++x) {
float accu = 0.0f;
for(int f_y=-1;f_y<=1;++f_y) {
int i_y = y+f_y;
switch(border_type) {
case conv_filter_border_type::BORDER_SKIP:
// nothing to do
break;
case conv_filter_border_type::BORDER_ZERO: // 000000|abcdefgh|0000000
// nothing to do
break;
case conv_filter_border_type::BORDER_REPLICATE: // aaaaaa|abcdefgh|hhhhhhh
i_y = min(max(i_y,0),y_max);
break;
case conv_filter_border_type::BORDER_REFLECT: // fedcba|abcdefgh|hgfedcb
if(i_y < y_min) {
i_y = -1*i_y - 1;
} else if(i_y > y_max) {
i_y = y_max - (i_y-height_px);
}
break;
case conv_filter_border_type::BORDER_WRAP: // cdefgh|abcdefgh|abcdefg
if(i_y < 0) {
i_y += height_px;
} else if(i_y > y_max) {
i_y -= height_px;
}
break;
case conv_filter_border_type::BORDER_REFLECT_101: // gfedcb|abcdefgh|gfedcba
if(i_y < 0) {
i_y *= -1;
} else if(i_y > y_max) {
i_y = 2*y_max - i_y;
}
break;
}
// Handling of BORDER_ZERO
accu += ((i_y < 0 || i_y >= height_px) ? 0.0f : h_image_in[i_y*input_pitch + x])*filter.d[f_y+1];
}
accu *= scale;
h_image_out[y*output_pitch + x] = static_cast<unsigned char>(min(max(accu,0.0f),255.0f));
}
}
}
// Explicit instantiations
INSTANTIATE_1D_COL(unsigned char, unsigned char);
INSTANTIATE_1D_COL(unsigned char, float);
INSTANTIATE_1D_COL(float, unsigned char);
INSTANTIATE_1D_COL(float, float);
} // namespace vilib
// https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:Atb",cudaGetErrorString(__err));\
} \
} while (0)
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* *--->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
// This forward declaration must go here, as CreateTexture2 is used before it is defined below.
void CreateTexture2(const GpuIds& gpuids, float* projectiondata,Geometry geo,cudaArray** d_cuArrTex,unsigned int nangles, cudaTextureObject_t *texImage,cudaStream_t* stream,int nStreamDevice,bool allocate);
__global__ void matrixConstantMultiply(const Geometry geo,float* image,float constant){
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
for(; idx<geo.nVoxelX* geo.nVoxelY *geo.nVoxelZ; idx+=gridDim.x*blockDim.x) {
image[idx]*=constant;
}
}
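// Editor's note: matrixConstantMultiply uses a grid-stride loop, so it can be launched with a fixed,
// modest grid (see the <<<60,MAXTREADS>>> launch further below) regardless of the volume size; each
// thread simply strides through the voxels in steps of gridDim.x*blockDim.x.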
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// The optimal values of two constants obtained by RB on NVIDIA Quadro K2200 (4 GB RAM, 640 CUDA cores) for 512^3 volume and 512^3 projections (512 proj, each 512 x 512) were:
// PROJ_PER_KERNEL = 32 or 16 (very similar times)
// VOXELS_PER_THREAD = 8
// Speedup of the entire FDK backprojection (not only kernel run, also memcpy etc.) was nearly 4x relative to the original (single projection, single voxel per thread) code.
// (e.g. 16.2 s vs. ~62 s).
const int PROJ_PER_KERNEL = 32; // Number of 2D projections to be analyzed by a single thread. This can be tweaked to see what works best. 32 was the optimal value in the paper by Zinsser and Keck.
const int VOXELS_PER_THREAD = 8; // Number of voxels to be computed by s single thread. Can be tweaked to see what works best. 4 was the optimal value in the paper by Zinsser and Keck.
// We have PROJ_PER_KERNEL projections and we need 7 parameters for each projection:
// deltaX, deltaY, deltaZ, xyzOrigin, offOrig, offDetec, source
// So we need to keep PROJ_PER_KERNEL*7 Point3D values in our parameter array FOR EACH CALL to our main kernel
// (they will be updated in the main loop before each kernel call).
__constant__ Point3D projParamsArray2Dev[7*PROJ_PER_KERNEL]; // Dev means it is on device
// We also need a corresponding array on the host side to be filled before each kernel call, then copied to the device (array in constant memory above)
// We also need to store sinAlpha, cosAlpha, COR, DSD and DSO for each projection (five floats per projection)
__constant__ float projSinCosArray2Dev[5*PROJ_PER_KERNEL];
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// END RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
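// Layout used by the code below, for projection j within a kernel call:
//   projParamsArray2Dev[7*j + 0..6] = {deltaX, deltaY, deltaZ, xyzOrigin, offOrig, offDetec, source}
//   projSinCosArray2Dev[5*j + 0..4] = {sin(alpha), cos(alpha), COR, DSD, DSO}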
//______________________________________________________________________________
//
// Function: kernelPixelBackprojection
//
// Description: Main FDK backprojection kernel
//______________________________________________________________________________
__global__ void kernelPixelBackprojection(const Geometry geo, float* image,const int currProjSetNumber, const int totalNoOfProjections, cudaTextureObject_t tex)
{
unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x;
// unsigned long startIndZ = blockIdx.z * blockDim.z + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
unsigned long startIndZ = blockIdx.z * VOXELS_PER_THREAD + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
//Make sure we don't go out of bounds
if (indX>=geo.nVoxelX || indY>=geo.nVoxelY || startIndZ>=geo.nVoxelZ)
return;
// We'll keep a local auxiliary array of values of a column of voxels that this thread will update
float voxelColumn[VOXELS_PER_THREAD];
// First we need to copy the current 3D volume values from the column to our auxiliary array so that we can then
// work on them (update them by computing values from multiple projections) locally - avoiding main memory reads/writes
int colIdx;
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the updated values go back to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
voxelColumn[colIdx] = image[idx]; // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one)
// We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory.
} // END copy 3D volume voxels to local array
// Now iterate through projections
#pragma unroll
for(int projNumber=0; projNumber<PROJ_PER_KERNEL; projNumber++)
{
// Get the current parameters from parameter arrays in constant memory.
int indAlpha = currProjSetNumber*PROJ_PER_KERNEL+projNumber; // This is the ABSOLUTE projection number in the projection array
// Our currImageVal will be updated by however many projections we have left in the "remainder" - that's OK.
if(indAlpha>=totalNoOfProjections)
break;
Point3D deltaX = projParamsArray2Dev[7*projNumber]; // 7*projNumber because we have 7 Point3D values per projection
Point3D deltaY = projParamsArray2Dev[7*projNumber+1];
Point3D deltaZ = projParamsArray2Dev[7*projNumber+2];
Point3D xyzOrigin = projParamsArray2Dev[7*projNumber+3];
Point3D xyzOffset = projParamsArray2Dev[7*projNumber+4];
Point3D uv0Offset = projParamsArray2Dev[7*projNumber+5];
Point3D S = projParamsArray2Dev[7*projNumber+6];
float sinalpha = projSinCosArray2Dev[5*projNumber]; // 5*projNumber because we have 5 float values per projection (sin, cos, COR, DSD, DSO)
float cosalpha = projSinCosArray2Dev[5*projNumber+1];
float COR = projSinCosArray2Dev[5*projNumber+2];
float DSD = projSinCosArray2Dev[5*projNumber+3];
float DSO = projSinCosArray2Dev[5*projNumber+4];
// Precomputations for the weights:
//Real coords of Source
// We already have S.x (geo.DSO), and S.y and S.z are always zero. We just need to rotate.
Point3D realS;
realS.x= DSO*cosalpha;
realS.y=-DSO*sinalpha;
realS.z=0;
Point3D realvoxel_init;
realvoxel_init.x=-geo.sVoxelX/2+geo.dVoxelX/2+xyzOffset.x;
realvoxel_init.y=-geo.sVoxelY/2+geo.dVoxelY/2+xyzOffset.y;
realvoxel_init.z=-geo.sVoxelZ/2+geo.dVoxelZ/2+xyzOffset.z;
// Real XYZ coordinates of Detector.
Point3D realD, realDaux;
// We know the index of the detector (u,v). Start from there.
realDaux.x=-(DSD-DSO);
// Now iterate through Z in our voxel column FOR A GIVEN PROJECTION
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values anyway (bounds checks will be done in the final loop where the values go to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
// "XYZ" in the scaled coordinate system of the current point. The image is rotated with the projection angles.
Point3D P;
P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x);
P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-COR/geo.dDetecU;
P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z);
// This is the vector defining the line from the source to the Voxel
float vectX,vectY,vectZ;
vectX=(P.x -S.x);
vectY=(P.y -S.y);
vectZ=(P.z -S.z);
// Get the coordinates in the detector UV where the mid point of the voxel is projected.
float t=__fdividef(DSO-DSD-S.x,vectX);
float y,z;
y=vectY*t+S.y;
z=vectZ*t+S.z;
float u,v;
u=y+(float)geo.nDetecU*0.5f;
v=z+(float)geo.nDetecV*0.5f;
#if IS_FOR_MATLAB_TIGRE
float sample=tex3D<float>(tex, v, u ,indAlpha+0.5f);
#else
float sample=tex3D<float>(tex, u, v ,indAlpha+0.5f);
#endif
float weigth=0;
//
//
//
// IMPORTANT: The weights are almost 50% of the computational time. Is there a way of speeding this up??
//
//Real coordinates of the voxel. Instead of reverting the transformation, it is less math (faster) to compute them from the indices.
Point3D realvoxel;
realvoxel.x=realvoxel_init.x+indX*geo.dVoxelX;
realvoxel.y=realvoxel_init.y+indY*geo.dVoxelY;
realvoxel.z=realvoxel_init.z+indZ*geo.dVoxelZ;
realDaux.y=(-geo.sDetecU+geo.dDetecU)*0.5f + u*geo.dDetecU +uv0Offset.x;
realD.z =(-geo.sDetecV+geo.dDetecV)*0.5f + v*geo.dDetecV +uv0Offset.y;
//rotate the detector
realD.x= realDaux.x*cosalpha + realDaux.y*sinalpha; //sin(-x)=-sin(x) , cos(-x)=cos(x)
realD.y=-realDaux.x*sinalpha + realDaux.y*cosalpha; //sin(-x)=-sin(x) , cos(-x)=cos(x)
float L,lsq;
L = __fsqrt_rd( (realS.x-realD.x)*(realS.x-realD.x)+ (realS.y-realD.y)*(realS.y-realD.y)+ (realD.z)*(realD.z)); // Sz=0 always.
lsq = (realS.x-realvoxel.x)*(realS.x-realvoxel.x)
+ (realS.y-realvoxel.y)*(realS.y-realvoxel.y)
+ (realS.z-realvoxel.z)*(realS.z-realvoxel.z);
weigth=__fdividef(L*L*L,(DSD*lsq));
// weigth=1;
// Get Value in the computed (U,V) and multiply by the corresponding weigth.
// indAlpha is the ABSOLUTE number of projection in the projection array (NOT the current number of projection set!)
voxelColumn[colIdx]+=sample* weigth;
} // END iterating through column of voxels
} // END iterating through multiple projections
// And finally copy the updated local voxelColumn array back to our 3D volume (main memory)
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the values go to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
image[idx] = voxelColumn[colIdx]; // Write the updated value back to the 3D volume (main memory).
// We accumulated into the local (register) array, avoiding repeated reads/writes to the slow main memory.
// According to references (Papenhausen), doing = is better than +=, since += requires main memory read followed by a write.
// We did all the reads into the local array at the BEGINNING of this kernel. According to Papenhausen, this type of read-write split is
// better for avoiding memory congestion.
} // END copy updated voxels from local array to our 3D volume
} // END kernelPixelBackprojection
//______________________________________________________________________________
//
// Function: voxel_backprojection2
//
// Description: Main host function for FDK backprojection (invokes the kernel)
//______________________________________________________________________________
int voxel_backprojection2(float * projections, Geometry geo, float* result,float const * const alphas, int nalpha, const GpuIds& gpuids){
// Prepare for MultiGPU
int deviceCount = gpuids.GetLength();
cudaCheckErrors("Device query fail");
if (deviceCount == 0) {
mexErrMsgIdAndTxt("Atb:Voxel_backprojection:GPUselect","There are no available device(s) that support CUDA\n");
}
// CODE assumes
// 1.-All available devices are usable by this code
// 2.-All available devices are equal (same model); if not, a warning is thrown
// Check the available devices, and if they are the same
if (!gpuids.AreEqualDevices()) {
mexWarnMsgIdAndTxt("Atb:Voxel_backprojection2:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed.");
}
int dev;
// Split the CT problem
unsigned int split_image;
unsigned int split_projections;
splitCTbackprojection(gpuids,geo,nalpha,&split_image,&split_projections);
// Create the arrays for the geometry. The main difference is that geo.offZ has been tuned for the
// image slices. The rest of the Geometry is the same
Geometry* geoArray=(Geometry*)malloc(split_image*deviceCount*sizeof(Geometry));
createGeoArray(split_image*deviceCount,geo,geoArray,nalpha);
// Now let's allocate all the image memory on the GPU, so we can use it later. If we have made our numbers correctly
// in the previous section this should leave enough space for the textures.
size_t num_bytes_img = (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geoArray[0].nVoxelZ* sizeof(float);
float** dimage=(float**)malloc(deviceCount*sizeof(float*));
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaMalloc((void**)&dimage[dev], num_bytes_img);
cudaCheckErrors("cudaMalloc fail");
}
//Page-lock (pin) host memory so the copies can be issued asynchronously.
// Let's try to make the host memory pinned:
// We already queried the GPUs and assumed they are the same, thus they should have the same attributes.
int isHostRegisterSupported = 0;
#if CUDART_VERSION >= 9020
cudaDeviceGetAttribute(&isHostRegisterSupported,cudaDevAttrHostRegisterSupported,gpuids[0]);
#endif
// Empirical testing shows that when the image is not split (which also implies the image is not very big), the time to
// pin the memory is greater than the time lost by launching the memcpys synchronously. Pinning is only worth it when the image is big.
if (isHostRegisterSupported && split_image>1){
cudaHostRegister(result, (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geo.nVoxelZ*(size_t)sizeof(float),cudaHostRegisterPortable);
}
if (isHostRegisterSupported ){
cudaHostRegister(projections, (size_t)geo.nDetecU*(size_t)geo.nDetecV*(size_t)nalpha*(size_t)sizeof(float),cudaHostRegisterPortable);
}
cudaCheckErrors("Error pinning memory");
//If it is the first time, let's make sure our image is zeroed.
int nStreamDevice=2;
int nStreams=deviceCount*nStreamDevice;
cudaStream_t* stream=(cudaStream_t*)malloc(nStreams*sizeof(cudaStream_t));
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
for (int i = 0; i < nStreamDevice; ++i){
cudaStreamCreate(&stream[i+dev*nStreamDevice]);
}
}
// Kernel auxiliary variables
Point3D* projParamsArray2Host;
cudaMallocHost((void**)&projParamsArray2Host,7*PROJ_PER_KERNEL*sizeof(Point3D));
float* projSinCosArray2Host;
cudaMallocHost((void**)&projSinCosArray2Host,5*PROJ_PER_KERNEL*sizeof(float));
// Texture object variables
cudaTextureObject_t *texProj;
cudaArray **d_cuArrTex;
texProj =(cudaTextureObject_t*)malloc(deviceCount*2*sizeof(cudaTextureObject_t));
d_cuArrTex =(cudaArray**)malloc(deviceCount*2*sizeof(cudaArray*));
unsigned int proj_split_overlap_number;
// Start with the main loop. The projection data needs to be allocated and deallocated in the main loop
// because, due to the nature of cudaArrays, we can not reuse them. This should not be a problem for the fast execution
// of the code, as repeated allocation and deallocation only happens when the projection data is very big,
// and therefore allocation time should be negligible; fluctuations of other computations should mask the time.
unsigned long long proj_linear_idx_start;
unsigned int current_proj_split_size,current_proj_overlap_split_size;
size_t num_bytes_img_curr;
size_t img_linear_idx_start;
float** partial_projection;
size_t* proj_split_size;
for(unsigned int img_slice=0;img_slice<split_image;img_slice++){
//
// Initialize the memory if it's the first time.
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaMemset(dimage[dev],0,num_bytes_img);
cudaCheckErrors("memset fail");
}
for( unsigned int proj=0;proj<split_projections;proj++){
// What is the size of the current chunk of projections we need?
current_proj_split_size=(nalpha+split_projections-1)/split_projections;
// if it's the last one it's probably less
current_proj_split_size=((proj+1)*current_proj_split_size<nalpha)? current_proj_split_size: nalpha-current_proj_split_size*proj;
// We are going to split it in the same amount of kernels we need to execute.
proj_split_overlap_number=(current_proj_split_size+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL;
// Create pointer to pointers of projections and precompute their location and size.
if(!proj && !img_slice){
partial_projection=(float**)malloc(current_proj_split_size*sizeof(float*));
proj_split_size=(size_t*)malloc(current_proj_split_size*sizeof(size_t));
}
for(unsigned int proj_block_split=0; proj_block_split<proj_split_overlap_number;proj_block_split++){
// Crop the last one, as it's likely not completely divisible.
// Now let's split this for simultaneous memcopy and compute.
// We want to make sure that if we can, we run PROJ_PER_KERNEL projections, to maximize kernel acceleration
// current_proj_overlap_split_size units = angles
current_proj_overlap_split_size=max((current_proj_split_size+proj_split_overlap_number-1)/proj_split_overlap_number,PROJ_PER_KERNEL);
current_proj_overlap_split_size=(proj_block_split<proj_split_overlap_number-1)?current_proj_overlap_split_size:current_proj_split_size-(proj_split_overlap_number-1)*current_proj_overlap_split_size;
//Get the linear index where the current memory chunk starts.
proj_linear_idx_start=(unsigned long long)((nalpha+split_projections-1)/split_projections)*(unsigned long long)proj*(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV;
proj_linear_idx_start+=proj_block_split*max((current_proj_split_size+proj_split_overlap_number-1)/proj_split_overlap_number,PROJ_PER_KERNEL)*(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV;
//Store result
proj_split_size[proj_block_split]=current_proj_overlap_split_size;
partial_projection[proj_block_split]=&projections[proj_linear_idx_start];
}
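// Worked example (editor's illustration, hypothetical numbers): with nalpha=100, split_projections=1
// and PROJ_PER_KERNEL=32, current_proj_split_size=100 and proj_split_overlap_number=ceil(100/32)=4,
// so the sub-splits hold max(ceil(100/4),32)=32, 32, 32 and 100-3*32=4 projections respectively,
// and each sub-split starts 32 projections (32*nDetecU*nDetecV floats) after the previous one.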
for(unsigned int proj_block_split=0; proj_block_split<proj_split_overlap_number;proj_block_split++){
// Now get the projections on memory
CreateTexture2(gpuids,
partial_projection[proj_block_split],geo,
&d_cuArrTex[(proj_block_split%2)*deviceCount],
proj_split_size[proj_block_split],
&texProj [(proj_block_split%2)*deviceCount],
stream, nStreamDevice,
(proj_block_split<2) && !proj && !img_slice);// Only allocate if it's the first 2 calls
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaStreamSynchronize(stream[dev*nStreamDevice+1]);
}
for (dev = 0; dev < deviceCount; dev++){
//Safety:
// Depending on the number of GPUs, an image slice can end up with zero height.
// Just break the loop if we reach that point.
if(geoArray[img_slice*deviceCount+dev].nVoxelZ==0)
break;
cudaSetDevice(gpuids[dev]);
int divx,divy,divz;
// RB: Use the optimal (in their tests) block size from paper by Zinsser and Keck (16 in x and 32 in y).
// I tried different sizes and shapes of blocks (tiles), but it does not appear to significantly affect throughput, so
// let's stick with the values from Zinsser and Keck.
divx=16;
divy=32;
divz=VOXELS_PER_THREAD; // We now only have 32 x 16 threads per block (flat tile, see below), BUT each thread works on a Z column of VOXELS_PER_THREAD voxels, so we effectively need fewer blocks!
dim3 grid((geo.nVoxelX+divx-1)/divx,
(geo.nVoxelY+divy-1)/divy,
(geoArray[img_slice*deviceCount+dev].nVoxelZ+divz-1)/divz);
dim3 block(divx,divy,1); // Note that we have 1 in the Z size, not divz, since each thread works on a vertical set of VOXELS_PER_THREAD voxels (so we only need a "flat" tile of threads, with depth of 1)
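// Worked example (editor's illustration, hypothetical sizes): for a 512x512x512 sub-volume this gives
// grid=(32,16,64) blocks of 16x32x1=512 threads, with each thread accumulating a column of
// VOXELS_PER_THREAD=8 voxels along Z.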
//////////////////////////////////////////////////////////////////////////////////////
// Main reconstruction loop: go through projections (rotation angles) and backproject
//////////////////////////////////////////////////////////////////////////////////////
// Since we'll have multiple projections processed by a SINGLE kernel call, compute how many
// kernel calls we'll need altogether.
unsigned int noOfKernelCalls = (proj_split_size[proj_block_split]+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_KERNEL
for (unsigned int i=0; i<noOfKernelCalls; i++){
// Now we need to generate and copy all data for PROJ_PER_KERNEL projections to constant memory so that our kernel can use it
unsigned int j;
for(j=0; j<PROJ_PER_KERNEL; j++){
unsigned int currProjNumber_slice=i*PROJ_PER_KERNEL+j;
unsigned int currProjNumber_global=i*PROJ_PER_KERNEL+j // index within kernel
+proj*(nalpha+split_projections-1)/split_projections // index of the global projection split
+proj_block_split*max(current_proj_split_size/proj_split_overlap_number,PROJ_PER_KERNEL); // index of the current overlap split
if(currProjNumber_slice>=proj_split_size[proj_block_split])
break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway.
if(currProjNumber_global>=nalpha)
break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway.
Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, offDetec,source;
float sinalpha,cosalpha;
geoArray[img_slice*deviceCount+dev].alpha=-alphas[currProjNumber_global*3];//we got 3 angles now.
geoArray[img_slice*deviceCount+dev].theta=-alphas[currProjNumber_global*3+1];
geoArray[img_slice*deviceCount+dev].psi =-alphas[currProjNumber_global*3+2];
sinalpha=sin(geoArray[img_slice*deviceCount+dev].alpha);
cosalpha=cos(geoArray[img_slice*deviceCount+dev].alpha);
projSinCosArray2Host[5*j]=sinalpha; // 5*j because we have 5 float values per projection (sin, cos, COR, DSD, DSO)
projSinCosArray2Host[5*j+1]=cosalpha;
projSinCosArray2Host[5*j+2]=geo.COR[currProjNumber_global];
projSinCosArray2Host[5*j+3]=geo.DSD[currProjNumber_global];
projSinCosArray2Host[5*j+4]=geo.DSO[currProjNumber_global];
computeDeltasCube(geoArray[img_slice*deviceCount+dev],currProjNumber_global,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source);
offOrig.x=geo.offOrigX[currProjNumber_global];
offOrig.y=geo.offOrigY[currProjNumber_global];
offOrig.z=geoArray[img_slice*deviceCount+dev].offOrigZ[currProjNumber_global];
offDetec.x=geo.offDetecU[currProjNumber_global];
offDetec.y=geo.offDetecV[currProjNumber_global];
offDetec.z=0;//unused
projParamsArray2Host[7*j] =deltaX; // 7*j because we have 7 Point3D values per projection
projParamsArray2Host[7*j+1]=deltaY;
projParamsArray2Host[7*j+2]=deltaZ;
projParamsArray2Host[7*j+3]=xyzOrigin;
projParamsArray2Host[7*j+4]=offOrig;
projParamsArray2Host[7*j+5]=offDetec;
projParamsArray2Host[7*j+6]=source;
} // END for (preparing params for kernel call)
// Copy the prepared parameter arrays to constant memory to make it available for the kernel
cudaMemcpyToSymbolAsync(projSinCosArray2Dev, projSinCosArray2Host, sizeof(float)*5*PROJ_PER_KERNEL,0,cudaMemcpyHostToDevice,stream[dev*nStreamDevice]);
cudaMemcpyToSymbolAsync(projParamsArray2Dev, projParamsArray2Host, sizeof(Point3D)*7*PROJ_PER_KERNEL,0,cudaMemcpyHostToDevice,stream[dev*nStreamDevice]);
cudaStreamSynchronize(stream[dev*nStreamDevice]);
kernelPixelBackprojection<<<grid,block,0,stream[dev*nStreamDevice]>>>(geoArray[img_slice*deviceCount+dev],dimage[dev],i,proj_split_size[proj_block_split],texProj[(proj_block_split%2)*deviceCount+dev]);
} // END for
//////////////////////////////////////////////////////////////////////////////////////
// END RB code, Main reconstruction loop: go through projections (rotation angles) and backproject
//////////////////////////////////////////////////////////////////////////////////////
}
} // END sub-split of current projection chunk
} // END projection splits
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
matrixConstantMultiply<<<60,MAXTREADS,0,stream[dev*nStreamDevice]>>>( geoArray[img_slice*deviceCount+dev],dimage[dev],geo.dVoxelX*geo.dVoxelY*geo.dVoxelZ/(geo.dDetecU*geo.dDetecV));
}
// Now we need to take the image out of the GPU
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaStreamSynchronize(stream[dev*nStreamDevice]);
num_bytes_img_curr=(size_t)geoArray[img_slice*deviceCount+dev].nVoxelX*(size_t)geoArray[img_slice*deviceCount+dev].nVoxelY*(size_t)geoArray[img_slice*deviceCount+dev].nVoxelZ*sizeof(float);
img_linear_idx_start=(size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geoArray[0].nVoxelZ*(size_t)(img_slice*deviceCount+dev);
cudaMemcpyAsync(&result[img_linear_idx_start], dimage[dev], num_bytes_img_curr, cudaMemcpyDeviceToHost,stream[dev*nStreamDevice+1]);
}
} // end image splits
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaDeviceSynchronize();
}
// Clean the GPU
bool two_buffers_used=((((nalpha+split_projections-1)/split_projections)+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL)>1;
for(unsigned int i=0; i<2;i++){ // 2 buffers (if needed, maybe only 1)
if (!two_buffers_used && i==1)
break;
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaDestroyTextureObject(texProj[i*deviceCount+dev]);
cudaFreeArray(d_cuArrTex[i*deviceCount+dev]);
}
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaFree(dimage[dev]);
}
cudaFreeHost(projSinCosArray2Host);
cudaFreeHost(projParamsArray2Host);
free(partial_projection);
free(proj_split_size);
freeGeoArray(split_image*deviceCount,geoArray);
if (isHostRegisterSupported && split_image>1){
cudaHostUnregister(result);
}
if (isHostRegisterSupported){
cudaHostUnregister(projections);
}
for (int i = 0; i < nStreams; ++i)
cudaStreamDestroy(stream[i]);
cudaCheckErrors("cudaFree fail");
// cudaDeviceReset(); // For the Nvidia Visual Profiler
return 0;
} // END voxel_backprojection2
void CreateTexture2(const GpuIds& gpuids, float* projectiondata,Geometry geo,cudaArray** d_cuArrTex,unsigned int nangles, cudaTextureObject_t *texImage,cudaStream_t* stream,int nStreamDevice,bool allocate){
//size_t size_image=geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ;
int num_devices = gpuids.GetLength();
#if IS_FOR_MATLAB_TIGRE
const cudaExtent extent =make_cudaExtent(geo.nDetecV, geo.nDetecU, nangles);
#else
const cudaExtent extent =make_cudaExtent(geo.nDetecU, geo.nDetecV, nangles);
#endif
if (allocate){
for (unsigned int dev = 0; dev < num_devices; dev++){
cudaSetDevice(gpuids[dev]);
//cudaArray Descriptor
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
//cuda Array
cudaMalloc3DArray(&d_cuArrTex[dev], &channelDesc, extent);
}
}
for (unsigned int dev = 0; dev < num_devices; dev++){
cudaSetDevice(gpuids[dev]);
cudaMemcpy3DParms copyParams = {0};
//Array creation
copyParams.srcPtr = make_cudaPitchedPtr((void *)projectiondata, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_cuArrTex[dev];
copyParams.extent = extent;
copyParams.kind = cudaMemcpyHostToDevice;
cudaMemcpy3DAsync(&copyParams,stream[dev*nStreamDevice+1]);
}
//Array creation End
for (unsigned int dev = 0; dev < num_devices; dev++){
cudaSetDevice(gpuids[dev]);
cudaResourceDesc texRes;
memset(&texRes, 0, sizeof(cudaResourceDesc));
texRes.resType = cudaResourceTypeArray;
texRes.res.array.array = d_cuArrTex[dev];
cudaTextureDesc texDescr;
memset(&texDescr, 0, sizeof(cudaTextureDesc));
texDescr.normalizedCoords = false;
texDescr.filterMode = cudaFilterModeLinear;
texDescr.addressMode[0] = cudaAddressModeBorder;
texDescr.addressMode[1] = cudaAddressModeBorder;
texDescr.addressMode[2] = cudaAddressModeBorder;
texDescr.readMode = cudaReadModeElementType;
cudaCreateTextureObject(&texImage[dev], &texRes, &texDescr, NULL);
}
}
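// Editor's note: the caller above keeps two sets of cudaArrays/texture objects per device (indexed by
// (proj_block_split%2)*deviceCount) and only allocates them on the first two calls, so consecutive
// projection chunks alternate buffers; together with the dedicated copy stream
// (stream[dev*nStreamDevice+1]) this appears intended to overlap the upload of one chunk with the
// backprojection kernels of the other.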
#ifndef BACKPROJECTION_HPP
void splitCTbackprojection(const GpuIds& gpuids, Geometry geo,int nalpha, unsigned int* split_image, unsigned int * split_projections){
// We don't know if the devices are being used by something else, so let's check and only use the amount of memory we need.
size_t mem_GPU_global;
checkFreeMemory(gpuids, &mem_GPU_global);
const int deviceCount = gpuids.GetLength();
// Compute how much memory each of the relevant memory pieces need
size_t mem_image= (unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY*(unsigned long long)geo.nVoxelZ*sizeof(float);
size_t mem_proj= (unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV*sizeof(float);
// Does everything fit in the GPU?
if(mem_image/deviceCount+mem_proj*PROJ_PER_KERNEL*2<mem_GPU_global){
// Everything fits; no additional splitting needed (the image is only divided across the GPUs)
*split_image=1;
*split_projections=1;
}
// We know we need to split, but:
// Does all the image fit in the GPU, with some slack for a stack of projections??
else
{
// As we can overlap memcpys from H2D of the projections, we should then minimize the amount of image splits.
// Lets assume to start with that we only need 1 stack of PROJ_PER_KERNEL projections. The rest is for the image.
size_t mem_free=mem_GPU_global-2*mem_proj*PROJ_PER_KERNEL;
*split_image=(mem_image/deviceCount+mem_free-1)/mem_free;
// Now knowing how many splits we have for images, we can recompute how many slices of projections actually
// fit on the GPU. Must be more than 0 obviously.
mem_free=mem_GPU_global-(mem_image/deviceCount)/(*split_image); // NOTE: There is some rounding error, but it's on the order of bytes, and we keep 5% of the GPU free just in case. We are safe.
*split_projections=(mem_proj*PROJ_PER_KERNEL*2+mem_free-1)/mem_free;
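// Worked example (editor's illustration, hypothetical numbers): single GPU with ~2 GB usable,
// a 1024^3 float volume (mem_image ~ 4.3 GB) and 1024x1024 projections (mem_proj ~ 4 MB,
// so 2*32 projections ~ 268 MB): split_image = ceil(4.3 GB / (2 GB - 268 MB)) = 3, and with
// ~0.57 GB left per image slice the 268 MB projection buffer fits, so split_projections = 1.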
}
}
void computeDeltasCube(Geometry geo,int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D* S)
{
Point3D P, Px,Py,Pz;
// Get coords of Img(0,0,0)
P.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i];
P.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i];
P.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i];
// Get coords of the next voxel in each direction
Px.x=P.x+geo.dVoxelX; Py.x=P.x; Pz.x=P.x;
Px.y=P.y; Py.y=P.y+geo.dVoxelY; Pz.y=P.y;
Px.z=P.z; Py.z=P.z; Pz.z=P.z+geo.dVoxelZ;
// Rotate the image (this is equivalent to rotating the source and detector): Rz Ry Rz (ZYZ Euler angles, transposed)
eulerZYZT(geo,&P);
eulerZYZT(geo,&Px);
eulerZYZT(geo,&Py);
eulerZYZT(geo,&Pz);
//detector offset
P.z =P.z-geo.offDetecV[i]; P.y =P.y-geo.offDetecU[i];
Px.z =Px.z-geo.offDetecV[i]; Px.y =Px.y-geo.offDetecU[i];
Py.z =Py.z-geo.offDetecV[i]; Py.y =Py.y-geo.offDetecU[i];
Pz.z =Pz.z-geo.offDetecV[i]; Pz.y =Pz.y-geo.offDetecU[i];
//Detector Roll pitch Yaw
//
//
// first, we need to offset everything so (0,0,0) is the center of the detector
// Only X is required for that
P.x=P.x+(geo.DSD[i]-geo.DSO[i]);
Px.x=Px.x+(geo.DSD[i]-geo.DSO[i]);
Py.x=Py.x+(geo.DSD[i]-geo.DSO[i]);
Pz.x=Pz.x+(geo.DSD[i]-geo.DSO[i]);
rollPitchYawT(geo,i,&P);
rollPitchYawT(geo,i,&Px);
rollPitchYawT(geo,i,&Py);
rollPitchYawT(geo,i,&Pz);
P.x=P.x-(geo.DSD[i]-geo.DSO[i]);
Px.x=Px.x-(geo.DSD[i]-geo.DSO[i]);
Py.x=Py.x-(geo.DSD[i]-geo.DSO[i]);
Pz.x=Pz.x-(geo.DSD[i]-geo.DSO[i]);
//Done for P, now source
Point3D source;
source.x=geo.DSD[i]; //already offset for rotation
source.y=-geo.offDetecU[i];
source.z=-geo.offDetecV[i];
rollPitchYawT(geo,i,&source);
source.x=source.x-(geo.DSD[i]-geo.DSO[i]);// source.y=source.y-auxOff.y; source.z=source.z-auxOff.z;
// mexPrintf("%f,%f,%f\n",source.x,source.y,source.z);
// Scale coords so detector pixels are 1x1
P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU;
Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU;
Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU;
Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU;
source.z=source.z/geo.dDetecV; source.y=source.y/geo.dDetecU;
// get deltas of the changes in voxels
deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z;
deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z;
deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z;
*xyzorigin=P;
*S=source;
} // END computeDeltasCube
void rollPitchYawT(Geometry geo,int i, Point3D* point){
Point3D auxPoint;
auxPoint.x=point->x;
auxPoint.y=point->y;
auxPoint.z=point->z;
point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
+sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.y
-sin(geo.dPitch[i])*auxPoint.z;
point->y=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
+cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.z;
point->z=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.y
+cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z;
}
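// Editor's note: the matrix applied above is the transpose (i.e. inverse) of
// R = Rz(dRoll)*Ry(dPitch)*Rx(dYaw), hence the trailing T in the function name.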
void checkFreeMemory(const GpuIds& gpuids,size_t *mem_GPU_global){
size_t memfree;
size_t memtotal;
const int deviceCount = gpuids.GetLength();
for (int dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaMemGetInfo(&memfree,&memtotal);
if(dev==0) *mem_GPU_global=memfree;
if(memfree<memtotal/2){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
}
cudaCheckErrors("Check mem error");
*mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
}
*mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
//*mem_GPU_global= insert your known number here, in bytes.
}
#endif
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/filters.hpp"
namespace cv { namespace cuda { namespace device
{
namespace imgproc
{
template <typename Ptr2D, typename T> __global__ void remap(const Ptr2D src, const PtrStepf mapx, const PtrStepf mapy, PtrStepSz<T> dst)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < dst.cols && y < dst.rows)
{
const float xcoo = mapx.ptr(y)[x];
const float ycoo = mapy.ptr(y)[x];
dst.ptr(y)[x] = saturate_cast<T>(src(ycoo, xcoo));
}
}
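// Editor's note (illustrative sketch, not part of OpenCV): conceptually the kernel above computes
// dst(y, x) = src(mapy(y, x), mapx(y, x)), with interpolation and border handling delegated to the
// Ptr2D source wrapper. A minimal host reference for the nearest-neighbour, zero-border case:
static void remap_nearest_reference(const float* src, int src_rows, int src_cols,
                                    const float* mapx, const float* mapy,
                                    float* dst, int dst_rows, int dst_cols)
{
    for (int y = 0; y < dst_rows; ++y)
    {
        for (int x = 0; x < dst_cols; ++x)
        {
            // Round the floating-point source coordinates to the nearest integer pixel.
            const int sx = static_cast<int>(mapx[y * dst_cols + x] + 0.5f);
            const int sy = static_cast<int>(mapy[y * dst_cols + x] + 0.5f);
            const bool inside = (sx >= 0 && sx < src_cols && sy >= 0 && sy < src_rows);
            dst[y * dst_cols + x] = inside ? src[sy * src_cols + sx] : 0.0f;
        }
    }
}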
template <template <typename> class Filter, template <typename> class B, typename T> struct RemapDispatcherStream
{
static void call(PtrStepSz<T> src, PtrStepSzf mapx, PtrStepSzf mapy, PtrStepSz<T> dst, const float* borderValue, cudaStream_t stream, bool)
{
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type work_type;
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
B<work_type> brd(src.rows, src.cols, VecTraits<work_type>::make(borderValue));
BorderReader< PtrStep<T>, B<work_type> > brdSrc(src, brd);
Filter< BorderReader< PtrStep<T>, B<work_type> > > filter_src(brdSrc);
remap<<<grid, block, 0, stream>>>(filter_src, mapx, mapy, dst);
cudaSafeCall( cudaGetLastError() );
}
};
template <template <typename> class Filter, template <typename> class B, typename T> struct RemapDispatcherNonStream
{
static void call(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, PtrStepSzf mapx, PtrStepSzf mapy, PtrStepSz<T> dst, const float* borderValue, bool)
{
(void)srcWhole;
(void)xoff;
(void)yoff;
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type work_type;
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
B<work_type> brd(src.rows, src.cols, VecTraits<work_type>::make(borderValue));
BorderReader< PtrStep<T>, B<work_type> > brdSrc(src, brd);
Filter< BorderReader< PtrStep<T>, B<work_type> > > filter_src(brdSrc);
remap<<<grid, block>>>(filter_src, mapx, mapy, dst);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
};
#define OPENCV_CUDA_IMPLEMENT_REMAP_TEX(type) \
texture< type , cudaTextureType2D> tex_remap_ ## type (0, cudaFilterModePoint, cudaAddressModeClamp); \
struct tex_remap_ ## type ## _reader \
{ \
typedef type elem_type; \
typedef int index_type; \
int xoff, yoff; \
tex_remap_ ## type ## _reader (int xoff_, int yoff_) : xoff(xoff_), yoff(yoff_) {} \
__device__ __forceinline__ elem_type operator ()(index_type y, index_type x) const \
{ \
return tex2D(tex_remap_ ## type , x + xoff, y + yoff); \
} \
}; \
template <template <typename> class Filter, template <typename> class B> struct RemapDispatcherNonStream<Filter, B, type> \
{ \
static void call(PtrStepSz< type > src, PtrStepSz< type > srcWhole, int xoff, int yoff, PtrStepSzf mapx, PtrStepSzf mapy, \
PtrStepSz< type > dst, const float* borderValue, bool cc20) \
{ \
typedef typename TypeVec<float, VecTraits< type >::cn>::vec_type work_type; \
dim3 block(32, cc20 ? 8 : 4); \
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); \
bindTexture(&tex_remap_ ## type , srcWhole); \
tex_remap_ ## type ##_reader texSrc(xoff, yoff); \
B<work_type> brd(src.rows, src.cols, VecTraits<work_type>::make(borderValue)); \
BorderReader< tex_remap_ ## type ##_reader, B<work_type> > brdSrc(texSrc, brd); \
Filter< BorderReader< tex_remap_ ## type ##_reader, B<work_type> > > filter_src(brdSrc); \
remap<<<grid, block>>>(filter_src, mapx, mapy, dst); \
cudaSafeCall( cudaGetLastError() ); \
cudaSafeCall( cudaDeviceSynchronize() ); \
} \
}; \
template <template <typename> class Filter> struct RemapDispatcherNonStream<Filter, BrdReplicate, type> \
{ \
static void call(PtrStepSz< type > src, PtrStepSz< type > srcWhole, int xoff, int yoff, PtrStepSzf mapx, PtrStepSzf mapy, \
PtrStepSz< type > dst, const float*, bool) \
{ \
dim3 block(32, 8); \
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); \
bindTexture(&tex_remap_ ## type , srcWhole); \
tex_remap_ ## type ##_reader texSrc(xoff, yoff); \
if (srcWhole.cols == src.cols && srcWhole.rows == src.rows) \
{ \
Filter< tex_remap_ ## type ##_reader > filter_src(texSrc); \
remap<<<grid, block>>>(filter_src, mapx, mapy, dst); \
} \
else \
{ \
BrdReplicate<type> brd(src.rows, src.cols); \
BorderReader< tex_remap_ ## type ##_reader, BrdReplicate<type> > brdSrc(texSrc, brd); \
Filter< BorderReader< tex_remap_ ## type ##_reader, BrdReplicate<type> > > filter_src(brdSrc); \
remap<<<grid, block>>>(filter_src, mapx, mapy, dst); \
} \
cudaSafeCall( cudaGetLastError() ); \
cudaSafeCall( cudaDeviceSynchronize() ); \
} \
};
OPENCV_CUDA_IMPLEMENT_REMAP_TEX(uchar)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(uchar2)
OPENCV_CUDA_IMPLEMENT_REMAP_TEX(uchar4)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(schar)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(char2)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(char4)
OPENCV_CUDA_IMPLEMENT_REMAP_TEX(ushort)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(ushort2)
OPENCV_CUDA_IMPLEMENT_REMAP_TEX(ushort4)
OPENCV_CUDA_IMPLEMENT_REMAP_TEX(short)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(short2)
OPENCV_CUDA_IMPLEMENT_REMAP_TEX(short4)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(int)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(int2)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(int4)
OPENCV_CUDA_IMPLEMENT_REMAP_TEX(float)
//OPENCV_CUDA_IMPLEMENT_REMAP_TEX(float2)
OPENCV_CUDA_IMPLEMENT_REMAP_TEX(float4)
#undef OPENCV_CUDA_IMPLEMENT_REMAP_TEX
template <template <typename> class Filter, template <typename> class B, typename T> struct RemapDispatcher
{
static void call(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, PtrStepSzf mapx, PtrStepSzf mapy,
PtrStepSz<T> dst, const float* borderValue, cudaStream_t stream, bool cc20)
{
if (stream == 0)
RemapDispatcherNonStream<Filter, B, T>::call(src, srcWhole, xoff, yoff, mapx, mapy, dst, borderValue, cc20);
else
RemapDispatcherStream<Filter, B, T>::call(src, mapx, mapy, dst, borderValue, stream, cc20);
}
};
template <typename T> void remap_gpu(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap,
PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20)
{
typedef void (*caller_t)(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap,
PtrStepSz<T> dst, const float* borderValue, cudaStream_t stream, bool cc20);
static const caller_t callers[3][5] =
{
{
RemapDispatcher<PointFilter, BrdConstant, T>::call,
RemapDispatcher<PointFilter, BrdReplicate, T>::call,
RemapDispatcher<PointFilter, BrdReflect, T>::call,
RemapDispatcher<PointFilter, BrdWrap, T>::call,
RemapDispatcher<PointFilter, BrdReflect101, T>::call
},
{
RemapDispatcher<LinearFilter, BrdConstant, T>::call,
RemapDispatcher<LinearFilter, BrdReplicate, T>::call,
RemapDispatcher<LinearFilter, BrdReflect, T>::call,
RemapDispatcher<LinearFilter, BrdWrap, T>::call,
RemapDispatcher<LinearFilter, BrdReflect101, T>::call
},
{
RemapDispatcher<CubicFilter, BrdConstant, T>::call,
RemapDispatcher<CubicFilter, BrdReplicate, T>::call,
RemapDispatcher<CubicFilter, BrdReflect, T>::call,
RemapDispatcher<CubicFilter, BrdWrap, T>::call,
RemapDispatcher<CubicFilter, BrdReflect101, T>::call
}
};
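// Note (editor's assumption): the row index is the interpolation mode (0 = nearest/point,
// 1 = linear, 2 = cubic) and the column index is the border mode (0 = constant, 1 = replicate,
// 2 = reflect, 3 = wrap, 4 = reflect_101), matching the ordering of the dispatcher entries above.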
callers[interpolation][borderMode](static_cast< PtrStepSz<T> >(src), static_cast< PtrStepSz<T> >(srcWhole), xoff, yoff, xmap, ymap,
static_cast< PtrStepSz<T> >(dst), borderValue, stream, cc20);
}
template void remap_gpu<uchar >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void remap_gpu<uchar2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<uchar3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<uchar4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void remap_gpu<schar>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void remap_gpu<char2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void remap_gpu<char3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void remap_gpu<char4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<ushort >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void remap_gpu<ushort2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<ushort3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<ushort4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<short >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void remap_gpu<short2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<short3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<short4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void remap_gpu<int >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void remap_gpu<int2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void remap_gpu<int3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void remap_gpu<int4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<float >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
//template void remap_gpu<float2>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<float3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<float4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
} // namespace imgproc
}}} // namespace cv { namespace cuda { namespace device
#endif /* CUDA_DISABLER */
#include <nbla/array.hpp>
#include <nbla/variable.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/batch_normalization.hpp>
#include <nbla/cuda/limits.hpp>
//#include <nbla/cuda/function/kernel/batch_normalization.cuh>
#include "kernel/batch_normalization.cu"
#define BATCH_NORMALIZATION_USE_PARALLEL_REDUCTION
namespace nbla {
template <typename T>
void BatchNormalizationCuda<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
BatchNormalization<T>::setup_impl(inputs, outputs);
v_dmean_.reshape(Shape_t{this->size1_}, true);
v_dvar_.reshape(Shape_t{this->size1_}, true);
#ifdef BATCH_NORMALIZATION_USE_PARALLEL_REDUCTION
// setup for transpose
const int ndim = inputs[0]->ndim();
// for transpose
v_axes_.reshape(Shape_t{ndim}, true);
v_in_strides_.reshape(Shape_t{ndim}, true);
v_out_strides_.reshape(Shape_t{ndim}, true);
v_in_shape_.reshape(Shape_t{ndim}, true);
v_out_shape_.reshape(Shape_t{ndim}, true);
v_din_trans_.reshape(inputs[0]->shape(), true);
// work memory for data of each axis
v_inv_sqrt_variance_.reshape(Shape_t{this->size1_}, true);
v_t_.reshape(Shape_t{this->size1_}, true);
// work memory for each block data of shuffle reduction
this->blocks =
min((this->size02_ + NBLA_CUDA_NUM_THREADS - 1) / NBLA_CUDA_NUM_THREADS,
Size_t{1024});
v_mean_reduction_space_.reshape(Shape_t{blocks}, true);
v_variance_reduction_space_.reshape(Shape_t{blocks}, true);
v_tmp_reduction_space_.reshape(Shape_t{blocks}, true);
// make shape for transpose
Context cpu; // CPU Context
int *p_axes = v_axes_.cast_data_and_get_pointer<int>(cpu, true);
int *p_in_strides = v_in_strides_.cast_data_and_get_pointer<int>(cpu, true);
int *p_out_strides = v_out_strides_.cast_data_and_get_pointer<int>(cpu, true);
int *p_out_shape = v_out_shape_.cast_data_and_get_pointer<int>(cpu, true);
int *p_in_shape = v_in_shape_.cast_data_and_get_pointer<int>(cpu, true);
for (int i = 0; i < ndim; p_axes[i] = i, ++i)
;
if (this->axes_[0] != 0) {
p_axes[0] = this->axes_[0];
p_axes[this->axes_[0]] = 0;
}
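// Example (editor's illustration): for an NCHW input with axes_[0] == 1 (normalize over channels),
// p_axes becomes {1, 0, 2, 3}, i.e. the reduction axis is swapped to the front so that the
// transposed buffer v_in_trans_ has shape (C, N, H, W).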
Shape_t shape(ndim);
for (int i = 0; i < ndim; ++i)
shape[i] = inputs[0]->shape()[p_axes[i]];
v_in_trans_.reshape(shape, true);
for (int i = 0; i < ndim; ++i) {
p_in_strides[i] = inputs[0]->strides()[i];
p_out_strides[i] = v_in_trans_.strides()[i];
p_in_shape[i] = inputs[0]->shape()[i];
p_out_shape[i] = v_in_trans_.shape()[i];
}
#endif
}
template <class T>
void BatchNormalizationCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(std::stoi(this->ctx_.device_id));
if (this->batch_stat_) { // Training mode.
forward_impl_batch(inputs, outputs, true /* update_inputs */);
} else { // Testing mode.
forward_impl_global(inputs, outputs);
}
}
template <class T>
void BatchNormalizationCuda<T>::recompute_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(std::stoi(this->ctx_.device_id));
if (this->batch_stat_) { // Training mode.
forward_impl_batch(inputs, outputs, false /* update_inputs */);
} else { // Testing mode.
forward_impl_global(inputs, outputs);
}
}
template <class T>
void BatchNormalizationCuda<T>::forward_impl_batch(const Variables &inputs,
const Variables &outputs,
const bool update_inputs) {
// Check whether it outputs batch mean and var.
Variable *batch_mean = &this->mean_;
Variable *batch_var = &this->var_;
if (outputs.size() == 3) {
batch_mean = outputs[1];
batch_var = outputs[2];
}
// Inputs
const int b_idx = this->b_idx_;
const int g_idx = this->g_idx_;
const int m_idx = this->m_idx_;
const int v_idx = this->v_idx_;
const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_);
const Tc *beta = this->no_bias_
? nullptr
: inputs[b_idx]->get_data_pointer<Tc>(this->ctx_);
const Tc *gamma = this->no_scale_
? nullptr
: inputs[g_idx]->get_data_pointer<Tc>(this->ctx_);
// Output
Tc *y = outputs[0]->cast_data_and_get_pointer<Tc>(this->ctx_, true);
Tc *m =
batch_mean->cast_data_and_get_pointer<Tc>(this->ctx_, true); // batch mean
Tc *v =
batch_var->cast_data_and_get_pointer<Tc>(this->ctx_, true); // batch var
// Inputs/Outputs
Tc *rm =
!update_inputs ? nullptr : inputs[m_idx]->cast_data_and_get_pointer<Tc>(
this->ctx_); // running mean
Tc *rv =
!update_inputs ? nullptr : inputs[v_idx]->cast_data_and_get_pointer<Tc>(
this->ctx_); // running var
#ifdef BATCH_NORMALIZATION_USE_PARALLEL_REDUCTION
const int ndim = inputs[0]->ndim();
auto get_ = [this](Variable &var) {
return var.get_data_pointer<int>(this->ctx_);
};
auto get_data_ptr_ = [this](Variable &var) {
return var.cast_data_and_get_pointer<Tc>(this->ctx_);
};
const int *axes = get_(this->v_axes_);
const int *in_strides = get_(this->v_in_strides_);
const int *out_strides = get_(this->v_out_strides_);
const int *in_shape = get_(this->v_in_shape_);
const int *out_shape = get_(this->v_out_shape_);
Tc *in_trans = get_data_ptr_(this->v_in_trans_);
Tc *mean_reduction_space = get_data_ptr_(this->v_mean_reduction_space_);
Tc *variance_reduction_space =
get_data_ptr_(this->v_variance_reduction_space_);
Tc *inv_sqrt_variance = get_data_ptr_(this->v_inv_sqrt_variance_);
forward_batch_parallel_reduction(
this->size0_, this->size1_, this->size2_, ndim, axes, in_strides,
in_shape, out_strides, out_shape, this->decay_rate_, this->eps_, x, gamma,
beta, in_trans, m, v, rm, rv, y, mean_reduction_space,
variance_reduction_space, inv_sqrt_variance);
#else
forward_batch(this->size0_, this->size1_, this->size2_, this->decay_rate_,
this->eps_, x, gamma, beta, m, v, rm, rv, y);
#endif
}
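// Editor's note (sketch of the math, not NNabla API): per channel c the batch-mode forward pass computes
//   m[c] = mean of x over the non-channel axes,  v[c] = mean of (x - m[c])^2,
//   y = gamma[c] * (x - m[c]) / sqrt(v[c] + eps) + beta[c],
// and presumably updates the running statistics as rm[c] <- decay_rate*rm[c] + (1-decay_rate)*m[c]
// (similarly for rv[c], possibly with an unbiased-variance correction); this is what
// forward_batch / forward_batch_parallel_reduction implement on the GPU.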
template <class T>
void BatchNormalizationCuda<T>::forward_impl_global(const Variables &inputs,
const Variables &outputs) {
// Inputs
const int b_idx = this->b_idx_;
const int g_idx = this->g_idx_;
const int m_idx = this->m_idx_;
const int v_idx = this->v_idx_;
const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_);
const Tc *beta = this->no_bias_
? nullptr
: inputs[b_idx]->get_data_pointer<Tc>(this->ctx_);
const Tc *gamma = this->no_scale_
? nullptr
: inputs[g_idx]->get_data_pointer<Tc>(this->ctx_);
const Tc *rm =
inputs[m_idx]->get_data_pointer<Tc>(this->ctx_); // running mean
const Tc *rv = inputs[v_idx]->get_data_pointer<Tc>(this->ctx_); // running var
// Output
Tc *y = outputs[0]->cast_data_and_get_pointer<Tc>(this->ctx_, true);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
forward_global_kernel, this->size1_ * this->size02_, this->size0_,
this->size1_, this->size2_, this->size02_, this->size12_,
this->decay_rate_, this->eps_, x, rm, rv, gamma, beta, y);
}
template <class T>
void BatchNormalizationCuda<T>::backward_impl(
const Variables &inputs, const Variables &outputs,
const vector<bool> &propagate_down, const vector<bool> &accum) {
cuda_set_device(std::stoi(this->ctx_.device_id));
if (this->batch_stat_) { // Training mode.
backward_impl_batch(inputs, outputs, propagate_down, accum);
} else { // Testing mode.
this->backward_impl_global(inputs, outputs, propagate_down, accum);
}
}
template <class T>
void BatchNormalizationCuda<T>::backward_impl_batch(
const Variables &inputs, const Variables &outputs,
const vector<bool> &propagate_down, const vector<bool> &accum) {
if (!(propagate_down[0] || propagate_down[1] || propagate_down[2])) {
return;
}
const int b_idx = this->b_idx_;
const int g_idx = this->g_idx_;
const bool pd_beta = !this->no_bias_ && propagate_down[b_idx];
const bool pd_gamma = !this->no_scale_ && propagate_down[g_idx];
const bool accum_beta = !this->no_bias_ && accum[b_idx];
const bool accum_gamma = !this->no_scale_ && accum[g_idx];
// Check whether it outputs batch mean/var.
Variable *batch_mean = &this->mean_;
Variable *batch_var = &this->var_;
if (outputs.size() == 3) {
batch_mean = outputs[1];
batch_var = outputs[2];
}
// Common inputs wrt. gradient.
const Tc *dy = outputs[0]->get_grad_pointer<Tc>(this->ctx_);
const Tc *m = batch_mean->get_data_pointer<Tc>(this->ctx_);
const Tc *v = batch_var->get_data_pointer<Tc>(this->ctx_);
const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_);
auto get_data_ptr_ = [this](Variable &var) {
return var.cast_data_and_get_pointer<Tc>(this->ctx_);
};
#ifdef BATCH_NORMALIZATION_USE_PARALLEL_REDUCTION
int ndim = inputs[0]->ndim();
auto get_ = [this](Variable &var) {
return var.get_data_pointer<int>(this->ctx_);
};
const int *axes = get_(this->v_axes_);
const int *in_strides = get_(this->v_in_strides_);
const int *out_strides = get_(this->v_out_strides_);
const int *in_shape = get_(this->v_in_shape_);
const int *out_shape = get_(this->v_out_shape_);
// TODO: write_only flags
Tc *d_x_trans = get_data_ptr_(this->v_in_trans_);
Tc *d_dy_trans = get_data_ptr_(this->v_din_trans_);
Tc *mean_reduction_space = get_data_ptr_(this->v_mean_reduction_space_);
Tc *variance_reduction_space =
get_data_ptr_(this->v_variance_reduction_space_);
Tc *inv_sqrt_variance = get_data_ptr_(this->v_inv_sqrt_variance_);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
transpose_2value_kernel, this->size1_ * this->size02_, ndim, axes,
in_strides, out_strides, out_shape, x, dy, d_x_trans, d_dy_trans);
#endif
if (propagate_down[0]) {
if (!accum[0])
inputs[0]->grad()->zero(); // TODO: optimize this out if possible
Tc *dx = inputs[0]->cast_grad_and_get_pointer<Tc>(this->ctx_, false);
const Tc *g = this->no_scale_
? nullptr
: inputs[g_idx]->get_data_pointer<Tc>(this->ctx_);
const Tc *dm = nullptr;
const Tc *dv = nullptr;
if (outputs.size() == 3) {
dm = batch_mean->get_grad_pointer<Tc>(this->ctx_);
dv = batch_var->get_grad_pointer<Tc>(this->ctx_);
}
Tc *dmean = get_data_ptr_(this->v_dmean_);
Tc *dvar = get_data_ptr_(this->v_dvar_);
#ifdef BATCH_NORMALIZATION_USE_PARALLEL_REDUCTION
Tc *tmp_reduction_space = get_data_ptr_(this->v_tmp_reduction_space_);
Tc *t = get_data_ptr_(this->v_t_);
backward_batch_data_parallel_reduction(
this->size0_, this->size1_, this->size2_, ndim, axes, in_strides,
in_shape, out_strides, out_shape, this->decay_rate_, this->eps_, dy, m,
v, x, g, dm, dv, dx, mean_reduction_space, variance_reduction_space,
tmp_reduction_space, dmean, dvar, t, inv_sqrt_variance, d_x_trans,
d_dy_trans);
#else
backward_batch_data(this->size0_, this->size1_, this->size2_,
this->decay_rate_, this->eps_, dy, m, v, x, g, dm, dv,
dx, dmean, dvar);
#endif
}
if (pd_beta || pd_gamma) { // beta and gamma
if (!this->no_bias_ && !accum[b_idx])
inputs[b_idx]->grad()->zero(); // TODO: optimize this out if possible
if (!this->no_scale_ && !accum[g_idx])
inputs[g_idx]->grad()->zero(); // TODO: optimize this out if possible
Tc *db = !pd_beta ? nullptr : inputs[b_idx]->cast_grad_and_get_pointer<Tc>(
this->ctx_, false);
Tc *dg = !pd_gamma ? nullptr : inputs[g_idx]->cast_grad_and_get_pointer<Tc>(
this->ctx_, false);
#ifdef BATCH_NORMALIZATION_USE_PARALLEL_REDUCTION
backward_batch_gamma_beta_parallel_reduction(
this->size0_, this->size1_, this->size2_, d_dy_trans, m, v, d_x_trans,
this->eps_, db, dg, mean_reduction_space, variance_reduction_space,
inv_sqrt_variance);
#else
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
backward_batch_gamma_beta_kernel, this->size1_, this->size2_,
this->size02_, this->size12_, this->eps_, dy, m, v, x, db, dg);
#endif
}
}
}
#include <nbla/array.hpp>
#include <nbla/logger.hpp>
#include <nbla/variable.hpp>
#include <nbla/cuda/array/cuda_array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/cudnn/cudnn.hpp>
#include <nbla/cuda/cudnn/function/batch_normalization.hpp>
#include <nbla/cuda/function/batch_normalization.hpp>
#include <nbla/cuda/limits.hpp>
#include <type_traits>
namespace nbla {
#define DRV_BN_T() get_dtype_by_cudnn_data_type(derived_bn_dtype_)
template <typename T>
void BatchNormalizationCudaCudnn<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
if (outputs.size() == 3) {
    // [WORKAROUND]
    // Using the saved mean and variance and propagating the mean and variance
    // gradients are not supported with cuDNN, because cuDNN's backward
    // interface differs from NNabla's.
    // Therefore, fall back to the CUDA implementation when outputs.size() == 3.
    // TODO: Change saved variance to inverse variance like cuDNN
this->fall_back_func_ = make_shared<BatchNormalizationCuda<T>>(
this->ctx_, this->axes_, this->decay_rate_, this->eps_,
this->batch_stat_, this->no_scale_, this->no_bias_);
this->fall_back_func_->setup(inputs, outputs);
return;
}
BatchNormalizationCuda<T>::setup_impl(inputs, outputs);
cudnn_handle_ = SingletonManager::get<CudnnHandleManager>()->handle(device_);
  NBLA_CHECK(this->axes_.size() == 1, error_code::value,
             "Only a single axis is supported.");
int N = this->size0_;
int C = this->size1_;
int H = this->size2_;
int W = 1;
mode_ = CUDNN_BATCHNORM_SPATIAL;
// Channel last is restricted for spatial input
bool channel_last = this->axes_[0] == inputs[0]->ndim() - 1;
if (inputs[0]->ndim() == 2 && H == 1 && W == 1) {
// typical 1-d affine output with shape (N, C)
mode_ = CUDNN_BATCHNORM_PER_ACTIVATION;
NBLA_CUDNN_CHECK(
cudnnSetTensor4dDescriptor(input_desc_.desc, CUDNN_TENSOR_NHWC,
cudnn_data_type<T>::type(), N, C, H, W));
NBLA_CUDNN_CHECK(
cudnnSetTensor4dDescriptor(output_desc_.desc, CUDNN_TENSOR_NHWC,
cudnn_data_type<T>::type(), N, C, H, W));
} else if (channel_last) {
    // To prevent a NOT SUPPORTED error in cuDNN, N and H are recalculated.
    // (A large N is not allowed.)
N = inputs[0]->shape()[0];
H = inputs[0]->size() / (N * C);
if (this->batch_stat_) {
// cudnnBatchNormalizationForwardInference does not support this mode.
mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
}
NBLA_CUDNN_CHECK(
cudnnSetTensor4dDescriptor(input_desc_.desc, CUDNN_TENSOR_NHWC,
cudnn_data_type<T>::type(), N, C, H, W));
NBLA_CUDNN_CHECK(
cudnnSetTensor4dDescriptor(output_desc_.desc, CUDNN_TENSOR_NHWC,
cudnn_data_type<T>::type(), N, C, H, W));
} else {
NBLA_CUDNN_CHECK(
cudnnSetTensor4dDescriptor(input_desc_.desc, CUDNN_TENSOR_NCHW,
cudnn_data_type<T>::type(), N, C, H, W));
NBLA_CUDNN_CHECK(
cudnnSetTensor4dDescriptor(output_desc_.desc, CUDNN_TENSOR_NCHW,
cudnn_data_type<T>::type(), N, C, H, W));
}
// Get BN data type.
NBLA_CUDNN_CHECK(cudnnDeriveBNTensorDescriptor(
bn_scale_bias_mean_var_desc_.desc, input_desc_.desc, mode_));
int n, c, h, w, sn, sc, sh, sw; // garbage
NBLA_CUDNN_CHECK(cudnnGetTensor4dDescriptor(bn_scale_bias_mean_var_desc_.desc,
&derived_bn_dtype_, &n, &c, &h,
&w, &sn, &sc, &sh, &sw));
#if CUDNN_VERSION >= 7400
  // Check the conditions under which the faster BN implementation can be used.
can_use_bn_ex_ =
channel_last && std::is_same<Tw, nbla::HalfCuda>::value && C % 4 == 0;
#if _WIN32
  // In cuDNN 7.4.1 and 7.4.2, cudnnBatchNormalization*Ex doesn't support the
  // Windows platform without TCC mode.
  // In cuDNN 7.5 or higher, cudnnBatchNormalization*Ex just falls back to a
  // slower implementation.
  // Currently we don't support TCC mode. Therefore, can_use_bn_ex_ must be
  // false.
if (can_use_bn_ex_) {
    NBLA_LOG_WARN(
        "[BatchNormalization] "
        "Currently, on Windows, cuDNN doesn't support the faster "
        "BatchNormalization kernel"
        " which is used with channel_last and half datatype on other platforms."
        " Falling back to a slower implementation.")
can_use_bn_ex_ = false;
}
#endif // _WIN32
can_use_bn_ex_ &= this->batch_stat_;
if (can_use_bn_ex_) {
NBLA_CUDNN_CHECK(cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize(
this->cudnn_handle_, this->mode_, this->ops_,
this->input_desc_.desc, /* x desc */
nullptr, /* z desc */
this->output_desc_.desc, /* y desc */
this->bn_scale_bias_mean_var_desc_.desc, nullptr,
&forward_workspace_size_));
NBLA_CUDNN_CHECK(cudnnGetBatchNormalizationTrainingExReserveSpaceSize(
this->cudnn_handle_, this->mode_, this->ops_, this->act_desc_.desc,
this->input_desc_.desc, &reserve_size_));
NBLA_CUDNN_CHECK(cudnnGetBatchNormalizationBackwardExWorkspaceSize(
this->cudnn_handle_, this->mode_, this->ops_,
this->input_desc_.desc, /* x desc */
this->output_desc_.desc, /* y desc */
this->output_desc_.desc, /* dy desc */
this->input_desc_.desc, /*dz desc*/
this->input_desc_.desc, /* dx desc */
this->bn_scale_bias_mean_var_desc_.desc, this->act_desc_.desc,
&backward_workspace_size_));
}
#endif
}
template <class T>
void BatchNormalizationCudaCudnn<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(std::stoi(this->ctx_.device_id));
if (this->batch_stat_) { // Training mode.
forward_impl_batch(inputs, outputs, true /* update_inputs */);
} else { // Testing mode.
forward_impl_global(inputs, outputs);
}
}
template <class T>
void BatchNormalizationCudaCudnn<T>::recompute_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(std::stoi(this->ctx_.device_id));
if (this->batch_stat_) { // Training mode.
forward_impl_batch(inputs, outputs, false /* update_inputs */);
} else { // Testing mode.
forward_impl_global(inputs, outputs);
}
}
template <class T>
void BatchNormalizationCudaCudnn<T>::forward_impl_batch(
const Variables &inputs, const Variables &outputs,
const bool update_inputs) {
// Check whether it outputs batch mean and var.
Variable *batch_mean = &this->mean_;
Variable *batch_var = &this->var_;
// Inputs
const Tw *x = inputs[0]->get_data_pointer<Tw>(this->ctx_);
// dummy beta, gamma variables
Variable beta_dummy, gamma_dummy;
const auto param_shape = this->mean_.shape();
if (this->no_bias_) {
beta_dummy.reshape(param_shape, true);
beta_dummy.data()->zero();
}
if (this->no_scale_) {
gamma_dummy.reshape(param_shape, true);
gamma_dummy.data()->fill(1.);
}
const void *beta =
this->no_bias_
? beta_dummy.data()->get(DRV_BN_T(), this->ctx_)->const_pointer()
: inputs[this->b_idx_]
->data()
->get(DRV_BN_T(), this->ctx_)
->const_pointer();
const void *gamma =
this->no_scale_
? gamma_dummy.data()->get(DRV_BN_T(), this->ctx_)->const_pointer()
: inputs[this->g_idx_]
->data()
->get(DRV_BN_T(), this->ctx_)
->const_pointer();
// Output
Tw *y = outputs[0]->cast_data_and_get_pointer<Tw>(this->ctx_, true);
void *m = batch_mean->data()
->cast(DRV_BN_T(), this->ctx_, true)
->pointer(); // batch mean
void *v = batch_var->data()
->cast(DRV_BN_T(), this->ctx_, true)
->pointer(); // batch var
// Inputs/Outputs
void *rm = !update_inputs ? nullptr : inputs[this->m_idx_]
->data()
->cast(DRV_BN_T(), this->ctx_)
->pointer(); // running mean
void *rv = !update_inputs ? nullptr : inputs[this->v_idx_]
->data()
->cast(DRV_BN_T(), this->ctx_)
->pointer(); // running var
auto a = get_cudnn_scalar_arg<T>(1);
auto b = get_cudnn_scalar_arg<T>(0);
double eps = std::max((double)this->eps_, CUDNN_BN_MIN_EPSILON);
#if CUDNN_VERSION >= 7400
if (can_use_bn_ex_) {
// Get buffers.
NdArray workspace(Shape_t{(Size_t)forward_workspace_size_});
reserve_ = make_shared<NdArray>(Shape_t{(Size_t)reserve_size_});
void *workspace_ptr =
workspace.cast(DRV_BN_T(), this->ctx_, true)->pointer();
void *reserve_ptr = reserve_->cast(DRV_BN_T(), this->ctx_, true)->pointer();
// Execute forward.
NBLA_CUDNN_CHECK(cudnnBatchNormalizationForwardTrainingEx(
this->cudnn_handle_, this->mode_, this->ops_, &a, &b, input_desc_.desc,
x, /* x */
nullptr, nullptr, /* z */
output_desc_.desc, y, /* y */
this->bn_scale_bias_mean_var_desc_.desc, gamma, beta,
1 - this->decay_rate_, rm, rv, eps, m, v,
this->act_desc_.desc, /* activation descriptor */
workspace_ptr, /* workspace pointer */
forward_workspace_size_, /* workspace size */
reserve_ptr, /* reserve space pointer */
reserve_size_ /* reserve space size */
));
return;
}
#endif
NBLA_CUDNN_CHECK(cudnnBatchNormalizationForwardTraining(
cudnn_handle_, mode_, &a, &b, input_desc_.desc, x, output_desc_.desc, y,
bn_scale_bias_mean_var_desc_.desc, gamma, beta, 1 - this->decay_rate_, rm,
rv, eps, m, v));
}
template <class T>
void BatchNormalizationCudaCudnn<T>::forward_impl_global(
const Variables &inputs, const Variables &outputs) {
// dummy beta, gamma variables
Variable beta_dummy, gamma_dummy;
const auto param_shape = this->mean_.shape();
if (this->no_bias_) {
beta_dummy.reshape(param_shape, true);
beta_dummy.data()->zero();
}
if (this->no_scale_) {
gamma_dummy.reshape(param_shape, true);
gamma_dummy.data()->fill(1.);
}
// Inputs
const Tw *x = inputs[0]->get_data_pointer<Tw>(this->ctx_);
const void *beta =
this->no_bias_
? beta_dummy.data()->get(DRV_BN_T(), this->ctx_)->const_pointer()
: inputs[this->b_idx_]
->data()
->get(DRV_BN_T(), this->ctx_)
->const_pointer();
const void *gamma =
this->no_scale_
? gamma_dummy.data()->get(DRV_BN_T(), this->ctx_)->const_pointer()
: inputs[this->g_idx_]
->data()
->get(DRV_BN_T(), this->ctx_)
->const_pointer();
const void *rm = inputs[this->m_idx_]
->data()
->get(DRV_BN_T(), this->ctx_)
->const_pointer(); // running mean
const void *rv = inputs[this->v_idx_]
->data()
->get(DRV_BN_T(), this->ctx_)
->const_pointer(); // running var
// Output
Tw *y = outputs[0]->cast_data_and_get_pointer<Tw>(this->ctx_, true);
auto a = get_cudnn_scalar_arg<T>(1);
auto b = get_cudnn_scalar_arg<T>(0);
double eps = std::max((double)this->eps_, CUDNN_BN_MIN_EPSILON);
NBLA_CUDNN_CHECK(cudnnBatchNormalizationForwardInference(
cudnn_handle_, mode_, &a, &b, input_desc_.desc, x, output_desc_.desc, y,
bn_scale_bias_mean_var_desc_.desc, gamma, beta, rm, rv, eps));
}
template <class T>
void BatchNormalizationCudaCudnn<T>::backward_impl(
const Variables &inputs, const Variables &outputs,
const vector<bool> &propagate_down, const vector<bool> &accum) {
cuda_set_device(std::stoi(this->ctx_.device_id));
if (this->batch_stat_) { // Training mode.
backward_impl_batch(inputs, outputs, propagate_down, accum);
} else { // Testing mode.
this->backward_impl_global(inputs, outputs, propagate_down, accum);
}
}
template <class T>
void BatchNormalizationCudaCudnn<T>::backward_impl_batch(
const Variables &inputs, const Variables &outputs,
const vector<bool> &propagate_down, const vector<bool> &accum) {
if (!(propagate_down[0] || propagate_down[1] || propagate_down[2])) {
return;
}
const bool pd_beta = !this->no_bias_ && propagate_down[this->b_idx_];
const bool pd_gamma = !this->no_scale_ && propagate_down[this->g_idx_];
const bool accum_beta = !this->no_bias_ && accum[this->b_idx_];
const bool accum_gamma = !this->no_scale_ && accum[this->g_idx_];
// Check whether it outputs batch mean/var.
Variable *batch_mean = &this->mean_;
Variable *batch_var = &this->var_;
// Common inputs wrt. gradient.
const Tw *dy = outputs[0]->get_grad_pointer<Tw>(this->ctx_);
const void *m =
batch_mean->data()->get(DRV_BN_T(), this->ctx_)->const_pointer();
const void *v =
batch_var->data()->get(DRV_BN_T(), this->ctx_)->const_pointer();
const Tw *x = inputs[0]->get_data_pointer<Tw>(this->ctx_);
auto a_data = get_cudnn_scalar_arg<T>(propagate_down[0] ? 1 : 0);
auto b_data = get_cudnn_scalar_arg<T>(accum[0] && propagate_down[0] ? 1 : 0);
auto a_param = get_cudnn_scalar_arg<T>(pd_beta || pd_gamma ? 1 : 0);
auto b_param = a_param;
if (!(accum_beta || accum_gamma)) {
b_param = 0;
}
size_t prop_down_workspace_size = 0;
if (!propagate_down[0]) {
prop_down_workspace_size = std::max(
prop_down_workspace_size, inputs[0]->size() * sizeof_dtype(DRV_BN_T()));
}
if (!pd_beta || !pd_gamma) {
prop_down_workspace_size = std::max(
prop_down_workspace_size, inputs[1]->size() * sizeof_dtype(DRV_BN_T()));
}
void *prop_down_buf = nullptr;
NdArray prop_down_workspace;
if (prop_down_workspace_size) {
prop_down_workspace.reshape({static_cast<Size_t>(prop_down_workspace_size)},
true);
prop_down_buf = prop_down_workspace.cast(dtypes::BYTE, this->ctx_, true)
->pointer<void>();
}
Tw *dx = propagate_down[0]
? inputs[0]->cast_grad_and_get_pointer<Tw>(this->ctx_, !accum[0])
: (Tw *)prop_down_buf;
// dummy beta, gamma variables
Variable beta_dummy, gamma_dummy;
const auto param_shape = this->mean_.shape();
if (this->no_bias_) {
beta_dummy.reshape(param_shape, true);
beta_dummy.data()->zero();
}
if (this->no_scale_) {
gamma_dummy.reshape(param_shape, true);
gamma_dummy.data()->fill(1.);
}
const void *beta =
this->no_bias_
? beta_dummy.data()->get(DRV_BN_T(), this->ctx_)->const_pointer()
: inputs[this->b_idx_]
->data()
->get(DRV_BN_T(), this->ctx_)
->const_pointer();
const void *gamma =
this->no_scale_
? gamma_dummy.data()->get(DRV_BN_T(), this->ctx_)->const_pointer()
: inputs[this->g_idx_]
->data()
->get(DRV_BN_T(), this->ctx_)
->const_pointer();
// Specify write only flag to prevent unnecessary memset.
const bool param_diff_write = b_param == 0;
void *db = pd_beta
? inputs[this->b_idx_]
->grad()
->cast(DRV_BN_T(), this->ctx_, param_diff_write)
->pointer()
: prop_down_buf;
void *dg = pd_gamma
? inputs[this->g_idx_]
->grad()
->cast(DRV_BN_T(), this->ctx_, param_diff_write)
->pointer()
: prop_down_buf;
double eps = std::max((double)this->eps_, CUDNN_BN_MIN_EPSILON);
#if CUDNN_VERSION >= 7400
if (can_use_bn_ex_) {
// Get buffers.
NdArray workspace(Shape_t{(Size_t)backward_workspace_size_});
    NBLA_CHECK(reserve_, error_code::value, "Forward has not been called.");
void *workspace_ptr =
workspace.cast(DRV_BN_T(), this->ctx_, true)->pointer();
void *reserve_ptr =
reserve_->cast(DRV_BN_T(), this->ctx_, false /* rw access */)
->pointer();
// Execute backward.
NBLA_CUDNN_CHECK(cudnnBatchNormalizationBackwardEx(
this->cudnn_handle_, this->mode_, this->ops_, &a_data, &b_data,
&a_param, &b_param, input_desc_.desc, x, /* x */
nullptr, nullptr, /* y */
output_desc_.desc, dy, /* dy */
nullptr, nullptr, /* dz == null */
input_desc_.desc, dx, /* dx */
this->bn_scale_bias_mean_var_desc_.desc, gamma, beta, dg, db, eps, m, v,
this->act_desc_.desc, /* activation descriptor */
workspace_ptr, /* workspace pointer */
backward_workspace_size_, /* workspace size */
reserve_ptr, /* reserve space pointer */
reserve_size_ /* reserve space size */
));
// Clear reserved buffer for backward
reserve_ = nullptr;
return;
}
#endif
NBLA_CUDNN_CHECK(cudnnBatchNormalizationBackward(
cudnn_handle_, mode_, &a_data, &b_data, &a_param, &b_param,
input_desc_.desc, x, output_desc_.desc, dy, input_desc_.desc, dx,
bn_scale_bias_mean_var_desc_.desc, gamma, dg, db, eps, m, v));
}
} // namespace nbla
#ifndef MXNET_OPERATOR_CONTRIB_NN_DEFORMABLE_IM2COL_CUH_
#define MXNET_OPERATOR_CONTRIB_NN_DEFORMABLE_IM2COL_CUH_
#include <mxnet/base.h>
#include <mxnet/operator.h>
#include <algorithm>
#include <cstring>
#include <vector>
#include "../../mxnet_op.h"
#include "../../../common/cuda_utils.h"
namespace mxnet {
namespace op {
template <typename DType>
__device__ DType deformable_im2col_bilinear(const DType* bottom_data,
const index_t data_width,
const index_t height,
const index_t width,
DType h, DType w) {
index_t h_low = floor(h);
index_t w_low = floor(w);
index_t h_high;
index_t w_high;
if (h_low >= height - 1) {
h_high = h_low = height - 1;
h = static_cast<DType>(h_low);
} else {
h_high = h_low + 1;
}
if (w_low >= width - 1) {
w_high = w_low = width - 1;
w = static_cast<DType>(w_low);
} else {
w_high = w_low + 1;
}
DType lh = h - h_low;
DType lw = w - w_low;
DType hh = 1 - lh, hw = 1 - lw;
DType v1 = bottom_data[h_low * data_width + w_low];
DType v2 = bottom_data[h_low * data_width + w_high];
DType v3 = bottom_data[h_high * data_width + w_low];
DType v4 = bottom_data[h_high * data_width + w_high];
DType w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
DType val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename DType>
__device__ DType get_gradient_weight(DType argmax_h, DType argmax_w,
const index_t h, const index_t w,
const index_t height, const index_t width) {
if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) {
//empty
return 0;
}
argmax_h = max(argmax_h, static_cast<DType>(0.0f));
argmax_w = max(argmax_w, static_cast<DType>(0.0f));
index_t argmax_h_low = static_cast<index_t>(argmax_h);
index_t argmax_w_low = static_cast<index_t>(argmax_w);
index_t argmax_h_high;
index_t argmax_w_high;
if (argmax_h_low >= height - 1) {
argmax_h_high = argmax_h_low = height - 1;
argmax_h = static_cast<DType>(argmax_h_low);
} else {
argmax_h_high = argmax_h_low + 1;
}
if (argmax_w_low >= width - 1)
{
argmax_w_high = argmax_w_low = width - 1;
argmax_w = static_cast<DType>(argmax_w_low);
} else {
argmax_w_high = argmax_w_low + 1;
}
DType weight = 0;
if (h == argmax_h_low) {
if (w == argmax_w_low) {
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
} else if (w == argmax_w_high) {
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
}
} else if (h == argmax_h_high) {
if (w == argmax_w_low) {
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
} else if (w == argmax_w_high) {
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
}
}
return weight;
}
template <typename DType>
__device__ DType get_coordinate_weight(DType argmax_h, DType argmax_w,
const index_t height, const index_t width,
const DType* im_data,
const index_t data_width,
const index_t bp_dir) {
if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width)
{
//empty
return 0;
}
if (argmax_h < 0) argmax_h = 0;
if (argmax_w < 0) argmax_w = 0;
index_t argmax_h_low = static_cast<index_t>(argmax_h);
index_t argmax_w_low = static_cast<index_t>(argmax_w);
index_t argmax_h_high;
index_t argmax_w_high;
if (argmax_h_low >= height - 1) {
argmax_h_high = argmax_h_low = height - 1;
argmax_h = static_cast<DType>(argmax_h_low);
} else {
argmax_h_high = argmax_h_low + 1;
}
if (argmax_w_low >= width - 1) {
argmax_w_high = argmax_w_low = width - 1;
argmax_w = static_cast<DType>(argmax_w_low);
} else {
argmax_w_high = argmax_w_low + 1;
}
DType weight = 0;
DType im_ll = im_data[argmax_h_low * data_width + argmax_w_low];
DType im_lh = im_data[argmax_h_low * data_width + argmax_w_high];
DType im_hl = im_data[argmax_h_high * data_width + argmax_w_low];
DType im_hh = im_data[argmax_h_high * data_width + argmax_w_high];
if (bp_dir == 0) {
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_ll;
weight += -1 * (argmax_w - argmax_w_low) * im_lh;
weight += (argmax_w_low + 1 - argmax_w) * im_hl;
weight += (argmax_w - argmax_w_low) * im_hh;
} else if (bp_dir == 1) {
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_ll;
weight += (argmax_h_low + 1 - argmax_h) * im_lh;
weight += -1 * (argmax_h - argmax_h_low) * im_hl;
weight += (argmax_h - argmax_h_low) * im_hh;
}
return weight;
}
/*!
* \brief deformable_im2col gpu kernel.
 * DO NOT call this directly. Use the wrapper function deformable_im2col() instead.
*/
template <typename DType>
__global__ void deformable_im2col_gpu_kernel(const index_t n, const DType* data_im,
const DType* data_offset,
const index_t height, const index_t width,
const index_t kernel_h, const index_t kernel_w,
const index_t pad_h, const index_t pad_w,
const index_t stride_h, const index_t stride_w,
const index_t dilation_h, const index_t dilation_w,
const index_t channel_per_group,
const index_t height_col, const index_t width_col,
DType* data_col) {
CUDA_KERNEL_LOOP(index, n) {
    // index: linear index into the output column matrix
const index_t w_col = index % width_col;
const index_t h_col = (index / width_col) % height_col;
const index_t c_im = (index / width_col) / height_col;
const index_t c_col = c_im * kernel_h * kernel_w;
const index_t group_index = c_im / channel_per_group;
const index_t group_offset_step = 2 * kernel_h * kernel_w * height_col * width_col;
const index_t h_in = h_col * stride_h - pad_h;
const index_t w_in = w_col * stride_w - pad_w;
DType* data_col_ptr = data_col + (c_col * height_col + h_col) * width_col + w_col;
const DType* data_im_ptr = data_im + (c_im * height + h_in) * width + w_in;
const DType* data_offset_ptr = data_offset + group_index * group_offset_step;
for (index_t i = 0; i < kernel_h; ++i) {
for (index_t j = 0; j < kernel_w; ++j) {
const index_t data_offset_h_ptr = ((2 * (i * kernel_w + j)) *
height_col + h_col) * width_col + w_col;
const index_t data_offset_w_ptr = data_offset_h_ptr + height_col * width_col;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
DType val = static_cast<DType>(0);
const DType h_im = h_in + i * dilation_h + offset_h;
const DType w_im = w_in + j * dilation_w + offset_w;
if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
const DType map_h = i * dilation_h + offset_h;
const DType map_w = j * dilation_w + offset_w;
const index_t cur_height = height - h_in;
const index_t cur_width = width - w_in;
val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
}
*data_col_ptr = val;
data_col_ptr += height_col * width_col;
}
}
}
}
/*!\brief
 * gpu function of deformable_im2col algorithm
* \param s device stream
* \param data_im pointer of an image (C, H, W, ...) in the image batch
* \param data_offset pointer of offset (C, H, W, ...) in the offset batch
* \param im_shape input image shape in dimensions (N, C, H, W,)
* \param col_shape column buffer shape (#channels, output_im_height, output_im_width, ...)
* \param kernel_shape kernel filter shape
* \param pad pad shape
* \param stride stride shape
* \param dilation dilation shape
* \param deformable_group #offset group that deformable convolution use
* \param data_col column buffer pointer
*/
template <typename DType>
inline void deformable_im2col(mshadow::Stream<gpu>* s,
const DType* data_im,
const DType* data_offset,
const mxnet::TShape& im_shape,
const mxnet::TShape& col_shape,
const mxnet::TShape& kernel_shape,
const mxnet::TShape& pad,
const mxnet::TShape& stride,
const mxnet::TShape& dilation,
const index_t deformable_group,
DType* data_col) {
// num_axes should be smaller than block size
const int num_spatial_axes = kernel_shape.ndim();
CHECK_LT(num_spatial_axes, mshadow::cuda::kBaseThreadNum);
index_t channel_per_group = im_shape[1] / deformable_group;
index_t num_kernels = im_shape[1] * col_shape.ProdShape(1, col_shape.ndim());
using namespace mxnet_op;
switch (num_spatial_axes) {
case 2:
deformable_im2col_gpu_kernel<DType> // NOLINT_NEXT_LINE(whitespace/operators)
<<<cuda_get_num_blocks(num_kernels), mshadow::cuda::kBaseThreadNum,
0, mshadow::Stream<gpu>::GetStream(s)>>>(num_kernels, data_im, data_offset,
im_shape[2], im_shape[3],
kernel_shape[0], kernel_shape[1],
pad[0], pad[1], stride[0], stride[1],
dilation[0], dilation[1],
channel_per_group,
col_shape[1], col_shape[2], data_col);
MSHADOW_CUDA_POST_KERNEL_CHECK(deformable_im2col_gpu_kernel);
break;
default:
LOG(FATAL) << "im2col_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
}
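// The column-buffer geometry documented above follows the usual convolution
// output arithmetic. The helper below is a minimal host-side sketch added for
// illustration only (it is not part of MXNet); the struct and function names
// are made up, and only the index_t type already used in this file is assumed.
struct ColBufferGeometry {
  index_t out_h;         // output_im_height component of col_shape
  index_t out_w;         // output_im_width component of col_shape
  index_t col_channels;  // #channels component of col_shape = C * kernel_h * kernel_w
  index_t col_elements;  // total number of elements in the column buffer
};
inline ColBufferGeometry compute_col_geometry(index_t channels, index_t height,
                                              index_t width, index_t kernel_h,
                                              index_t kernel_w, index_t pad_h,
                                              index_t pad_w, index_t stride_h,
                                              index_t stride_w,
                                              index_t dilation_h,
                                              index_t dilation_w) {
  // Effective kernel extent once dilation is applied.
  const index_t eff_kh = dilation_h * (kernel_h - 1) + 1;
  const index_t eff_kw = dilation_w * (kernel_w - 1) + 1;
  ColBufferGeometry g;
  g.out_h = (height + 2 * pad_h - eff_kh) / stride_h + 1;
  g.out_w = (width + 2 * pad_w - eff_kw) / stride_w + 1;
  g.col_channels = channels * kernel_h * kernel_w;
  g.col_elements = g.col_channels * g.out_h * g.out_w;
  return g;
}
// Example: a 3x3 kernel with pad 1, stride 1, and dilation 1 on a 64x56x56
// input yields col_shape = (64 * 9, 56, 56). col_elements is the size of the
// buffer that deformable_im2col fills, while the kernel launch above uses one
// thread per input channel and output location (im_shape[1] * out_h * out_w).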
/*!
* \brief deformable_col2im gpu kernel.
* \brief DO NOT call this directly. Use wrapper function deformable_col2im() instead;
*/
template <typename DType>
__global__ void deformable_col2im_gpu_kernel(const index_t n, const DType* data_col,
const DType* data_offset, const index_t channels,
const index_t height, const index_t width,
const index_t kernel_h, const index_t kernel_w,
const index_t pad_h, const index_t pad_w,
const index_t stride_h, const index_t stride_w,
const index_t dilation_h, const index_t dilation_w,
const index_t channel_per_group,
const index_t height_col, const index_t width_col,
DType* grad_im) {
CUDA_KERNEL_LOOP(index, n) {
const index_t j = (index / width_col / height_col) % kernel_w;
const index_t i = (index / width_col / height_col / kernel_w) % kernel_h;
const index_t c = index / width_col / height_col / kernel_w / kernel_h;
// compute the start and end of the output
const index_t group_index = c / channel_per_group;
const index_t group_offset_step = 2 * kernel_h * kernel_w * height_col * width_col;
index_t w_col = index % width_col;
index_t h_col = (index / width_col) % height_col;
index_t w_in = w_col * stride_w - pad_w;
index_t h_in = h_col * stride_h - pad_h;
const DType* data_offset_ptr = data_offset + group_index * group_offset_step;
const index_t data_offset_h_ptr = ((2 * (i * kernel_w + j)) *
height_col + h_col) * width_col + w_col;
const index_t data_offset_w_ptr = data_offset_h_ptr + height_col * width_col;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
const DType cur_inv_h_data = h_in + i * dilation_h + offset_h;
const DType cur_inv_w_data = w_in + j * dilation_w + offset_w;
const DType cur_top_grad = data_col[index];
const index_t cur_h = static_cast<index_t>(cur_inv_h_data);
const index_t cur_w = static_cast<index_t>(cur_inv_w_data);
for (int dy = -2; dy <= 2; dy++) {
for (int dx = -2; dx <= 2; dx++) {
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1
) {
index_t cur_bottom_grad_pos = (c * height + cur_h + dy) * width + cur_w + dx;
DType weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data,
cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
/*!\brief
* gpu function of deformable_col2im algorithm
* \param s device stream
 * \param data_col start pointer of the column buffer (gradient w.r.t. the column data)
* \param data_offset pointer of offset (C, H, W, ...) in the offset batch
* \param im_shape input image shape in dimensions (N, C, H, W,)
* \param col_shape column buffer shape
* \param kernel_shape kernel filter shape
* \param pad pad shape
* \param stride stride shape
* \param dilation dilation shape
* \param deformable_group #offset group that deformable convolution use
 * \param grad_im pointer of an image gradient (C, H, W, ...) in the image batch
*/
template <typename DType>
inline void deformable_col2im(mshadow::Stream<gpu>* s,
const DType* data_col,
const DType* data_offset,
const mxnet::TShape& im_shape,
const mxnet::TShape& col_shape,
const mxnet::TShape& kernel_shape,
const mxnet::TShape& pad,
const mxnet::TShape& stride,
const mxnet::TShape& dilation,
const index_t deformable_group,
DType* grad_im) {
const int num_spatial_axes = kernel_shape.ndim();
index_t im_size = im_shape.ProdShape(1, im_shape.ndim());
index_t channel_per_group = im_shape[1] / deformable_group;
index_t num_kernels = col_shape.ProdShape(0, col_shape.ndim());
// num_axes should be smaller than block size
CHECK_LT(num_spatial_axes, mshadow::cuda::kBaseThreadNum);
using namespace mxnet_op;
switch (num_spatial_axes) {
case 2:
      // One thread handles one column-buffer element and scatters its
      // contribution into grad_im with atomicAdd, since several column entries
      // can map to the same image pixel.
// NOLINT_NEXT_LINE(whitespace/operators)
deformable_col2im_gpu_kernel<DType>
<<<cuda_get_num_blocks(num_kernels), mshadow::cuda::kBaseThreadNum,
0, mshadow::Stream<gpu>::GetStream(s)>>>(num_kernels, data_col, data_offset,
im_shape[1], im_shape[2], im_shape[3],
kernel_shape[0], kernel_shape[1],
pad[0], pad[1], stride[0], stride[1],
dilation[0], dilation[1],
channel_per_group,
col_shape[1], col_shape[2], grad_im);
MSHADOW_CUDA_POST_KERNEL_CHECK(deformable_col2im_gpu_kernel);
break;
default:
LOG(FATAL) << "col2im_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
}
/*!
* \brief deformable_col2im_coord gpu kernel.
* \brief DO NOT call this directly. Use wrapper function deformable_col2im_coord() instead;
*/
template <typename DType>
__global__ void deformable_col2im_coord_gpu_kernel(const index_t n, const DType* data_col,
const DType* data_im,
const DType* data_offset,
const index_t channels,
const index_t height, const index_t width,
const index_t kernel_h, const index_t kernel_w,
const index_t pad_h, const index_t pad_w,
const index_t stride_h, const index_t stride_w,
const index_t dilation_h, const index_t dilation_w,
const index_t channel_per_group,
const index_t height_col, const index_t width_col,
DType* grad_offset) {
CUDA_KERNEL_LOOP(index, n) {
DType val = 0;
index_t w = index % width_col;
index_t h = (index / width_col) % height_col;
index_t c = index / width_col / height_col;
// compute the start and end of the output
const index_t group_index = c / (2 * kernel_h * kernel_w);
const index_t group_col_step = channel_per_group * width_col * height_col;
const index_t group_im_step = channel_per_group / kernel_h / kernel_w * height * width;
const index_t group_offset_step = 2 * kernel_h * kernel_w * height_col * width_col;
const index_t col_step = kernel_h * kernel_w;
const DType* data_col_ptr = data_col + group_index * group_col_step;
const DType* data_im_ptr = data_im + group_index * group_im_step;
const DType* data_offset_ptr = data_offset + group_index * group_offset_step;
index_t cnt = 0;
const index_t offset_c = c - group_index * 2 * kernel_h * kernel_w;
for (index_t col_c = (offset_c / 2); col_c < channel_per_group; col_c += col_step) {
const index_t col_pos = ((col_c * height_col) + h) * width_col + w;
const index_t bp_dir = offset_c % 2;
index_t j = (col_pos / width_col / height_col) % kernel_w;
index_t i = (col_pos / width_col / height_col / kernel_w) % kernel_h;
index_t w_col = col_pos % width_col;
index_t h_col = (col_pos / width_col) % height_col;
index_t w_in = w_col * stride_w - pad_w;
index_t h_in = h_col * stride_h - pad_h;
const index_t data_offset_h_ptr = ((2 * (i * kernel_w + j)) *
height_col + h_col) * width_col + w_col;
const index_t data_offset_w_ptr = data_offset_h_ptr + height_col * width_col;
const DType offset_h = data_offset_ptr[data_offset_h_ptr];
const DType offset_w = data_offset_ptr[data_offset_w_ptr];
DType inv_h = h_in + i * dilation_h + offset_h;
DType inv_w = w_in + j * dilation_w + offset_w;
if (inv_h < 0 || inv_w < 0 || inv_h >= height || inv_w >= width) {
inv_h = inv_w = -1;
}
const DType weight = get_coordinate_weight(inv_h, inv_w, height, width,
data_im_ptr + cnt * height * width,
width, bp_dir);
val += weight * data_col_ptr[col_pos];
cnt += 1;
}
grad_offset[index] = val;
}
}
/*!\brief
* gpu function of deformable_col2im_coord algorithm
* \param s device stream
 * \param data_col start pointer of the column buffer (gradient w.r.t. the column data)
* \param data_im pointer of an image (C, H, W, ...) in the image batch
* \param data_offset pointer of offset (C, H, W, ...) in the offset batch
* \param im_shape input image shape in dimensions (N, C, H, W,)
* \param col_shape column buffer shape
* \param kernel_shape kernel filter shape
* \param pad pad shape
* \param stride stride shape
* \param dilation dilation shape
* \param deformable_group #offset group that deformable convolution use
* \param grad_offset pointer of the offset (C, H, W,...) in the offset batch
*/
template <typename DType>
inline void deformable_col2im_coord(mshadow::Stream<gpu>* s,
const DType* data_col,
const DType* data_im,
const DType* data_offset,
const mxnet::TShape& im_shape,
const mxnet::TShape& col_shape,
const mxnet::TShape& kernel_shape,
const mxnet::TShape& pad,
const mxnet::TShape& stride,
const mxnet::TShape& dilation,
const index_t deformable_group,
DType* grad_offset) {
const int num_spatial_axes = kernel_shape.ndim();
index_t num_kernels = col_shape[1] * col_shape[2] * 2 *
kernel_shape[0] * kernel_shape[1] * deformable_group;
index_t channel_per_group = col_shape[0] / deformable_group;
// num_axes should be smaller than block size
CHECK_LT(num_spatial_axes, mshadow::cuda::kBaseThreadNum);
using namespace mxnet_op;
switch (num_spatial_axes) {
case 2:
      // Each thread computes one offset-gradient element by accumulating over
      // all column entries influenced by that offset, so no atomic operations
      // are needed here.
// NOLINT_NEXT_LINE(whitespace/operators)
deformable_col2im_coord_gpu_kernel<DType>
<<<cuda_get_num_blocks(num_kernels), mshadow::cuda::kBaseThreadNum,
0, mshadow::Stream<gpu>::GetStream(s)>>>(num_kernels, data_col, data_im, data_offset,
im_shape[1], im_shape[2], im_shape[3],
kernel_shape[0], kernel_shape[1],
pad[0], pad[1], stride[0], stride[1],
dilation[0], dilation[1],
channel_per_group,
col_shape[1], col_shape[2], grad_offset);
MSHADOW_CUDA_POST_KERNEL_CHECK(deformable_col2im_coord_gpu_kernel);
break;
default:
LOG(FATAL) << "col2im_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
}
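// Illustrative sketch (not part of MXNet): how the two col2im wrappers above
// are typically combined in the backward pass of a deformable convolution for
// one sample. All pointers and shapes are assumed to be prepared by the caller
// (and in_grad_im zero-initialized when not accumulating, since
// deformable_col2im accumulates with atomicAdd); the function name is made up
// for this example.
template <typename DType>
inline void deformable_conv_backward_input_sketch(
    mshadow::Stream<gpu>* s,
    const DType* out_grad_col,   // gradient in column-buffer layout
    const DType* data_im,        // input image of this sample
    const DType* data_offset,    // sampling offsets of this sample
    const mxnet::TShape& im_shape, const mxnet::TShape& col_shape,
    const mxnet::TShape& kernel_shape, const mxnet::TShape& pad,
    const mxnet::TShape& stride, const mxnet::TShape& dilation,
    const index_t deformable_group,
    DType* in_grad_offset,       // gradient w.r.t. the offsets (written)
    DType* in_grad_im) {         // gradient w.r.t. the input image (accumulated)
  // Gradient w.r.t. the sampling offsets: one output element per offset value.
  deformable_col2im_coord(s, out_grad_col, data_im, data_offset, im_shape,
                          col_shape, kernel_shape, pad, stride, dilation,
                          deformable_group, in_grad_offset);
  // Gradient w.r.t. the input image: scattered back bilinearly via atomicAdd.
  deformable_col2im(s, out_grad_col, data_offset, im_shape, col_shape,
                    kernel_shape, pad, stride, dilation, deformable_group,
                    in_grad_im);
}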
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_CONTRIB_NN_DEFORMABLE_IM2COL_CUH_
#define _VOLATILE_
typedef long long ll_t;
typedef unsigned long long ull_t;
typedef struct __builtin_align__(32) {
float s0, s1, s2, s3, s4, s5, s6, s7;
} _float8;
typedef union {
_float8 f8;
float val[8];
} float8;
__device__ void madd(
float a,
float b,
float &c
) {
c = fmaf(a, b, c);
}
__device__ void nmadd(
float a,
float b,
float &c
) {
c = fmaf(-a, b, c);
}
__device__ void squared_l2(
float a,
float b,
float &c
){
float dif = a - b;
c = fmaf(dif, dif, c);
}
__device__ void negative_squared_l2(
float a,
float b,
float &c
){
float dif = a - b;
c = fmaf(-dif, dif, c);
}
__device__ void l1(
float a,
float b,
float &c
){
c += fabsf(a - b);
}
__device__ void negative_l1(
float a,
float b,
float &c
){
c -= fabsf(a - b);
}
__device__ void thread_matmul_v4(
_VOLATILE_ float aSM[8][128+4],
_VOLATILE_ float bSM[8][128+4],
float8 cCache[8],
int vx, int vy
) {
float aCache1[8];
float aCache2[8];
#pragma unroll
for (int mi=0; mi<8; mi++){
aCache1[mi] = aSM[0][8*vy + mi];
}
#pragma unroll
for (int ki=0; ki<8; ki++){
int is_odd = ki & 1;
if (is_odd == 0){
if (likely(ki < 7)){
#pragma unroll
for (int mi=0; mi<8; mi++){
aCache2[mi] = aSM[ki+1][8*vy + mi];
}
}
#pragma unroll
for (int ni=0; ni<8; ni++){
float b = bSM[ki][vx/4 + 8*vx + ni];
#pragma unroll
for (int mi=0; mi<8; mi++){
float a = aCache1[mi];
cCache[mi].val[ni] = fmaf(a, b, cCache[mi].val[ni]);
}
}
} else {
if (likely(ki < 7)){
#pragma unroll
for (int mi=0; mi<8; mi++){
aCache1[mi] = aSM[ki+1][8*vy + mi];
}
}
#pragma unroll
for (int ni=0; ni<8; ni++){
float b = bSM[ki][vx/4 + 8*vx + ni];
#pragma unroll
for (int mi=0; mi<8; mi++){
float a = aCache2[mi];
cCache[mi].val[ni] = fmaf(a, b, cCache[mi].val[ni]);
}
}
}
}
}
__device__ void thread_matmul_16_v3(
_VOLATILE_ float aSM[16][128+4],
_VOLATILE_ float bSM[16][128+4],
float8 cCache[8],
int vx, int vy
) {
float aCache[8];
#pragma unroll
for (int ki=0; ki<16; ki++){
#pragma unroll
for (int mi=0; mi<8; mi++){
aCache[mi] = aSM[ki][8*vy + mi];
}
#pragma unroll
for (int ni=0; ni<8; ni++){
float b = bSM[ki][vx/4 + 8*vx + ni];
#pragma unroll
for (int mi=0; mi<8; mi++){
float a = aCache[mi];
__DISTANCE_FN__(a, b, cCache[mi].val[ni]);
}
}
}
}
__device__ void thread_matmul_v3(
_VOLATILE_ float aSM[8][128+4],
_VOLATILE_ float bSM[8][128+4],
float8 cCache[8],
int vx, int vy
) {
float aCache[8];
#pragma unroll
for (int ki=0; ki<8; ki++){
#pragma unroll
for (int mi=0; mi<8; mi++){
aCache[mi] = aSM[ki][8*vy + mi];
}
#pragma unroll
for (int ni=0; ni<8; ni++){
float b = bSM[ki][vx/4 + 8*vx + ni];
#pragma unroll
for (int mi=0; mi<8; mi++){
float a = aCache[mi];
__DISTANCE_FN__(a, b, cCache[mi].val[ni]);
}
}
}
}
__device__ void init_cCache(
float8 cCache[8]
) {
#pragma unroll
for (int i=0; i<8; i++){
#pragma unroll
for (int j=0; j<8; j++){
cCache[i].val[j] = 0.f;
}
}
}
// Unsafe: assumes the 8-float vectorized store never crosses the N boundary;
// the bounds-checked variant is kept in the commented block inside the loop.
__device__ void write_c(
float8 cCache[8],
float* C,
int gStartx, int gStarty,
int vx, int vy, int bid,
int M, int N
) {
#pragma unroll
for (int i=0; i<8; i++){
int iM = gStarty + vy*8 + i;
if (likely(iM < M)){
int iN_start = gStartx + vx*8;
reinterpret_cast<float8*>(C + (bid)*M*N + (iM)*N + (iN_start))[0] = cCache[i];
/*
if (likely(iN_start + 7 < N)){
reinterpret_cast<float8*>(C + (bid)*M*N + (iM)*N + (iN_start))[0] = cCache[i];
} else {
#pragma unroll
for (int j=0; j<8; j++){
int iN = iN_start + j;
if (iN < N){
C[(bid)*M*N + (iM)*N + (iN)] = cCache[i].val[j];
}
}
}
*/
}
}
}
__device__ void write_c_v3(
float8 cCache[8],
float* C,
int gStartx, int gStarty,
int vx, int vy, int bid,
int M, int N
) {
__shared__ volatile float cSM[16][128];
#pragma unroll
for (int mi=0; mi<8; mi++){
int iM = gStarty + vy*8 + mi;
// Store 1 row from cCache to cSM
if (iM < M){
#pragma unroll
for (int ni=0; ni<8; ni++){
cSM[vy][vx*8 + ni] = cCache[mi].val[ni];
}
// Store to C
#pragma unroll
for (int ni=0; ni<8; ni++){
int iN = gStartx + 16*ni + vx;
if (iN < N){
float cVal = cSM[vy][16*ni + vx];
store(C+(bid)*M*N + (iM)*N + (iN), cVal);
}
}
}
}
}
__device__ void load_ab_nn(
const float* A,
const float* B,
float aBuffer1[4],
float aBuffer2[4],
float bBuffer1[4],
float bBuffer2[4],
int bid, int gStartx, int gStarty, int gStartk,
int M, int N, int K
){
int tid = threadIdx.x;
int wx = tid % 32;
int wy = tid / 32;
int dx = tid % 8;
int dy = tid / 8;
int iKA = gStartk + dx;
int iKB = gStartk + wy;
#pragma unroll
for (int i=0; i<4; i++){
int iM = gStarty + dy + i*32;
int iN = gStartx + wx + i*32;
if (likely(iM < M)){
if (likely(iKA < K)){
aBuffer1[i] = load(A + (bid)*M*K + (iM)*K + (iKA));
} else {
aBuffer1[i] = 0.f;
}
if (likely(iKA+8 < K)){
aBuffer2[i] = load(A + (bid)*M*K + (iM)*K + (iKA+8));
} else {
aBuffer2[i] = 0.f;
}
}
if (likely(iN < N)){
if (likely(iKB < K)){
bBuffer1[i] = load(B + (bid)*N*K + (iKB)*N + (iN));
} else {
bBuffer1[i] = 0.f;
}
if (likely(iKB+8 < K)){
bBuffer2[i] = load(B + (bid)*N*K + (iKB+8)*N + (iN));
} else {
bBuffer2[i] = 0.f;
}
}
}
}
__device__ void load_ab_tt(
const float* A,
const float* B,
float aBuffer1[4],
float aBuffer2[4],
float bBuffer1[4],
float bBuffer2[4],
int bid, int gStartx, int gStarty, int gStartk,
int M, int N, int K
){
int tid = threadIdx.x;
int wx = tid % 32;
int wy = tid / 32;
int dx = tid % 8;
int dy = tid / 8;
int iKA = gStartk + wy;
int iKB = gStartk + dx;
#pragma unroll
for (int i=0; i<4; i++){
int iM = gStarty + wx + i*32;
int iN = gStartx + dy + i*32;
if (likely(iM < M)){
if (likely(iKA < K)){
aBuffer1[i] = load(A + (bid)*M*K + (iKA)*M + (iM));
} else {
aBuffer1[i] = 0.f;
}
if (likely(iKA+8 < K)){
aBuffer2[i] = load(A + (bid)*M*K + (iKA+8)*M + (iM));
} else {
aBuffer2[i] = 0.f;
}
}
if (likely(iN < N)){
if (likely(iKB < K)){
bBuffer1[i] = load(B + (bid)*N*K + (iN)*K + (iKB));
} else {
bBuffer1[i] = 0.f;
}
if (likely(iKB+8 < K)){
bBuffer2[i] = load(B + (bid)*N*K + (iN)*K + (iKB+8));
} else {
bBuffer2[i] = 0.f;
}
}
}
}
__device__ void load_ab_nt(
const float* A,
const float* B,
float aBuffer1[4],
float aBuffer2[4],
float bBuffer1[4],
float bBuffer2[4],
int bid, int gStartx, int gStarty, int gStartk,
int M, int N, int K
){
int tid = threadIdx.x;
int wx = tid % 32;
int wy = tid / 32;
int dx = tid % 8;
int dy = tid / 8;
int iKA = gStartk + dx;
int iKB = gStartk + dx;
#pragma unroll
for (int i=0; i<4; i++){
int iM = gStarty + dy + i*32;
int iN = gStartx + dy + i*32;
if (likely(iM < M)){
if (likely(iKA < K)){
aBuffer1[i] = load(A + (bid)*M*K + (iM)*K + (iKA));
} else {
aBuffer1[i] = 0.f;
}
if (likely(iKA+8 < K)){
aBuffer2[i] = load(A + (bid)*M*K + (iM)*K + (iKA+8));
} else {
aBuffer2[i] = 0.f;
}
}
if (likely(iN < N)){
if (likely(iKB < K)){
bBuffer1[i] = load(B + (bid)*N*K + (iN)*K + (iKB));
} else {
bBuffer1[i] = 0.f;
}
if (likely(iKB+8 < K)){
bBuffer2[i] = load(B + (bid)*N*K + (iN)*K + (iKB+8));
} else {
bBuffer2[i] = 0.f;
}
}
}
}
__device__ void load_ab_tn(
const float* A,
const float* B,
float aBuffer1[4],
float aBuffer2[4],
float bBuffer1[4],
float bBuffer2[4],
int bid, int gStartx, int gStarty, int gStartk,
int M, int N, int K
){
int tid = threadIdx.x;
int wx = tid % 32;
int wy = tid / 32;
int dx = tid % 8;
int dy = tid / 8;
int iKA = gStartk + wy;
int iKB = gStartk + wy;
#pragma unroll
for (int i=0; i<4; i++){
int iM = gStarty + wx + i*32;
int iN = gStartx + wx + i*32;
if (likely(iM < M)){
if (likely(iKA < K)){
aBuffer1[i] = load(A + (bid)*M*K + (iKA)*M + (iM));
} else {
aBuffer1[i] = 0.f;
}
if (likely(iKA+8 < K)){
aBuffer2[i] = load(A + (bid)*M*K + (iKA+8)*M + (iM));
} else {
aBuffer2[i] = 0.f;
}
}
if (likely(iN < N)){
if (likely(iKB < K)){
bBuffer1[i] = load(B + (bid)*N*K + (iKB)*N + (iN));
} else {
bBuffer1[i] = 0.f;
}
if (likely(iKB+8 < K)){
bBuffer2[i] = load(B + (bid)*N*K + (iKB+8)*N + (iN));
} else {
bBuffer2[i] = 0.f;
}
}
}
}
__device__ void buffer2smem_nn(
_VOLATILE_ float aSM1[8][128+4],
_VOLATILE_ float aSM2[8][128+4],
_VOLATILE_ float bSM1[8][128+4],
_VOLATILE_ float bSM2[8][128+4],
float aBuffer1[4],
float aBuffer2[4],
float bBuffer1[4],
float bBuffer2[4]
){
int tid = threadIdx.x;
int wx = tid % 32;
int wy = tid / 32;
int dx = tid % 8;
int dy = tid / 8;
#pragma unroll
for (int i=0; i<4; i++){
// Store buffered tiles into shared memory
aSM1[dx][dy+i*32] = aBuffer1[i];
bSM1[wy][wx+i*32+i] = bBuffer1[i];
aSM2[dx][dy+i*32] = aBuffer2[i];
bSM2[wy][wx+i*32+i] = bBuffer2[i];
}
}
__device__ void buffer2smem_tt(
_VOLATILE_ float aSM1[8][128+4],
_VOLATILE_ float aSM2[8][128+4],
_VOLATILE_ float bSM1[8][128+4],
_VOLATILE_ float bSM2[8][128+4],
float aBuffer1[4],
float aBuffer2[4],
float bBuffer1[4],
float bBuffer2[4]
){
int tid = threadIdx.x;
int wx = tid % 32;
int wy = tid / 32;
int dx = tid % 8;
int dy = tid / 8;
#pragma unroll
for (int i=0; i<4; i++){
// Store buffered tiles into shared memory
aSM1[wy][wx+i*32] = aBuffer1[i];
aSM2[wy][wx+i*32] = aBuffer2[i];
bSM1[dx][dy+i*32+i] = bBuffer1[i];
bSM2[dx][dy+i*32+i] = bBuffer2[i];
}
}
__device__ void buffer2smem_nt(
_VOLATILE_ float aSM1[8][128+4],
_VOLATILE_ float aSM2[8][128+4],
_VOLATILE_ float bSM1[8][128+4],
_VOLATILE_ float bSM2[8][128+4],
float aBuffer1[4],
float aBuffer2[4],
float bBuffer1[4],
float bBuffer2[4]
){
int tid = threadIdx.x;
int wx = tid % 32;
int wy = tid / 32;
int dx = tid % 8;
int dy = tid / 8;
#pragma unroll
for (int i=0; i<4; i++){
// Store buffered tiles into shared memory
aSM1[dx][dy+i*32] = aBuffer1[i];
aSM2[dx][dy+i*32] = aBuffer2[i];
bSM1[dx][dy+i*32+i] = bBuffer1[i];
bSM2[dx][dy+i*32+i] = bBuffer2[i];
}
}
__device__ void buffer2smem_tn(
_VOLATILE_ float aSM1[8][128+4],
_VOLATILE_ float aSM2[8][128+4],
_VOLATILE_ float bSM1[8][128+4],
_VOLATILE_ float bSM2[8][128+4],
float aBuffer1[4],
float aBuffer2[4],
float bBuffer1[4],
float bBuffer2[4]
){
int tid = threadIdx.x;
int wx = tid % 32;
int wy = tid / 32;
int dx = tid % 8;
int dy = tid / 8;
#pragma unroll
for (int i=0; i<4; i++){
// Store buffered tiles into shared memory
aSM1[wy][wx+i*32] = aBuffer1[i];
aSM2[wy][wx+i*32] = aBuffer2[i];
bSM1[wy][wx+i*32+i] = bBuffer1[i];
bSM2[wy][wx+i*32+i] = bBuffer2[i];
}
}
__device__ void buffer2smem_16_nn(
_VOLATILE_ float aSM[16][128+4],
_VOLATILE_ float bSM[16][128+4],
float aBuffer1[4],
float aBuffer2[4],
float bBuffer1[4],
float bBuffer2[4]
){
int tid = threadIdx.x;
int wx = tid % 32;
int wy = tid / 32;
int dx = tid % 8;
int dy = tid / 8;
#pragma unroll
for (int i=0; i<4; i++){
// Store buffered tiles into shared memory
aSM[dx][dy+i*32] = aBuffer1[i];
aSM[dx+8][dy+i*32] = aBuffer2[i];
bSM[wy][wx+i*32+i] = bBuffer1[i];
bSM[wy+8][wx+i*32+i] = bBuffer2[i];
}
}
__device__ void buffer2smem_16_tt(
_VOLATILE_ float aSM[16][128+4],
_VOLATILE_ float bSM[16][128+4],
float aBuffer1[4],
float aBuffer2[4],
float bBuffer1[4],
float bBuffer2[4]
){
int tid = threadIdx.x;
int wx = tid % 32;
int wy = tid / 32;
int dx = tid % 8;
int dy = tid / 8;
#pragma unroll
for (int i=0; i<4; i++){
// Store buffered tiles into shared memory
aSM[wy][wx+i*32] = aBuffer1[i];
aSM[wy+8][wx+i*32] = aBuffer2[i];
bSM[dx][dy+i*32+i] = bBuffer1[i];
bSM[dx+8][dy+i*32+i] = bBuffer2[i];
}
}
__device__ void buffer2smem_16_nt(
_VOLATILE_ float aSM[16][128+4],
_VOLATILE_ float bSM[16][128+4],
float aBuffer1[4],
float aBuffer2[4],
float bBuffer1[4],
float bBuffer2[4]
){
int tid = threadIdx.x;
int wx = tid % 32;
int wy = tid / 32;
int dx = tid % 8;
int dy = tid / 8;
#pragma unroll
for (int i=0; i<4; i++){
// Store buffered tiles into shared memory
aSM[dx][dy+i*32] = aBuffer1[i];
aSM[dx+8][dy+i*32] = aBuffer2[i];
bSM[dx][dy+i*32+i] = bBuffer1[i];
bSM[dx+8][dy+i*32+i] = bBuffer2[i];
}
}
__device__ void buffer2smem_16_tn(
_VOLATILE_ float aSM[16][128+4],
_VOLATILE_ float bSM[16][128+4],
float aBuffer1[4],
float aBuffer2[4],
float bBuffer1[4],
float bBuffer2[4]
){
int tid = threadIdx.x;
int wx = tid % 32;
int wy = tid / 32;
int dx = tid % 8;
int dy = tid / 8;
#pragma unroll
for (int i=0; i<4; i++){
// Store buffered tiles into shared memory
aSM[wy][wx+i*32] = aBuffer1[i];
aSM[wy+8][wx+i*32] = aBuffer2[i];
bSM[wy][wx+i*32+i] = bBuffer1[i];
bSM[wy+8][wx+i*32+i] = bBuffer2[i];
}
}
#include <cooperative_groups.h>
namespace cg = cooperative_groups;
#include <helper_cuda.h>
#include "FunctionPointers_kernels.h"
// Texture object for reading image
cudaTextureObject_t tex;
extern __shared__ unsigned char LocalBlock[];
static cudaArray *array = NULL;
#define RADIUS 1
// pixel value used for thresholding function,
// works well with sample image 'lena'
#define THRESHOLD 150.0f
#ifdef FIXED_BLOCKWIDTH
#define BlockWidth 80
#define SharedPitch 384
#endif
// A function pointer can be declared explicitly like this line:
//__device__ unsigned char (*pointFunction)(unsigned char, float ) = NULL;
// or by using typedef's like below:
typedef unsigned char (*blockFunction_t)(unsigned char, unsigned char,
unsigned char, unsigned char,
unsigned char, unsigned char,
unsigned char, unsigned char,
unsigned char, float);
typedef unsigned char (*pointFunction_t)(unsigned char, float);
__device__ blockFunction_t blockFunction;
__device__ unsigned char ComputeSobel(unsigned char ul, // upper left
unsigned char um, // upper middle
unsigned char ur, // upper right
unsigned char ml, // middle left
unsigned char mm, // middle (unused)
unsigned char mr, // middle right
unsigned char ll, // lower left
unsigned char lm, // lower middle
unsigned char lr, // lower right
float fScale) {
short Horz = ur + 2 * mr + lr - ul - 2 * ml - ll;
short Vert = ul + 2 * um + ur - ll - 2 * lm - lr;
short Sum = (short)(fScale * (abs((int)Horz) + abs((int)Vert)));
return (unsigned char)((Sum < 0) ? 0 : ((Sum > 255) ? 255 : Sum));
}
// define a function pointer and initialize to NULL
__device__ unsigned char (*varFunction)(unsigned char, unsigned char,
unsigned char, unsigned char,
unsigned char, unsigned char,
unsigned char, unsigned char,
unsigned char, float x) = NULL;
__device__ unsigned char ComputeBox(unsigned char ul, // upper left
unsigned char um, // upper middle
unsigned char ur, // upper right
unsigned char ml, // middle left
unsigned char mm, // middle...middle
unsigned char mr, // middle right
unsigned char ll, // lower left
unsigned char lm, // lower middle
unsigned char lr, // lower right
float fscale) {
short Sum = (short)(ul + um + ur + ml + mm + mr + ll + lm + lr) / 9;
Sum *= fscale;
return (unsigned char)((Sum < 0) ? 0 : ((Sum > 255) ? 255 : Sum));
}
__device__ unsigned char Threshold(unsigned char in, float thresh) {
if (in > thresh) {
return 0xFF;
} else {
return 0;
}
}
// Declare function tables, one for the point function chosen, one for the
// block function chosen. The number of entries is determined by the
// enum in FunctionPointers_kernels.h
__device__ blockFunction_t blockFunction_table[LAST_BLOCK_FILTER];
__device__ pointFunction_t pointFunction_table[LAST_POINT_FILTER];
// Declare device side function pointers. We retrieve them later with
// cudaMemcpyFromSymbol to set our function tables above in some
// particular order specified at runtime.
__device__ blockFunction_t pComputeSobel = ComputeSobel;
__device__ blockFunction_t pComputeBox = ComputeBox;
__device__ pointFunction_t pComputeThreshold = Threshold;
// Allocate host side tables to mirror the device side, and later, we
// fill these tables with the function pointers. This lets us send
// the pointers to the kernel on invocation, as a method of choosing
// which function to run.
blockFunction_t h_blockFunction_table[2];
pointFunction_t h_pointFunction_table[2];
// Perform a filter operation on the data, using shared memory
// The actual operation performed is
// determined by the function pointer "blockFunction" and selected
// by the integer argument "blockOperation" and has access
// to an apron around the current pixel being processed.
// Following the block operation, a per-pixel operation,
// pointed to by pPointFunction is performed before the final
// pixel is produced.
__global__ void SobelShared(uchar4 *pSobelOriginal, unsigned short SobelPitch,
#ifndef FIXED_BLOCKWIDTH
short BlockWidth, short SharedPitch,
#endif
short w, short h, float fScale, int blockOperation,
pointFunction_t pPointFunction,
cudaTextureObject_t tex) {
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
short u = 4 * blockIdx.x * BlockWidth;
short v = blockIdx.y * blockDim.y + threadIdx.y;
short ib;
int SharedIdx = threadIdx.y * SharedPitch;
for (ib = threadIdx.x; ib < BlockWidth + 2 * RADIUS; ib += blockDim.x) {
LocalBlock[SharedIdx + 4 * ib + 0] = tex2D<unsigned char>(
tex, (float)(u + 4 * ib - RADIUS + 0), (float)(v - RADIUS));
LocalBlock[SharedIdx + 4 * ib + 1] = tex2D<unsigned char>(
tex, (float)(u + 4 * ib - RADIUS + 1), (float)(v - RADIUS));
LocalBlock[SharedIdx + 4 * ib + 2] = tex2D<unsigned char>(
tex, (float)(u + 4 * ib - RADIUS + 2), (float)(v - RADIUS));
LocalBlock[SharedIdx + 4 * ib + 3] = tex2D<unsigned char>(
tex, (float)(u + 4 * ib - RADIUS + 3), (float)(v - RADIUS));
}
if (threadIdx.y < RADIUS * 2) {
//
// copy trailing RADIUS*2 rows of pixels into shared
//
SharedIdx = (blockDim.y + threadIdx.y) * SharedPitch;
for (ib = threadIdx.x; ib < BlockWidth + 2 * RADIUS; ib += blockDim.x) {
LocalBlock[SharedIdx + 4 * ib + 0] =
tex2D<unsigned char>(tex, (float)(u + 4 * ib - RADIUS + 0),
(float)(v + blockDim.y - RADIUS));
LocalBlock[SharedIdx + 4 * ib + 1] =
tex2D<unsigned char>(tex, (float)(u + 4 * ib - RADIUS + 1),
(float)(v + blockDim.y - RADIUS));
LocalBlock[SharedIdx + 4 * ib + 2] =
tex2D<unsigned char>(tex, (float)(u + 4 * ib - RADIUS + 2),
(float)(v + blockDim.y - RADIUS));
LocalBlock[SharedIdx + 4 * ib + 3] =
tex2D<unsigned char>(tex, (float)(u + 4 * ib - RADIUS + 3),
(float)(v + blockDim.y - RADIUS));
}
}
cg::sync(cta);
u >>= 2; // index as uchar4 from here
uchar4 *pSobel = (uchar4 *)(((char *)pSobelOriginal) + v * SobelPitch);
SharedIdx = threadIdx.y * SharedPitch;
blockFunction = blockFunction_table[blockOperation];
for (ib = threadIdx.x; ib < BlockWidth; ib += blockDim.x) {
uchar4 out;
unsigned char pix00 = LocalBlock[SharedIdx + 4 * ib + 0 * SharedPitch + 0];
unsigned char pix01 = LocalBlock[SharedIdx + 4 * ib + 0 * SharedPitch + 1];
unsigned char pix02 = LocalBlock[SharedIdx + 4 * ib + 0 * SharedPitch + 2];
unsigned char pix10 = LocalBlock[SharedIdx + 4 * ib + 1 * SharedPitch + 0];
unsigned char pix11 = LocalBlock[SharedIdx + 4 * ib + 1 * SharedPitch + 1];
unsigned char pix12 = LocalBlock[SharedIdx + 4 * ib + 1 * SharedPitch + 2];
unsigned char pix20 = LocalBlock[SharedIdx + 4 * ib + 2 * SharedPitch + 0];
unsigned char pix21 = LocalBlock[SharedIdx + 4 * ib + 2 * SharedPitch + 1];
unsigned char pix22 = LocalBlock[SharedIdx + 4 * ib + 2 * SharedPitch + 2];
out.x = (*blockFunction)(pix00, pix01, pix02, pix10, pix11, pix12, pix20,
pix21, pix22, fScale);
pix00 = LocalBlock[SharedIdx + 4 * ib + 0 * SharedPitch + 3];
pix10 = LocalBlock[SharedIdx + 4 * ib + 1 * SharedPitch + 3];
pix20 = LocalBlock[SharedIdx + 4 * ib + 2 * SharedPitch + 3];
out.y = (*blockFunction)(pix01, pix02, pix00, pix11, pix12, pix10, pix21,
pix22, pix20, fScale);
pix01 = LocalBlock[SharedIdx + 4 * ib + 0 * SharedPitch + 4];
pix11 = LocalBlock[SharedIdx + 4 * ib + 1 * SharedPitch + 4];
pix21 = LocalBlock[SharedIdx + 4 * ib + 2 * SharedPitch + 4];
out.z = (*blockFunction)(pix02, pix00, pix01, pix12, pix10, pix11, pix22,
pix20, pix21, fScale);
pix02 = LocalBlock[SharedIdx + 4 * ib + 0 * SharedPitch + 5];
pix12 = LocalBlock[SharedIdx + 4 * ib + 1 * SharedPitch + 5];
pix22 = LocalBlock[SharedIdx + 4 * ib + 2 * SharedPitch + 5];
out.w = (*blockFunction)(pix00, pix01, pix02, pix10, pix11, pix12, pix20,
pix21, pix22, fScale);
if (pPointFunction != NULL) {
out.x = (*pPointFunction)(out.x, THRESHOLD);
out.y = (*pPointFunction)(out.y, THRESHOLD);
out.z = (*pPointFunction)(out.z, THRESHOLD);
out.w = (*pPointFunction)(out.w, THRESHOLD);
}
if (u + ib < w / 4 && v < h) {
pSobel[u + ib] = out;
}
}
cg::sync(cta);
}
__global__ void SobelCopyImage(Pixel *pSobelOriginal, unsigned int Pitch, int w,
int h, float fscale, cudaTextureObject_t tex) {
unsigned char *pSobel =
(unsigned char *)(((char *)pSobelOriginal) + blockIdx.x * Pitch);
for (int i = threadIdx.x; i < w; i += blockDim.x) {
pSobel[i] = min(
max((tex2D<unsigned char>(tex, (float)i, (float)blockIdx.x) * fscale),
0.f),
255.f);
}
}
// Perform block and pointer filtering using texture lookups.
// The block and point operations are determined by the
// input argument (see comment above for "SobelShared" function)
__global__ void SobelTex(Pixel *pSobelOriginal, unsigned int Pitch, int w,
int h, float fScale, int blockOperation,
pointFunction_t pPointOperation,
cudaTextureObject_t tex) {
unsigned char *pSobel =
(unsigned char *)(((char *)pSobelOriginal) + blockIdx.x * Pitch);
unsigned char tmp = 0;
for (int i = threadIdx.x; i < w; i += blockDim.x) {
unsigned char pix00 =
tex2D<unsigned char>(tex, (float)i - 1, (float)blockIdx.x - 1);
unsigned char pix01 =
tex2D<unsigned char>(tex, (float)i + 0, (float)blockIdx.x - 1);
unsigned char pix02 =
tex2D<unsigned char>(tex, (float)i + 1, (float)blockIdx.x - 1);
unsigned char pix10 =
tex2D<unsigned char>(tex, (float)i - 1, (float)blockIdx.x + 0);
unsigned char pix11 =
tex2D<unsigned char>(tex, (float)i + 0, (float)blockIdx.x + 0);
unsigned char pix12 =
tex2D<unsigned char>(tex, (float)i + 1, (float)blockIdx.x + 0);
unsigned char pix20 =
tex2D<unsigned char>(tex, (float)i - 1, (float)blockIdx.x + 1);
unsigned char pix21 =
tex2D<unsigned char>(tex, (float)i + 0, (float)blockIdx.x + 1);
unsigned char pix22 =
tex2D<unsigned char>(tex, (float)i + 1, (float)blockIdx.x + 1);
tmp = (*(blockFunction_table[blockOperation]))(
pix00, pix01, pix02, pix10, pix11, pix12, pix20, pix21, pix22, fScale);
if (pPointOperation != NULL) {
      tmp = (*pPointOperation)(tmp, THRESHOLD);
}
pSobel[i] = tmp;
}
}
extern "C" void setupTexture(int iw, int ih, Pixel *data, int Bpp) {
cudaChannelFormatDesc desc;
if (Bpp == 1) {
desc = cudaCreateChannelDesc<unsigned char>();
} else {
desc = cudaCreateChannelDesc<uchar4>();
}
checkCudaErrors(cudaMallocArray(&array, &desc, iw, ih));
checkCudaErrors(cudaMemcpy2DToArray(
array, 0, 0, data, iw * Bpp * sizeof(Pixel), iw * Bpp * sizeof(Pixel), ih,
cudaMemcpyHostToDevice));
cudaResourceDesc texRes;
memset(&texRes, 0, sizeof(cudaResourceDesc));
texRes.resType = cudaResourceTypeArray;
texRes.res.array.array = array;
cudaTextureDesc texDescr;
memset(&texDescr, 0, sizeof(cudaTextureDesc));
checkCudaErrors(cudaCreateTextureObject(&tex, &texRes, &texDescr, NULL));
}
extern "C" void deleteTexture(void) {
checkCudaErrors(cudaFreeArray(array));
checkCudaErrors(cudaDestroyTextureObject(tex));
}
// Copy the pointers from the function tables to the host side
void setupFunctionTables() {
// Dynamically assign the function table.
// Copy the function pointers to their appropriate locations according to the
// enum
checkCudaErrors(cudaMemcpyFromSymbol(&h_blockFunction_table[SOBEL_FILTER],
pComputeSobel, sizeof(blockFunction_t)));
checkCudaErrors(cudaMemcpyFromSymbol(&h_blockFunction_table[BOX_FILTER],
pComputeBox, sizeof(blockFunction_t)));
// do the same for the point function, where the 2nd function is NULL ("no-op"
// filter, skipped in kernel code)
checkCudaErrors(cudaMemcpyFromSymbol(&h_pointFunction_table[THRESHOLD_FILTER],
pComputeThreshold,
sizeof(pointFunction_t)));
h_pointFunction_table[NULL_FILTER] = NULL;
// now copy the function tables back to the device, so if we wish we can use
// an index into the table to choose them
// We have now set the order in the function table according to our enum.
checkCudaErrors(
cudaMemcpyToSymbol(blockFunction_table, h_blockFunction_table,
sizeof(blockFunction_t) * LAST_BLOCK_FILTER));
checkCudaErrors(
cudaMemcpyToSymbol(pointFunction_table, h_pointFunction_table,
sizeof(pointFunction_t) * LAST_POINT_FILTER));
}
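// Illustrative sketch (not part of the original sample): the symbol copies above only
// work because the device side declares __device__ function-pointer variables that are
// initialized on the device. A minimal, self-contained version of that pattern is shown
// below with hypothetical names (demoOp_t, demoAdd, d_pDemoAdd, d_demoTable); it assumes
// the CUDA runtime headers already pulled in by this file.
typedef int (*demoOp_t)(int, int);
__device__ int demoAdd(int a, int b) { return a + b; }
// device-visible pointer symbol whose value the host reads with cudaMemcpyFromSymbol
__device__ demoOp_t d_pDemoAdd = demoAdd;
// device-resident dispatch table, indexed by an enum inside kernels
__device__ demoOp_t d_demoTable[1];
static void setupDemoTable() {
  demoOp_t h_table[1];
  // fetch the device-side pointer value into host memory ...
  checkCudaErrors(cudaMemcpyFromSymbol(&h_table[0], d_pDemoAdd, sizeof(demoOp_t)));
  // ... and publish it into the device table for indexed dispatch in kernels
  checkCudaErrors(cudaMemcpyToSymbol(d_demoTable, h_table, sizeof(demoOp_t)));
}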
// Wrapper for the __global__ call that sets up the texture and threads
// Two methods for selecting the image-processing function to run are shown below.
// BlockOperation is an integer kernel argument used as an index into the
// blockFunction_table on the device side
// pPointOp is itself a function pointer passed as a kernel argument, retrieved
// from a host side copy of the function table
extern "C" void sobelFilter(Pixel *odata, int iw, int ih,
enum SobelDisplayMode mode, float fScale,
int blockOperation, int pointOperation) {
pointFunction_t pPointOp = h_pointFunction_table[pointOperation];
switch (mode) {
case SOBELDISPLAY_IMAGE:
SobelCopyImage<<<ih, 384>>>(odata, iw, iw, ih, fScale, tex);
break;
case SOBELDISPLAY_SOBELTEX:
SobelTex<<<ih, 384>>>(odata, iw, iw, ih, fScale, blockOperation, pPointOp,
tex);
break;
case SOBELDISPLAY_SOBELSHARED: {
dim3 threads(16, 4);
#ifndef FIXED_BLOCKWIDTH
int BlockWidth = 80; // must be divisible by 16 for coalescing
#endif
dim3 blocks = dim3(iw / (4 * BlockWidth) + (0 != iw % (4 * BlockWidth)),
ih / threads.y + (0 != ih % threads.y));
int SharedPitch = ~0x3f & (4 * (BlockWidth + 2 * RADIUS) + 0x3f);
int sharedMem = SharedPitch * (threads.y + 2 * RADIUS);
// for the shared kernel, width must be divisible by 4
iw &= ~3;
SobelShared<<<blocks, threads, sharedMem>>>(
(uchar4 *)odata, iw,
#ifndef FIXED_BLOCKWIDTH
BlockWidth, SharedPitch,
#endif
iw, ih, fScale, blockOperation, pPointOp, tex);
} break;
}
}
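// Usage sketch (an assumption, not taken from the original sample): a host caller would
// typically run setupFunctionTables() and setupTexture() once, then pick the kernel path
// and filter operations through the enums, e.g.
//   sobelFilter(d_out, width, height, SOBELDISPLAY_SOBELTEX, 1.0f,
//               SOBEL_FILTER, THRESHOLD_FILTER);
// blockOperation indexes blockFunction_table on the device, while pointOperation selects
// the function pointer from the host-side copy h_pointFunction_table before launch.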
namespace anakin{
namespace saber{
//const float bbox_clip_default = std::log(1000.0 / 16.0);
template<typename Dtype>
__global__ void ker_nchw_to_nhwc(Dtype * out_data,
const int n,
const int c,
const int hw,
const int row_block_num_per_im,
const Dtype* in_data)
{
__shared__ float tile[TILE_DIM][TILE_DIM];
int im_id = blockIdx.y / row_block_num_per_im;
int block_id_y = blockIdx.y % row_block_num_per_im;
int x_index = blockIdx.x * TILE_DIM + threadIdx.x;
int y_index = block_id_y * TILE_DIM + threadIdx.y;
int index_in = im_id * c * hw + x_index + y_index * hw;
if (x_index < hw && y_index < c) {
tile[threadIdx.y][threadIdx.x] = in_data[index_in];
}
__syncthreads();
x_index = block_id_y * TILE_DIM + threadIdx.x;
y_index = blockIdx.x * TILE_DIM + threadIdx.y;
int index_out = im_id * hw * c + x_index + y_index * c;
if (x_index < c && y_index < hw) {
out_data[index_out] = tile[threadIdx.x][threadIdx.y];
}
}
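// Note on ker_nchw_to_nhwc above: each block stages a TILE_DIM x TILE_DIM tile of one
// image's (c, hw) matrix in shared memory and writes it back transposed, so both the
// global loads (along hw) and stores (along c) stay coalesced. Depending on the value of
// TILE_DIM (defined elsewhere), padding the tile to tile[TILE_DIM][TILE_DIM + 1] is the
// usual way to avoid shared-memory bank conflicts on the transposed read.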
template<typename Dtype>
void trans(Tensor<NV>* in_tensor, Tensor<NV>* out_tensor, cudaStream_t stream) {
int n = in_tensor->num();
int c = in_tensor->channel();
int hw = in_tensor->height() * in_tensor->width();
auto in_data = (const Dtype*)in_tensor->data();
auto out_data = (Dtype*)out_tensor->mutable_data();
    dim3 block_dim(TILE_DIM, TILE_DIM);
    int row_block_num_per_im = (c + TILE_DIM - 1) / TILE_DIM;
    // grid.y must be n * row_block_num_per_im so blockIdx.y decomposes into (im_id, block_id_y)
    dim3 grid_dim((hw + TILE_DIM - 1) / TILE_DIM, n * row_block_num_per_im);
ker_nchw_to_nhwc<Dtype><<<grid_dim, block_dim, 0, stream>>>(out_data,
n,
c,
hw,
row_block_num_per_im,
in_data);
}
__global__ void index_init(int* out_data, int h, int w) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = idx; i < h * w; i += blockDim.x * gridDim.x) {
int w_id = i % w;
out_data[i] = w_id;
}
}
template <typename Dtype>
void sort_descending(Tensor<NV>* out_value,
Tensor<NV>* out_index,
Tensor<NV>* in_value,
Tensor<NV>* in_index,
const int pre_nms_num,
cudaStream_t stream) {
in_index->reshape(in_value->valid_shape());
out_value->reshape(Shape({in_value->num(), pre_nms_num, 1, 1}, Layout_NCHW));
out_index->reshape(Shape({in_value->num(), pre_nms_num, 1, 1}, Layout_NCHW));
in_index->set_dtype(AK_INT32);
out_index->set_dtype(AK_INT32);
int sort_length = in_value->valid_size() / in_value->num();
index_init<<<CUDA_GET_BLOCKS(in_value->valid_size()), CUDA_NUM_THREADS, 0, stream>>>((int*)in_index->mutable_data(), in_value->num(), sort_length);
Tensor<X86> in_h(in_value->valid_shape());
Tensor<X86> index_h(in_index->valid_shape());
cudaMemcpyAsync(in_h.data(), in_value->data(), sizeof(Dtype) * in_value->valid_size(), cudaMemcpyDeviceToHost, stream);
cudaMemcpyAsync(index_h.data(), in_index->data(), sizeof(int) * in_index->valid_size(), cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
auto in_score = (Dtype*)in_h.mutable_data();
auto out_score = (Dtype*) out_value->mutable_data();
auto in_index_data = (int*)index_h.mutable_data();
auto out_index_data = (int *) out_index->mutable_data();
auto compare = [in_score](const int &i, const int &j) {
return in_score[i] > in_score[j];
};
std::vector<Dtype> sorted_scores;
std::vector<int> sorted_index;
for (int i = 0; i < in_value->num(); i++) {
std::partial_sort(in_index_data, in_index_data + pre_nms_num, in_index_data + sort_length, compare);
for (int j = 0; j < pre_nms_num; j++) {
sorted_scores.push_back(in_score[in_index_data[j]]);
sorted_index.push_back(in_index_data[j]);
}
in_score += sort_length;
in_index_data += sort_length;
}
cudaMemcpyAsync(out_index_data, &sorted_index[0], sizeof(int)*out_index->valid_size(), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(out_score, &sorted_scores[0], sizeof(Dtype)*out_value->valid_size(), cudaMemcpyHostToDevice, stream);
}
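// Note: sort_descending copies scores and indices to the host, keeps the top pre_nms_num
// indices per image with std::partial_sort, and copies the results back, so it has to
// synchronize the stream and serializes the pipeline on the host. A device-side top-k
// (for example the segmented radix sort sketched in the commented-out variant below)
// would avoid the round trip.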
//template<typename Dtype>
//void sort_descending(Tensor<NV>* out_value,
// Tensor<NV>* out_index,
// Tensor<NV>* in_value,
// Tensor<NV>* in_index,
// cudaStream_t stream) {
// in_index->set_dtype(AK_INT32);
// out_index->set_dtype(AK_INT32);
// in_index->reshape(in_value->valid_shape());
// out_value->reshape(in_value->valid_shape());
// out_index->reshape(in_value->valid_shape());
// auto in_data = (Dtype*)in_value->mutable_data();
// auto out_data = (Dtype*) out_value->mutable_data();
// auto in_index_data = (int*)in_index->mutable_data();
// auto out_index_data = (int *) out_index->mutable_data();
// int sort_length = in_value->valid_size()/in_value->num();
// int count = in_value->valid_size();
// index_init<<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream>>>(in_index_data, in_value->num(), sort_length);
// cudaMemcpyAsync(out_data, in_data, sizeof(Dtype) * in_value->valid_size(), cudaMemcpyDeviceToDevice, stream);
// cudaStreamSynchronize(stream);
//
// size_t temp_storage_bytes = 0;
// void* temp_storage = NULL;
// cub::DoubleBuffer<Dtype> d_keys(in_data, out_data);
// cub::DoubleBuffer<int> d_values(in_index_data, out_index_data);
// cub::DeviceRadixSort::SortPairsDescending<Dtype, int>(
// temp_storage, temp_storage_bytes, d_keys, d_values, sort_length);
// cudaMalloc((void**)&temp_storage, temp_storage_bytes);
// for (int i = 0; i < in_value->num(); i++) {
// cub::DoubleBuffer<Dtype> d_keys(in_data, out_data);
// cub::DoubleBuffer<int> d_values(in_index_data, out_index_data);
// size_t temp_storage_bytes = 0;
// cub::DeviceRadixSort::SortPairsDescending<Dtype, int>(
// temp_storage, temp_storage_bytes, d_keys, d_values, sort_length);
// // thrust::device_vector <Dtype> D(sort_length);
// // thrust::device_vector <int > Index(sort_length);
// // thrust::sequence(Index.begin(), Index.end ());
// // thrust::stable_sort_by_key<Dtype, int>(D.begin(), D.end(), Index.begin, thrust::greater<Dtype>());
//
// //thrust::stable_sort_by_key<Dtype, int>(out_data, out_data + sort_length, out_index_data, thrust::greater<Dtype>());
// in_data += sort_length;
// out_data += sort_length;
// in_index_data += sort_length;
// out_index_data += sort_length;
// }
//}
template <typename T>
__device__ T Min(T a, T b) { return a > b ? b : a; }
template <typename T>
__device__ T Max(T a, T b) { return a > b ? a : b; }
template <typename Dtype>
__global__ void ker_box_decode_and_clip(Dtype* proposals_data,
const Dtype* anchors_data,
const Dtype* deltas_data,
const Dtype* var_data,
const int* index_data,
const Dtype* im_info_data,
const float bbox_clip_default,
const int img_num,
const int index_length,
const int anchor_num,
const int count) {
CUDA_KERNEL_LOOP(tid, count) {
int im_id = tid / index_length;
int anchor_id = index_data[tid];
auto cur_anchor = anchors_data + anchor_id * 4;
auto cur_delta = deltas_data + anchor_id * 4 + im_id * anchor_num * 4;
auto cur_proposal = proposals_data + tid * 5;
auto cur_im_info = im_info_data + im_id * 3;
Dtype axmin = cur_anchor[0];
Dtype aymin = cur_anchor[1];
Dtype axmax = cur_anchor[2];
Dtype aymax = cur_anchor[3];
auto w = axmax - axmin + 1.0;
auto h = aymax - aymin + 1.0;
auto cx = axmin + 0.5 * w;
auto cy = aymin + 0.5 * h;
auto dxmin = cur_delta[0];
auto dymin = cur_delta[1];
auto dxmax = cur_delta[2];
auto dymax = cur_delta[3];
Dtype d_cx, d_cy, d_w, d_h;
if (var_data) {
auto cur_var = var_data + anchor_id * 4;
d_cx = cx + dxmin * w * cur_var[0];
d_cy = cy + dymin * h * cur_var[1];
d_w = exp(Min(dxmax * cur_var[2], bbox_clip_default)) * w;
d_h = exp(Min(dymax * cur_var[3], bbox_clip_default)) * h;
} else {
d_cx = cx + dxmin * w;
d_cy = cy + dymin * h;
d_w = exp(Min(dxmax, bbox_clip_default)) * w;
d_h = exp(Min(dymax, bbox_clip_default)) * h;
}
auto oxmin = d_cx - d_w * 0.5;
auto oymin = d_cy - d_h * 0.5;
auto oxmax = d_cx + d_w * 0.5 - 1.;
auto oymax = d_cy + d_h * 0.5 - 1.;
cur_proposal[0] = im_id;
cur_proposal[1] = Max(Min(oxmin, cur_im_info[1] - 1.), 0.);
cur_proposal[2] = Max(Min(oymin, cur_im_info[0] - 1.), 0.);
cur_proposal[3] = Max(Min(oxmax, cur_im_info[1] - 1.), 0.);
cur_proposal[4] = Max(Min(oymax, cur_im_info[0] - 1.), 0.);
}
}
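// Decode performed by the kernel above, per selected anchor (cx, cy, w, h are the anchor
// center and size, d* the predicted deltas, v0..v3 the optional variances, taken as 1
// when var_data is null):
//   cx' = cx + dxmin * w * v0      w' = exp(min(dxmax * v2, bbox_clip_default)) * w
//   cy' = cy + dymin * h * v1      h' = exp(min(dymax * v3, bbox_clip_default)) * h
// The box [cx' - w'/2, cy' - h'/2, cx' + w'/2 - 1, cy' + h'/2 - 1] is clipped to the
// image extent from im_info, and the image index is stored in slot 0 of the proposal.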
template<typename Dtype>
void box_decode_and_clip(Tensor<NV>* proposals,
const Tensor<NV>* anchors,
const Tensor<NV>* deltas,
const Tensor<NV>* variances,
const Tensor<NV>* index,
const Tensor<NV>* im_info,
cudaStream_t stream) {
int img_num = index->num();
int anchor_num = anchors->valid_size() / 4;
auto anchors_data = (const Dtype*)anchors->data();
auto deltas_data = (const Dtype*) deltas->data();
auto var_data = (const Dtype*) variances->data();
auto index_data = (const int*) index->data();
auto im_info_data = (const Dtype*) im_info->data();
int index_valid_size = index->valid_size();
int index_length = index->channel();
proposals->reshape(Shape({img_num * index_length, 5, 1, 1}));
auto proposals_data = (Dtype*) proposals->mutable_data();
const float bbox_clip_default = std::log(1000.0 / 16.0);
ker_box_decode_and_clip<Dtype><<<CUDA_GET_BLOCKS(index_valid_size), CUDA_NUM_THREADS, 0, stream>>>(
proposals_data, anchors_data, deltas_data, var_data, index_data,
im_info_data, bbox_clip_default, img_num, index_length, anchor_num, index->valid_size());
}
template<typename Dtype>
__global__ void ker_filter_bboxes(
int *keep,
int *keep_num,
const Dtype* bboxes,
const Dtype* im_info,
const Dtype min_size,
const int img_num,
const int pre_nms_num) {
int im_id = blockIdx.x;
Dtype im_h = im_info[0];
Dtype im_w = im_info[1];
Dtype im_scale = im_info[2];
int cnt = 0;
__shared__ int keep_index[CUDA_NUM_THREADS];
for (int tid = threadIdx.x; tid < pre_nms_num; tid += blockDim.x) {
keep_index[threadIdx.x] = -1;
__syncthreads();
auto bboxes_tmp = bboxes + (tid + blockIdx.x * pre_nms_num) * 5;
Dtype xmin = bboxes_tmp[1];
Dtype ymin = bboxes_tmp[2];
Dtype xmax = bboxes_tmp[3];
Dtype ymax = bboxes_tmp[4];
Dtype w = xmax - xmin + 1.0;
Dtype h = ymax - ymin + 1.0;
Dtype cx = xmin + w / 2.;
Dtype cy = ymin + h / 2.;
Dtype w_s = (xmax - xmin) / im_scale + 1.;
Dtype h_s = (ymax - ymin) / im_scale + 1.;
if (w_s >= min_size && h_s >= min_size && cx <= im_w && cy <= im_h) {
keep_index[threadIdx.x] = tid;
}
__syncthreads();
if (threadIdx.x == 0) {
int size = (pre_nms_num - tid) < CUDA_NUM_THREADS ? pre_nms_num - tid : CUDA_NUM_THREADS;
for (int j = 0; j < size; ++j) {
if (keep_index[j] > -1) {
keep[im_id * pre_nms_num + cnt++] = keep_index[j];
}
}
}
__syncthreads();
}
if (threadIdx.x == 0) {
keep_num[im_id] = cnt;
}
}
template<typename Dtype>
void filter_bboxes(Tensor<NV>* keep_num,
Tensor<NV>* keep,
Tensor<NV>* proposals,
Tensor<NV>* im_info,
const Dtype min_size,
const int img_num,
const int pre_nms_num,
cudaStream_t stream) {
keep_num->reshape(Shape({img_num, 1, 1, 1}, Layout_NCHW));
keep->reshape(Shape({img_num, pre_nms_num, 1, 1}, Layout_NCHW));
keep->set_dtype(AK_INT32);
keep_num->set_dtype(AK_INT32);
auto proposals_data = (const Dtype*)proposals->data();
auto im_info_data = (const Dtype*)im_info->data();
auto keep_num_data = (int*)keep_num->data();
auto keep_data = (int*)keep->data();
Dtype min_size_final = std::max(min_size, 1.0f);
ker_filter_bboxes<Dtype><<<img_num, CUDA_NUM_THREADS, 0, stream>>>(
keep_data,
keep_num_data,
proposals_data,
im_info_data,
min_size_final,
img_num,
pre_nms_num);
}
template <typename Dtype>
__device__ inline Dtype IoU(const Dtype *a, const Dtype *b) {
Dtype left = max(a[0], b[0]), right = min(a[2], b[2]);
Dtype top = max(a[1], b[1]), bottom = min(a[3], b[3]);
Dtype width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
Dtype inter_s = width * height;
Dtype s_a = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
Dtype s_b = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return inter_s / (s_a + s_b - inter_s);
}
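// Worked example for the inclusive-pixel (+1) IoU convention above: with a = (0,0,9,9)
// and b = (5,5,14,14) each box covers 10 x 10 = 100 pixels, the overlap is 5 x 5 = 25,
// so IoU = 25 / (100 + 100 - 25) ~= 0.143.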
__global__ void NMSKernel(uint64_t *dev_mask,
const int n_boxes,
const int* keep_index,
const float nms_overlap_thresh,
const int col_blocks,
const float *dev_boxes) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
const int row_size =
min(n_boxes - row_start * NMS_THREADS_PER_BLOCK, NMS_THREADS_PER_BLOCK);
const int col_size =
min(n_boxes - col_start * NMS_THREADS_PER_BLOCK, NMS_THREADS_PER_BLOCK);
__shared__ float block_boxes[NMS_THREADS_PER_BLOCK * 4];
if (threadIdx.x < col_size) {
int box_id = keep_index[NMS_THREADS_PER_BLOCK * col_start + threadIdx.x];
block_boxes[threadIdx.x * 4 + 0] = dev_boxes[box_id * 5 + 1];
block_boxes[threadIdx.x * 4 + 1] = dev_boxes[box_id * 5 + 2];
block_boxes[threadIdx.x * 4 + 2] = dev_boxes[box_id * 5 + 3];
block_boxes[threadIdx.x * 4 + 3] = dev_boxes[box_id * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = NMS_THREADS_PER_BLOCK * row_start + threadIdx.x;
const float *cur_box = dev_boxes + keep_index[cur_box_idx] * 5 + 1;
int i = 0;
uint64_t t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (IoU(cur_box, block_boxes + i * 4) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
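// Mask layout produced by NMSKernel: boxes are grouped into blocks of
// NMS_THREADS_PER_BLOCK (at most 64, one bit per box in a 64-bit word), and
// dev_mask[i * col_blocks + j] has bit k set when box i overlaps a later box
// (j * NMS_THREADS_PER_BLOCK + k) above nms_overlap_thresh. The host loop in NMS()
// below then runs the greedy suppression pass over these words, keeping a box only if
// no previously kept box has marked it.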
template <typename Dtype>
void NMS(Tensor<NV> *keep_out,
const Tensor<NV> *proposals,
const int boxes_num,
const int* keep_index,
const Dtype nms_threshold,
const int post_nms_top_n,
cudaStream_t stream) {
const int col_blocks = DIVUP(boxes_num, NMS_THREADS_PER_BLOCK);
dim3 blocks(DIVUP(boxes_num, NMS_THREADS_PER_BLOCK),
DIVUP(boxes_num, NMS_THREADS_PER_BLOCK));
dim3 threads(NMS_THREADS_PER_BLOCK);
keep_out->set_dtype(AK_INT32);
Tensor<NV> mask(Shape({boxes_num, col_blocks, 1, 1}, Layout_NCHW), AK_UINT64);
auto boxes_data = (const Dtype*)proposals->data();
auto mask_data = (uint64_t*) mask.mutable_data();
NMSKernel<<<blocks, threads, 0, stream>>>(mask_data,
boxes_num, keep_index, nms_threshold, col_blocks, boxes_data);
Tensor<X86> mask_h(Shape({boxes_num, col_blocks, 1, 1}, Layout_NCHW), AK_UINT64);
auto mask_data_h = (uint64_t*) mask_h.mutable_data();
cudaMemcpyAsync(mask_data_h, mask_data, sizeof(uint64_t) * mask.valid_size(), cudaMemcpyDeviceToHost, stream);
std::vector<int> keep_index_h(boxes_num);
cudaMemcpyAsync(keep_index_h.data(), keep_index, sizeof(int)* boxes_num, cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
std::vector<uint64_t> remv(col_blocks);
memset(&remv[0], 0, sizeof(uint64_t) * col_blocks);
std::vector<int> keep_vec;
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / NMS_THREADS_PER_BLOCK;
int inblock = i % NMS_THREADS_PER_BLOCK;
if (num_to_keep >= post_nms_top_n) {
break;
}
if (!(remv[nblock] & (1ULL << inblock))) {
++num_to_keep;
keep_vec.push_back(keep_index_h[i]);
uint64_t *p = mask_data_h + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
keep_out->reshape(Shape({num_to_keep, 1, 1, 1}, Layout_NCHW));
cudaMemcpyAsync(keep_out->mutable_data(), &keep_vec[0], sizeof(int)*num_to_keep, cudaMemcpyHostToDevice, stream);
}
template <typename Dtype>
__global__ void ker_gather(Dtype* boxes_out,
const Dtype* proposals,
const int box_num,
const int box_dim,
const int* keep_index) {
CUDA_KERNEL_LOOP(tid, box_num * box_dim) {
int box_id = tid / box_dim;
int dim_id = tid % box_dim;
boxes_out[tid] = proposals[keep_index[box_id] * box_dim + dim_id];
}
}
template <typename Dtype>
void gather_box(Tensor<NV> *boxes_out,
const Tensor<NV>*proposals,
const int* index,
const int num,
cudaStream_t stream) {
const Dtype* proposals_data = (const Dtype*) proposals->data();
boxes_out->reshape(std::vector<int>{num, 5, 1, 1});
Dtype* boxes_out_data = (Dtype*) boxes_out->mutable_data();
ker_gather<Dtype><<<CUDA_GET_BLOCKS(boxes_out->valid_size()), CUDA_NUM_THREADS, 0, stream>>>(boxes_out_data, proposals_data, num, 5, index);
}
template <typename Dtype>
void gather_score(Tensor<NV> *scores_out,
const Tensor<NV>*scores,
const int* index,
const int num,
cudaStream_t stream) {
const Dtype* scores_data = (const Dtype*) scores->data();
scores_out->reshape(Shape({num, 1, 1, 1}, Layout_NCHW));
Dtype* scores_out_data = (Dtype*) scores_out->mutable_data();
ker_gather<Dtype><<<CUDA_GET_BLOCKS(scores_out->valid_size()), CUDA_NUM_THREADS, 0, stream>>>(scores_out_data, scores_data, num, 1, index);
}
template <DataType OpDtype>
SaberStatus SaberGenerateProposals<NV, OpDtype>::dispatch(
const std::vector<Tensor<NV>*>& inputs,
std::vector<Tensor<NV>*>& outputs,
GenerateProposalsParam<NV>& param) {
cudaStream_t cuda_stream = this->_ctx->get_compute_stream();
auto anchors = *inputs[0];
auto bbox_deltas = *inputs[1];
auto im_info = *inputs[2];
auto scores = *inputs[3];
auto variances = *inputs[4];
auto rpn_rois = outputs[0];
auto rpn_roi_probs = outputs[1];
int pre_nms_top_n = param.pre_nms_top_n;
int post_nms_top_n = param.post_nms_top_n;
float nms_threshold = param.nms_thresh;
float min_size = param.min_size;
float eta = param.eta;
    CHECK_EQ(eta, 1.0f) << "eta != 1.0 is not supported yet";
Shape scores_shape = scores.valid_shape();
Shape scores_swap_shape({scores_shape[0], scores_shape[2], scores_shape[3] , scores_shape[1]}, Layout_NCHW);
Shape bbox_deltas_shape = bbox_deltas.valid_shape();
Shape bbox_deltas_swap_shape({bbox_deltas_shape[0], bbox_deltas_shape[2],
bbox_deltas_shape[3] , bbox_deltas_shape[1]}, Layout_NCHW);
_scores_swap.reshape(scores_swap_shape);
_bbox_deltas_swap.reshape(bbox_deltas_swap_shape);
/*swap and sort*/
trans<OpDataType>(&scores, &_scores_swap, cuda_stream);
trans<OpDataType>(&bbox_deltas, &_bbox_deltas_swap, cuda_stream);
cudaStreamSynchronize(cuda_stream);
int bbox_num = bbox_deltas.valid_size() / 4;
rpn_rois->reshape(std::vector<int>{post_nms_top_n, 5, 1, 1});
rpn_roi_probs->reshape(std::vector<int>{post_nms_top_n, 1, 1, 1});
int pre_nms_num = (_scores_swap.valid_size() <= 0 || _scores_swap.valid_size() > pre_nms_top_n) ? pre_nms_top_n : _scores_swap.valid_size();
int img_num = _scores_swap.num();
sort_descending<OpDataType>(&_sorted_scores, &_sorted_index, &_scores_swap, &_scores_index, pre_nms_num, cuda_stream);
// 2. box decode and clipping
box_decode_and_clip<OpDataType>(&_proposals,
&anchors, &_bbox_deltas_swap,
&variances,
&_sorted_index,
&im_info,
cuda_stream);
// 3. filter bbox
filter_bboxes<OpDataType>(&_keep_num, &_keep, &_proposals, &im_info,
min_size, img_num, pre_nms_num,
cuda_stream);
// 4. NMS
std::vector<int> keep_num_vec;
keep_num_vec.resize(img_num);
cudaMemcpyAsync(&keep_num_vec[0], _keep_num.data(), sizeof(int)*img_num, cudaMemcpyDeviceToHost, cuda_stream);
int total_boxes = 0;
std::vector<int> seq_offset;
seq_offset.push_back(0);
for (int i = 0; i < img_num; i++) {
Shape score_slice_shape = _sorted_scores.valid_shape();
Shape proposals_slice_shape = _proposals.valid_shape();
proposals_slice_shape[0] = pre_nms_num;
score_slice_shape[0] = 1;
Tensor<NV> sorted_scores_slice((void*)((OpDataType*)_sorted_scores.mutable_data() + i * _sorted_scores.get_stride()[0]), NV(), this->_ctx->get_device_id(), score_slice_shape);
Tensor<NV> proposals_slice((void*)((OpDataType*)_proposals.mutable_data() + i * pre_nms_num * _proposals.get_stride()[0]), NV(), this->_ctx->get_device_id(), proposals_slice_shape);
auto keep_data = (const int*)_keep.data() + i * pre_nms_num;
auto keep_num = keep_num_vec[i];
        if (nms_threshold <= 0) {
            gather_box<OpDataType>(&_boxes_out, &proposals_slice, keep_data, keep_num, cuda_stream);
            gather_score<OpDataType>(&_scores_out, &sorted_scores_slice, keep_data, keep_num, cuda_stream);
        } else {
NMS<OpDataType>(&_keep_nms, &proposals_slice, keep_num, keep_data, nms_threshold, post_nms_top_n, cuda_stream);
auto keep_nms_data = (const int*)_keep_nms.data();
auto keep_nms_num = _keep_nms.valid_size();
gather_box<OpDataType>(&_boxes_out, &proposals_slice, keep_nms_data, keep_nms_num, cuda_stream);
gather_score<OpDataType>(&_scores_out, &sorted_scores_slice, keep_nms_data, keep_nms_num, cuda_stream);
}
cudaMemcpyAsync((OpDataType*)rpn_rois->mutable_data() + total_boxes * 5,
(const OpDataType*)_boxes_out.data(),
sizeof(OpDataType) * _boxes_out.valid_size(),
cudaMemcpyDefault,
cuda_stream);
cudaMemcpyAsync((OpDataType*)rpn_roi_probs->mutable_data() + total_boxes,
(const OpDataType*)_scores_out.data(),
sizeof(OpDataType) * _scores_out.valid_size(),
cudaMemcpyDefault,
cuda_stream);
        total_boxes += _boxes_out.valid_size() / 5; // boxes appended this iteration (keep_num or the NMS output size)
seq_offset.push_back(total_boxes);
}
rpn_rois->reshape(std::vector<int>{total_boxes, 5, 1, 1});
rpn_roi_probs->reshape(std::vector<int>{total_boxes, 1, 1, 1});
rpn_rois->set_seq_offset({seq_offset});
CUDA_POST_KERNEL_CHECK;
return SaberSuccess;
}
template class SaberGenerateProposals<NV, AK_FLOAT>;
DEFINE_OP_TEMPLATE(SaberGenerateProposals, GenerateProposalsParam, NV, AK_INT8);
DEFINE_OP_TEMPLATE(SaberGenerateProposals, GenerateProposalsParam, NV, AK_HALF);
}
}
#pragma once
#include <ptx_primitives.cuh>
template<int num_uints, int cols, int rows>
struct IBitMask;
template<typename T, int cols, int rows>
struct RowRepeater
{
static constexpr T Repeater = (RowRepeater<T, cols, rows - 1>::Repeater << cols) | 1;
};
template<typename T, int cols>
struct RowRepeater<T, cols, 1>
{
static constexpr T Repeater = 1;
};
template<typename T, int cols, int rows, int offset, int initoffset = 0>
struct RowRepeaterOffset
{
static constexpr T Repeater = RowRepeaterOffset<T, cols, (rows - offset > 0 ? rows - offset : 0), offset, cols*offset + initoffset>::Repeater | (static_cast<T>(1u) << static_cast<T>(initoffset));
};
template<typename T, int cols, int offset, int initoffset >
struct RowRepeaterOffset<T, cols, 0, offset, initoffset>
{
static constexpr T Repeater = 0;
};
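// Worked example of the compile-time helpers above (T = unsigned, cols = 4, rows = 4):
//   RowRepeater<unsigned, 4, 4>::Repeater          == 0x1111  (bit 0 of every row)
//   RowRepeaterOffset<unsigned, 4, 4, 2>::Repeater == 0x0101  (bit 0 of every 2nd row)
// so SecondRowMask = 0x0101 * 0xF == 0x0F0F covers all bits of rows 0 and 2, while
// Rights / Lefts == 0x5555 / 0xAAAA pick the even / odd columns used by quadMask() to
// collapse each 2x2 quad to a single bit.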
template<int cols, int rows>
struct IBitMask<1, cols, rows>
{
static constexpr int Cols = cols;
static constexpr int Rows = rows;
unsigned int mask;
IBitMask() = default;
__device__
IBitMask(unsigned int mask)
: mask(mask)
{
}
__device__
void setOn()
{
mask = 0xFFFFFFFFU;
}
__device__
void unmarkBits(int from, int to)
{
int num = to - from;
unsigned int tmask = (0xFFFFFFFFU >> (32 - num)) << from;
mask = mask & ~tmask;
}
__device__
void unmarkRow(int row, unsigned int rowmask)
{
mask = mask & (~(rowmask << (cols*row)));
}
__device__
void unmark(const IBitMask& other)
{
mask = mask & (~other.mask);
}
__device__
void unmarkStride(int begin, int end)
{
mask = mask & (~(((0x1U << (end - begin)) - 1) << begin));
}
__device__
void andStride(int begin, int end)
{
mask = mask & (((0x1U << (end - begin)) - 1) << begin);
}
__device__
void repeatRow(int begin, int end)
{
unsigned int row = ((0x1U << (end - begin)) - 1) << begin;
mask = RowRepeater<unsigned int, cols, rows>::Repeater * row;
}
__device__
bool isset(int col, int row) const
{
return 1U & (mask >> (cols*row + col));
}
__device__
int count() const
{
return __popc(mask);
}
__device__
int2 getBitCoordsWarp(int setBitId) const
{
return bitToCoord(getSetBitWarp(setBitId));
}
__device__
int getSetBitWarp(int setBitId) const
{
// find the thread that has the right number of bits set
return __ffs(__ballot_sync(~0U, __popc(mask & lanemask_lt()) == setBitId)) - 1;
}
__device__
int2 getBitCoords(int setBitId) const
{
return bitToCoord(getSetBit(setBitId));
}
__device__
int getSetBit(int setBitId) const
{
// find the nth set bit
int invset = __popc(mask) - setBitId - 1;
unsigned int p = 16;
#pragma unroll
for (unsigned int offset = p / 2; offset > 0; offset /= 2)
p = (__popc(mask >> p) <= invset) ? (p - offset) : (p + offset);
p = (__popc(mask >> p) == invset) ? (p - 1) : p;
return p;
}
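// Note on getSetBit above: it locates the setBitId-th set bit (counting from the LSB)
// by bisecting on popcounts of shifted copies of the mask. invset is the number of set
// bits that must remain strictly above the answer; the final comparison nudges p down by
// one when the bisection overshoots by a single position.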
__device__
static int2 bitToCoord(int bit)
{
return make_int2(bit % cols, bit / cols);
}
__device__
IBitMask shfl(int i, int Mod = WARP_SIZE) const
{
IBitMask other;
other.mask = __shfl_sync(~0U, mask, i, Mod);
return other;
}
__device__ bool overlap(IBitMask other) const
{
return (mask & other.mask) != 0U;
}
__device__
static IBitMask Empty()
{
IBitMask e;
e.mask = 0x00000000U;
return e;
}
static constexpr unsigned int SecondRowMask = RowRepeaterOffset<unsigned int, cols, rows, 2, 0>::Repeater * ((1u << cols) - 1);
static constexpr unsigned int Rights = RowRepeater<unsigned int, 2, cols*rows / 2>::Repeater;
static constexpr unsigned int Lefts = Rights << 1u;
//__device__
//int countQuads() const
//{
// //merge every second row
// unsigned int rowmerged = (mask & SecondRowMask) | ((mask >> cols) & SecondRowMask);
// ////mask out the right ones and shift to the left
// //unsigned int expandedLeft = (rowmerged & Lefts) << 1u;
// ////mask out the left ones and shift to the right
// //unsigned int expandedRight = (rowmerged & Rights) >> 1u;
// //unsigned int quadsDouble = expandedLeft | rowmerged | expandedRight;
// //return __popc(quadsDouble) / 2;
// unsigned int quadMask = (rowmerged & Rights) | ((rowmerged & Lefts) >> 1u)
//}
__device__
IBitMask quadMask() const
{
unsigned int rowmerged = (mask & SecondRowMask) | ((mask >> cols) & SecondRowMask);
unsigned int quadMask = (rowmerged & Rights) | ((rowmerged & Lefts) >> 1u);
IBitMask res;
res.mask = quadMask;
return res;
}
__device__
IBitMask operator ^=(const IBitMask& b)
{
mask ^= b.mask;
return *this;
}
__device__
IBitMask operator &=(const IBitMask& b)
{
mask &= b.mask;
return *this;
}
__device__
IBitMask operator |=(const IBitMask& b)
{
mask |= b.mask;
return *this;
}
__device__
IBitMask friend operator ~(const IBitMask& b)
{
return { ~b.mask };
}
__device__
IBitMask friend operator ^(IBitMask a, const IBitMask& b)
{
return a ^= b;
}
__device__
IBitMask friend operator &(IBitMask a, const IBitMask& b)
{
return a &= b;
}
__device__
IBitMask friend operator |(IBitMask a, const IBitMask& b)
{
return a |= b;
}
};
template<int cols, int rows>
struct IBitMask<2, cols, rows>
{
static constexpr int Cols = cols;
static constexpr int Rows = rows;
unsigned long long mask;
IBitMask() = default;
__device__
IBitMask(unsigned long long mask)
: mask(mask)
{
}
__device__
void setOn()
{
mask = 0xFFFFFFFFFFFFFFFFULL;
}
__device__
void unmarkBits(int from, int to)
{
int num = to - from;
unsigned long long int tmask = (0xFFFFFFFFFFFFFFFFULL >> (64 - num)) << from;
mask = mask & (~tmask);
}
__device__
void unmarkRow(int row, unsigned long long int rowmask)
{
mask = mask & (~(rowmask << (cols*row)));
}
__device__
void unmark(const IBitMask& other)
{
mask = mask & (~other.mask);
}
__device__
void unmarkStride(int begin, int end)
{
mask = mask & (~(((0x1ULL << (end - begin)) - 1) << begin));
}
__device__
void andStride(int begin, int end)
{
mask = mask & (((0x1ULL << (end - begin)) - 1) << begin);
}
__device__
void repeatRow(int begin, int end)
{
unsigned int row = ((0x1U << (end - begin)) - 1) << begin;
mask = RowRepeater<unsigned long long int, cols, rows>::Repeater * row;
}
__device__
bool isset(int col, int row) const
{
return 1ULL & (mask >> (cols*row + col));
}
__device__
int count() const
{
return __popcll(mask);
}
__device__
int2 getBitCoordsWarp(int setBitId) const
{
return bitToCoord(getSetBitWarp(setBitId));
}
__device__
int getSetBitWarp(int setBitId) const
{
// find the thread that has the right number of bits set
unsigned int lower = mask & 0xFFFFFFFFU;
unsigned int lowernum = __popc(lower);
bool is_in_high = lowernum <= setBitId;
unsigned int checkmask = is_in_high ? (mask >> 32U) : lower;
setBitId = is_in_high ? setBitId - lowernum : setBitId;
int fieldoffset = is_in_high ? 32 : 0;
int offset = fieldoffset + __ffs(__ballot_sync(~0U, __popc(checkmask & lanemask_le()) == setBitId + 1)) - 1;
return offset;
}
__device__
int2 getBitCoords(int setBitId) const
{
return bitToCoord(getSetBit(setBitId));
}
__device__
int getSetBit(int setBitId) const
{
// find the nth set bit
int invset = __popcll(mask) - setBitId - 1;
unsigned int p = 32;
#pragma unroll
for (unsigned int offset = p / 2; offset > 0; offset /= 2)
p = (__popcll(mask >> p) <= invset) ? (p - offset) : (p + offset);
p = (__popcll(mask >> p) == invset) ? (p - 1) : p;
return p;
}
__device__
static int2 bitToCoord(int bit)
{
return make_int2(bit % cols, bit / cols);
}
__device__
IBitMask shfl(int i, int Mod = WARP_SIZE) const
{
IBitMask other;
other.mask = (static_cast<unsigned long long int>(__shfl_sync(~0U, static_cast<unsigned int>(mask >> 32), i, Mod)) << 32U) |
static_cast<unsigned long long int>(__shfl_sync(~0U, static_cast<unsigned int>(mask & 0xFFFFFFFFULL), i, Mod));
return other;
}
__device__
bool overlap(IBitMask other) const
{
return (mask & other.mask) != 0ULL;
}
__device__
static IBitMask Empty()
{
IBitMask e;
e.mask = 0x0000000000000000ULL;
return e;
}
static constexpr unsigned long long int SecondRowMask = RowRepeaterOffset<unsigned long long int, cols, rows, 2, 0>::Repeater * ((1u << cols) - 1);
static constexpr unsigned long long int Rights = RowRepeater<unsigned long long int, 2, cols*rows / 2>::Repeater;
static constexpr unsigned long long int Lefts = Rights << 1u;
__device__
IBitMask quadMask() const
{
unsigned long long int rowmerged = (mask & SecondRowMask) | ((mask >> cols) & SecondRowMask);
unsigned long long int quadMask = (rowmerged & Rights) | ((rowmerged & Lefts) >> 1u);
IBitMask res;
res.mask = quadMask;
return res;
}
__device__
IBitMask operator ^=(const IBitMask& b)
{
mask ^= b.mask;
return *this;
}
__device__
IBitMask operator &=(const IBitMask& b)
{
mask &= b.mask;
return *this;
}
__device__
IBitMask operator |=(const IBitMask& b)
{
mask |= b.mask;
return *this;
}
__device__
IBitMask friend operator ~(const IBitMask& b)
{
return { ~b.mask };
}
__device__
IBitMask friend operator ^(IBitMask a, const IBitMask& b)
{
return a ^= b;
}
__device__
IBitMask friend operator &(IBitMask a, const IBitMask& b)
{
return a &= b;
}
__device__
IBitMask friend operator |(IBitMask a, const IBitMask& b)
{
return a |= b;
}
};
template <class BinTileSpace>
class TileBitMask : public IBitMask<static_divup<BinTileSpace::TilesPerBinX*BinTileSpace::TilesPerBinY, 32>::value, BinTileSpace::TilesPerBinX, BinTileSpace::TilesPerBinY>
{
public:
TileBitMask() = default;
__device__ TileBitMask(const IBitMask<static_divup<BinTileSpace::TilesPerBinX*BinTileSpace::TilesPerBinY, 32>::value, BinTileSpace::TilesPerBinX, BinTileSpace::TilesPerBinY> & other)
{
mask = other.mask;
}
__device__
bool operator == (const TileBitMask & other)
{
return mask == other.mask;
}
};
template <class BinTileSpace>
class StampBitMask : public IBitMask<static_divup<BinTileSpace::StampsPerTileX*BinTileSpace::StampsPerTileY, 32>::value, BinTileSpace::StampsPerTileX, BinTileSpace::StampsPerTileY>
{
public:
StampBitMask() = default;
__device__ StampBitMask(const IBitMask<static_divup<BinTileSpace::StampsPerTileX*BinTileSpace::StampsPerTileY, 32>::value, BinTileSpace::StampsPerTileX, BinTileSpace::StampsPerTileY> & other)
{
mask = other.mask;
}
__device__
bool operator == (const StampBitMask & other)
{
return mask == other.mask;
}
};
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <stdio.h>
#include <vector>
#include <math.h>
#include <float.h>
#include "rroi_align_kernel.h"
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
// CUDA: grid stride looping
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// /*
// RRoI code
template <typename scalar_t>
__global__ void RROIAlignForward(
const int nthreads,
const scalar_t* bottom_data,
const float spatial_scale,
const int height,
const int width,
const int channels,
const int pooled_height,
const int pooled_width,
const scalar_t* bottom_rois,
scalar_t* top_data,
scalar_t* con_idx_x,
scalar_t* con_idx_y)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
// +0.5 shift removed
int imageWidth = width;
int imageHeight = height;
// (n, c, ph, pw) is an element in the pooled output
int n = index;
int pw = n % pooled_width;
n /= pooled_width;
int ph = n % pooled_height;
n /= pooled_height;
int c = n % channels;
n /= channels;
    const scalar_t* offset_bottom_rois = bottom_rois + n * 6; // RoI info: (batch_ind, cx, cy, h, w, angle)
int roi_batch_ind = offset_bottom_rois[0];
scalar_t cx = offset_bottom_rois[1];
scalar_t cy = offset_bottom_rois[2];
scalar_t h = offset_bottom_rois[3];
scalar_t w = offset_bottom_rois[4];
scalar_t angle = offset_bottom_rois[5]/180.0*3.1415926535;
//TransformPrepare
    scalar_t roi_pooled_width = pooled_height * w / h; // pooled width adapted to the RoI aspect ratio
scalar_t dx = -roi_pooled_width/2.0;
scalar_t dy = -pooled_height/2.0;
scalar_t Sx = w*spatial_scale/roi_pooled_width;
scalar_t Sy = h*spatial_scale/pooled_height;
scalar_t Alpha = cos(angle);
scalar_t Beta = sin(angle);
scalar_t Dx = cx*spatial_scale;
scalar_t Dy = cy*spatial_scale;
    scalar_t M[2][3]; // affine transform (rotation + scale + translation)
M[0][0] = Alpha*Sx;
M[0][1] = Beta*Sy;
M[0][2] = Alpha*Sx*dx+Beta*Sy*dy+Dx;
M[1][0] = -Beta*Sx;
M[1][1] = Alpha*Sy;
M[1][2] = -Beta*Sx*dx+Alpha*Sy*dy+Dy;
    scalar_t P[8]; // 8 values: (x, y) coordinates of the bin's 4 corners in the original feature map
P[0] = M[0][0]*pw+M[0][1]*ph+M[0][2];
P[1] = M[1][0]*pw+M[1][1]*ph+M[1][2];
P[2] = M[0][0]*pw+M[0][1]*(ph+1)+M[0][2];
P[3] = M[1][0]*pw+M[1][1]*(ph+1)+M[1][2];
P[4] = M[0][0]*(pw+1)+M[0][1]*ph+M[0][2];
P[5] = M[1][0]*(pw+1)+M[1][1]*ph+M[1][2];
P[6] = M[0][0]*(pw+1)+M[0][1]*(ph+1)+M[0][2];
P[7] = M[1][0]*(pw+1)+M[1][1]*(ph+1)+M[1][2];
    // compute the bin center in the rotated RoI and evaluate f(x, y) by bilinear interpolation
scalar_t leftMost = (max(round(min(min(P[0],P[2]),min(P[4],P[6]))),0.0));
scalar_t rightMost= (min(round(max(max(P[0],P[2]),max(P[4],P[6]))),imageWidth-1.0));
scalar_t topMost= (max(round(min(min(P[1],P[3]),min(P[5],P[7]))),0.0));
scalar_t bottomMost= (min(round(max(max(P[1],P[3]),max(P[5],P[7]))),imageHeight-1.0));
const scalar_t* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
    scalar_t bin_cx = (leftMost + rightMost) / 2.0; // center of the bin
scalar_t bin_cy = (topMost + bottomMost) / 2.0;
    const bool in_rroi = pw <= roi_pooled_width; // whether this bin lies inside the rotated RoI
if (in_rroi){
int bin_l = (int)floor(bin_cx);
int bin_r = (int)ceil(bin_cx);
int bin_t = (int)floor(bin_cy);
int bin_b = (int)ceil(bin_cy);
scalar_t lt_value = 0.0;
if (bin_t > 0 && bin_l > 0 && bin_t < height && bin_l < width)
lt_value = offset_bottom_data[bin_t * width + bin_l];
scalar_t rt_value = 0.0;
if (bin_t > 0 && bin_r > 0 && bin_t < height && bin_r < width)
rt_value = offset_bottom_data[bin_t * width + bin_r];
scalar_t lb_value = 0.0;
if (bin_b > 0 && bin_l > 0 && bin_b < height && bin_l < width)
lb_value = offset_bottom_data[bin_b * width + bin_l];
scalar_t rb_value = 0.0;
if (bin_b > 0 && bin_r > 0 && bin_b < height && bin_r < width)
rb_value = offset_bottom_data[bin_b * width + bin_r];
scalar_t rx = bin_cx - floor(bin_cx);
scalar_t ry = bin_cy - floor(bin_cy);
scalar_t wlt = (1.0 - rx) * (1.0 - ry);
scalar_t wrt = rx * (1.0 - ry);
scalar_t wrb = rx * ry;
scalar_t wlb = (1.0 - rx) * ry;
scalar_t inter_val = 0.0;
inter_val += lt_value * wlt;
inter_val += rt_value * wrt;
inter_val += rb_value * wrb;
inter_val += lb_value * wlb;
atomicAdd(top_data + index, static_cast<float>(inter_val));
atomicAdd(con_idx_x + index, static_cast<float>(bin_cx));
atomicAdd(con_idx_y + index, static_cast<float>(bin_cy));
//top_data[index] = static_cast<float>(inter_val);
//con_idx_x[index] = bin_cx;
//con_idx_y[index] = bin_cy;
}
else{
// float inter_val = 0.0;
      // float bin_cx = 0.0; // -2 was only meant as a marker for the backward pass; other values work too
// float bin_cy = 0.0;
      // atomicAdd(top_data + index, static_cast<float>(inter_val)); // several bins might add -2 more than once
// atomicAdd(con_idx_x + index, static_cast<float>(bin_cx));
// atomicAdd(con_idx_y + index, static_cast<float>(bin_cy));
continue;
}
}
}
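// Summary of the forward kernel above: M is a 2x3 affine transform (rotation by "angle",
// per-axis scaling Sx/Sy, and translation to the RoI center) mapping pooled-grid
// coordinates (pw, ph) into feature-map coordinates; P holds the four transformed corners
// of the output bin, the bin center is sampled from the feature map with bilinear
// interpolation, and con_idx_x / con_idx_y record that center for the backward pass.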
// backward pass
template <typename scalar_t>
__global__ void RROIAlignBackward(
const int nthreads,
const scalar_t* top_diff,
const scalar_t* con_idx_x,
const scalar_t* con_idx_y,
const int num_rois,
const float spatial_scale,
const int height,
const int width,
const int channels,
const int pooled_height,
const int pooled_width,
scalar_t* bottom_diff,
const scalar_t* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads){
// (n, c, ph, pw) is an element in the pooled output
// int n = index;
// //int w = n % width;
// n /= pooled_width;
// //int h = n % height;
// n /= pooled_height;
// int c = n % channels;
// n /= channels;
int n = index;
int pw = n % pooled_width;
n /= pooled_width;
// int ph = n % pooled_height;
n /= pooled_height;
int c = n % channels;
n /= channels;
    const scalar_t* offset_bottom_rois = bottom_rois + n * 6; // the n-th RRoI
int roi_batch_ind = offset_bottom_rois[0];
scalar_t h = offset_bottom_rois[3];
scalar_t w = offset_bottom_rois[4];
    scalar_t* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; // base offset into the gradient map
    scalar_t bin_cx = con_idx_x[index]; // bin-center coordinates recorded in the forward pass
scalar_t bin_cy = con_idx_y[index];
// check whether in rroi
    float roi_pooled_width = pooled_height * w / h; // pooled width adapted to the RoI aspect ratio
    const bool not_in_rroi = (pw > roi_pooled_width); // several bins may have accumulated -2 more than once, so that marker cannot be used as the test
    if (not_in_rroi){ // skip this iteration if the bin is outside the RRoI; otherwise proceed as usual
continue;
}
else{
scalar_t rx = bin_cx - floor(bin_cx);
scalar_t ry = bin_cy - floor(bin_cy);
scalar_t wlt = (1.0 - rx) * (1.0 - ry);
scalar_t wrt = rx * (1.0 - ry);
scalar_t wrb = rx * ry;
scalar_t wlb = (1.0 - rx) * ry;
int min_x = (int)floor(bin_cx);
int max_x = (int)ceil(bin_cx);
int min_y = (int)floor(bin_cy);
int max_y = (int)ceil(bin_cy);
scalar_t top_diff_of_bin = top_diff[index];
scalar_t v1 = wlt * top_diff_of_bin;
scalar_t v2 = wrt * top_diff_of_bin;
scalar_t v3 = wrb * top_diff_of_bin;
scalar_t v4 = wlb * top_diff_of_bin;
// Atomic add
if (min_y > 0 && min_x > 0 && min_y < height - 1 && min_x < width - 1)
        atomicAdd(offset_bottom_diff + min_y * width + min_x, static_cast<float>(v1)); // multiple RoIs may update the same location
if (min_y > 0 && max_x < width - 1 && min_y < height - 1 && max_x > 0)
atomicAdd(offset_bottom_diff + min_y * width + max_x, static_cast<float>(v2));
if (max_y < height - 1 && max_x < width - 1 && max_y > 0 && max_x > 0)
atomicAdd(offset_bottom_diff + max_y * width + max_x, static_cast<float>(v3));
if (max_y < height - 1 && min_x > 0 && max_y > 0 && min_x < width - 1)
atomicAdd(offset_bottom_diff + max_y * width + min_x, static_cast<float>(v4));
}
}
}
int RROIAlignForwardLaucher(
const at::Tensor& bottom_data,
const float spatial_scale,
const int num_rois,
const int height,
const int width,
const int channels,
const int pooled_height,
const int pooled_width,
const at::Tensor& bottom_rois,
at::Tensor& top_data,
at::Tensor& con_idx_x,
at::Tensor& con_idx_y,
cudaStream_t stream)
{
const int kThreadsPerBlock = 1024;
const int output_size = num_rois * pooled_height * pooled_width * channels;
AT_DISPATCH_FLOATING_TYPES(bottom_data.scalar_type(), "RROIAlignForwardLaucher", [&]{
RROIAlignForward<scalar_t><<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>(
output_size,
bottom_data.data_ptr<scalar_t>(),
spatial_scale,
height,
width,
channels,
pooled_height,
pooled_width,
bottom_rois.data_ptr<scalar_t>(),
top_data.data_ptr<scalar_t>(),
con_idx_x.data_ptr<scalar_t>(),
con_idx_y.data_ptr<scalar_t>());
});
THCudaCheck(cudaGetLastError());
return 1;
}
// */
int RROIAlignBackwardLaucher(
const at::Tensor& top_diff,
const float spatial_scale,
const int batch_size,
const int num_rois,
const int height,
const int width,
const int channels,
const int pooled_height,
const int pooled_width,
const at::Tensor& bottom_rois,
at::Tensor& bottom_diff,
const at::Tensor& con_idx_x,
const at::Tensor& con_idx_y,
cudaStream_t stream)
{
const int kThreadsPerBlock = 1024;
const int output_size = num_rois * pooled_height * pooled_width * channels;//batch_size * height * width * channels;
  AT_DISPATCH_FLOATING_TYPES(top_diff.scalar_type(), "RROIAlignBackward", [&]{
RROIAlignBackward<scalar_t><<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>(
output_size,
top_diff.data_ptr<scalar_t>(),
con_idx_x.data_ptr<scalar_t>(),
con_idx_y.data_ptr<scalar_t>(),
num_rois,
spatial_scale,
height,
width,
channels,
pooled_height,
pooled_width,
bottom_diff.data_ptr<scalar_t>(),
bottom_rois.data_ptr<scalar_t>());
});
THCudaCheck(cudaGetLastError());
return 1;
}
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <Eigen/Core>
#include <sophus/se3.hpp>
#include <vector>
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
inline __device__ __host__ float lerp(float a, float b, float t)
{
return a + t*(b-a);
}
__device__ __host__ float3 operator+(const float3 &a, const float3 &b)
{
return make_float3(a.x+b.x, a.y+b.y, a.z+b.z);
}
__device__ __host__ float3 operator-(const float3 &a, const float3 &b)
{
return make_float3(a.x-b.x, a.y-b.y, a.z-b.z);
}
template <typename Dtype>
inline __device__ __host__ const Dtype & getValue(const int3 & v, const int3 & dim, const Dtype* sdf_grids)
{
return sdf_grids[v.x * dim.y * dim.z + v.y * dim.z + v.z];
}
template <typename Dtype>
inline __device__ __host__ Dtype getValueInterpolated(const float3 & pGrid, const int3 & dim, const Dtype* sdf_grids)
{
const int x0 = (int)(pGrid.x - 0.5); const float fx = (pGrid.x - 0.5) - x0;
const int y0 = (int)(pGrid.y - 0.5); const float fy = (pGrid.y - 0.5) - y0;
const int z0 = (int)(pGrid.z - 0.5); const float fz = (pGrid.z - 0.5) - z0;
const int x1 = x0 + 1;
const int y1 = y0 + 1;
const int z1 = z0 + 1;
if ( !(x0 >= 0 && x1 < dim.x && y0 >= 0 && y1 < dim.y && z0 >=0 && z1 < dim.z) )
return 0.1;
const float dx00 = lerp( getValue(make_int3(x0,y0,z0), dim, sdf_grids), getValue(make_int3(x1,y0,z0), dim, sdf_grids), fx);
const float dx01 = lerp( getValue(make_int3(x0,y0,z1), dim, sdf_grids), getValue(make_int3(x1,y0,z1), dim, sdf_grids), fx);
const float dx10 = lerp( getValue(make_int3(x0,y1,z0), dim, sdf_grids), getValue(make_int3(x1,y1,z0), dim, sdf_grids), fx);
const float dx11 = lerp( getValue(make_int3(x0,y1,z1), dim, sdf_grids), getValue(make_int3(x1,y1,z1), dim, sdf_grids), fx);
const float dxy0 = lerp( dx00, dx10, fy );
const float dxy1 = lerp( dx01, dx11, fy );
float dxyz = lerp( dxy0, dxy1, fz );
// penalize inside objects
//if (dxyz < 0)
// dxyz *= 10;
return dxyz;
}
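// Note on getValueInterpolated above: pGrid is expressed in voxel units with samples
// stored at voxel centers, hence the -0.5 shift before taking the integer corner
// (x0, y0, z0); the value is reconstructed by trilinear interpolation of the 8
// surrounding voxels, and any query outside the grid falls back to the constant 0.1.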
template <typename Dtype>
inline __device__ __host__ float3 getGradientInterpolated(const float3 & pGrid, const int3 & dim, const Dtype* sdf_grids)
{
const float3 delta_x = make_float3(1,0,0);
const float3 delta_y = make_float3(0,1,0);
const float3 delta_z = make_float3(0,0,1);
Dtype f_px = getValueInterpolated(pGrid + delta_x, dim, sdf_grids);
Dtype f_py = getValueInterpolated(pGrid + delta_y, dim, sdf_grids);
Dtype f_pz = getValueInterpolated(pGrid + delta_z, dim, sdf_grids);
Dtype f_mx = getValueInterpolated(pGrid - delta_x, dim, sdf_grids);
Dtype f_my = getValueInterpolated(pGrid - delta_y, dim, sdf_grids);
Dtype f_mz = getValueInterpolated(pGrid - delta_z, dim, sdf_grids);
float3 grad;
grad.x = 0.5*(f_px - f_mx);
grad.y = 0.5*(f_py - f_my);
grad.z = 0.5*(f_pz - f_mz);
return grad;
}
template <typename Dtype>
__global__ void SDFdistanceForward(const int nthreads, const Dtype* pose_delta, const Dtype* pose_init,
const Dtype* sdf_grids, const Dtype* sdf_limits, const Dtype* points,
const int num_points, const int d0, const int d1, const int d2, Dtype* losses, Dtype* top_values, Dtype* diffs, Dtype* top_se3)
{
typedef Sophus::SE3<Dtype> SE3;
typedef Eigen::Matrix<Dtype,3,1,Eigen::DontAlign> Vec3;
// index is the index of point
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
// convert delta pose
Eigen::Matrix<Dtype,6,1> deltaPose;
deltaPose << pose_delta[0], pose_delta[1], pose_delta[2], pose_delta[3], pose_delta[4], pose_delta[5];
SE3 deltaPoseMatrix = SE3::exp(deltaPose);
// convert initial pose
Eigen::Matrix<Dtype,4,4> initialPose;
initialPose << pose_init[0], pose_init[1], pose_init[2], pose_init[3],
pose_init[4], pose_init[5], pose_init[6], pose_init[7],
pose_init[8], pose_init[9], pose_init[10], pose_init[11],
pose_init[12], pose_init[13], pose_init[14], pose_init[15];
SE3 initialPoseMatrix = SE3(initialPose);
if (index == 0)
{
SE3 pose = deltaPoseMatrix * initialPoseMatrix;
Eigen::Matrix<Dtype,3,4> matrix = pose.matrix3x4();
int count = 0;
for (int i = 0; i < 3; i++)
{
for (int j = 0; j < 4; j++)
top_se3[count++] = matrix(i, j);
}
top_se3[15] = 1.0;
}
// convert point
Vec3 point;
point << points[3 * index], points[3 * index + 1], points[3 * index + 2];
// transform the point
const Vec3 updatedPoint = deltaPoseMatrix * initialPoseMatrix * point;
// obtain sdf value
float px = (updatedPoint(0) - sdf_limits[0]) / (sdf_limits[3] - sdf_limits[0]) * d0;
float py = (updatedPoint(1) - sdf_limits[1]) / (sdf_limits[4] - sdf_limits[1]) * d1;
float pz = (updatedPoint(2) - sdf_limits[2]) / (sdf_limits[5] - sdf_limits[2]) * d2;
float3 pGrid = make_float3(px, py, pz);
int3 dim = make_int3(d0, d1, d2);
Dtype value = getValueInterpolated(pGrid, dim, sdf_grids);
int flag = 1;
if (value < 0)
flag = -1;
losses[index] = flag * value;
top_values[index] = losses[index];
// L2 penalty on translation
float lambda = 0.001;
losses[index] += 0.5 * lambda * (pose_delta[0] * pose_delta[0] + pose_delta[1] * pose_delta[1] + pose_delta[2] * pose_delta[2]);
// compute gradient
float3 grad = getGradientInterpolated(pGrid, dim, sdf_grids);
Vec3 sdfUpdate;
sdfUpdate << grad.x, grad.y, grad.z;
Eigen::Matrix<Dtype,3,6> dUpdate;
dUpdate << 1, 0, 0, 0, updatedPoint(2), -updatedPoint(1),
0, 1, 0, -updatedPoint(2), 0, updatedPoint(0),
0, 0, 1, updatedPoint(1), -updatedPoint(0), 0;
Eigen::Matrix<Dtype,1,6> grad_pose = sdfUpdate.transpose() * dUpdate;
// assign gradient
for (int i = 0; i < 6; i++)
diffs[6 * index + i] = flag * grad_pose(i);
// L2 penalty on translation
diffs[6 * index + 0] += lambda * pose_delta[0];
diffs[6 * index + 1] += lambda * pose_delta[1];
diffs[6 * index + 2] += lambda * pose_delta[2];
}
}
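/* Gradient computed by the kernel above: with xi the 6-dof twist (translation first),
   p = exp(xi) * T_init * x the transformed point and phi the interpolated SDF value,
   d|phi|/dxi = sign(phi) * grad(phi)^T * dp/dxi, where dp/dxi = [ I_3 | -[p]_x ] is the
   usual SE(3) point Jacobian (dUpdate above). The L2 regularizer adds lambda * t to the
   three translation components, and sum_gradients() below reduces the per-point rows. */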
/* diffs: num_points x num_channels */
/* bottom_diff: num_channels */
template <typename Dtype>
__global__ void sum_gradients(const int nthreads, const Dtype* diffs, const int num_points, Dtype* bottom_diff)
{
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
bottom_diff[index] = 0;
int num_channels = 6;
for (int p = 0; p < num_points; p++)
{
int index_diff = p * num_channels + index;
bottom_diff[index] += diffs[index_diff];
}
}
}
/***************************/
/* pose_delta: 1 x 6 */
/* pose_init: 4 x 4 */
/* sdf_grid: c x h x w */
/* points: n x 3 */
/***************************/
std::vector<at::Tensor> sdf_loss_cuda_forward(
at::Tensor pose_delta,
at::Tensor pose_init,
at::Tensor sdf_grids,
at::Tensor sdf_limits,
at::Tensor points)
{
// run kernels
cudaError_t err;
const int kThreadsPerBlock = 512;
const int num_channels = 6;
int output_size;
// temp losses
const int num_points = points.size(0);
const int3 dim = make_int3(sdf_grids.size(0), sdf_grids.size(1), sdf_grids.size(2));
auto losses = at::zeros({num_points}, points.options());
auto top_values = at::zeros({num_points}, points.options());
auto top_data = at::zeros({1}, points.options());
auto top_se3 = at::zeros({4, 4}, points.options());
// temp diffs
auto diffs = at::zeros({num_points, num_channels}, points.options());
auto bottom_diff = at::zeros({num_channels}, points.options());
// compute the losses and gradients
output_size = num_points;
SDFdistanceForward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>(
output_size, pose_delta.data<float>(), pose_init.data<float>(), sdf_grids.data<float>(), sdf_limits.data<float>(),
points.data<float>(), num_points, dim.x, dim.y, dim.z, losses.data<float>(), top_values.data<float>(), diffs.data<float>(), top_se3.data<float>());
cudaDeviceSynchronize();
err = cudaGetLastError();
if(cudaSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed: %s\n", cudaGetErrorString( err ) );
exit( -1 );
}
// sum the diffs
output_size = num_channels;
sum_gradients<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>(
output_size, diffs.data<float>(), num_points, bottom_diff.data<float>());
cudaDeviceSynchronize();
// sum the loss
thrust::device_ptr<float> losses_ptr(losses.data<float>());
float loss = thrust::reduce(losses_ptr, losses_ptr + num_points) / num_points;
cudaMemcpy(top_data.data<float>(), &loss, sizeof(float), cudaMemcpyHostToDevice);
err = cudaGetLastError();
if(cudaSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed: %s\n", cudaGetErrorString( err ) );
exit( -1 );
}
return {top_data, top_values, top_se3, bottom_diff};
}
template <typename Dtype>
__global__ void SDFdistanceBackward(const int nthreads, const Dtype* top_diff,
const Dtype* bottom_diff, Dtype* output)
{
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
output[index] = top_diff[0] * bottom_diff[index];
}
}
std::vector<at::Tensor> sdf_loss_cuda_backward(
at::Tensor grad_loss,
at::Tensor bottom_diff)
{
cudaError_t err;
const int kThreadsPerBlock = 512;
int output_size;
const int batch_size = bottom_diff.size(0);
auto grad_pose = at::zeros({batch_size}, bottom_diff.options());
output_size = batch_size;
SDFdistanceBackward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock>>>(
output_size, grad_loss.data<float>(), bottom_diff.data<float>(), grad_pose.data<float>());
cudaDeviceSynchronize();
err = cudaGetLastError();
if(cudaSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) );
exit( -1 );
}
return {grad_pose};
}
#pragma once
#include <gunrock/util/device_intrinsics.cuh>
#include <gunrock/util/cta_work_progress.cuh>
#include <gunrock/util/io/modified_load.cuh>
#include <gunrock/util/io/modified_store.cuh>
#include <gunrock/util/io/load_tile.cuh>
#include <gunrock/util/operators.cuh>
#include <gunrock/util/soa_tuple.cuh>
#include <gunrock/util/scan/soa/cooperative_soa_scan.cuh>
#include <gunrock/oprtr/advance/advance_base.cuh>
// TODO: use CUB for SOA scan
namespace gunrock {
namespace oprtr {
namespace TWC {
/**
* @brief Tile of incoming frontier to process
*
* @tparam LOG_LOADS_PER_TILE Size of the loads per tile.
* @tparam LOG_LOAD_VEC_SIZE Size of the vector size per load.
*/
// template<int LOG_LOADS_PER_TILE, int LOG_LOAD_VEC_SIZE>
template <typename _CtaT>
struct Tile {
/**
* Typedefs and Constants
*/
typedef _CtaT CtaT;
typedef typename CtaT::VertexT VertexT;
typedef typename CtaT::SizeT SizeT;
typedef typename CtaT::InKeyT InKeyT;
typedef typename CtaT::OutKeyT OutKeyT;
typedef typename CtaT::ValueT ValueT;
typedef typename CtaT::KernelPolicyT KernelPolicyT;
typedef typename KernelPolicyT::SmemStorage SmemStorage;
typedef typename util::VecType<SizeT, 2>::Type Vec2SizeT;
typedef Tile<CtaT> TileT;
enum {
FLAG = KernelPolicyT::FLAG,
LOADS_PER_TILE = 1 << KernelPolicyT::LOG_LOADS_PER_TILE,
LOAD_VEC_SIZE = 1 << KernelPolicyT::LOG_LOAD_VEC_SIZE
};
/**
* @brief Iterate over vertex ids in tile.
*/
template <
// typename TileT,
int LOAD, int VEC, int dummy = 0>
struct Iterate {
// typedef typename TileT::CtaT CtaT;
/**
* @brief Tile data initialization
*/
// template <typename TileT>
static __device__ __forceinline__ void Init(TileT *tile) {
tile->row_length[LOAD][VEC] = 0;
tile->row_progress[LOAD][VEC] = 0;
Iterate<LOAD, VEC + 1>::Init(tile);
}
/**
* @brief Inspect the neighbor list size of each node in the frontier,
* prepare for neighbor list expansion.
* @tparam Cta CTA tile-processing abstraction type
* @tparam Tile Tile structure type
* @param[in] cta Pointer to CTA object
* @param[in] tile Pointer to Tile object
*/
// template <typename CtaT>//, typename TileT>
static __device__ __forceinline__ void Inspect(CtaT *cta, TileT *tile) {
// if (tile->vertex_id[LOAD][VEC] != -1) {
if (util::isValid(tile->keys_in[LOAD][VEC])) {
// Translate vertex-id into local gpu row-id (currently stride of
// num_gpu)
VertexT row_id = 0; // / cta->num_gpus;
// Load neighbor row range from d_row_offsets
// Vec2SizeT row_range;
// SizeT row_id1;
// if (ADVANCE_TYPE == gunrock::oprtr::advance::V2V ||
// ADVANCE_TYPE == gunrock::oprtr::advance::V2E)
if ((FLAG & OprtrType_V2V) != 0 || (FLAG & OprtrType_V2E) != 0) {
row_id = tile->keys_in[LOAD][VEC];
// row_range.x = tex1Dfetch(cta->ts_rowoffset[0], row_id);
// util::io::ModifiedLoad<Problem::COLUMN_READ_MODIFIER>::Ld(
// row_range.x,
// cta->d_row_offsets + row_id);
// row_range.x = graph.GetNeighborListOffset(row_id);
// tile->row_offset[LOAD][VEC] =
// cta -> graph.GetNeighborListOffset(row_id);
// row_range.y = tex1Dfetch(cta->ts_rowoffset[0], row_id + 1);
// util::io::ModifiedLoad<Problem::COLUMN_READ_MODIFIER>::Ld(
// row_range.y,
// cta->d_row_offsets + row_id+1);
// tile->row_length[LOAD][VEC] =
// cta -> graph.GetNeighborListLength(row_id);
}
// if (ADVANCE_TYPE == gunrock::oprtr::advance::E2V ||
// ADVANCE_TYPE == gunrock::oprtr::advance::E2E)
if ((FLAG & OprtrType_E2V) != 0 || (FLAG & OprtrType_E2E) != 0) {
// row_id1 = (cta->input_inverse_graph)
// ? cta -> d_inverse_column_indices[row_id]
// : cta -> d_column_indices[row_id];
InKeyT edge_id = tile->keys_in[LOAD][VEC];
row_id = cta->graph.GetEdgeDest(edge_id);
// row_range.x = tex1Dfetch(cta->ts_rowoffset[0], row_id1);
// util::io::ModifiedLoad<Problem::COLUMN_READ_MODIFIER>::Ld(
// row_range.x,
// cta->d_row_offsets + row_id1);
// row_range.y = tex1Dfetch(cta->ts_rowoffset[0], row_id1+1);
// util::io::ModifiedLoad<Problem::COLUMN_READ_MODIFIER>::Ld(
// row_range.y,
// cta->d_row_offsets + row_id1+1);
}
// compute row offset and length
// tile->row_offset[LOAD][VEC] = row_range.x;
// tile->row_length[LOAD][VEC] = row_range.y - row_range.x;
tile->row_offset[LOAD][VEC] = cta->graph.GetNeighborListOffset(row_id);
tile->row_length[LOAD][VEC] = cta->graph.GetNeighborListLength(row_id);
}
tile->fine_row_rank[LOAD][VEC] =
(tile->row_length[LOAD][VEC] < KernelPolicyT::WARP_GATHER_THRESHOLD)
? tile->row_length[LOAD][VEC]
: 0;
tile->coarse_row_rank[LOAD][VEC] =
(tile->row_length[LOAD][VEC] < KernelPolicyT::WARP_GATHER_THRESHOLD)
? 0
: tile->row_length[LOAD][VEC];
Iterate<LOAD, VEC + 1>::Inspect(cta, tile);
} // end of Inspect
/**
* @brief Expand the node's neighbor list using the whole CTA.
* @tparam Cta CTA tile-processing abstraction type
* @tparam Tile Tile structure type
* @param[in] cta Pointer to CTA object
* @param[in] tile Pointer to Tile object
*/
// template <typename CtaT>//, typename TileT>
template <typename AdvanceOpT>
static __device__ __forceinline__ void CtaExpand(CtaT *cta, TileT *tile,
AdvanceOpT advance_op) {
// CTA-based expansion/loading
while (true) {
// All threads in block vie for the control of the block
if (tile->row_length[LOAD][VEC] >=
KernelPolicyT::CTA_GATHER_THRESHOLD) {
cta->smem_storage.state.cta_comm = threadIdx.x;
}
__syncthreads();
// Check
int owner = cta->smem_storage.state.cta_comm;
if (owner == KernelPolicyT::THREADS) {
          // No thread in the block has a neighbor list long enough for CTA expansion
break;
}
__syncthreads();
if (owner == threadIdx.x) {
// Got control of the CTA: command it
cta->smem_storage.state.warp_comm[0][0] =
tile->row_offset[LOAD][VEC]; // start
cta->smem_storage.state.warp_comm[0][1] =
tile->coarse_row_rank[LOAD][VEC]; // queue rank
cta->smem_storage.state.warp_comm[0][2] =
tile->row_offset[LOAD][VEC] + tile->row_length[LOAD][VEC]; // oob
// if (ADVANCE_TYPE == gunrock::oprtr::advance::V2V ||
// ADVANCE_TYPE == gunrock::oprtr::advance::V2E)
if ((FLAG & OprtrType_V2V) != 0 || (FLAG & OprtrType_V2E) != 0) {
cta->smem_storage.state.warp_comm[0][3] = tile->keys_in[LOAD][VEC];
}
// if (ADVANCE_TYPE == gunrock::oprtr::advance::E2V ||
// ADVANCE_TYPE == gunrock::oprtr::advance::E2E)
if ((FLAG & OprtrType_E2V) != 0 || (FLAG & OprtrType_E2E) != 0) {
cta->smem_storage.state.warp_comm[0][3]
//= cta -> input_inverse_graph
//? cta -> d_inverse_column_indices[tile->vertex_id[LOAD][VEC]]
//: cta -> d_column_indices[tile->vertex_id[LOAD][VEC]];
= cta->graph.GetEdgeDest(tile->keys_in[LOAD][VEC]);
}
cta->smem_storage.state.warp_comm[0][4] = tile->keys_in[LOAD][VEC];
// Unset row length
tile->row_length[LOAD][VEC] = 0;
// Unset my command
cta->smem_storage.state.cta_comm = KernelPolicyT::THREADS;
// So that we won't repeatedly expand this node
}
__syncthreads();
// Read commands
SizeT coop_offset = cta->smem_storage.state.warp_comm[0][0];
SizeT coop_rank = cta->smem_storage.state.warp_comm[0][1] + threadIdx.x;
SizeT coop_oob = cta->smem_storage.state.warp_comm[0][2];
VertexT pred_id;
VertexT input_item = cta->smem_storage.state.warp_comm[0][4];
// if (Problem::MARK_PREDECESSORS)
pred_id = cta->smem_storage.state.warp_comm[0][3];
// else
// pred_id = util::InvalidValue<VertexId>();//cta->label;
//__syncthreads();
while (coop_offset + threadIdx.x < coop_oob) {
// Gather
VertexT neighbor_id;
// util::io::ModifiedLoad<Problem::COLUMN_READ_MODIFIER>::Ld(
// neighbor_id,
// cta->d_column_indices + coop_offset + threadIdx.x);
SizeT edge_id = coop_offset + threadIdx.x;
neighbor_id = cta->graph.GetEdgeDest(edge_id);
// ProcessNeighbor
// <KernelPolicy, Problem, Functor,
// ADVANCE_TYPE, R_TYPE, R_OP> (
// pred_id,
// neighbor_id,
// cta -> d_data_slice,
// (SizeT)(coop_offset + threadIdx.x),
// util::InvalidValue<SizeT>(), // input_pos
// input_item,
// cta -> smem_storage.state.coarse_enqueue_offset + coop_rank,
// cta -> label,
// cta -> d_keys_out,
// cta -> d_values_out,
// cta -> d_value_to_reduce,
// cta -> d_reduce_frontier);
// ProcessNeighbor(
// cta, tile,
// coop_offset + threadIdx.x,
// pred_id, edge_id,
// cta->smem_storage.state.coarse_enqueue_offset + coop_rank);
SizeT output_pos =
cta->smem_storage.state.coarse_enqueue_offset + coop_rank;
ProcessNeighbor<FLAG, VertexT, InKeyT, OutKeyT, SizeT, ValueT>(
pred_id, neighbor_id, edge_id,
util::PreDefinedValues<SizeT>::InvalidValue, input_item,
output_pos, cta->keys_out, cta->values_out, NULL,
NULL, // cta -> reduce_values_in, cta -> reduce_values_out,
advance_op);
coop_offset += KernelPolicyT::THREADS;
coop_rank += KernelPolicyT::THREADS;
}
} // end of while (true)
__syncthreads();
// Next vector element
Iterate<LOAD, VEC + 1>::CtaExpand(cta, tile, advance_op);
} // end of CtaExpand
/**
 * @brief Expand the node's neighbor list using a warp. (Currently disabled
 * in the enactor)
 * @tparam AdvanceOpT Type of the advance operation functor
 * @param[in] cta Pointer to CTA object
 * @param[in] tile Pointer to Tile object
 * @param[in] advance_op Advance operation functor
 */
// template<typename CtaT>//, typename TileT>
template <typename AdvanceOpT>
static __device__ __forceinline__ void WarpExpand(CtaT *cta, TileT *tile,
AdvanceOpT advance_op) {
if (KernelPolicyT::WARP_GATHER_THRESHOLD <
KernelPolicyT::CTA_GATHER_THRESHOLD) {
// Warp-based expansion/loading
int warp_id = threadIdx.x >> GR_LOG_WARP_THREADS(CUDA_ARCH);
int lane_id = util::LaneId();
while (__any(tile->row_length[LOAD][VEC] >=
KernelPolicyT::WARP_GATHER_THRESHOLD)) {
if (tile->row_length[LOAD][VEC] >=
KernelPolicyT::WARP_GATHER_THRESHOLD) {
// All threads inside one warp vie for control of the warp
cta->smem_storage.state.warp_comm[warp_id][0] = lane_id;
}
if (lane_id == cta->smem_storage.state.warp_comm[warp_id][0]) {
// Got control of the warp
cta->smem_storage.state.warp_comm[warp_id][0] =
tile->row_offset[LOAD][VEC]; // start
cta->smem_storage.state.warp_comm[warp_id][1] =
tile->coarse_row_rank[LOAD][VEC]; // queue rank
cta->smem_storage.state.warp_comm[warp_id][2] =
tile->row_offset[LOAD][VEC] +
tile->row_length[LOAD][VEC]; // oob
// if (ADVANCE_TYPE == gunrock::oprtr::advance::V2V ||
// ADVANCE_TYPE == gunrock::oprtr::advance::V2E)
if ((FLAG & OprtrType_V2V) != 0 || (FLAG & OprtrType_V2E) != 0) {
cta->smem_storage.state.warp_comm[warp_id][3] =
tile->keys_in[LOAD][VEC];
}
// if (ADVANCE_TYPE == gunrock::oprtr::advance::E2V ||
// ADVANCE_TYPE == gunrock::oprtr::advance::E2E)
if ((FLAG & OprtrType_E2V) != 0 || (FLAG & OprtrType_E2E) != 0) {
cta->smem_storage.state.warp_comm[warp_id][3]
//= cta -> input_inverse_graph
//? cta ->
//d_inverse_column_indices[tile->vertex_id[LOAD][VEC]] : cta
//-> d_column_indices[tile->vertex_id[LOAD][VEC]];
= cta->graph.GetEdgeDest(tile->keys_in[LOAD][VEC]);
}
cta->smem_storage.state.warp_comm[warp_id][4] =
tile->keys_in[LOAD][VEC];
// Unset row length
tile->row_length[LOAD][VEC] =
0; // So that we won't repeatedly expand this node
}
SizeT coop_offset = cta->smem_storage.state.warp_comm[warp_id][0];
SizeT coop_rank =
cta->smem_storage.state.warp_comm[warp_id][1] + lane_id;
SizeT coop_oob = cta->smem_storage.state.warp_comm[warp_id][2];
VertexT pred_id;
VertexT input_item = cta->smem_storage.state.warp_comm[warp_id][4];
// if (Problem::MARK_PREDECESSORS)
pred_id = cta->smem_storage.state.warp_comm[warp_id][3];
// else
// pred_id = util::InvalidValue<VertexT>();//cta->label;
while (coop_offset + lane_id < coop_oob) {
VertexT neighbor_id;
// util::io::ModifiedLoad<Problem::COLUMN_READ_MODIFIER>::Ld(
// neighbor_id,
// cta->d_column_indices + coop_offset + lane_id);
neighbor_id = cta->graph.GetEdgeDest(coop_offset + lane_id);
// ProcessNeighbor
// <KernelPolicy, Problem, Functor,
// ADVANCE_TYPE, R_TYPE, R_OP> (
// pred_id,
// neighbor_id,
// cta -> d_data_slice,
// coop_offset + lane_id,
// util::InvalidValue<SizeT>(), // input_pos
// input_item,
// cta -> smem_storage.state.coarse_enqueue_offset + coop_rank,
// cta -> label,
// cta -> d_keys_out,
// cta -> d_values_out,
// cta -> d_value_to_reduce,
// cta -> d_reduce_frontier);
// ProcessNeighbor(
// cta, tile,
// coop_offset + lane_id,
// pred_id, edge_id,
// cta->smem_storage.state.coarse_enqueue_offset + coop_rank);
SizeT output_pos =
cta->smem_storage.state.coarse_enqueue_offset + coop_rank;
ProcessNeighbor<FLAG, VertexT, InKeyT, OutKeyT, SizeT, ValueT>(
pred_id, neighbor_id, coop_offset + lane_id,
util::PreDefinedValues<SizeT>::InvalidValue, input_item,
output_pos, cta->keys_out, cta->values_out, NULL,
NULL, // cta -> reduce_values_in, cta -> reduce_values_out,
advance_op);
coop_offset += GR_WARP_THREADS(CUDA_ARCH);
coop_rank += GR_WARP_THREADS(CUDA_ARCH);
}
}
// Next vector element
Iterate<LOAD, VEC + 1>::WarpExpand(cta, tile, advance_op);
}
} // end of WarpExpand
/**
 * @brief Expand the node's neighbor list using a single thread (scan-based).
 * @param[in] cta Pointer to CTA object
 * @param[in] tile Pointer to Tile object
 */
// template <typename CtaT>//, typename TileT>
static __device__ __forceinline__ void ThreadExpand(CtaT *cta,
TileT *tile) {
// Expand the neighbor list into scratch space
SizeT scratch_offset = tile->fine_row_rank[LOAD][VEC] +
tile->row_progress[LOAD][VEC] - tile->progress;
while ((tile->row_progress[LOAD][VEC] < tile->row_length[LOAD][VEC]) &&
(scratch_offset < SmemStorage::GATHER_ELEMENTS)) {
// Put gather offset into scratch space
cta->smem_storage.gather_offsets[scratch_offset] =
tile->row_offset[LOAD][VEC] + tile->row_progress[LOAD][VEC];
cta->smem_storage.gather_edges[scratch_offset] =
tile->keys_in[LOAD][VEC];
// if (Problem::MARK_PREDECESSORS)
// if ((FLAG & OprtrOption_Mark_Predecessors) != 0)
{
// if (ADVANCE_TYPE == gunrock::oprtr::advance::E2V ||
// ADVANCE_TYPE == gunrock::oprtr::advance::E2E)
if ((FLAG & OprtrType_E2V) != 0 || (FLAG & OprtrType_E2E) != 0) {
cta->smem_storage.gather_predecessors[scratch_offset]
//= cta -> input_inverse_graph
//? cta -> d_inverse_column_indices[tile->vertex_id[LOAD][VEC]]
//: cta -> d_column_indices[tile->vertex_id[LOAD][VEC]];
= cta->graph.GetEdgeDest(tile->keys_in[LOAD][VEC]);
cta->smem_storage.gather_edges[scratch_offset] =
tile->keys_in[LOAD][VEC];
}
// if (ADVANCE_TYPE == gunrock::oprtr::advance::V2V ||
// ADVANCE_TYPE == gunrock::oprtr::advance::V2E)
if ((FLAG & OprtrType_V2V) != 0 || (FLAG & OprtrType_V2E) != 0)
cta->smem_storage.gather_predecessors[scratch_offset] =
tile->keys_in[LOAD][VEC];
}
tile->row_progress[LOAD][VEC]++;
scratch_offset++;
}
// Next vector element
Iterate<LOAD, VEC + 1>::ThreadExpand(cta, tile);
}
}; // end of struct Iterate
/**
* Iterate next load
*/
template <
// typename TileT,
int LOAD, int dummy>
struct Iterate<LOAD, LOAD_VEC_SIZE, dummy> {
// typedef typename TileT::CtaT CtaT;
// Init
// template <typename TileT>
static __device__ __forceinline__ void Init(TileT *tile) {
Iterate<LOAD + 1, 0>::Init(tile);
}
// Inspect
// template <typename CtaT>//, typename TileT>
static __device__ __forceinline__ void Inspect(CtaT *cta, TileT *tile) {
Iterate<LOAD + 1, 0>::Inspect(cta, tile);
}
// CTA Expand
// template <typename CtaT>//, typename TileT>
template <typename AdvanceOpT>
static __device__ __forceinline__ void CtaExpand(CtaT *cta, TileT *tile,
AdvanceOpT advance_op) {
Iterate<LOAD + 1, 0>::CtaExpand(cta, tile, advance_op);
}
// Warp Expand
// template <typename CtaT>//, typename TileT>
template <typename AdvanceOpT>
static __device__ __forceinline__ void WarpExpand(CtaT *cta, TileT *tile,
AdvanceOpT advance_op) {
Iterate<LOAD + 1, 0>::WarpExpand(cta, tile, advance_op);
}
// Single Thread Expand
// template <typename CtaT>//, typename TileT>
static __device__ __forceinline__ void ThreadExpand(CtaT *cta,
TileT *tile) {
Iterate<LOAD + 1, 0>::ThreadExpand(cta, tile);
}
};
/**
* Terminate Iterate
*/
template <int dummy>
struct Iterate<LOADS_PER_TILE, 0, dummy> {
// typedef typename TileT::CtaT CtaT;
// Init
// template <typename TileT>
static __device__ __forceinline__ void Init(TileT *tile) {}
// Inspect
// template <typename CtaT>//, typename TileT>
static __device__ __forceinline__ void Inspect(CtaT *cta, TileT *tile) {}
// CtaExpand
// template<typename CtaT>//, typename TileT>
template <typename AdvanceOpT>
static __device__ __forceinline__ void CtaExpand(CtaT *cta, TileT *tile,
AdvanceOpT advance_op) {}
// WarpExpand
// template<typename CtaT>//, typename TileT>
template <typename AdvanceOpT>
static __device__ __forceinline__ void WarpExpand(CtaT *cta, TileT *tile,
AdvanceOpT advance_op) {}
// SingleThreadExpand
// template<typename CtaT>//, typename TileT>
static __device__ __forceinline__ void ThreadExpand(CtaT *cta,
TileT *tile) {}
};
/**
* Members
*/
// Dequeued vertex ids
InKeyT keys_in[LOADS_PER_TILE][LOAD_VEC_SIZE];
SizeT row_offset[LOADS_PER_TILE][LOAD_VEC_SIZE];
SizeT row_length[LOADS_PER_TILE][LOAD_VEC_SIZE];
// Global scatter offsets. Coarse for CTA/warp-based scatters, fine for
// scan-based scatters
SizeT fine_count;
SizeT coarse_row_rank[LOADS_PER_TILE][LOAD_VEC_SIZE];
SizeT fine_row_rank[LOADS_PER_TILE][LOAD_VEC_SIZE];
// Progress for scan-based forward edge map gather offsets
SizeT row_progress[LOADS_PER_TILE][LOAD_VEC_SIZE];
SizeT progress;
// Iterate Interface
// Constructor
__device__ __forceinline__ Tile() { Iterate<0, 0>::Init(this); }
// Inspect dequeued nodes
// template <typename CtaT>
__device__ __forceinline__ void Inspect(CtaT *cta) {
Iterate<0, 0>::Inspect(cta, this);
}
// CTA Expand
// template <typename CtaT>
template <typename AdvanceOpT>
__device__ __forceinline__ void CtaExpand(CtaT *cta, AdvanceOpT advance_op) {
Iterate<0, 0>::CtaExpand(cta, this, advance_op);
}
// Warp Expand
// template <typename CtaT>
template <typename AdvanceOpT>
__device__ __forceinline__ void WarpExpand(CtaT *cta, AdvanceOpT advance_op) {
Iterate<0, 0>::WarpExpand(cta, this, advance_op);
}
// Single Thread Expand
// template <typename CtaT>
__device__ __forceinline__ void ThreadExpand(CtaT *cta) {
Iterate<0, 0>::ThreadExpand(cta, this);
}
}; // end of struct Tile
/**
 * @brief CTA tile-processing abstraction for the TWC advance operator.
 *
 * @tparam _GraphT Graph type.
 * @tparam _KernelPolicyT Kernel policy type for the advance operator.
 *
 */
template <typename _GraphT, typename _KernelPolicyT>
struct Cta {
/**
* Typedefs
*/
typedef _GraphT GraphT;
typedef _KernelPolicyT KernelPolicyT;
typedef typename KernelPolicyT::VertexT VertexT;
typedef typename KernelPolicyT::InKeyT InKeyT;
typedef typename KernelPolicyT::OutKeyT OutKeyT;
typedef typename KernelPolicyT::SizeT SizeT;
typedef typename KernelPolicyT::ValueT ValueT;
typedef typename KernelPolicyT::SmemStorage SmemStorage;
typedef typename KernelPolicyT::SoaScanOp SoaScanOp;
typedef typename KernelPolicyT::RakingSoaDetails RakingSoaDetails;
typedef typename KernelPolicyT::TileTuple TileTuple;
typedef util::Tuple<SizeT (*)[KernelPolicyT::LOAD_VEC_SIZE],
SizeT (*)[KernelPolicyT::LOAD_VEC_SIZE]>
RankSoa;
typedef Cta<GraphT, KernelPolicyT> CtaT;
typedef Tile<CtaT> TileT;
/**
* Members
*/
// Graph
const GraphT &graph;
// Input and output device pointers
const InKeyT *&keys_in; // Incoming frontier
ValueT *&values_out;
OutKeyT *&keys_out; // Outgoing frontier
// Work progress
const VertexT &queue_index; // Current frontier queue counter index
util::CtaWorkProgress<SizeT> &work_progress; // Atomic queueing counters
// SizeT max_out_frontier; // Maximum size (in elements) of outgoing frontier
// LabelT label;           // Current label of the frontier
const SizeT &input_queue_length;
// gunrock::oprtr::advance::TYPE advance_type;
// bool input_inverse_graph;
// gunrock::oprtr::advance::REDUCE_TYPE r_type;
// gunrock::oprtr::advance::REDUCE_OP r_op;
// Value *d_value_to_reduce;
// const ValueT *&reduce_values_in;
// ValueT *&reduce_values_out;
// Value *d_reduce_frontier;
// Operational details for raking grid
RakingSoaDetails raking_soa_details;
// Shared memory for the CTA
SmemStorage &smem_storage;
// Methods
/**
* @brief CTA default constructor
*/
__device__ __forceinline__
Cta(const GraphT &graph,
// bool queue_reset,
// LabelT label,
// SizeT *d_row_offsets,
// SizeT *d_inverse_row_offsets,
// VertexT *d_column_indices,
// VertexT *d_inverse_column_indices,
const InKeyT *&keys_in, const SizeT &input_queue_length,
OutKeyT *&keys_out, ValueT *&values_out, const VertexT &queue_index,
// DataSlice *d_data_slice,
// SizeT max_in_frontier,
// SizeT max_out_frontier,
// const ValueT *&reduce_values_in,
// ValueT *&reduce_values_out,
util::CtaWorkProgress<SizeT> &work_progress, SmemStorage &smem_storage)
: // gunrock::oprtr::advance::TYPE ADVANCE_TYPE,
// bool input_inverse_graph,
// gunrock::oprtr::advance::REDUCE_TYPE R_TYPE,
// gunrock::oprtr::advance::REDUCE_OP R_OP,
// Value *d_value_to_reduce,
// Value *d_reduce_frontier) :
// queue_reset (queue_reset),
graph(graph),
queue_index(queue_index),
// label (label),
// d_row_offsets (d_row_offsets),
// d_inverse_row_offsets (d_inverse_row_offsets),
// d_column_indices (d_column_indices),
// d_inverse_column_indices(d_inverse_column_indices),
keys_in(keys_in),
keys_out(keys_out),
values_out(values_out),
// d_data_slice (d_data_slice),
input_queue_length(input_queue_length),
// max_out_frontier (max_out_frontier),
// reduce_values_in (reduce_values_in),
// reduce_values_out (reduce_values_out),
work_progress(work_progress),
smem_storage(smem_storage),
// input_inverse_graph (input_inverse_graph),
// d_value_to_reduce (d_value_to_reduce),
// d_reduce_frontier (d_reduce_frontier),
raking_soa_details(typename RakingSoaDetails::GridStorageSoa(
smem_storage.coarse_raking_elements,
smem_storage.fine_raking_elements),
typename RakingSoaDetails::WarpscanSoa(
smem_storage.state.coarse_warpscan,
smem_storage.state.fine_warpscan),
TileTuple(0, 0))
// advance_type(ADVANCE_TYPE),
// r_type(R_TYPE),
// r_op(R_OP),
{
if (threadIdx.x == 0) {
smem_storage.state.cta_comm = KernelPolicyT::THREADS;
smem_storage.state.overflowed = false;
}
}
/**
 * @brief Process a single, full tile.
 *
 * @param[in] cta_offset Offset in the input frontier at which this CTA's
 * tile starts.
 * @param[in] guarded_elements Number of valid elements in the tile, used to
 * guard against out-of-bounds accesses.
 * @param[in] advance_op Advance operation functor.
 */
template <typename AdvanceOpT>
__device__ __forceinline__ void ProcessTile(
SizeT cta_offset,
SizeT guarded_elements, // = KernelPolicyT::TILE_ELEMENTS)
AdvanceOpT advance_op) {
TileT tile;
// Load tile
util::io::LoadTile<
KernelPolicyT::LOG_LOADS_PER_TILE, KernelPolicyT::LOG_LOAD_VEC_SIZE,
KernelPolicyT::THREADS, QUEUE_READ_MODIFIER,
false>::LoadValid(tile.keys_in, const_cast<InKeyT *>(keys_in),
cta_offset, guarded_elements,
util::PreDefinedValues<InKeyT>::InvalidValue);
// Inspect dequeued nodes, updating label and obtaining
// edge-list details
tile.Inspect(this);
// CooperativeSoaTileScan, put result in totals (done)
SoaScanOp scan_op;
TileTuple totals;
gunrock::util::scan::soa::CooperativeSoaTileScan<
KernelPolicyT::LOAD_VEC_SIZE>::ScanTile(totals, raking_soa_details,
RankSoa(tile.coarse_row_rank,
tile.fine_row_rank),
scan_op);
SizeT coarse_count = totals.t0;
tile.fine_count = totals.t1;
// Set input queue length and check for overflow
if (threadIdx.x == 0) {
SizeT enqueue_amt = coarse_count + tile.fine_count;
SizeT enqueue_offset =
work_progress.Enqueue(enqueue_amt, queue_index + 1);
// printf("(%4d, %4d) outputs = %lld + %lld = %lld, offset = %lld\n",
// blockIdx.x, threadIdx.x,
// coarse_count, tile.fine_count,
// enqueue_amt, enqueue_offset);
smem_storage.state.coarse_enqueue_offset = enqueue_offset;
smem_storage.state.fine_enqueue_offset = enqueue_offset + coarse_count;
// Check for queue overflow due to redundant expansion
// if (enqueue_offset + enqueue_amt > max_out_frontier)
//{
// smem_storage.state.overflowed = true;
// work_progress.SetOverflow();
//}
}
// Protect overflowed flag
__syncthreads();
// Quit if overflow
// if (smem_storage.state.overflowed) {
// util::ThreadExit();
//}
if (coarse_count > 0) {
// Enqueue valid edge lists into outgoing queue by CTA
tile.CtaExpand(this, advance_op);
// Enqueue valid edge lists into outgoing queue by Warp
tile.WarpExpand(this, advance_op);
}
// Enqueue the adjacency lists of unvisited node-IDs by repeatedly
// gathering edges into scratch space, and then
// having the entire CTA copy the scratch pool into the outgoing
// frontier queue.
//
tile.progress = 0;
while (tile.progress < tile.fine_count) {
// Fill the scratch space with gather-offsets for neighbor-lists
tile.ThreadExpand(this);
__syncthreads();
// Copy scratch space into queue
SizeT scratch_remainder =
GR_MIN(SmemStorage::GATHER_ELEMENTS, tile.fine_count - tile.progress);
for (SizeT scratch_offset = threadIdx.x;
scratch_offset < scratch_remainder;
scratch_offset += KernelPolicyT::THREADS) {
// Gather a neighbor
VertexT neighbor_id;
SizeT edge_id = smem_storage.gather_offsets[scratch_offset];
// neighbor_id = tex1Dfetch(ts_columnindices[0],
// smem_storage.gather_offsets[scratch_offset]);
// util::io::ModifiedLoad<Problem::COLUMN_READ_MODIFIER>::Ld(
// neighbor_id,
// d_column_indices + edge_id);//
// smem_storage.gather_offsets[scratch_offset]);
neighbor_id = graph.GetEdgeDest(edge_id);
VertexT predecessor_id;
// if (Problem::MARK_PREDECESSORS)
// if ((KernelPolicyT::FLAG & OprtrOption_Mark_Predecessors) != 0)
predecessor_id = smem_storage.gather_predecessors[scratch_offset];
// else
// predecessor_id =
// util::PreDefinedValues<VertexT>::InvalidValue;//label;
// if Cond(neighbor_id) returns true
// if Cond(neighbor_id) returns false or Apply returns false
// set neighbor_id to -1 for invalid
VertexT input_item = smem_storage.gather_edges[scratch_offset];
// ProcessNeighbor
// <KernelPolicy, Problem, Functor,
// ADVANCE_TYPE, R_TYPE, R_OP> (
// predecessor_id,
// neighbor_id,
// d_data_slice,
// edge_id,
// util::InvalidValue<SizeT>(), // input_pos
// input_item,
// smem_storage.state.fine_enqueue_offset + tile.progress +
// scratch_offset, label, d_keys_out, d_values_out,
// d_value_to_reduce,
// d_reduce_frontier);
SizeT output_pos = smem_storage.state.fine_enqueue_offset +
tile.progress + scratch_offset;
// printf("(%4d, %4d) output_pos = %lld + %lld + %lld = %lld\n",
// blockIdx.x, threadIdx.x,
// smem_storage.state.fine_enqueue_offset,
// tile.progress, scratch_offset, output_pos);
ProcessNeighbor<KernelPolicyT::FLAG, VertexT, InKeyT, OutKeyT, SizeT,
ValueT>(predecessor_id, neighbor_id, edge_id,
util::PreDefinedValues<SizeT>::InvalidValue,
input_item, output_pos, keys_out, values_out,
NULL,
NULL, // reduce_values_in, reduce_values_out,
advance_op);
}
tile.progress += SmemStorage::GATHER_ELEMENTS;
__syncthreads();
}
}
}; // end of struct Cta
} // namespace TWC
} // namespace oprtr
} // namespace gunrock
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
the_stack
|
extern "C" {
__constant__ LaunchParams launch_params;
}
struct RayPayload {
// payload registers 0, 1
float2 uv;
// payload register 2
float t_hit;
// payload register 3
uint32_t material_id;
// payload registers 4, 5, 6
float3 normal;
};
__device__ RayPayload make_ray_payload() {
RayPayload p;
p.uv = make_float2(0.f);
p.t_hit = -1.f;
p.material_id = 0;
p.normal = make_float3(0.f);
return p;
}
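// Scalar material parameters are stored as floats whose bit pattern may
// instead encode a texture reference; IS_TEXTURED_PARAM / GET_TEXTURE_ID /
// GET_TEXTURE_CHANNEL (defined elsewhere) unpack that mask, otherwise the
// float is used directly as the parameter value.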
__device__ float textured_scalar_param(const float x, const float2 &uv) {
const uint32_t mask = __float_as_int(x);
if (IS_TEXTURED_PARAM(mask)) {
const uint32_t tex_id = GET_TEXTURE_ID(mask);
const uint32_t channel = GET_TEXTURE_CHANNEL(mask);
return component(tex2D<float4>(launch_params.textures[tex_id], uv.x, uv.y), channel);
}
return x;
}
__device__ void unpack_material(const MaterialParams &p, float2 uv, DisneyMaterial &mat) {
uint32_t mask = __float_as_int(p.base_color.x);
if (IS_TEXTURED_PARAM(mask)) {
const uint32_t tex_id = GET_TEXTURE_ID(mask);
mat.base_color = make_float3(tex2D<float4>(launch_params.textures[tex_id], uv.x, uv.y));
} else {
mat.base_color = p.base_color;
}
mat.metallic = textured_scalar_param(p.metallic, uv);
mat.specular = textured_scalar_param(p.specular, uv);
mat.roughness = textured_scalar_param(p.roughness, uv);
mat.specular_tint = textured_scalar_param(p.specular_tint, uv);
mat.anisotropy = textured_scalar_param(p.anisotropy, uv);
mat.sheen = textured_scalar_param(p.sheen, uv);
mat.sheen_tint = textured_scalar_param(p.sheen_tint, uv);
mat.clearcoat = textured_scalar_param(p.clearcoat, uv);
mat.clearcoat_gloss = textured_scalar_param(p.clearcoat_gloss, uv);
mat.ior = textured_scalar_param(p.ior, uv);
mat.specular_transmission = textured_scalar_param(p.specular_transmission, uv);
}
__device__ float3 sample_direct_light(const DisneyMaterial &mat, const float3 &hit_p,
const float3 &n, const float3 &v_x, const float3 &v_y, const float3 &w_o,
const QuadLight *lights, const uint32_t num_lights, uint16_t &ray_count, LCGRand &rng)
{
float3 illum = make_float3(0.f);
uint32_t light_id = lcg_randomf(rng) * num_lights;
light_id = min(light_id, num_lights - 1);
QuadLight light = lights[light_id];
const uint32_t occlusion_flags = OPTIX_RAY_FLAG_DISABLE_ANYHIT
| OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT
| OPTIX_RAY_FLAG_DISABLE_CLOSESTHIT;
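// Multiple importance sampling: take one light sample (next-event estimation)
// and one BRDF sample, and weight each contribution with the power heuristic
// so the strategy with the higher pdf dominates and variance stays low.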
// Sample the light to compute an incident light ray to this point
{
float3 light_pos = sample_quad_light_position(light,
make_float2(lcg_randomf(rng), lcg_randomf(rng)));
float3 light_dir = light_pos - hit_p;
float light_dist = length(light_dir);
light_dir = normalize(light_dir);
float light_pdf = quad_light_pdf(light, light_pos, hit_p, light_dir);
float bsdf_pdf = disney_pdf(mat, n, w_o, light_dir, v_x, v_y);
uint32_t shadow_hit = 1;
optixTrace(launch_params.scene, hit_p, light_dir, EPSILON, light_dist, 0.f,
0xff, occlusion_flags, PRIMARY_RAY, 1, OCCLUSION_RAY,
shadow_hit);
#ifdef REPORT_RAY_STATS
++ray_count;
#endif
if (light_pdf >= EPSILON && bsdf_pdf >= EPSILON && !shadow_hit) {
float3 bsdf = disney_brdf(mat, n, w_o, light_dir, v_x, v_y);
float w = power_heuristic(1.f, light_pdf, 1.f, bsdf_pdf);
illum = bsdf * light.emission * fabs(dot(light_dir, n)) * w / light_pdf;
}
}
// Sample the BRDF to compute a light sample as well
{
float3 w_i;
float bsdf_pdf;
float3 bsdf = sample_disney_brdf(mat, n, w_o, v_x, v_y, rng, w_i, bsdf_pdf);
float light_dist;
float3 light_pos;
if (!all_zero(bsdf) && bsdf_pdf >= EPSILON && quad_intersect(light, hit_p, w_i, light_dist, light_pos)) {
float light_pdf = quad_light_pdf(light, light_pos, hit_p, w_i);
if (light_pdf >= EPSILON) {
float w = power_heuristic(1.f, bsdf_pdf, 1.f, light_pdf);
uint32_t shadow_hit = 1;
optixTrace(launch_params.scene, hit_p, w_i, EPSILON, light_dist, 0.f,
0xff, occlusion_flags, PRIMARY_RAY, 1, OCCLUSION_RAY,
shadow_hit);
#ifdef REPORT_RAY_STATS
++ray_count;
#endif
if (!shadow_hit) {
illum = illum + bsdf * light.emission * fabs(dot(w_i, n)) * w / bsdf_pdf;
}
}
}
}
return illum;
}
extern "C" __global__ void __raygen__perspective_camera() {
const RayGenParams ¶ms = get_shader_params<RayGenParams>();
const uint2 pixel = make_uint2(optixGetLaunchIndex().x, optixGetLaunchIndex().y);
const uint2 screen = make_uint2(optixGetLaunchDimensions().x, optixGetLaunchDimensions().y);
const uint32_t pixel_idx = pixel.x + pixel.y * screen.x;
LCGRand rng = get_rng(launch_params.frame_id);
const float2 d = make_float2(pixel.x + lcg_randomf(rng), pixel.y + lcg_randomf(rng)) / make_float2(screen);
float3 ray_dir = normalize(d.x * make_float3(launch_params.cam_du)
+ d.y * make_float3(launch_params.cam_dv) + make_float3(launch_params.cam_dir_top_left));
float3 ray_origin = make_float3(launch_params.cam_pos);
DisneyMaterial mat;
uint16_t ray_count = 0;
const float3 light_emission = make_float3(1.f);
int bounce = 0;
float3 illum = make_float3(0.f);
float3 path_throughput = make_float3(1.f);
do {
RayPayload payload = make_ray_payload();
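// The payload travels through the seven 32-bit payload registers:
// uv -> p0/p1, t_hit -> p2, material_id -> p3, normal (or miss color) ->
// p4-p6; float fields are passed as their reinterpreted bit patterns.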
optixTrace(launch_params.scene,
ray_origin,
ray_dir,
EPSILON,
1e20f,
0.f,
0xff,
OPTIX_RAY_FLAG_DISABLE_ANYHIT,
PRIMARY_RAY,
1,
PRIMARY_RAY,
reinterpret_cast<uint32_t&>(payload.uv.x),
reinterpret_cast<uint32_t&>(payload.uv.y),
reinterpret_cast<uint32_t&>(payload.t_hit),
payload.material_id,
reinterpret_cast<uint32_t&>(payload.normal.x),
reinterpret_cast<uint32_t&>(payload.normal.y),
reinterpret_cast<uint32_t&>(payload.normal.z));
#ifdef REPORT_RAY_STATS
++ray_count;
#endif
if (payload.t_hit <= 0.f) {
illum = illum + path_throughput * payload.normal;
break;
}
unpack_material(params.materials[payload.material_id], payload.uv, mat);
const float3 w_o = -ray_dir;
const float3 hit_p = ray_origin + payload.t_hit * ray_dir;
float3 v_x, v_y;
float3 v_z = payload.normal;
if (mat.specular_transmission == 0.f && dot(w_o, v_z) < 0.f) {
v_z = -v_z;
}
ortho_basis(v_x, v_y, v_z);
illum = illum + path_throughput * sample_direct_light(mat, hit_p, v_z, v_x, v_y, w_o,
params.lights, params.num_lights, ray_count, rng);
float3 w_i;
float pdf;
float3 bsdf = sample_disney_brdf(mat, v_z, w_o, v_x, v_y, rng, w_i, pdf);
if (pdf == 0.f || all_zero(bsdf)) {
break;
}
path_throughput = path_throughput * bsdf * fabs(dot(w_i, v_z)) / pdf;
ray_origin = hit_p;
ray_dir = w_i;
++bounce;
// Russian roulette termination
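// Terminate the path with probability q after a few bounces and divide the
// throughput of surviving paths by (1 - q); the estimator stays unbiased
// while long, low-contribution paths are cut short.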
if (bounce > 3) {
const float q = max(0.05f, 1.f - max(path_throughput.x, max(path_throughput.y, path_throughput.z)));
if (lcg_randomf(rng) < q) {
break;
}
path_throughput = path_throughput / (1.f - q);
}
} while (bounce < MAX_PATH_DEPTH);
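// Progressive accumulation: keep a running mean over all frames rendered so
// far in accum_buffer, then convert to sRGB for the display framebuffer.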
const float3 prev_color = make_float3(launch_params.accum_buffer[pixel_idx]);
const float3 accum_color = (illum + launch_params.frame_id * prev_color) / (launch_params.frame_id + 1);
launch_params.accum_buffer[pixel_idx] = make_float4(accum_color, 1.f);
launch_params.framebuffer[pixel_idx] = make_uchar4(
clamp(linear_to_srgb(accum_color.x) * 255.f, 0.f, 255.f),
clamp(linear_to_srgb(accum_color.y) * 255.f, 0.f, 255.f),
clamp(linear_to_srgb(accum_color.z) * 255.f, 0.f, 255.f), 255);
#ifdef REPORT_RAY_STATS
launch_params.ray_stats_buffer[pixel_idx] = ray_count;
#endif
}
extern "C" __global__ void __miss__miss() {
// Flag the miss by writing a negative t_hit (payload register 2)
optixSetPayload_2(float_as_int(-1.f));
float3 dir = optixGetWorldRayDirection();
// Apply our miss "shader" to draw the checkerboard background
float u = (1.f + atan2(dir.x, -dir.z) * M_1_PI) * 0.5f;
float v = acos(dir.y) * M_1_PI;
int check_x = u * 10.f;
int check_y = v * 10.f;
if (dir.y > -0.1f && (check_x + check_y) % 2 == 0) {
optixSetPayload_4(float_as_int(0.5f));
optixSetPayload_5(float_as_int(0.5f));
optixSetPayload_6(float_as_int(0.5f));
} else {
optixSetPayload_4(float_as_int(0.1f));
optixSetPayload_5(float_as_int(0.1f));
optixSetPayload_6(float_as_int(0.1f));
}
}
extern "C" __global__ void __miss__occlusion_miss() {
optixSetPayload_0(0);
}
extern "C" __global__ void __closesthit__closest_hit() {
const HitGroupParams ¶ms = get_shader_params<HitGroupParams>();
const float2 bary = optixGetTriangleBarycentrics();
const uint3 indices = params.index_buffer[optixGetPrimitiveIndex()];
const float3 v0 = params.vertex_buffer[indices.x];
const float3 v1 = params.vertex_buffer[indices.y];
const float3 v2 = params.vertex_buffer[indices.z];
float3 normal = normalize(cross(v1 - v0, v2 - v0));
normal = normalize(optixTransformNormalFromObjectToWorldSpace(normal));
float2 uv = make_float2(0.f);
if (params.uv_buffer) {
float2 uva = params.uv_buffer[indices.x];
float2 uvb = params.uv_buffer[indices.y];
float2 uvc = params.uv_buffer[indices.z];
uv = (1.f - bary.x - bary.y) * uva
+ bary.x * uvb + bary.y * uvc;
}
optixSetPayload_0(float_as_int(uv.x));
optixSetPayload_1(float_as_int(uv.y));
optixSetPayload_2(float_as_int(optixGetRayTmax()));
optixSetPayload_3(params.material_id);
optixSetPayload_4(float_as_int(normal.x));
optixSetPayload_5(float_as_int(normal.y));
optixSetPayload_6(float_as_int(normal.z));
}
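// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file: the balance between the
// light sample and the BRDF sample above is assumed to use the standard power
// heuristic with beta = 2. power_heuristic_sketch is a hypothetical name; the
// renderer's own power_heuristic() is expected to compute the same quantity.
// Callers guard against both pdfs being zero before dividing.
__device__ __forceinline__ float power_heuristic_sketch(float n_f, float pdf_f,
                                                         float n_g, float pdf_g)
{
    // Weight for strategy f when n_f samples are drawn from f and n_g from g.
    const float f = n_f * pdf_f;
    const float g = n_g * pdf_g;
    return (f * f) / (f * f + g * g);
}
// ---------------------------------------------------------------------------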
|
the_stack
|
//
// This file contains dimension reduction operation functions and
// kernels that work on both contiguous and non-contiguous tensor
// arguments of arbitrary (up to MAX_CUTORCH_DIMS) dimensioned
// arguments without copying or temporary storage, for reducing an
// entire tensor to one value.
//
#include "THCReduceApplyUtils.cuh"
// Size per each reduction block
#define THC_REDUCE_ALL_BLOCK_SIZE 1024L
// Cutoff size for two-pass reduction
#define THC_TWO_PASS_REDUCTION_SIZE 2048L
// Kernel that handles an entire reduction of a tensor in one pass
template <typename ModifyOp,
typename ReduceOp,
typename ReduceAccOp,
typename InT,
typename AccT,
typename IndexType,
int ADims>
__global__ void
kernelReduceAll(TensorInfo<InT, IndexType> in,
IndexType totalElements,
AccT init,
ModifyOp modifyOp,
ReduceOp reduceOp,
ReduceAccOp reduceAccOp,
AccT* out) {
// With a block-wide stride, have each thread perform its own reduction.
AccT r = init;
for (IndexType i = threadIdx.x; i < totalElements; i += blockDim.x) {
const IndexType inOffset = IndexToOffset<InT, IndexType, ADims>::get(i, in);
r = reduceOp(r, modifyOp(in.data[inOffset]));
}
// Reduce within the block
extern __shared__ char smemChar[];
AccT* smem = (AccT*) smemChar;
r = reduceBlock<AccT, ReduceAccOp>(smem, blockDim.x, r, reduceAccOp, init);
if (threadIdx.x == 0) {
// Write out reduced value
*out = r;
}
}
template <typename IndexType>
__device__ __forceinline__ IndexType getStartIndex(IndexType totalSize) {
IndexType sizePerBlock = THCCeilDiv(totalSize, (IndexType) gridDim.x);
return blockIdx.x * sizePerBlock;
}
template <typename IndexType>
__device__ __forceinline__ IndexType getEndIndex(IndexType totalSize) {
IndexType sizePerBlock = THCCeilDiv(totalSize, (IndexType) gridDim.x);
return min((IndexType) ((blockIdx.x + 1) * sizePerBlock), totalSize);
}
// Kernel that handles an entire reduction of a tensor in two passes
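// Pass 1: each block reduces its contiguous chunk of the input (bounded by
// getStartIndex/getEndIndex above) to one partial value in scratchSpace.
// Pass 2 (kernelReduceAllPass2 below) then reduces those per-block partials
// within a single block.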
template <typename ModifyOp,
typename ReduceOp,
typename ReduceAccOp,
typename InT,
typename AccT,
typename IndexType,
int ADims>
__global__ void
kernelReduceAllPass1(TensorInfo<InT, IndexType> in,
IndexType totalElements,
AccT init,
ModifyOp modifyOp,
ReduceOp reduceOp,
ReduceAccOp reduceAccOp,
AccT* scratchSpace) {
const IndexType startIndex = getStartIndex<IndexType>(totalElements);
const IndexType endIndex = getEndIndex<IndexType>(totalElements);
// With a block-wide stride, have each thread perform its own reduction.
AccT r = init;
for (IndexType i = startIndex + threadIdx.x; i < endIndex; i += blockDim.x) {
const IndexType inOffset = IndexToOffset<InT, IndexType, ADims>::get(i, in);
r = reduceOp(r, modifyOp(in.data[inOffset]));
}
// Reduce within the block
extern __shared__ char smemChar[];
AccT* smem = (AccT*) smemChar;
r = reduceBlock<AccT, ReduceAccOp>(smem, blockDim.x, r, reduceAccOp, init);
if (threadIdx.x == 0) {
// Write out block-wide reduced value
scratchSpace[blockIdx.x] = r;
}
}
template <typename ReduceOp, typename T, typename IndexType>
__global__ void
kernelReduceAllPass2(int numPass1Blocks,
T init,
ReduceOp reduceOp,
T* scratchSpace,
T* out) {
T r = init;
if (threadIdx.x < numPass1Blocks) {
r = scratchSpace[threadIdx.x];
}
// Reduce within the block
extern __shared__ char smemChar[];
T* smem = (T*) smemChar;
r = reduceBlock<T, ReduceOp>(smem, numPass1Blocks, r, reduceOp, init);
if (threadIdx.x == 0) {
*out = r;
}
}
// Perform a two-pass reduction if the tensor is large enough to
// warrant it.
inline bool isTwoPassReductionSize(ptrdiff_t elements) {
return (elements > THC_TWO_PASS_REDUCTION_SIZE);
}
template <typename InT, typename AccT>
inline ptrdiff_t getTwoPassBlocks(THCState* state, ptrdiff_t elements) {
ptrdiff_t numBlocks = THCCeilDiv(elements, (ptrdiff_t)THC_REDUCE_ALL_BLOCK_SIZE);
// We can only have as many blocks as there is scratch space
ptrdiff_t scratchSpace =
THCState_getCurrentDeviceScratchSpaceSize(state) / sizeof(AccT);
THAssert(scratchSpace > 0);
// Limit to 1024 due to dimensionality constraint
if (scratchSpace > 1024) {
scratchSpace = 1024;
}
if (numBlocks > scratchSpace) {
numBlocks = scratchSpace;
}
return numBlocks;
}
// Get the block/grid size that we want
template <typename InT, typename AccT>
inline void getPass1ReduceBlockGrid(THCState* state, ptrdiff_t elements,
dim3& grid, dim3& block) {
grid = dim3(getTwoPassBlocks<InT, AccT>(state, elements));
block = dim3(THC_REDUCE_ALL_BLOCK_SIZE);
}
template <typename InT, typename AccT>
inline void getPass2ReduceBlockGrid(THCState* state, ptrdiff_t elements,
dim3& grid, dim3& block) {
grid = dim3(1);
// We only need as many threads as there were blocks originally
block = dim3(getTwoPassBlocks<InT, AccT>(state, elements));
}
template <typename InT, typename AccT>
inline void getSinglePassReduceBlockGrid(ptrdiff_t elements,
dim3& grid, dim3& block) {
grid = dim3(1);
block = dim3(THC_REDUCE_ALL_BLOCK_SIZE);
}
template <typename ModifyOp,
typename ReduceOp,
typename ReduceAccOp,
typename InT,
typename AccT,
typename IndexType,
int ADims>
void callReduceAll(THCState* state,
const TensorInfo<InT, IndexType>& in,
ptrdiff_t totalElements,
AccT init,
const ModifyOp& modifyOp,
const ReduceOp& reduceOp,
const ReduceAccOp& reduceAccOp,
AccT* devOut) {
dim3 grid;
dim3 block;
if (isTwoPassReductionSize(totalElements)) {
bool freeScratchSpace = false;
void* scratchSpace = THCState_getCurrentDeviceScratchSpace(state);
if (!scratchSpace) {
THCudaCheck(THCudaMalloc(state, &scratchSpace,
THCState_getCurrentDeviceScratchSpaceSize(state)));
freeScratchSpace = true;
}
getPass1ReduceBlockGrid<InT, AccT>(state, totalElements, grid, block);
size_t smemSize = block.x * sizeof(AccT);
kernelReduceAllPass1<ModifyOp, ReduceOp, ReduceAccOp, InT, AccT, IndexType, ADims>
<<<grid, block, smemSize, THCState_getCurrentStream(state)>>>(
in, (IndexType) totalElements, init, modifyOp, reduceOp, reduceAccOp,
(AccT*) scratchSpace);
int numPass1Blocks = grid.x;
getPass2ReduceBlockGrid<InT, AccT>(state, totalElements, grid, block);
smemSize = block.x * sizeof(AccT);
kernelReduceAllPass2<ReduceAccOp, AccT, IndexType>
<<<grid, block, smemSize, THCState_getCurrentStream(state)>>>(
numPass1Blocks, init, reduceAccOp,
(AccT*) scratchSpace, devOut);
if (freeScratchSpace) {
THCudaCheck(THCudaFree(state, scratchSpace));
}
} else {
getSinglePassReduceBlockGrid<InT, AccT>(totalElements, grid, block);
size_t smemSize = block.x * sizeof(AccT);
kernelReduceAll<ModifyOp, ReduceOp, ReduceAccOp, InT, AccT, IndexType, ADims>
<<<grid, block, smemSize, THCState_getCurrentStream(state)>>>(
in, (IndexType) totalElements, init, modifyOp, reduceOp, reduceAccOp, devOut);
}
}
// Reduces the entire tensor to one value. `out` points to
// host-resident memory.
template <typename TensorType,
typename ModifyOp,
typename ReduceOp,
typename ReduceAccOp,
typename AccT>
bool THC_reduceAll(THCState* state,
TensorType* in,
const ModifyOp& modifyOp,
const ReduceOp& reduceOp,
const ReduceAccOp& reduceAccOp,
AccT init,
AccT* out,
int outOnDevice) {
ptrdiff_t inElements = TensorUtils<TensorType>::getNumElements(state, in);
if (TensorUtils<TensorType>::getDims(state, in) > MAX_CUTORCH_DIMS) {
return false;
}
if (TensorUtils<TensorType>::getDims(state, in) == 0) {
// Zero-dim tensor; do nothing
*out = init;
return true;
}
bool freeDevOut = false;
AccT* devOut = out;
if (!outOnDevice) {
// Use the stream-specific scratch space for the reduction kernel
// to write out its value
devOut = (AccT*) THCState_getCurrentDeviceScratchSpace(state);
if (!devOut) {
THCudaCheck(THCudaMalloc(state, (void**)&devOut,
THCState_getCurrentDeviceScratchSpaceSize(state)));
freeDevOut = true;
}
}
// It is possible that the tensor dimensions are able to be collapsed,
// and thus we can reduce the actual code complexity of the indexing by
// exploiting this knowledge statically, since the div/mod is the
// most expensive part of the operation, more so than memory accesses.
// For instance, a contiguous input tensor can be collapsed to one
// dimension, and the loop to translate the linear index to the array
// index can be similarly collapsed. That is what this unrolling is for.
#define HANDLE_CASE(TYPE, IN) \
callReduceAll<ModifyOp, ReduceOp, ReduceAccOp, \
typename TensorUtils<TensorType>::DataType, \
AccT, \
TYPE, IN>( \
state, inInfo, inElements, init, modifyOp, \
reduceOp, reduceAccOp, devOut);
#define HANDLE_IN_CASE(TYPE, IN) \
{ \
if (inInfo.isContiguous()) { \
HANDLE_CASE(TYPE, -2); \
} else { \
switch (IN) { \
case 1: \
HANDLE_CASE(TYPE, 1); \
break; \
case 2: \
HANDLE_CASE(TYPE, 2); \
break; \
default: \
HANDLE_CASE(TYPE, -1); \
break; \
} \
} \
}
if (TensorUtils<TensorType>::canUse32BitIndexMath(state, in)) {
TensorInfo<typename TensorUtils<TensorType>::DataType, unsigned int> inInfo =
getTensorInfo<TensorType, unsigned int>(state, in);
inInfo.collapseDims();
HANDLE_IN_CASE(unsigned int, inInfo.dims);
} else {
TensorInfo<typename TensorUtils<TensorType>::DataType,
uint64_t> inInfo =
getTensorInfo<TensorType, uint64_t>(state, in);
inInfo.collapseDims();
// For large tensors, we only compile the completely contiguous
// version and the completely generic version, to reduce
// compilation time.
if (inInfo.isContiguous()) {
HANDLE_IN_CASE(uint64_t, -2);
} else {
HANDLE_IN_CASE(uint64_t, -1);
}
}
#undef HANDLE_CASE
#undef HANDLE_IN_CASE
// If our destination is not on the device, copy the value back to
// the host (synchronous!)
if (!outOnDevice) {
cudaStream_t stream = THCState_getCurrentStream(state);
THCudaCheck(cudaMemcpyAsync(out,
devOut,
sizeof(AccT),
cudaMemcpyDeviceToHost,
stream));
THCudaCheck(cudaStreamSynchronize(stream));
}
if (freeDevOut) {
THCudaCheck(THCudaFree(state, devOut));
}
return true;
}
#undef THC_REDUCE_ALL_BLOCK_SIZE
#undef THC_TWO_PASS_REDUCTION_SIZE
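// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file: a minimal, self-contained
// CUDA kernel showing the two-level pattern used above -- a block-stride
// per-thread partial sum followed by a shared-memory tree reduction -- which
// is what reduceBlock() from THCReduceApplyUtils.cuh is assumed to implement
// for arbitrary operators. sketchBlockSum is a hypothetical name and is not
// referenced by the code above.
template <typename T>
__global__ void sketchBlockSum(const T* in, int n, T* out) {
  extern __shared__ unsigned char sketchSmemRaw[];
  T* smem = reinterpret_cast<T*>(sketchSmemRaw);
  // Each thread folds a strided subset of the input into a private sum.
  T r = T(0);
  for (int i = threadIdx.x; i < n; i += blockDim.x) {
    r += in[i];
  }
  smem[threadIdx.x] = r;
  __syncthreads();
  // Tree reduction in shared memory: halve the active threads each step.
  for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
    if (threadIdx.x < stride) {
      smem[threadIdx.x] += smem[threadIdx.x + stride];
    }
    __syncthreads();
  }
  if (threadIdx.x == 0) {
    *out = smem[0];
  }
}
// Example launch (assumes blockDim.x is a power of two, one block):
//   sketchBlockSum<float><<<1, 1024, 1024 * sizeof(float), stream>>>(d_in, n, d_out);
// ---------------------------------------------------------------------------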
#endif // THC_REDUCEALL_INC
|
the_stack
|
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cuda_profiler_api.h>
#include "THC/THC.h"
#include <ATen/cuda/CUDAContext.h>
#include <torch/extension.h>
#include <math.h>
#include "strided_batched_gemm.h"
#include "softmax.h"
#include "dropout.h"
#include "layer_norm.h"
// symbol to be automatically resolved by PyTorch libs
extern THCState *state;
namespace multihead_attn {
namespace encdec {
namespace cublas_gemmex {
std::vector<torch::Tensor> fwd_cuda(
bool use_time_mask,
bool is_training,
int heads,
torch::Tensor const& inputs_q,
torch::Tensor const& inputs_kv,
torch::Tensor const& input_weights_q,
torch::Tensor const& input_weights_kv,
torch::Tensor const& output_weights,
const uint8_t* pad_mask,
float dropout_prob
)
{
const int embed_dim = inputs_q.size(2);
const int sequences = inputs_q.size(1);
const int q_seq_len = inputs_q.size(0);
const int k_seq_len = inputs_kv.size(0);
const int batches_q = sequences * q_seq_len;
const int batches_kv = sequences * k_seq_len;
const int head_dim = embed_dim / heads;
const int output_lin_q_dim = embed_dim;
const int output_lin_kv_dim = 2 * embed_dim;
const int attn_batches = heads * sequences;
const int lead_dim_q = attn_batches * head_dim;
const int lead_dim_kv = attn_batches * 2 *head_dim;
const int batch_stride_q = head_dim;
const int batch_stride_kv = 2 * head_dim;
const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
const float alpha = 1.0;
const float beta = 0.0;
const float scale = 1.0 / sqrt(static_cast<float>(head_dim));
// There is no reason to use more than one stream as every kernel is
// sequentially dependent
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
cublasSetStream(handle, stream);
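// Forward pipeline (single stream; every kernel depends on the previous one):
// 1) input linears: Q from inputs_q/W_q, interleaved [K|V] from inputs_kv/W_kv
// 2) attention logits = scale * Q*K^T (batched GEMM), scale = 1/sqrt(head_dim)
// 3) softmax (optionally pad- or time-masked), dropout when training
// 4) context = attn * V (batched GEMM), followed by the output linear layer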
// 3 Intermediate Results + Output (Note: dropout intermediates are generated by ATen library code)
auto act_options = inputs_q.options().requires_grad(false);
auto mask_options = act_options.dtype(torch::kUInt8);
torch::Tensor input_lin_q_results = torch::empty({q_seq_len, sequences, output_lin_q_dim}, act_options);
torch::Tensor input_lin_kv_results = torch::empty({k_seq_len, sequences, output_lin_kv_dim}, act_options);
torch::Tensor softmax_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_results = torch::empty({attn_batches, q_seq_len, k_seq_len}, act_options);
torch::Tensor dropout_mask = torch::empty({attn_batches, q_seq_len, k_seq_len}, mask_options);
torch::Tensor matmul2_results = torch::empty({q_seq_len, attn_batches, head_dim}, act_options);
torch::Tensor outputs = torch::empty_like(inputs_q, act_options);
// Input Linear Results Pointers to Q, K, and V of interleaved activations
void* q_lin_results_ptr = static_cast<void*>(input_lin_q_results.data_ptr());
void* k_lin_results_ptr = static_cast<void*>(input_lin_kv_results.data_ptr());
void* v_lin_results_ptr = static_cast<void*>(static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim);
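// K and V are stored interleaved per head in input_lin_kv_results (2 * head_dim
// values per attention batch), so the V pointer starts head_dim elements after
// K and the kv GEMMs below use the 2 * head_dim lead dimension / batch stride.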
// Softmax Intermediate Result Ptr (used by Matmul1 -> Softmax)
void* softmax_results_ptr = static_cast<void*>(softmax_results.data_ptr());
char a_layout_t{'t'};
char a_layout_n{'n'};
char b_layout_n{'n'};
THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
// Input Linear Q Fwd
THCublasCheck(cublasGemmEx(handle,
CUBLAS_OP_T,
CUBLAS_OP_N,
output_lin_q_dim,
batches_q,
embed_dim,
static_cast<const void*>(&alpha),
static_cast<const void*>(input_weights_q.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(inputs_q.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(&beta),
q_lin_results_ptr,
CUDA_R_16F,
output_lin_q_dim,
CUDA_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Input Linear KV Fwd
THCublasCheck(cublasGemmEx(handle,
CUBLAS_OP_T,
CUBLAS_OP_N,
output_lin_kv_dim,
batches_kv,
embed_dim,
static_cast<const void*>(&alpha),
static_cast<const void*>(input_weights_kv.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(inputs_kv.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(&beta),
k_lin_results_ptr,
CUDA_R_16F,
output_lin_kv_dim,
CUDA_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// MatMul1 of Dot-Product Attention Plus scaling by 1/Sqrt(head size)
gemm_switch_fp32accum( state,
a_layout_t,
b_layout_n,
k_seq_len,
q_seq_len,
head_dim,
scale,
static_cast<const half*>(k_lin_results_ptr),
lead_dim_kv,
batch_stride_kv,
static_cast<const half*>(q_lin_results_ptr),
lead_dim_q,
batch_stride_q,
beta,
static_cast<half*>(softmax_results_ptr),
k_seq_len,
k_seq_len*q_seq_len,
attn_batches);
// Padded Softmax
bool softmax_success = false;
if (pad_mask == nullptr) {
softmax_success = dispatch_softmax<half, half, float>(
reinterpret_cast<half*>(softmax_results_ptr),
reinterpret_cast<const half*>(softmax_results_ptr),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len);
} else {
if (use_time_mask) {
softmax_success = dispatch_time_masked_softmax<half, half, float>(
reinterpret_cast<half*>(softmax_results_ptr),
reinterpret_cast<const half*>(softmax_results_ptr),
pad_mask,
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
q_seq_len);
} else {
softmax_success = dispatch_masked_softmax<half, half, float>(
reinterpret_cast<half*>(softmax_results_ptr),
reinterpret_cast<const half*>(softmax_results_ptr),
pad_mask,
k_seq_len,
k_seq_len,
attn_batches*q_seq_len,
attn_batches*q_seq_len/sequences);
}
}
assert(softmax_success);
if (is_training) {
apex_fused_dropout_cuda<at::Half,float,uint32_t>(
static_cast<at::Half const*>(softmax_results.data_ptr()),
static_cast<at::Half*>(dropout_results.data_ptr()),
static_cast<uint8_t*>(dropout_mask.data_ptr()),
dropout_elems,
(1.0f - dropout_prob));
}
// Matmul2
gemm_switch_fp32accum( state,
a_layout_n,
b_layout_n,
head_dim,
q_seq_len,
k_seq_len,
alpha,
static_cast<const half*>(v_lin_results_ptr),
lead_dim_kv,
batch_stride_kv,
(is_training) ? static_cast<const half*>(dropout_results.data_ptr()) : static_cast<const half*>(softmax_results.data_ptr()) ,
k_seq_len,
k_seq_len*q_seq_len,
beta,
static_cast<half*>(matmul2_results.data_ptr()),
head_dim*attn_batches,
head_dim,
attn_batches);
// Output Linear
THCublasCheck(cublasGemmEx(handle,
CUBLAS_OP_T,
CUBLAS_OP_N,
embed_dim,
batches_q,
embed_dim,
static_cast<const void*>(&alpha),
static_cast<const void*>(output_weights.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(matmul2_results.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(&beta),
static_cast<void*>(outputs.data_ptr()),
CUDA_R_16F,
embed_dim,
CUDA_R_32F,
//CUBLAS_GEMM_ALGO1_TENSOR_OP));
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
return {
input_lin_q_results,
input_lin_kv_results,
softmax_results,
dropout_results,
dropout_mask,
matmul2_results,
outputs
};
}
std::vector<torch::Tensor> bwd_cuda(
int heads,
torch::Tensor const& output_grads,
torch::Tensor const& matmul2_results,
torch::Tensor const& dropout_results,
torch::Tensor const& softmax_results,
torch::Tensor const& input_lin_q_results,
torch::Tensor const& input_lin_kv_results,
torch::Tensor const& inputs_q,
torch::Tensor const& inputs_kv,
torch::Tensor const& input_weights_q,
torch::Tensor const& input_weights_kv,
torch::Tensor const& output_weights,
torch::Tensor const& dropout_mask,
float dropout_prob
)
{
const int embed_dim = inputs_q.size(2);
const int sequences = inputs_q.size(1);
const int q_seq_len = inputs_q.size(0);
const int k_seq_len = inputs_kv.size(0);
const int batches_q = sequences * q_seq_len;
const int batches_kv = sequences * k_seq_len;
const int head_dim = embed_dim / heads;
const int output_lin_q_dim = embed_dim;
const int output_lin_kv_dim = 2 * embed_dim;
const int attn_batches = heads * sequences;
const int lead_dim_q = attn_batches * head_dim;
const int lead_dim_kv = attn_batches * 2 *head_dim;
const int batch_stride_q = head_dim;
const int batch_stride_kv = 2 * head_dim;
const int dropout_elems = attn_batches * q_seq_len * k_seq_len;
const float alpha = 1.0;
const float beta = 0.0;
const float scale = 1.0 / sqrt(static_cast<float>(head_dim));
// TODO: multiple streams could be used in backprop; for now everything runs
// on the single current stream.
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
cublasSetStream(handle, stream);
// Output Tensor Allocations
torch::Tensor input_q_grads = torch::empty_like(inputs_q);
torch::Tensor input_kv_grads = torch::empty_like(inputs_kv);
torch::Tensor input_weight_q_grads = torch::empty_like(input_weights_q);
torch::Tensor input_weight_kv_grads = torch::empty_like(input_weights_kv);
torch::Tensor output_weight_grads = torch::empty_like(output_weights);
// Intermediate Tensor Allocations
at::Tensor output_lin_grads = torch::empty_like(matmul2_results);
at::Tensor matmul2_grads = torch::empty_like(dropout_results);
at::Tensor input_lin_q_output_grads = torch::empty_like(input_lin_q_results);
at::Tensor input_lin_kv_output_grads = torch::empty_like(input_lin_kv_results);
auto q_lin_results_ptr = static_cast<half*>(input_lin_q_results.data_ptr());
auto k_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr());
auto v_lin_results_ptr = static_cast<half*>(input_lin_kv_results.data_ptr()) + head_dim;
auto q_lin_grads_ptr = static_cast<half*>(input_lin_q_output_grads.data_ptr());
auto k_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr());
auto v_lin_grads_ptr = static_cast<half*>(input_lin_kv_output_grads.data_ptr()) + head_dim;
char a_layout_n{'n'};
char a_layout_t{'t'};
char b_layout_n{'n'};
char b_layout_t{'t'};
THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
// Output Linear Dgrad
THCublasCheck(cublasGemmEx(handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
embed_dim,
batches_q,
embed_dim,
static_cast<const void*>(&alpha),
static_cast<const void*>(output_weights.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(output_grads.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(&beta),
static_cast<void*>(output_lin_grads.data_ptr()),
CUDA_R_16F,
embed_dim,
CUDA_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Output Linear Wgrad
THCublasCheck(cublasGemmEx(handle,
CUBLAS_OP_N,
CUBLAS_OP_T,
embed_dim,
embed_dim,
batches_q,
static_cast<const void*>(&alpha),
static_cast<const void*>(matmul2_results.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(output_grads.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(&beta),
static_cast<void*>(output_weight_grads.data_ptr()),
CUDA_R_16F,
embed_dim,
CUDA_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// MatMul2 Dgrad1
gemm_switch_fp32accum( state,
a_layout_t,
b_layout_n,
k_seq_len,
q_seq_len,
head_dim,
alpha,
static_cast<const half*>(v_lin_results_ptr),
lead_dim_kv,
batch_stride_kv,
static_cast<const half*>(output_lin_grads.data_ptr()),
head_dim*attn_batches,
head_dim,
beta,
static_cast<half*>(matmul2_grads.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
attn_batches);
// Matmul2 Dgrad2
gemm_switch_fp32accum( state,
a_layout_n,
b_layout_t,
head_dim,
k_seq_len,
q_seq_len,
alpha,
static_cast<const half*>(output_lin_grads.data_ptr()),
head_dim*attn_batches,
head_dim,
static_cast<const half*>(dropout_results.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
beta,
v_lin_grads_ptr,
lead_dim_kv,
batch_stride_kv,
attn_batches);
// Apply Dropout Mask and Scale by Dropout Probability
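// Inverted-dropout backward: zero gradients at dropped positions using the
// saved mask and scale the survivors by 1 / (1 - dropout_prob), mirroring the
// scaling applied in the forward pass.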
apex_masked_scale_cuda<at::Half,float,uint32_t>(
static_cast<at::Half const*>(matmul2_grads.data_ptr()),
static_cast<at::Half*>(matmul2_grads.data_ptr()),
static_cast<uint8_t const*>(dropout_mask.data_ptr()),
dropout_elems,
(1.0 / (1.0 - dropout_prob)));
// Softmax Grad
bool softmax_success = false;
softmax_success = dispatch_softmax_backward<half, half, float>(
static_cast<half*>(matmul2_grads.data_ptr()),
static_cast<half*>(matmul2_grads.data_ptr()),
reinterpret_cast<half const*>(softmax_results.data_ptr()),
k_seq_len,
k_seq_len,
attn_batches*q_seq_len);
assert(softmax_success);
// Matmul1 Dgrad1
gemm_switch_fp32accum( state,
a_layout_n,
b_layout_n,
head_dim,
q_seq_len,
k_seq_len,
scale,
k_lin_results_ptr,
lead_dim_kv,
batch_stride_kv,
static_cast<half*>(matmul2_grads.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
beta,
q_lin_grads_ptr,
lead_dim_q,
batch_stride_q,
attn_batches);
// Matmul1 Dgrad2
gemm_switch_fp32accum( state,
a_layout_n,
b_layout_t,
head_dim,
k_seq_len,
q_seq_len,
scale,
q_lin_results_ptr,
lead_dim_q,
batch_stride_q,
static_cast<half*>(matmul2_grads.data_ptr()),
k_seq_len,
k_seq_len*q_seq_len,
beta,
k_lin_grads_ptr,
lead_dim_kv,
batch_stride_kv,
attn_batches);
// Input Linear Q Dgrad
THCublasCheck(cublasGemmEx(handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
embed_dim,
batches_q,
output_lin_q_dim,
static_cast<const void*>(&alpha),
static_cast<const void*>(input_weights_q.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(q_lin_grads_ptr),
CUDA_R_16F,
output_lin_q_dim,
static_cast<const void*>(&beta),
static_cast<void*>(input_q_grads.data_ptr()),
CUDA_R_16F,
embed_dim,
CUDA_R_32F,
//CUBLAS_GEMM_ALGO10_TENSOR_OP));
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Input Linear Q Wgrad
THCublasCheck(cublasGemmEx(handle,
CUBLAS_OP_N,
CUBLAS_OP_T,
embed_dim,
output_lin_q_dim,
batches_q,
static_cast<const void*>(&alpha),
static_cast<const void*>(inputs_q.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(q_lin_grads_ptr),
CUDA_R_16F,
output_lin_q_dim,
static_cast<const void*>(&beta),
static_cast<void*>(input_weight_q_grads.data_ptr()),
CUDA_R_16F,
embed_dim,
CUDA_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Input Linear KV Dgrad
THCublasCheck(cublasGemmEx(handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
embed_dim,
batches_kv,
output_lin_kv_dim,
static_cast<const void*>(&alpha),
static_cast<const void*>(input_weights_kv.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(k_lin_grads_ptr),
CUDA_R_16F,
output_lin_kv_dim,
static_cast<const void*>(&beta),
static_cast<void*>(input_kv_grads.data_ptr()),
CUDA_R_16F,
embed_dim,
CUDA_R_32F,
//CUBLAS_GEMM_ALGO10_TENSOR_OP));
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
// Input Linear KV Wgrad
THCublasCheck(cublasGemmEx(handle,
CUBLAS_OP_N,
CUBLAS_OP_T,
embed_dim,
output_lin_kv_dim,
batches_kv,
static_cast<const void*>(&alpha),
static_cast<const void*>(inputs_kv.data_ptr()),
CUDA_R_16F,
embed_dim,
static_cast<const void*>(k_lin_grads_ptr),
CUDA_R_16F,
output_lin_kv_dim,
static_cast<const void*>(&beta),
static_cast<void*>(input_weight_kv_grads.data_ptr()),
CUDA_R_16F,
embed_dim,
CUDA_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
return {
input_q_grads,
input_kv_grads,
input_weight_q_grads,
input_weight_kv_grads,
output_weight_grads
};
}
} // end namespace cublas_gemmex
} // end namespace encdec
} // end namespace multihead_attn
|
the_stack
|
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/image_augmentation.hpp>
#include <nbla/cuda/math.hpp>
#include <nbla/variable.hpp>
#include <curand_kernel.h>
namespace nbla {
__global__ void kernel_prepare_curand(const int num, curandStateXORWOW_t *state,
const int seed) {
NBLA_CUDA_KERNEL_LOOP(idx, num) { curand_init(seed, idx, 0, &state[idx]); }
}
template <typename T>
__global__ void IAKernel(const T *x, const int w_in, const int h_in,
const float x0_in, const float y0_in, T *y,
const int w_out, const int h_out, const float x_ax,
const float y_ax, const float x_ay, const float y_ay,
const float distortion, const float brightness,
const float contrast, const float contrast_center,
curandStateXORWOW_t *state, const float noise) {
const int x_out = blockDim.x * blockIdx.x + threadIdx.x;
const int y_out = blockDim.y * blockIdx.y + threadIdx.y;
if (x_out < w_out && y_out < h_out) {
const int out_offset = w_out * y_out + x_out;
const float w_out_half = w_out * 0.5f;
const float h_out_half = h_out * 0.5f;
float dist_x = (x_out - w_out_half) / w_out_half;
float dist_y = (y_out - h_out_half) / h_out_half;
const float r = sqrt(dist_x * dist_x + dist_y * dist_y);
const float r2 = r * r;
const float dist_scale = 1.0f / (1.0f + distortion);
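// Radial (barrel) distortion: displace each normalized coordinate by
// distortion * r^2 and rescale by 1 / (1 + distortion) so that points at
// r = 1 map back onto the output frame.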
dist_x = (dist_x + dist_x * distortion * r2) * w_out_half * dist_scale +
w_out_half;
dist_y = (dist_y + dist_y * distortion * r2) * h_out_half * dist_scale +
h_out_half;
float x_in = x0_in + dist_x * x_ax + dist_y * y_ax;
float y_in = y0_in + dist_x * x_ay + dist_y * y_ay;
if (x_in < 0) {
x_in = 0.0;
} else if (x_in > w_in - 1) {
x_in = w_in - 1;
}
if (y_in < 0) {
y_in = 0.0;
} else if (y_in > h_in - 1) {
y_in = h_in - 1;
}
// Prepare linear interpolation
const int intx = (int)x_in;
const int inty = (int)y_in;
const float fmodx = x_in - intx;
const float fmody = y_in - inty;
const int intx_plus1 = intx < w_in - 1 ? intx + 1 : intx;
const int inty_plus1 = inty < h_in - 1 ? inty + 1 : inty;
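// Bilinear interpolation: blend the four neighboring input pixels with
// weights (1-fx)(1-fy), fx(1-fy), (1-fx)fy and fx*fy, which sum to one.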
// Top left
const int pos0 = intx + inty * w_in;
const T pos0_gain = (1 - fmodx) * (1 - fmody);
// Top right
const int pos1 = intx_plus1 + inty * w_in;
const T pos1_gain = fmodx * (1 - fmody);
// Bottom left
const int pos2 = intx + inty_plus1 * w_in;
const T pos2_gain = (1 - fmodx) * fmody;
// Bottom right
const int pos3 = intx_plus1 + inty_plus1 * w_in;
const T pos3_gain = fmodx * fmody;
// Linear interpolation
T result = x[pos0] * pos0_gain + x[pos1] * pos1_gain + x[pos2] * pos2_gain +
x[pos3] * pos3_gain;
result = (result + brightness) * contrast + contrast_center;
if (state) {
result += curand_normal(&state[out_offset]) * noise;
}
y[out_offset] = result;
}
}
template <typename T>
void ImageAugmentationCuda<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
ImageAugmentation<T>::setup_impl(inputs, outputs);
Shape_t shape_in = inputs[0]->shape();
const int w_in = shape_in[shape_in.size() - 1];
const int h_in = shape_in[shape_in.size() - 2];
const int num_ch = shape_in.size() >= 3 ? shape_in[shape_in.size() - 3] : 1;
const int num_image = inputs[0]->size() / (w_in * h_in * num_ch);
Shape_t shape_out = outputs[0]->shape();
const int w_out = shape_out[shape_out.size() - 1];
const int h_out = shape_out[shape_out.size() - 2];
int curand_state_len = 0;
if (this->noise_ > 0.0) {
const int data_size = w_out * h_out;
if (data_size > curand_state_len) {
curand_state_len = data_size;
}
}
if (curand_state_len) {
int curand_state_size =
(sizeof(curandStateXORWOW_t) - 1) / sizeof(T) + sizeof(int);
// prepare curand state
Shape_t state_shape;
state_shape.push_back(curand_state_len * curand_state_size);
curand_state_.reshape(state_shape, true);
int *state = curand_state_.cast_data_and_get_pointer<int>(this->ctx_, true);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_prepare_curand, curand_state_len,
(curandStateXORWOW_t *)state, this->seed_);
}
output_data_for_recomp_.reshape(outputs[0]->shape(), true);
}
template <typename T>
void ImageAugmentationCuda<T>::setup_recompute_impl(const Variables &inputs,
const Variables &outputs) {
save_output_data_ = true;
}
template <typename T>
void ImageAugmentationCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(std::stoi(this->ctx_.device_id));
Shape_t shape_in = inputs[0]->shape();
const int w_in = shape_in[shape_in.size() - 1];
const int h_in = shape_in[shape_in.size() - 2];
const int w_in_pad = w_in + this->pad_[1] * 2;
const int h_in_pad = h_in + this->pad_[0] * 2;
const int num_ch = shape_in.size() >= 3 ? shape_in[shape_in.size() - 3] : 1;
const int num_image = inputs[0]->size() / (w_in * h_in * num_ch);
// std::cout << "shape_in : w=" << w_in << ", h=" << h_in << ", ch=" << num_ch
// << ", num=" << num_image << "\n";
Shape_t shape_out = outputs[0]->shape();
const int w_out = shape_out[shape_out.size() - 1];
const int h_out = shape_out[shape_out.size() - 2];
// std::cout << "shape_out : w=" << w_out << ", h=" << h_out << "\n";
const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_);
Tc *y = outputs[0]->cast_data_and_get_pointer<Tc>(this->ctx_, true);
const int ch_size_in = h_in * w_in;
const int ch_size_out = h_out * w_out;
vector<float> channel_brightness(num_ch);
vector<float> channel_contrast(num_ch);
int *state =
this->noise_ > 0.0
? curand_state_.cast_data_and_get_pointer<int>(this->ctx_, false)
: nullptr;
for (int iim = 0; iim < num_image; ++iim) {
// Define augmentation settings
// std::cout << "* image " << iim << "\n";
const int im_offset_in = iim * w_in * h_in * num_ch;
const Tc *x_im = x + im_offset_in;
int im_offset_out = iim * w_out * h_out * num_ch;
Tc *y_im = y + im_offset_out;
// std::cout << "offset : in=" << im_offset_in << ", out=" << im_offset_out
// << "\n";
const float scale =
this->min_scale_ *
std::exp(
(this->rgen_() % 1001) * 0.001f *
std::log(this->max_scale_ /
this->min_scale_)); // [this->min_scale_, this->max_scale_]
const float scale_x = std::exp(-std::log(this->aspect_ratio_) * 0.5 +
(this->rgen_() % 1001) * 0.001f *
std::log(this->aspect_ratio_));
const float scale_y = 1.0 / scale_x;
const float i_scale_x = 1.0f / (scale * scale_x);
const float i_scale_y = 1.0f / (scale * scale_y);
// std::cout << "scale : min=" << min_scale_ << ", max=" << max_scale_ << ",
// v=" << scale << ", inv=" << i_scale << "\n";
const float angle = -this->angle_ +
((this->rgen_() % 1001) * 0.001f) * this->angle_ *
2; // [-angle_, angle_]
// std::cout << "angle : " << angle << "\n";
// Preparation
const float w_scaled = w_in_pad * scale * scale_x;
const float h_scaled = h_in_pad * scale * scale_y;
// std::cout << "shape_scaled : w=" << w_scaled << ", h=" << h_scaled <<
// "\n";
const float cx = (w_out - 1) * 0.5f;
const float cy = (h_out - 1) * 0.5f;
// std::cout << "center : x=" << cx << ", y=" << cy << "\n";
const float cx_scaled =
((this->rgen_() % 1001) * 0.001f) * (w_scaled - w_out) + cx;
const float cy_scaled =
((this->rgen_() % 1001) * 0.001f) * (h_scaled - h_out) + cy;
// std::cout << "center_scaled : x=" << cx_scaled << ", y=" << cy_scaled <<
// "\n";
const bool flip_lr = this->flip_lr_ & (this->rgen_() % 2);
const bool flip_ud = this->flip_ud_ & (this->rgen_() % 2);
const float global_brightness =
((this->rgen_() % 1001) * 0.001f * this->brightness_ * 2.0f) -
this->brightness_;
// std::cout << "global_brightness : " << global_brightness << "\n";
const float global_contrast = std::exp((this->rgen_() % 1001) * 0.001f *
std::log(this->contrast_) * 2.0f) /
this->contrast_;
// std::cout << "global_contrast : " << global_contrast << "\n";
for (int ic = 0; ic < num_ch; ++ic) {
const float ch_brightness =
this->brightness_each_
? ((this->rgen_() % 1001) * 0.001f * this->brightness_ * 2.0f) -
this->brightness_
: global_brightness;
channel_brightness[ic] = ch_brightness - this->contrast_center_;
// std::cout << "channel_brightness - 0.5 : " << channel_brightness[ic] <<
// "\n";
const float ch_contrast =
this->contrast_each_
? std::exp((this->rgen_() % 1001) * 0.001f *
std::log(this->contrast_) * 2.0f) /
this->contrast_
: global_contrast;
channel_contrast[ic] = ch_contrast;
// std::cout << "channel_contrast : " << channel_contrast[ic] << "\n";
}
const float distortion =
std::exp(((this->rgen_() % 1001) * 0.001f * 2.0f * this->distortion_) -
this->distortion_) -
1.0f;
// std::cout << "distortion : " << distortion << "\n";
const float noise = (this->rgen_() % 1001) * 0.001f * this->noise_;
// std::cout << "noise : " << noise << "\n";
// Pixel loop
const float cos_theta = std::cos(angle);
const float sin_theta = std::sin(angle);
const float x_ax = (flip_lr ? -cos_theta : cos_theta) * i_scale_x;
const float y_ax = (flip_lr ? sin_theta : -sin_theta) * i_scale_y;
const float x_ay = (flip_ud ? -sin_theta : sin_theta) * i_scale_x;
const float y_ay = (flip_ud ? -cos_theta : cos_theta) * i_scale_y;
float x0_in =
(cx_scaled * i_scale_x) - (x_ax * cx + y_ax * cy) - this->pad_[1];
float y0_in =
(cy_scaled * i_scale_y) - (x_ay * cx + y_ay * cy) - this->pad_[0];
dim3 threads(32, 16);
dim3 blocks((w_out - 1) / threads.x + 1, (h_out - 1) / threads.y + 1);
for (int ic = 0; ic < num_ch; ++ic) {
IAKernel<<<blocks, threads>>>(
x_im + ch_size_in * ic, w_in, h_in, x0_in, y0_in,
y_im + ch_size_out * ic, w_out, h_out, x_ax, y_ax, x_ay, y_ay,
distortion, channel_brightness[ic], channel_contrast[ic],
this->contrast_center_, (curandStateXORWOW_t *)state, noise);
NBLA_CUDA_KERNEL_CHECK();
}
}
// Save output data for recomputation.
if (save_output_data_) {
save_output_data<Tc>(this->ctx_, outputs[0], output_data_for_recomp_);
}
}
template <typename T>
void ImageAugmentationCuda<T>::recompute_impl(const Variables &inputs,
const Variables &outputs) {
// Restore output data of previous forward execution.
restore_output_data<Tc>(this->ctx_, output_data_for_recomp_, outputs[0]);
save_output_data_ = false;
}
template <typename T>
void ImageAugmentationCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
// Not supported
}
}
|
the_stack
|
namespace amgx
{
struct Not_empty_row
{
typedef bool result_type;
template< typename Tuple >
inline __device__ __host__ bool operator()( const Tuple &t ) const
{
return thrust::get<0>(t) != thrust::get<1>(t);
}
};
template< typename Matrix, typename Vector >
static
void build_sort_permutation( const Matrix &M, Vector &permutation )
{
int num_nz = M.get_num_nz();
Vector row_indices( num_nz, 0 );
thrust::scatter_if( thrust::counting_iterator<int>(0),
thrust::counting_iterator<int>(M.row_offsets.size() - 1),
M.row_offsets.begin(),
thrust::make_transform_iterator(
thrust::make_zip_iterator( thrust::make_tuple( M.row_offsets.begin(), M.row_offsets.begin() + 1 ) ),
Not_empty_row()),
row_indices.begin());
cudaCheckError();
thrust::inclusive_scan( row_indices.begin(), row_indices.begin() + M.get_num_nz(), row_indices.begin(), thrust::maximum<int>() );
cudaCheckError();
permutation.resize( num_nz );
thrust::sequence( permutation.begin(), permutation.end() );
cudaCheckError();
Vector tmp( M.col_indices );
thrust::stable_sort_by_key( tmp.begin(), tmp.end(), permutation.begin() );
cudaCheckError();
tmp = row_indices;
thrust::gather( permutation.begin(), permutation.end(), tmp.begin(), row_indices.begin() );
cudaCheckError();
thrust::stable_sort_by_key( row_indices.begin(), row_indices.end(), permutation.begin() );
cudaCheckError();
}
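// --- Illustrative sketch (not part of the original source) ------------------
// Sequential restatement of build_sort_permutation, assuming a plain CSR
// matrix given as (num_rows, row_offsets, col_indices): the resulting
// permutation enumerates the nonzeros ordered by (row, column), which is what
// makes the element-wise comparison in compare_matrices_kernel independent of
// the storage order inside each row. `sort_permutation_host` is a hypothetical
// helper, not part of AMGX.
static void sort_permutation_host( const int num_rows,
                                   const int *row_offsets,
                                   const int *col_indices,
                                   int *permutation )
{
    int p = 0;
    for ( int row = 0 ; row < num_rows ; ++row )
    {
        const int begin = row_offsets[row], end = row_offsets[row + 1];
        // Rows are already grouped by the CSR layout, so sorting each row
        // segment by column gives the same (row, column) order that the two
        // stable_sort_by_key calls above produce in parallel.
        for ( int it = begin ; it < end ; ++it )
        {
            permutation[p + (it - begin)] = it;
        }
        // Stable insertion sort of the row segment, keyed by column index.
        for ( int i = 1 ; i < end - begin ; ++i )
        {
            const int key = permutation[p + i];
            int j = i - 1;
            while ( j >= 0 && col_indices[permutation[p + j]] > col_indices[key] )
            {
                permutation[p + j + 1] = permutation[p + j];
                --j;
            }
            permutation[p + j + 1] = key;
        }
        p += end - begin;
    }
}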
template< typename Value_type >
static __device__ __inline__
bool equal( Value_type x, Value_type y, Value_type epsilon, Value_type max_abs_error )
{
if ( x == y )
{
return true;
}
if ( abs(x - y) < max_abs_error )
{
return true;
}
if ( abs(x - y) <= epsilon )
{
return true;
}
return false;
}
static __device__ __inline__
bool equal( float x, float y )
{
return equal( x, y, 1.0e-6f, 1.0e10f * FLT_MIN );
}
static __device__ __inline__
bool equal( double x, double y )
{
return equal( x, y, 1.0e-12, 1.0e10 * DBL_MIN );
}
template< typename Value_type >
__global__
void compare_matrices_kernel( const int num_rows,
const int block_size_sq,
const int has_diag,
const int *A_rows,
const int *A_cols,
const int *A_diag,
const Value_type *A_vals,
const int *A_perm,
const int *B_rows,
const int *B_cols,
const int *B_diag,
const Value_type *B_vals,
const int *B_perm,
int *ok )
{
int row_id = blockIdx.x * blockDim.x + threadIdx.x;
for ( ; row_id < num_rows ; row_id += gridDim.x * blockDim.x )
{
if ( has_diag )
{
int a_it = A_diag[row_id];
int b_it = B_diag[row_id];
Value_type a_val = A_vals[a_it];
            Value_type b_val = B_vals[b_it];
if ( !equal( a_val, b_val ) )
{
printf( "ERROR: row=%d: Diag A[%d]=%f and B[%d]=%f are different!!!\n", row_id, a_it, a_val, b_it, b_val );
ok[0] = 0;
return;
}
}
int row_it = A_rows[row_id + 0];
int row_end = A_rows[row_id + 1];
if ( row_it != B_rows[row_id + 0] || row_end != B_rows[row_id + 1] )
{
printf( "ERROR: Rows A[%d] and B[%d] have different lenghts!!!\n", row_id, row_id );
ok[0] = 0;
return;
}
for ( ; row_it < row_end ; ++row_it )
{
const int a_it = A_perm[row_it];
const int b_it = B_perm[row_it];
const int a_col = A_cols[a_it];
const int b_col = B_cols[b_it];
if ( a_col != b_col )
{
printf( "ERROR: row=%d row_it=%d: Cols A[%d]=%d and B[%d]=%d are different!!!\n", row_id, row_it, a_it, a_col, b_it, b_col );
ok[0] = 0;
return;
}
for ( int k = 0 ; k < block_size_sq ; ++k )
{
Value_type a_val = A_vals[block_size_sq * a_it + k];
Value_type b_val = B_vals[block_size_sq * b_it + k];
if ( !equal( a_val, b_val ) )
{
printf( "ERROR: row=%d row_it=%d: Vals A[%d]=%f and B[%d]=%f are different!!!\n", row_id, row_it, a_it, a_val, b_it, b_val );
ok[0] = 0;
return;
}
}
}
}
}
// parameter is used as test name
DECLARE_UNITTEST_BEGIN(AggregatesCoarseGeneratorTest);
// setup restriction on HOST
void fillRowOffsetsAndColIndices(const int num_aggregates,
typename Matrix_h::IVector aggregates,
const int R_num_cols,
typename Matrix_h::IVector &R_row_offsets,
typename Matrix_h::IVector &R_col_indices)
{
for (int i = 0; i < num_aggregates + 1; i++)
{
R_row_offsets[i] = 0;
}
// Count number of neighbors for each row
for (int i = 0; i < R_num_cols; i++)
{
int I = aggregates[i];
R_row_offsets[I]++;
}
R_row_offsets[num_aggregates] = R_num_cols;
for (int i = num_aggregates - 1; i >= 0; i--)
{
R_row_offsets[i] = R_row_offsets[i + 1] - R_row_offsets[i];
}
/* Set column indices. */
for (int i = 0; i < R_num_cols; i++)
{
int I = aggregates[i];
int Ip = R_row_offsets[I]++;
R_col_indices[Ip] = i;
}
/* Reset r[i] to start of row memory. */
for (int i = num_aggregates - 1; i > 0; i--)
{
R_row_offsets[i] = R_row_offsets[i - 1];
}
R_row_offsets[0] = 0;
}
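// --- Illustrative sketch (not part of the original source) ------------------
// The loops above are an in-place counting sort that builds the CSR
// restriction matrix R from the aggregates array. `host_fill_restriction` is a
// hypothetical restatement using a forward prefix sum, with a worked example
// in the trailing comment.
static void host_fill_restriction(const int num_aggregates, const int *aggregates,
                                  const int n, int *row_offsets, int *col_indices)
{
    for (int a = 0; a <= num_aggregates; a++) { row_offsets[a] = 0; }
    for (int i = 0; i < n; i++) { row_offsets[aggregates[i] + 1]++; } // counts
    for (int a = 0; a < num_aggregates; a++)
    {
        row_offsets[a + 1] += row_offsets[a]; // prefix sum -> row offsets
    }
    // Scatter column indices using row_offsets as moving cursors, then shift
    // the offsets back, exactly as the reset pass above does.
    for (int i = 0; i < n; i++) { col_indices[row_offsets[aggregates[i]]++] = i; }
    for (int a = num_aggregates; a > 0; a--) { row_offsets[a] = row_offsets[a - 1]; }
    row_offsets[0] = 0;
}
// Example: aggregates = {0, 1, 0, 1, 2} (n = 5, num_aggregates = 3) gives
//   row_offsets = {0, 2, 4, 5} and col_indices = {0, 2, 1, 3, 4},
// i.e. row I of R lists the fine vertices assigned to aggregate I.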
template< typename Matrix_type >
void compare_matrices( const Matrix_type &A, const Matrix_type &B )
{
typedef typename Matrix_type::IVector IVector;
IVector A_perm;
build_sort_permutation( A, A_perm );
IVector B_perm;
build_sort_permutation( B, B_perm );
IVector ok( 1, 1 );
compare_matrices_kernel <<< 2048, 256>>>( A.get_num_rows(),
A.get_block_dimx() * A.get_block_dimy(),
A.hasProps(DIAG),
A.row_offsets.raw(),
A.col_indices.raw(),
A.diag.raw(),
A.values.raw(),
A_perm.raw(),
B.row_offsets.raw(),
B.col_indices.raw(),
B.diag.raw(),
B.values.raw(),
B_perm.raw(),
ok.raw() );
cudaCheckError();
UNITTEST_ASSERT_TRUE( ok[0] == 1 );
}
void run()
{
cudaCheckError();
this->randomize( 375139 );
AMG_Config cfg;
const std::string &cfg_scope = "default";
// setup generators
cudaCheckError();
aggregation::CoarseAGenerator<TConfig> *d_generator = new aggregation::LowDegCoarseAGenerator<TConfig>(cfg, cfg_scope);
this->PrintOnFail("Device generator creation");
UNITTEST_ASSERT_TRUE(d_generator != NULL);
cudaCheckError();
aggregation::CoarseAGenerator<TConfig_h> *h_generator = new aggregation::LowDegCoarseAGenerator<TConfig_h>(cfg, cfg_scope);
this->PrintOnFail("Host generator creation");
UNITTEST_ASSERT_TRUE(h_generator != NULL);
cudaCheckError();
MatrixA A, Ac;
int bsizes[] = {1, 2, 3, 4, 5, 8, 10};
for (int diag_prop = 0; diag_prop < 2; diag_prop++)
{
for (int bs = 0; bs < sizeof(bsizes)/sizeof(bsizes[0]); bs++)
{
int b = bsizes[bs];
// setup matrix A
cudaCheckError();
generateMatrixRandomStruct<TConfig>::generate(A, 128, diag_prop, b, false);
cudaCheckError();
random_fill(A);
cudaCheckError();
Matrix_h h_A = A;
cudaCheckError();
Matrix_h h_Ac;
cudaCheckError();
// setup aggregates on CPU
int num_aggregates = (A.get_num_rows() - 1) / 2 + 1; //A.get_num_rows();
typename Matrix_h::IVector h_aggregates;
h_aggregates.resize( A.get_num_rows() );
for ( int i = 0; i < h_aggregates.size(); i++ )
{
h_aggregates[i] = i / 2; //i;
}
// setup R matrix on CPU
typename Matrix_h::IVector h_R_row_offsets;
typename Matrix_h::IVector h_R_col_indices;
h_R_row_offsets.resize( num_aggregates + 1 );
h_R_col_indices.resize( A.get_num_rows() );
fillRowOffsetsAndColIndices( num_aggregates, h_aggregates, A.get_num_rows(), h_R_row_offsets, h_R_col_indices );
// assign GPU vectors
IVector aggregates = h_aggregates;
IVector R_row_offsets = h_R_row_offsets;
IVector R_col_indices = h_R_col_indices;
// compute Galerkin product on CPU and GPU
h_generator->computeAOperator(h_A, h_Ac, h_aggregates, h_R_row_offsets, h_R_col_indices, num_aggregates);
d_generator->computeAOperator(A, Ac, aggregates, R_row_offsets, R_col_indices, num_aggregates);
// simple check on matrix size
this->PrintOnFail("Coarse matrix has wrong size %i != num aggregates %i", Ac.get_num_rows(), num_aggregates);
UNITTEST_ASSERT_TRUE( Ac.get_num_rows() == num_aggregates );
// dump matrix to file
VVector v;
v.resize(Ac.get_num_rows() * Ac.get_block_dimy());
random_fill(v);
// compare structure
this->PrintOnFail("Coarse matrix has incorrect structure, diag prop %i, block size %i, num rows %i, num aggregates %i", diag_prop, b, A.get_num_rows(), num_aggregates);
compare_matrices(Ac, MatrixA(h_Ac) /*, b==1 && !diag_prop */ );
}
}
delete d_generator;
delete h_generator;
}
DECLARE_UNITTEST_END(AggregatesCoarseGeneratorTest);
// run for all device configs
#define AMGX_CASE_LINE(CASE) AggregatesCoarseGeneratorTest<TemplateMode<CASE>::Type> AggregatesCoarseGeneratorTest_##CASE;
AMGX_FORALL_BUILDS_DEVICE(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
//AggregatesCoarseGeneratorTest<TemplateMode<AMGX_mode_dDDI>::Type> AggregatesCoarseGeneratorTest_dDDI;
} //namespace amgx
|
the_stack
|
#pragma once
#include <gunrock/util/multithreading.cuh>
#include <gunrock/util/multithread_utils.cuh>
#include <gunrock/util/kernel_runtime_stats.cuh>
#include <gunrock/util/test_utils.cuh>
#include <gunrock/util/scan/block_scan.cuh>
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/advance/kernel_policy.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <gunrock/oprtr/filter/kernel_policy.cuh>
#include <gunrock/app/enactor_base.cuh>
#include <gunrock/app/cc/cc_problem.cuh>
#include <gunrock/app/cc/cc_functor.cuh>
namespace gunrock {
namespace app {
namespace cc {
template <typename VertexId, typename SizeT>
__global__ void Expand_Incoming_Kernel(
const int thread_num, const SizeT num_elements,
const VertexId *const keys_in, const VertexId *const vertex_associate_in,
VertexId *vertex_associate_org) {
SizeT x = (SizeT)blockIdx.x * blockDim.x + threadIdx.x;
const SizeT STRIDE = (SizeT)blockDim.x * gridDim.x;
while (x < num_elements) {
VertexId key = _ldg(keys_in + x);
VertexId new_pred = _ldg(vertex_associate_in + x);
VertexId old_pred = _ldg(vertex_associate_org + key);
if (new_pred != old_pred) {
if (new_pred < old_pred)
vertex_associate_org[old_pred] = new_pred;
else if (old_pred < vertex_associate_org[new_pred])
vertex_associate_org[new_pred] = old_pred;
}
// atomicMin(vertex_associate_org + old_pred, new_pred);
// atomicMin(vertex_associate_org + new_pred, old_pred);
// atomicMin(vertex_associate_org + key, new_pred);
// if (TO_TRACK)
//{
// if (to_track(key) || to_track(old_pred))
// printf("%d\t Expand_Incoming_Kernel : %d -> %d -> %d, in_pos =
// %d\n",
// thread_num, key, old_pred, new_pred, x);
//}
// if (new_pred < old_pred)
//{
// vertex_associate_org[key] = new_pred;
// vertex_associate_org[old_pred] = new_pred;
// atomicMin(vertex_associate_org + old_pred, new_pred);
//}
x += STRIDE;
}
}
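// --- Illustrative sketch (not part of the original source) ------------------
// Host restatement of the merge rule above: for every received (key, new_pred)
// pair, the locally stored label and the incoming label are hooked so that the
// larger root ends up pointing at the smaller id. `merge_label` is a
// hypothetical helper used only to spell out the branch structure; the kernel
// applies the same rule per element with _ldg loads.
static inline void merge_label(const int key, const int new_pred,
                               int *component_ids) {
  const int old_pred = component_ids[key];
  if (new_pred == old_pred) return;       // nothing new from this peer
  if (new_pred < old_pred)
    component_ids[old_pred] = new_pred;   // hook our root under the smaller id
  else if (old_pred < component_ids[new_pred])
    component_ids[new_pred] = old_pred;   // hook the incoming root under ours
}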
template <typename VertexId, typename SizeT>
__global__ void First_Expand_Incoming_Kernel(
const int thread_num, const int num_gpus, const SizeT nodes,
VertexId **component_id_ins, VertexId *component_ids, VertexId *old_c_ids) {
SizeT x = (SizeT)blockIdx.x * blockDim.x + threadIdx.x;
const SizeT STRIDE = (SizeT)blockDim.x * gridDim.x;
__shared__ VertexId *s_component_id_ins[8];
if (threadIdx.x < num_gpus)
s_component_id_ins[threadIdx.x] = component_id_ins[threadIdx.x];
__syncthreads();
VertexId pred_ins[8], min_pred;
while (x < nodes) {
pred_ins[0] = /*old_c_ids[x];*/ component_ids[x];
min_pred = pred_ins[0];
for (int gpu = 1; gpu < num_gpus; gpu++) {
pred_ins[gpu] = s_component_id_ins[gpu][x];
if (pred_ins[gpu] < min_pred) min_pred = pred_ins[gpu];
}
// if (min_pred < component_ids[x]) component_ids[x] = min_pred;
for (int gpu = 0; gpu < num_gpus; gpu++)
if (min_pred < component_ids[pred_ins[gpu]])
component_ids[pred_ins[gpu]] = min_pred;
// atomicMin(component_ids + pred_ins[gpu], min_pred);
old_c_ids[x] = pred_ins[0];
x += STRIDE;
}
}
template <typename KernelPolicy, typename VertexId, typename SizeT>
__global__ void Make_Output_Kernel(int thread_num, const SizeT num_vertices,
VertexId *old_component_ids,
VertexId *component_ids,
SizeT *output_length, VertexId *keys_out,
VertexId *component_out) {
SizeT x = (SizeT)blockIdx.x * blockDim.x + threadIdx.x;
const SizeT STRIDE = (SizeT)blockDim.x * gridDim.x;
typedef util::Block_Scan<SizeT, KernelPolicy::CUDA_ARCH,
KernelPolicy::LOG_THREADS>
BlockScanT;
__shared__ typename BlockScanT::Temp_Space scan_space;
__shared__ SizeT block_offset;
while (x - threadIdx.x < num_vertices) {
bool to_process = true;
VertexId old_cid = 0, new_cid = 0, min_cid = 0;
if (x < num_vertices) {
old_cid = _ldg(old_component_ids + x);
new_cid = _ldg(component_ids + x);
min_cid = min(new_cid, old_cid);
if (old_cid == min_cid)
to_process = false;
else {
old_component_ids[x] = min_cid;
VertexId old_grandparent = _ldg(component_ids + old_cid);
if (min_cid != old_grandparent) {
// printf("%d\t Make_Output : not updated, old_cid = %d, min_cid = %d,
// old_grandparent = %d\n",
// thread_num, old_cid, min_cid, old_grandparent);
if (min_cid < old_grandparent) {
component_ids[old_cid] = min_cid;
old_component_ids[old_cid] = util::InvalidValue<VertexId>();
} else {
component_ids[min_cid] = old_grandparent;
old_component_ids[min_cid] = util::InvalidValue<VertexId>();
}
}
}
} else
to_process = false;
SizeT output_pos = 0;
BlockScanT::LogicScan(to_process, output_pos, scan_space);
if (threadIdx.x == blockDim.x - 1) {
if (output_pos != 0 || to_process)
block_offset =
atomicAdd(output_length, output_pos + ((to_process) ? 1 : 0));
}
__syncthreads();
if (to_process) {
output_pos += block_offset - 1;
keys_out[output_pos] = x;
component_out[output_pos] = min_cid;
// if (TO_TRACK)
//{
// if (to_track(x) || to_track(old_cid) || to_track(new_cid))
// printf("%d\t Make_Output : %d, cid = %d -> %d -> %d, pos =
// %d\n",
// thread_num, x, old_cid, new_cid, component_ids[new_cid],
// output_pos);
//}
}
x += STRIDE;
}
}
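// --- Illustrative sketch (not part of the original source) ------------------
// Make_Output_Kernel above is a stream compaction: every vertex whose
// component id shrank since the last exchange is appended to (keys_out,
// component_out), with a block-wide scan assigning per-thread slots and one
// atomicAdd per block reserving the global range. `compact_changed_host` is a
// hypothetical sequential restatement of the compaction step only; the
// grandparent fix-up and the old_component_ids update are left out.
static int compact_changed_host(const int num_vertices, const int *old_cids,
                                const int *new_cids, int *keys_out,
                                int *component_out) {
  int length = 0;
  for (int v = 0; v < num_vertices; ++v) {
    const int min_cid = new_cids[v] < old_cids[v] ? new_cids[v] : old_cids[v];
    if (min_cid != old_cids[v]) {  // label shrank: this vertex must be sent
      keys_out[length] = v;
      component_out[length] = min_cid;
      ++length;                    // what *output_length accumulates per block
    }
  }
  return length;
}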
/*
* @brief Iteration structure derived from IterationBase.
*
* @tparam AdvanceKernelPolicy Kernel policy for advance operator.
* @tparam FilterKernelPolicy Kernel policy for filter operator.
* @tparam Enactor Enactor we process on.
*/
template <typename AdvanceKernelPolicy, typename FilterKernelPolicy,
typename Enactor>
struct CCIteration
: public IterationBase<AdvanceKernelPolicy, FilterKernelPolicy, Enactor,
false, // HAS_SUBQ
true, // HAS_FULLQ
true, // BACKWARD
true, // FORWARD
false> // UPDATE_PREDECESSORS
{
public:
typedef typename Enactor::SizeT SizeT;
typedef typename Enactor::Value Value;
typedef typename Enactor::VertexId VertexId;
typedef typename Enactor::Problem Problem;
typedef typename Problem::DataSlice DataSlice;
typedef GraphSlice<VertexId, SizeT, Value> GraphSliceT;
typedef typename util::DoubleBuffer<VertexId, SizeT, Value> Frontier;
typedef IterationBase<AdvanceKernelPolicy, FilterKernelPolicy, Enactor, false,
true, true, true, false>
BaseIteration;
typedef UpdateMaskFunctor<VertexId, SizeT, Value, Problem> UpdateMaskFunctor;
typedef HookMinFunctor<VertexId, SizeT, Value, Problem> HookMinFunctor;
typedef HookMaxFunctor<VertexId, SizeT, Value, Problem> HookMaxFunctor;
typedef PtrJumpFunctor<VertexId, SizeT, Value, Problem> PtrJumpFunctor;
typedef PtrJumpMaskFunctor<VertexId, SizeT, Value, Problem>
PtrJumpMaskFunctor;
typedef PtrJumpUnmaskFunctor<VertexId, SizeT, Value, Problem>
PtrJumpUnmaskFunctor;
typedef HookInitFunctor<VertexId, SizeT, Value, Problem> HookInitFunctor;
/*
* @brief FullQueue_Gather function.
*
* @param[in] thread_num Number of threads.
* @param[in] peer_ Peer GPU index.
* @param[in] frontier_queue Pointer to the frontier queue.
* @param[in] partitioned_scanned_edges Pointer to the scanned edges.
* @param[in] frontier_attribute Pointer to the frontier attribute.
* @param[in] enactor_stats Pointer to the enactor statistics.
* @param[in] data_slice Pointer to the data slice we process on.
* @param[in] d_data_slice Pointer to the data slice on the device.
* @param[in] graph_slice Pointer to the graph slice we process on.
* @param[in] work_progress Pointer to the work progress class.
* @param[in] context CudaContext for ModernGPU API.
* @param[in] stream CUDA stream.
*/
static void FullQueue_Gather(
Enactor *enactor, int thread_num, int peer_, Frontier *frontier_queue,
util::Array1D<SizeT, SizeT> *scanned_edges,
FrontierAttribute<SizeT> *frontier_attribute,
EnactorStats<SizeT> *enactor_stats, DataSlice *data_slice,
DataSlice *d_data_slice, GraphSliceT *graph_slice,
util::CtaWorkProgressLifetime<SizeT> *work_progress, ContextPtr context,
cudaStream_t stream) {
if (data_slice->turn == 0) {
frontier_attribute->queue_index = 0;
frontier_attribute->selector = 0;
// if (AdvanceKernelPolicy::ADVANCE_MODE ==
// gunrock::oprtr::advance::ALL_EDGES)
frontier_attribute->queue_length = graph_slice->edges;
// else
// frontier_attribute -> queue_length =
// (data_slice -> num_gpus == 1 )? graph_slice -> nodes :
// data_slice -> local_vertices.GetSize();//graph_slice->edges;
frontier_attribute->queue_reset = true;
/*bool over_sized = false;
if ((enactor -> size_check ||
(gunrock::oprtr::advance::hasPreScan<AdvanceKernelPolicy::ADVANCE_MODE>()))
&&
(!data_slice -> scanned_queue_computed))
{
printf("scaned, queue_sizing = %d\n", frontier_attribute ->
queue_length);fflush(stdout); if (enactor_stats -> retval = Check_Size<
SizeT, SizeT> ( enactor -> size_check, "scanned_edges", frontier_attribute
-> queue_length + 1, scanned_edges, over_sized, -1, -1, -1, false))
return;
if (enactor_stats -> retval =
gunrock::oprtr::advance::ComputeOutputLength <AdvanceKernelPolicy,
Problem, HookInitFunctor, gunrock::oprtr::advance::V2V>(
frontier_attribute,
graph_slice -> row_offsets.GetPointer(util::DEVICE),
graph_slice -> column_indices.GetPointer(util::DEVICE),
(SizeT*)NULL,
(VertexId*)NULL,
(data_slice -> num_gpus == 1) ? (VertexId*)NULL :
data_slice ->
local_vertices.GetPointer(util::DEVICE),//d_in_key_queue,
scanned_edges->GetPointer(util::DEVICE),
graph_slice -> nodes,
graph_slice -> edges,
context[0],
stream,
//ADVANCE_TYPE,
true,
false,
false)) return;
//frontier_attribute -> output_length.Move(
// util::DEVICE, util::HOST, 1, 0, stream);
//return retval;
data_slice -> scanned_queue_computed = true;
}
gunrock::oprtr::advance::LaunchKernel
<AdvanceKernelPolicy, Problem, HookInitFunctor,
gunrock::oprtr::advance::V2V>( enactor_stats[0], frontier_attribute[0],
(VertexId)enactor_stats -> iteration,
data_slice,
d_data_slice,
(VertexId*)NULL,
(bool* )NULL,
(bool* )NULL,
scanned_edges -> GetPointer(util::DEVICE),
data_slice -> num_gpus == 1 ? (VertexId*)NULL :
data_slice -> local_vertices.GetPointer(util::DEVICE),
(VertexId*)NULL,
(Value* )NULL,
(Value* )NULL,
graph_slice -> row_offsets .GetPointer(util::DEVICE),
graph_slice -> column_indices.GetPointer(util::DEVICE),
(SizeT* )NULL,
(VertexId*)NULL,
graph_slice -> nodes,
graph_slice -> edges,
work_progress[0],
context[0],
stream,
false,
false,
false);*/
gunrock::oprtr::filter::LaunchKernel<FilterKernelPolicy, Problem,
HookInitFunctor>(
enactor_stats[0], frontier_attribute[0],
(VertexId)enactor_stats->iteration, data_slice, d_data_slice,
(SizeT *)NULL, // vertex_markers
(unsigned char *)NULL, // visited_mask
(VertexId *)NULL, // keys_in
(VertexId *)NULL, (Value *)NULL, (Value *)NULL, graph_slice->edges,
graph_slice->nodes, work_progress[0], context[0], stream,
graph_slice->edges, util::MaxValue<SizeT>(),
enactor_stats->filter_kernel_stats,
false, // By-Pass
false); // skip_marking
if (enactor->debug &&
(enactor_stats->retval =
util::GRError("filter::Kernel Initial HookInit Operation failed",
__FILE__, __LINE__)))
return;
enactor_stats->edges_queued[0] +=
graph_slice->edges; // frontier_attribute -> queue_length;
}
data_slice->turn++;
frontier_attribute->queue_length = 1;
}
/*
* @brief FullQueue_Core function.
*
* @param[in] thread_num Number of threads.
* @param[in] peer_ Peer GPU index.
* @param[in] frontier_queue Pointer to the frontier queue.
* @param[in] partitioned_scanned_edges Pointer to the scanned edges.
* @param[in] frontier_attribute Pointer to the frontier attribute.
* @param[in] enactor_stats Pointer to the enactor statistics.
* @param[in] data_slice Pointer to the data slice we process on.
* @param[in] d_data_slice Pointer to the data slice on the device.
* @param[in] graph_slice Pointer to the graph slice we process on.
* @param[in] work_progress Pointer to the work progress class.
* @param[in] context CudaContext for ModernGPU API.
* @param[in] stream CUDA stream.
*/
static void FullQueue_Core(
Enactor *enactor, int thread_num, int peer_, Frontier *frontier_queue,
util::Array1D<SizeT, SizeT> *scanned_edges,
FrontierAttribute<SizeT> *frontier_attribute,
EnactorStats<SizeT> *enactor_stats, DataSlice *data_slice,
DataSlice *d_data_slice, GraphSliceT *graph_slice,
util::CtaWorkProgressLifetime<SizeT> *work_progress, ContextPtr context,
cudaStream_t stream) {
/*if (data_slice -> turn == 2 && data_slice -> num_gpus > 1 && (enactor ->
problem -> edges / 3 > enactor -> problem -> nodes)) { // special
expand_incoming for first data exchagne for (int peer = 1; peer < data_slice
-> num_gpus; peer++) data_slice -> vertex_associate_ins[peer] = data_slice
-> vertex_associate_in[enactor_stats -> iteration
%2][peer].GetPointer(util::DEVICE); data_slice ->
vertex_associate_ins.Move(util::HOST, util::DEVICE, data_slice -> num_gpus,
0, stream); First_Expand_Incoming_Kernel<<<240, 512, 0, stream>>>
(thread_num,
data_slice -> num_gpus,
graph_slice -> nodes,
data_slice -> vertex_associate_ins.GetPointer(util::DEVICE),
data_slice -> component_ids.GetPointer(util::DEVICE),
data_slice -> old_c_ids.GetPointer(util::DEVICE));
for (int peer = 1; peer < data_slice -> num_gpus; peer++)
{
data_slice -> keys_out[peer].ForceSetPointer(data_slice ->
temp_vertex_out, util::DEVICE); data_slice ->
vertex_associate_out[peer].ForceSetPointer(data_slice -> temp_comp_out,
util::DEVICE);
}
}*/
enactor_stats->iteration = 0;
frontier_attribute->queue_index = 0;
frontier_attribute->selector = 0;
frontier_attribute->queue_length =
/*data_slice -> turn <= 1 ?*/ graph_slice->nodes /* :
data_slice -> local_vertices.GetSize()*/
;
frontier_attribute->queue_reset = true;
// util::MemsetCopyVectorKernel <<<240, 512, 0, stream>>>(
// data_slice -> old_c_ids.GetPointer(util::DEVICE),
// data_slice -> component_ids.GetPointer(util::DEVICE),
// data_slice -> nodes);
// First Pointer Jumping Round
data_slice->vertex_flag[0] = 0;
while (!data_slice->vertex_flag[0]) {
data_slice->vertex_flag[0] = 1;
data_slice->vertex_flag.Move(util::HOST, util::DEVICE, 1, 0, stream);
gunrock::oprtr::filter::LaunchKernel<FilterKernelPolicy, Problem,
PtrJumpFunctor>(
enactor_stats[0], frontier_attribute[0],
(VertexId)enactor_stats->iteration, data_slice, d_data_slice,
(SizeT *)NULL, // vertex_markers,
(unsigned char *)NULL, // visited_mask,
/*data_slice -> turn <= 1 ?*/ (VertexId *)NULL /* :
data_slice -> local_vertices.GetPointer(util::DEVICE)*/
,
// frontier_queue -> values[frontier_attribute ->
// selector].GetPointer(util::DEVICE),
(VertexId *)NULL, (Value *)NULL, (Value *)NULL,
/*data_slice -> turn <= 1 ?*/ graph_slice->nodes /* :
data_slice -> local_vertices.GetSize()*/
, // frontier_attribute -> output_length[0],
graph_slice->nodes, work_progress[0], context[0], stream,
graph_slice->nodes, // frontier_queue -> values[frontier_attribute ->
// selector].GetSize(),
util::MaxValue<SizeT>(), enactor_stats->filter_kernel_stats,
false, // By-Pass
false); // skip_marking
if (enactor->debug &&
(enactor_stats->retval = util::GRError(
"filter::Kernel First Pointer Jumping Round failed", __FILE__,
__LINE__)))
return;
enactor_stats->nodes_queued[0] += frontier_attribute->queue_length;
frontier_attribute->queue_reset = false;
frontier_attribute->queue_index++;
enactor_stats->iteration++;
data_slice->vertex_flag.Move(util::DEVICE, util::HOST, 1, 0, stream);
if (enactor_stats->retval =
util::GRError(cudaStreamSynchronize(stream),
"cudaStreamSynchronize failed", __FILE__, __LINE__))
return;
// Check if done
if (data_slice->vertex_flag[0]) break;
}
if (data_slice->turn > 1 &&
(enactor->problem->edges / 3 > enactor->problem->nodes)) {
enactor_stats->iteration = data_slice->turn;
return;
}
util::MemsetKernel<<<240, 512, 0, stream>>>(
data_slice->marks.GetPointer(util::DEVICE), false, graph_slice->edges);
frontier_attribute->queue_index = 0; // Work queue index
frontier_attribute->selector = 0;
frontier_attribute->queue_length = graph_slice->nodes;
frontier_attribute->queue_reset = true;
gunrock::oprtr::filter::LaunchKernel<FilterKernelPolicy, Problem,
UpdateMaskFunctor>(
enactor_stats[0], frontier_attribute[0],
(VertexId)enactor_stats->iteration, data_slice, d_data_slice,
(SizeT *)NULL, // vertex_markers,
(unsigned char *)NULL, // visited_mask,
(VertexId *)NULL, // frontier_queue -> values[frontier_attribute ->
// selector].GetPointer(util::DEVICE),
(VertexId *)NULL, (Value *)NULL, (Value *)NULL,
graph_slice->nodes, // frontier_attribute -> output_length[0],
graph_slice->nodes, work_progress[0], context[0], stream,
graph_slice->nodes, // frontier_queue -> values[frontier_attribute ->
// selector].GetSize(),
util::MaxValue<SizeT>(), enactor_stats->filter_kernel_stats,
false, // By-Pass
false); // skip_marking
if (enactor->debug && (enactor_stats->retval = util::GRError(
"filter::Kernel Update Mask Operation failed",
__FILE__, __LINE__)))
return;
enactor_stats->nodes_queued[0] += frontier_attribute->queue_length;
enactor_stats->iteration = 1;
data_slice->edge_flag[0] = 0;
while (!data_slice->edge_flag[0]) {
frontier_attribute->queue_index = 0; // Work queue index
// if (AdvanceKernelPolicy::ADVANCE_MODE ==
// gunrock::oprtr::advance::ALL_EDGES)
frontier_attribute->queue_length = graph_slice->edges;
// else frontier_attribute->queue_length =
// (data_slice -> num_gpus == 1) ? graph_slice -> nodes :
// data_slice -> local_vertices.GetSize();//graph_slice->edges;
frontier_attribute->selector = 0;
frontier_attribute->queue_reset = true;
data_slice->edge_flag[0] = 1;
data_slice->edge_flag.Move(util::HOST, util::DEVICE, 1, 0, stream);
gunrock::oprtr::filter::LaunchKernel<FilterKernelPolicy, Problem,
HookMaxFunctor>(
enactor_stats[0], frontier_attribute[0],
(VertexId)enactor_stats->iteration, data_slice, d_data_slice,
(SizeT *)NULL, // vertex_markers,
(unsigned char *)NULL, // visited_mask,
(VertexId *)NULL, // keys_in
(VertexId *)NULL, (Value *)NULL, (Value *)NULL, graph_slice->edges,
graph_slice->nodes, work_progress[0], context[0], stream,
graph_slice->edges, util::MaxValue<SizeT>(),
enactor_stats->filter_kernel_stats,
false, // By-Pass
false); // skip_marking
/*gunrock::oprtr::advance::LaunchKernel
<AdvanceKernelPolicy, Problem, HookMaxFunctor,
gunrock::oprtr::advance::V2V>( enactor_stats[0], frontier_attribute[0],
(VertexId)enactor_stats -> iteration ,
data_slice,
d_data_slice,
(VertexId*)NULL,
(bool* )NULL,
(bool* )NULL,
scanned_edges -> GetPointer(util::DEVICE),
(data_slice -> num_gpus == 1) ? (VertexId*)NULL :
data_slice -> local_vertices.GetPointer(util::DEVICE),
(VertexId*)NULL,
(Value* )NULL,
(Value* )NULL,
graph_slice -> row_offsets .GetPointer(util::DEVICE),
graph_slice -> column_indices.GetPointer(util::DEVICE),
(SizeT* )NULL,
(VertexId*)NULL,
graph_slice -> nodes,
graph_slice -> edges,
work_progress[0],
context[0],
stream,
false,
false,
false);*/
//}
if (enactor->debug && (enactor_stats->retval = util::GRError(
"filter::Kernel Hook Min/Max Operation failed",
__FILE__, __LINE__)))
return;
enactor_stats->edges_queued[0] += frontier_attribute->queue_length;
frontier_attribute->queue_reset = false;
frontier_attribute->queue_index++;
enactor_stats->iteration++;
data_slice->edge_flag.Move(util::DEVICE, util::HOST, 1, 0, stream);
if (enactor_stats->retval =
util::GRError(cudaStreamSynchronize(stream),
"cudaStreamSynchronize failed", __FILE__, __LINE__))
return;
// Check if done
if (data_slice->edge_flag[0])
break; //|| enactor_stats->iteration>5) break;
///////////////////////////////////////////
// Pointer Jumping
frontier_attribute->queue_index = 0;
frontier_attribute->selector = 0;
frontier_attribute->queue_length = graph_slice->nodes;
frontier_attribute->queue_reset = true;
// First Pointer Jumping Round
data_slice->vertex_flag[0] = 0;
while (!data_slice->vertex_flag[0]) {
data_slice->vertex_flag[0] = 1;
data_slice->vertex_flag.Move(util::HOST, util::DEVICE, 1, 0, stream);
gunrock::oprtr::filter::LaunchKernel<FilterKernelPolicy, Problem,
PtrJumpMaskFunctor>(
enactor_stats[0], frontier_attribute[0],
(VertexId)enactor_stats->iteration, data_slice, d_data_slice,
(SizeT *)NULL, // vertex_markers,
(unsigned char *)NULL, // visited_mask,
(VertexId *)NULL, // frontier_queue ->values[frontier_attribute ->
// selector].GetPointer(util::DEVICE),
(VertexId *)NULL, (Value *)NULL, (Value *)NULL,
graph_slice->nodes, // frontier_attribute -> output_length[0],
graph_slice->nodes, work_progress[0], context[0], stream,
graph_slice->nodes, // frontier_queue -> values[frontier_attribute
// -> selector].GetSize(),
util::MaxValue<SizeT>(), enactor_stats->filter_kernel_stats,
false, // By-Pass
false); // skip_marking
if (enactor->debug && (enactor_stats->retval = util::GRError(
"filter::Kernel Pointer Jumping Mask failed",
__FILE__, __LINE__)))
return;
enactor_stats->nodes_queued[0] += frontier_attribute->queue_length;
frontier_attribute->queue_reset = false;
frontier_attribute->queue_index++;
data_slice->vertex_flag.Move(util::DEVICE, util::HOST, 1, 0, stream);
if (enactor_stats->retval = util::GRError(
cudaStreamSynchronize(stream), "cudaStreamSynchronize failed",
__FILE__, __LINE__))
return;
// Check if done
if (data_slice->vertex_flag[0]) break;
}
frontier_attribute->queue_index = 0; // Work queue index
frontier_attribute->selector = 0;
frontier_attribute->queue_length = graph_slice->nodes;
frontier_attribute->queue_reset = true;
gunrock::oprtr::filter::LaunchKernel<FilterKernelPolicy, Problem,
PtrJumpUnmaskFunctor>(
enactor_stats[0], frontier_attribute[0],
(VertexId)enactor_stats->iteration, data_slice, d_data_slice,
(SizeT *)NULL, // vertex_markers,
(unsigned char *)NULL, // visited_mask,
(VertexId *)NULL, // frontier_queue -> values[frontier_attribute ->
// selector].GetPointer(util::DEVICE),
(VertexId *)NULL, (Value *)NULL, (Value *)NULL,
graph_slice->nodes, // frontier_attribute -> output_length[0],
graph_slice->nodes, work_progress[0], context[0], stream,
graph_slice->nodes, // frontier_queue -> values[frontier_attribute ->
// selector].GetSize(),
util::MaxValue<SizeT>(), enactor_stats->filter_kernel_stats,
false, // By-Pass
false); // skip_marking
if (enactor->debug &&
(enactor_stats->retval = util::GRError(
"filter::Kernel Pointer Jumping Unmask Operation failed",
__FILE__, __LINE__)))
return;
enactor_stats->nodes_queued[0] += frontier_attribute->queue_length;
gunrock::oprtr::filter::LaunchKernel<FilterKernelPolicy, Problem,
UpdateMaskFunctor>(
enactor_stats[0], frontier_attribute[0],
(VertexId)enactor_stats->iteration, data_slice, d_data_slice,
(SizeT *)NULL, // vertex_markers,
(unsigned char *)NULL, // visited_mask,
(VertexId *)NULL, // frontier_queue -> values[frontier_attribute ->
// selector].GetPointer(util::DEVICE),
(VertexId *)NULL, (Value *)NULL, (Value *)NULL,
graph_slice->nodes, // frontier_attribute -> output_length[0],
graph_slice->nodes, work_progress[0], context[0], stream,
graph_slice->nodes, // frontier_queue -> values[frontier_attribute ->
// selector].GetSize(),
util::MaxValue<SizeT>(), enactor_stats->filter_kernel_stats,
false, // By-Pass
false); // skip_marking
if (enactor->debug && (enactor_stats->retval = util::GRError(
"filter::Kernel Update Mask Operation failed",
__FILE__, __LINE__)))
return;
enactor_stats->nodes_queued[0] += frontier_attribute->queue_length;
///////////////////////////////////////////
}
enactor_stats->iteration = data_slice->turn;
}
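  // --- Illustrative sketch (not part of the original source) ----------------
  // The filter launches above alternate hooking (HookInit / HookMin / HookMax)
  // with pointer jumping (PtrJump*). A minimal host sketch of the
  // pointer-jumping half, assuming component_ids holds one parent per vertex:
  // each vertex repeatedly replaces its parent by its grandparent until the
  // forest is flat, which the PtrJump functors do one level per filter pass
  // with vertex_flag signalling convergence. `pointer_jump_host` is a
  // hypothetical helper, not part of Gunrock.
  static void pointer_jump_host(const int num_vertices, int *component_ids) {
    bool converged = false;
    while (!converged) {  // mirrors the while (!vertex_flag[0]) loops above
      converged = true;
      for (int v = 0; v < num_vertices; ++v) {
        const int parent = component_ids[v];
        const int grandparent = component_ids[parent];
        if (parent != grandparent) {  // not flat yet: jump one level up
          component_ids[v] = grandparent;
          converged = false;
        }
      }
    }
  }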
/*
* @brief Compute output queue length function.
*
* @param[in] frontier_attribute Pointer to the frontier attribute.
* @param[in] d_offsets Pointer to the offsets.
* @param[in] d_indices Pointer to the indices.
* @param[in] d_in_key_queue Pointer to the input mapping queue.
* @param[in] partitioned_scanned_edges Pointer to the scanned edges.
* @param[in] max_in Maximum input queue size.
* @param[in] max_out Maximum output queue size.
* @param[in] context CudaContext for ModernGPU API.
* @param[in] stream CUDA stream.
* @param[in] ADVANCE_TYPE Advance kernel mode.
   * @param[in] express Whether or not to enable express mode.
*
* \return cudaError_t object Indicates the success of all CUDA calls.
*/
static cudaError_t Compute_OutputLength(
Enactor *enactor, FrontierAttribute<SizeT> *frontier_attribute,
// DataSlice *data_slice,
// DataSlice *d_data_slice,
SizeT *d_offsets, VertexId *d_indices, SizeT *d_inv_offsets,
VertexId *d_inv_indices, VertexId *d_in_key_queue,
util::Array1D<SizeT, SizeT> *partitioned_scanned_edges, SizeT max_in,
SizeT max_out, CudaContext &context, cudaStream_t stream,
gunrock::oprtr::advance::TYPE ADVANCE_TYPE, bool express = false,
bool in_inv = false, bool out_inv = false) {
// util::MemsetKernel<SizeT><<<1,1,0,stream>>>(
// frontier_attribute->output_length.GetPointer(util::DEVICE),
// frontier_attribute->queue_length ==0 ? 0 :
// 1/*frontier_attribute->queue_length*/, 1);
cudaError_t retval = cudaSuccess;
// printf("SIZE_CHECK = %s\n", Enactor::SIZE_CHECK ? "true" : "false");
frontier_attribute->output_length[0] =
(frontier_attribute->queue_length == 0) ? 0 : 1;
return retval;
}
template <int NUM_VERTEX_ASSOCIATES, int NUM_VALUE__ASSOCIATES>
static void Expand_Incoming(
Enactor *enactor, cudaStream_t stream, VertexId iteration, int peer_,
SizeT received_length, SizeT num_elements,
util::Array1D<SizeT, SizeT> &in_length_out,
util::Array1D<SizeT, VertexId> &keys_in,
util::Array1D<SizeT, VertexId> &vertex_associate_in,
util::Array1D<SizeT, Value> &value__associate_in,
util::Array1D<SizeT, VertexId> &keys_out,
util::Array1D<SizeT, VertexId *> &vertex_associate_orgs,
util::Array1D<SizeT, Value *> &value__associate_orgs,
DataSlice *h_data_slice, EnactorStats<SizeT> *enactor_stats) {
// printf("%d\t %d\t Expand_Incoming num_elements = %d\n",
// h_data_slice -> gpu_idx, enactor_stats -> iteration, num_elements);
// if (h_data_slice -> turn == 1 && (enactor -> problem -> edges / 3 >
// enactor -> problem -> nodes)) return;
int num_blocks = num_elements / AdvanceKernelPolicy::THREADS / 2 + 1;
if (num_blocks > 480) num_blocks = 480;
Expand_Incoming_Kernel<VertexId, SizeT>
<<<num_blocks, AdvanceKernelPolicy::THREADS, 0, stream>>>(
h_data_slice->gpu_idx, num_elements,
keys_in.GetPointer(util::DEVICE),
vertex_associate_in.GetPointer(util::DEVICE),
vertex_associate_orgs[0]);
if (!enactor->problem->unified_receive) in_length_out[peer_] = 0;
}
/*
* @brief Check frontier queue size function.
*
* @param[in] thread_num Number of threads.
* @param[in] peer_ Peer GPU index.
* @param[in] request_length Request frontier queue length.
* @param[in] frontier_queue Pointer to the frontier queue.
* @param[in] frontier_attribute Pointer to the frontier attribute.
* @param[in] enactor_stats Pointer to the enactor statistics.
* @param[in] graph_slice Pointer to the graph slice we process on.
*/
static void Check_Queue_Size(Enactor *enactor, int thread_num, int peer_,
SizeT request_length, Frontier *frontier_queue,
FrontierAttribute<SizeT> *frontier_attribute,
EnactorStats<SizeT> *enactor_stats,
GraphSliceT *graph_slice) {}
/*
* @brief Stop_Condition check function.
*
* @param[in] enactor_stats Pointer to the enactor statistics.
* @param[in] frontier_attribute Pointer to the frontier attribute.
* @param[in] data_slice Pointer to the data slice we process on.
* @param[in] num_gpus Number of GPUs used.
*/
static bool Stop_Condition(EnactorStats<SizeT> *enactor_stats,
FrontierAttribute<SizeT> *frontier_attribute,
util::Array1D<SizeT, DataSlice> *data_slice,
int num_gpus) {
// printf("CC Stop checked\n");fflush(stdout);
for (int gpu = 0; gpu < num_gpus * num_gpus; gpu++)
if (enactor_stats[gpu].retval != cudaSuccess) {
printf("(CUDA error %d @ GPU %d: %s\n", enactor_stats[gpu].retval,
gpu % num_gpus, cudaGetErrorString(enactor_stats[gpu].retval));
fflush(stdout);
return true;
}
if (num_gpus < 2 && data_slice[0]->turn > 0) return true;
for (int gpu = 0; gpu < num_gpus; gpu++)
if (data_slice[gpu]->turn == 0) {
// printf("data_slice[%d]->turn==0\n", gpu);
// fflush(stdout);
return false;
}
for (int gpu = 0; gpu < num_gpus; gpu++)
for (int peer = 1; peer < num_gpus; peer++)
for (int i = 0; i < 2; i++)
if (data_slice[gpu]->in_length[i][peer] != 0) {
// printf("data_slice[%d]->in_length[%d][%d] = %lld\n",
// gpu, i, peer,
// (long long)data_slice[gpu]->in_length[i][peer]);
// fflush(stdout);
return false;
}
for (int gpu = 0; gpu < num_gpus; gpu++)
for (int peer = 0; peer < num_gpus; peer++)
if (data_slice[gpu]->out_length[peer] != 0) {
// printf("data_slice[%d] -> out_length[%d] = %lld\n",
// gpu, peer, (long long)data_slice[gpu]->out_length[peer]);
// fflush(stdout);
return false;
}
if (num_gpus > 1)
for (int gpu = 0; gpu < num_gpus; gpu++)
if (data_slice[gpu]->has_change || data_slice[gpu]->previous_change) {
// printf("data_slice[%d] -> has_change = %s, previous_change = %s\n",
// gpu, data_slice[gpu] -> has_change ? "true" : "false",
// data_slice[gpu] -> previous_change ? "true" : "false");
// fflush(stdout);
return false;
}
// printf("CC to stop\n");fflush(stdout);
return true;
}
/*
* @brief Make_Output function.
*
* @tparam NUM_VERTEX_ASSOCIATES
* @tparam NUM_VALUE__ASSOCIATES
*
* @param[in] thread_num Number of threads.
* @param[in] num_elements
* @param[in] num_gpus Number of GPUs used.
* @param[in] frontier_queue Pointer to the frontier queue.
* @param[in] partitioned_scanned_edges Pointer to the scanned edges.
* @param[in] frontier_attribute Pointer to the frontier attribute.
* @param[in] enactor_stats Pointer to the enactor statistics.
* @param[in] data_slice Pointer to the data slice we process on.
* @param[in] graph_slice Pointer to the graph slice we process on.
* @param[in] work_progress Pointer to the work progress class.
* @param[in] context CudaContext for ModernGPU API.
* @param[in] stream CUDA stream.
*/
template <int NUM_VERTEX_ASSOCIATES, int NUM_VALUE__ASSOCIATES>
static void Make_Output(Enactor *enactor, int thread_num, SizeT num_elements,
int num_gpus, Frontier *frontier_queue,
util::Array1D<SizeT, SizeT> *scanned_edges,
FrontierAttribute<SizeT> *frontier_attribute,
EnactorStats<SizeT> *enactor_stats,
util::Array1D<SizeT, DataSlice> *data_slice_,
GraphSliceT *graph_slice,
util::CtaWorkProgressLifetime<SizeT> *work_progress,
ContextPtr context, cudaStream_t stream) {
DataSlice *data_slice = data_slice_->GetPointer(util::HOST);
/*if (data_slice -> turn == 1 && (enactor -> problem -> edges / 3 > enactor
-> problem -> nodes))
{
//util::MemsetCopyVectorKernel<<<240, 512, 0, stream>>>(
// data_slice -> vertex_associate_out[1].GetPointer(util::DEVICE),
// data_slice -> component_ids.GetPointer(util::DEVICE),
// graph_slice -> nodes);
//util::MemsetCopyVectorKernel<<<240, 512, 0, stream>>>(
// data_slice -> old_c_ids.GetPointer(util::DEVICE),
// data_slice -> component_ids.GetPointer(util::DEVICE),
// graph_slice -> nodes);
//util::MemsetIdxKernel<<<240, 512, 0, stream>>>(
// data_slice -> keys_out[1].GetPointer(util::DEVICE),
// graph_slice -> nodes);
data_slice -> temp_vertex_out = data_slice ->
keys_out[1].GetPointer(util::DEVICE); data_slice -> temp_comp_out =
data_slice -> vertex_associate_out[1].GetPointer(util::DEVICE); for (int
peer_ = 1; peer_ < num_gpus; peer_++)
{
data_slice -> keys_out[peer_].ForceSetPointer(NULL, util::DEVICE);
data_slice -> vertex_associate_out[peer_].ForceSetPointer(data_slice
-> component_ids.GetPointer(util::DEVICE), util::DEVICE);
}
data_slice -> out_length[1] = graph_slice -> nodes + 1;
} else*/
{
data_slice->out_length[1] = 1;
data_slice->out_length.Move(util::HOST, util::DEVICE, 1, 1, stream);
int num_blocks = data_slice->nodes / AdvanceKernelPolicy::THREADS + 1;
if (num_blocks > 480) num_blocks = 480;
Make_Output_Kernel<AdvanceKernelPolicy, VertexId, SizeT>
<<<num_blocks, AdvanceKernelPolicy::THREADS, 0, stream>>>(
thread_num, data_slice->nodes,
data_slice->old_c_ids.GetPointer(util::DEVICE),
data_slice->component_ids.GetPointer(util::DEVICE),
data_slice->out_length.GetPointer(util::DEVICE) + 1,
data_slice->keys_out[1].GetPointer(util::DEVICE),
data_slice->vertex_associate_out[1].GetPointer(util::DEVICE));
data_slice->out_length.Move(util::DEVICE, util::HOST, 1, 1, stream);
// util::MemsetCopyVectorKernel <<<240, 512, 0, stream>>>(
// data_slice -> old_c_ids.GetPointer(util::DEVICE),
// data_slice -> component_ids.GetPointer(util::DEVICE),
// data_slice -> nodes);
}
if (enactor_stats->retval =
util::GRError(cudaStreamSynchronize(stream),
"cudaStreamSynchronize failed", __FILE__, __LINE__))
return;
// printf("%d num_diff = %d\n", thread_num, data_slice -> out_length[1]);
data_slice->out_length[1]--;
// printf("%d\t %lld\t changes = %lld\n",
// thread_num, enactor_stats -> iteration,
// (long long)data_slice -> out_length[1]);
// fflush(stdout);
data_slice->previous_change = data_slice->has_change;
for (int i = 0; i < num_gpus; i++)
data_slice->out_length[i] = data_slice->out_length[1];
if (data_slice->out_length[1] != 0)
data_slice->has_change = true;
else
data_slice->has_change = false;
}
/*
* @brief Iteration_Update_Preds function.
*
* @param[in] graph_slice Pointer to the graph slice we process on.
* @param[in] data_slice Pointer to the data slice we process on.
* @param[in] frontier_attribute Pointer to the frontier attribute.
* @param[in] frontier_queue Pointer to the frontier queue.
* @param[in] num_elements Number of elements.
* @param[in] stream CUDA stream.
*/
static void Iteration_Update_Preds(
Enactor *enactor, GraphSliceT *graph_slice, DataSlice *data_slice,
FrontierAttribute<SizeT> *frontier_attribute, Frontier *frontier_queue,
SizeT num_elements, cudaStream_t stream) {}
};
/**
* @brief Thread controls.
*
* @tparam AdvanceKernelPolicy Kernel policy for advance operator.
* @tparam FilterKernelPolicy Kernel policy for filter operator.
* @tparam CcEnactor Enactor type we process on.
*
 * @param[in] thread_data_ Thread data.
*/
template <typename AdvanceKernelPolicy, typename FilterKernelPolicy,
typename Enactor>
static CUT_THREADPROC CCThread(void *thread_data_) {
typedef typename Enactor::Problem Problem;
typedef typename Enactor::SizeT SizeT;
typedef typename Enactor::VertexId VertexId;
typedef typename Enactor::Value Value;
typedef typename Problem::DataSlice DataSlice;
typedef GraphSlice<VertexId, SizeT, Value> GraphSliceT;
typedef UpdateMaskFunctor<VertexId, SizeT, Value, Problem> Functor;
ThreadSlice *thread_data = (ThreadSlice *)thread_data_;
Problem *problem = (Problem *)thread_data->problem;
Enactor *enactor = (Enactor *)thread_data->enactor;
int num_gpus = problem->num_gpus;
int thread_num = thread_data->thread_num;
int gpu_idx = problem->gpu_idx[thread_num];
DataSlice *data_slice =
problem->data_slices[thread_num].GetPointer(util::HOST);
FrontierAttribute<SizeT> *frontier_attribute =
&(enactor->frontier_attribute[thread_num * num_gpus]);
EnactorStats<SizeT> *enactor_stats =
&(enactor->enactor_stats[thread_num * num_gpus]);
// printf("CCThread entered\n");fflush(stdout);
if (enactor_stats[0].retval = util::SetDevice(gpu_idx)) {
thread_data->status = ThreadSlice::Status::Idle;
CUT_THREADEND;
}
thread_data->status = ThreadSlice::Status::Idle;
while (thread_data->status != ThreadSlice::Status::ToKill) {
while (thread_data->status == ThreadSlice::Status::Wait ||
thread_data->status == ThreadSlice::Status::Idle) {
sleep(0);
}
if (thread_data->status == ThreadSlice::Status::ToKill) break;
for (int peer_ = 0; peer_ < num_gpus; peer_++) {
frontier_attribute[peer_].queue_index = 0;
frontier_attribute[peer_].selector = 0;
frontier_attribute[peer_].queue_length = 0;
frontier_attribute[peer_].queue_reset = true;
enactor_stats[peer_].iteration = 0;
}
if (num_gpus > 1) {
data_slice->vertex_associate_orgs[0] =
data_slice->component_ids.GetPointer(util::DEVICE);
data_slice->vertex_associate_orgs.Move(util::HOST, util::DEVICE);
}
gunrock::app::Iteration_Loop<
Enactor, Functor,
CCIteration<AdvanceKernelPolicy, FilterKernelPolicy, Enactor>, 1, 0>(
thread_data);
thread_data->status = ThreadSlice::Status::Idle;
}
// printf("CC_Thread finished\n");fflush(stdout);
thread_data->status = ThreadSlice::Status::Ended;
CUT_THREADEND;
}
/**
* @brief Problem enactor class.
*
* @tparam _Problem Problem type we process on
* @tparam _INSTRUMENT Whether or not to collect per-CTA clock-count stats.
* @tparam _DEBUG Whether or not to enable debug mode.
* @tparam _SIZE_CHECK Whether or not to enable size check.
*/
template <typename _Problem>
// bool _INSTRUMENT,
// bool _DEBUG,
// bool _SIZE_CHECK>
class CCEnactor
: public EnactorBase<typename _Problem::SizeT /*, _DEBUG, _SIZE_CHECK*/> {
// Members
ThreadSlice *thread_slices;
CUTThread *thread_Ids;
// Methods
public:
_Problem *problem;
typedef _Problem Problem;
typedef typename Problem::SizeT SizeT;
typedef typename Problem::VertexId VertexId;
typedef typename Problem::Value Value;
typedef EnactorBase<SizeT> BaseEnactor;
typedef CCEnactor<Problem> Enactor;
// static const bool INSTRUMENT = _INSTRUMENT;
// static const bool DEBUG = _DEBUG;
// static const bool SIZE_CHECK = _SIZE_CHECK;
public:
/**
* @brief CCEnactor default constructor
*/
CCEnactor(int num_gpus = 1, int *gpu_idx = NULL, bool instrument = false,
bool debug = false, bool size_check = true)
: BaseEnactor(EDGE_FRONTIERS, num_gpus, gpu_idx, instrument, debug,
size_check) {
thread_slices = NULL;
thread_Ids = NULL;
problem = NULL;
}
/**
* @brief CCEnactor default destructor
*/
virtual ~CCEnactor() { Release(); }
cudaError_t Release() {
cudaError_t retval = cudaSuccess;
if (thread_slices != NULL) {
for (int gpu = 0; gpu < this->num_gpus; gpu++)
thread_slices[gpu].status = ThreadSlice::Status::ToKill;
cutWaitForThreads(thread_Ids, this->num_gpus);
delete[] thread_Ids;
thread_Ids = NULL;
delete[] thread_slices;
thread_slices = NULL;
}
if (retval = BaseEnactor::Release()) return retval;
problem = NULL;
return retval;
}
/**
* @brief Initialize the problem.
*
* @tparam AdvanceKernelPolicy Kernel policy for advance operator.
* @tparam FilterKernelPolicy Kernel policy for filter operator.
*
* @param[in] context CudaContext pointer for ModernGPU API.
* @param[in] problem Pointer to Problem object.
* @param[in] max_grid_size Maximum grid size for kernel calls.
*
* \return cudaError_t object Indicates the success of all CUDA calls.
*/
template <typename AdvanceKernelPolicy, typename FilterKernelPolicy>
cudaError_t InitCC(ContextPtr *context, Problem *problem,
int max_grid_size = 512) {
cudaError_t retval = cudaSuccess;
// Lazy initialization
if (retval = BaseEnactor::Init(
// problem,
max_grid_size, AdvanceKernelPolicy::CTA_OCCUPANCY,
FilterKernelPolicy::CTA_OCCUPANCY))
return retval;
/*for (int gpu=0;gpu<this->num_gpus;gpu++)
{
if (retval = util::SetDevice(this->gpu_idx[gpu])) break;
if (sizeof(SizeT) == 4)
{
cudaChannelFormatDesc row_offsets_dest =
cudaCreateChannelDesc<SizeT>();
gunrock::oprtr::edge_map_partitioned::RowOffsetsTex<SizeT>::row_offsets.channelDesc
= row_offsets_dest; if (retval = util::GRError(cudaBindTexture( 0,
gunrock::oprtr::edge_map_partitioned::RowOffsetsTex<SizeT>::row_offsets,
problem->graph_slices[gpu]->row_offsets.GetPointer(util::DEVICE),
((size_t) (problem -> graph_slices[gpu]->nodes + 1)) *
sizeof(SizeT)), "BFSEnactor cudaBindTexture row_offsets_ref failed",
__FILE__, __LINE__)) break;
}
}*/
if (this->debug) {
printf("CC vertex map occupancy %d, level-grid size %d\n",
FilterKernelPolicy::CTA_OCCUPANCY,
this->enactor_stats[0].filter_grid_size);
}
this->problem = problem;
thread_slices = new ThreadSlice[this->num_gpus];
thread_Ids = new CUTThread[this->num_gpus];
for (int gpu = 0; gpu < this->num_gpus; gpu++) {
// thread_slices[gpu].cpu_barrier = cpu_barrier;
thread_slices[gpu].thread_num = gpu;
thread_slices[gpu].problem = (void *)problem;
thread_slices[gpu].enactor = (void *)this;
thread_slices[gpu].context = &(context[gpu * this->num_gpus]);
thread_slices[gpu].status = ThreadSlice::Status::Inited;
thread_slices[gpu].thread_Id =
cutStartThread((CUT_THREADROUTINE) &
(CCThread<AdvanceKernelPolicy, FilterKernelPolicy,
CCEnactor<Problem>>),
(void *)&(thread_slices[gpu]));
thread_Ids[gpu] = thread_slices[gpu].thread_Id;
}
for (int gpu = 0; gpu < this->num_gpus; gpu++) {
while (thread_slices[gpu].status != ThreadSlice::Status::Idle) {
sleep(0);
// std::this_thread::yield();
}
}
return retval;
}
/**
* @brief Reset enactor
*
* \return cudaError_t object Indicates the success of all CUDA calls.
*/
cudaError_t Reset() {
cudaError_t retval = cudaSuccess;
if (retval = BaseEnactor::Reset()) return retval;
for (int gpu = 0; gpu < this->num_gpus; gpu++) {
thread_slices[gpu].status = ThreadSlice::Status::Wait;
}
return retval;
}
/**
* @brief Enacts a connected-component computing on the specified graph.
*
* @tparam AdvanceKernelPolicy Kernel policy for advance operator.
* @tparam FilterKernelPolicy Kernel policy for filter operator.
*
* \return cudaError_t object Indicates the success of all CUDA calls.
*/
template <typename AdvanceKernelPolicy, typename FilterKernelPolicy>
cudaError_t EnactCC() {
cudaError_t retval = cudaSuccess;
for (int gpu = 0; gpu < this->num_gpus; gpu++) {
thread_slices[gpu].status = ThreadSlice::Status::Running;
}
for (int gpu = 0; gpu < this->num_gpus; gpu++) {
while (thread_slices[gpu].status != ThreadSlice::Status::Idle) {
sleep(0);
// std::this_thread::yield();
}
}
for (int gpu = 0; gpu < this->num_gpus * this->num_gpus; gpu++)
if (this->enactor_stats[gpu].retval != cudaSuccess) {
retval = this->enactor_stats[gpu].retval;
return retval;
}
if (this->debug) printf("\nGPU CC Done.\n");
return retval;
}
/**
* \addtogroup PublicInterface
* @{
*/
typedef gunrock::oprtr::advance::KernelPolicy<
Problem, // Problem data type
GR_CUDA_ARCH, // CUDA_ARCH
8, // MIN_CTA_OCCUPANCY,
8, // LOG_THREADS,
8, // LOG_BLOCKS,
32 * 128, // LIGHT_EDGE_THRESHOLD (used for partitioned advance mode)
1, // LOG_LOAD_VEC_SIZE,
0, // LOG_LOADS_PER_TILE
5, // LOG_RAKING_THREADS,
      32,        // WARP_GATHER_THRESHOLD,
128 * 4, // CTA_GATHER_THRESHOLD,
7, // LOG_SCHEDULE_GRANULARITY,
gunrock::oprtr::advance::LB>
LB_AdvanceKernelPolicy;
typedef gunrock::oprtr::advance::KernelPolicy<
Problem, // Problem data type
300, // CUDA_ARCH
8, // MIN_CTA_OCCUPANCY,
8, // LOG_THREADS,
8, // LOG_BLOCKS,
32 * 128, // LIGHT_EDGE_THRESHOLD (used for partitioned advance mode)
1, // LOG_LOAD_VEC_SIZE,
0, // LOG_LOADS_PER_TILE
5, // LOG_RAKING_THREADS,
      32,        // WARP_GATHER_THRESHOLD,
128 * 4, // CTA_GATHER_THRESHOLD,
7, // LOG_SCHEDULE_GRANULARITY,
gunrock::oprtr::advance::ALL_EDGES>
EDGES_AdvanceKernelPolicy;
typedef gunrock::oprtr::advance::KernelPolicy<
Problem, // Problem data type
300, // CUDA_ARCH
8, // MIN_CTA_OCCUPANCY,
8, // LOG_THREADS,
8, // LOG_BLOCKS,
32 * 128, // LIGHT_EDGE_THRESHOLD (used for partitioned advance mode)
1, // LOG_LOAD_VEC_SIZE,
0, // LOG_LOADS_PER_TILE
5, // LOG_RAKING_THREADS,
32, // WARP_GATHER_THRESHOLD,
128 * 4, // CTA_GATHER_THRESHOLD,
7, // LOG_SCHEDULE_GRANULARITY,
gunrock::oprtr::advance::LB_LIGHT>
LB_LIGHT_AdvanceKernelPolicy;
typedef gunrock::oprtr::advance::KernelPolicy<
Problem, // Problem data type
300, // CUDA_ARCH
8, // MIN_CTA_OCCUPANCY,
7, // LOG_THREADS,
8, // LOG_BLOCKS,
32 * 128, // LIGHT_EDGE_THRESHOLD (used for partitioned advance mode)
1, // LOG_LOAD_VEC_SIZE,
0, // LOG_LOADS_PER_TILE
5, // LOG_RAKING_THREADS,
32, // WARP_GATHER_THRESHOLD,
128 * 4, // CTA_GATHER_THRESHOLD,
7, // LOG_SCHEDULE_GRANULARITY,
gunrock::oprtr::advance::TWC_FORWARD>
TWC_AdvanceKernelPolicy;
typedef gunrock::oprtr::filter::KernelPolicy<Problem, // Problem data type
300, // CUDA_ARCH
0, // SATURATION QUIT
true, // DEQUEUE_PROBLEM_SIZE
4, // MIN_CTA_OCCUPANCY
9, // LOG_THREADS
1, // LOG_LOAD_VEC_SIZE
0, // LOG_LOADS_PER_TILE
5, // LOG_RAKING_THREADS
0, // END_BITMASK (no bit-mask
// for cc)
8, // LOG_SCHEDULE_GRANULARITY
gunrock::oprtr::filter::BY_PASS>
FilterKernelPolicy;
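// MODE_SWITCH maps an advance mode to the corresponding kernel-policy pair at
// compile time: each partial specialization binds Enact()/Init() to the
// matching AdvanceKernelPolicy. The Dummy template parameter exists only to
// make partial specialization legal at class scope.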
template <typename Dummy, gunrock::oprtr::advance::MODE A_MODE>
struct MODE_SWITCH {};
template <typename Dummy>
struct MODE_SWITCH<Dummy, gunrock::oprtr::advance::LB> {
static cudaError_t Enact(Enactor &enactor) {
return enactor.EnactCC<LB_AdvanceKernelPolicy, FilterKernelPolicy>();
}
static cudaError_t Init(Enactor &enactor, ContextPtr *context,
Problem *problem, int max_grid_size = 512) {
return enactor.InitCC<LB_AdvanceKernelPolicy, FilterKernelPolicy>(
context, problem, max_grid_size);
}
};
template <typename Dummy>
struct MODE_SWITCH<Dummy, gunrock::oprtr::advance::LB_LIGHT> {
static cudaError_t Enact(Enactor &enactor) {
return enactor
.EnactCC<LB_LIGHT_AdvanceKernelPolicy, FilterKernelPolicy>();
}
static cudaError_t Init(Enactor &enactor, ContextPtr *context,
Problem *problem, int max_grid_size = 512) {
return enactor.InitCC<LB_LIGHT_AdvanceKernelPolicy, FilterKernelPolicy>(
context, problem, max_grid_size);
}
};
template <typename Dummy>
struct MODE_SWITCH<Dummy, gunrock::oprtr::advance::TWC_FORWARD> {
static cudaError_t Enact(Enactor &enactor) {
return enactor.EnactCC<TWC_AdvanceKernelPolicy, FilterKernelPolicy>();
}
static cudaError_t Init(Enactor &enactor, ContextPtr *context,
Problem *problem, int max_grid_size = 512) {
return enactor.InitCC<TWC_AdvanceKernelPolicy, FilterKernelPolicy>(
context, problem, max_grid_size);
}
};
template <typename Dummy>
struct MODE_SWITCH<Dummy, gunrock::oprtr::advance::ALL_EDGES> {
static cudaError_t Enact(Enactor &enactor) {
return enactor.EnactCC<EDGES_AdvanceKernelPolicy, FilterKernelPolicy>();
}
static cudaError_t Init(Enactor &enactor, ContextPtr *context,
Problem *problem, int max_grid_size = 512) {
return enactor.InitCC<EDGES_AdvanceKernelPolicy, FilterKernelPolicy>(
context, problem, max_grid_size);
}
};
/**
* @brief CC Enact kernel entry.
*
* @param[in] traversal_mode Mode of workload strategy in the advance operator.
*
* \return cudaError_t object which indicates the success of all CUDA calls.
*/
// template <typename CCProblem>
cudaError_t Enact(std::string traversal_mode = "LB") {
if (this->min_sm_version >= 300) {
if (traversal_mode == "LB")
return MODE_SWITCH<SizeT, gunrock::oprtr::advance::LB>::Enact(*this);
else if (traversal_mode == "LB_LIGHT")
return MODE_SWITCH<SizeT, gunrock::oprtr::advance::LB_LIGHT>::Enact(
*this);
else if (traversal_mode == "TWC")
return MODE_SWITCH<SizeT, gunrock::oprtr::advance::TWC_FORWARD>::Enact(
*this);
else if (traversal_mode == "ALL_EDGES")
return MODE_SWITCH<SizeT, gunrock::oprtr::advance::ALL_EDGES>::Enact(
*this);
}
// to reduce compile time, get rid of other architectures for now
// TODO: add all the kernel policy settings for all archs
printf("Not yet tuned for this architecture\n");
return cudaErrorInvalidDeviceFunction;
}
/**
* @brief CC Enact kernel entry.
*
* @param[in] context CudaContext pointer for ModernGPU API.
* @param[in] problem Pointer to Problem object.
* @param[in] traversal_mode Mode of workload strategy in the advance operator.
* @param[in] max_grid_size Maximum grid size for kernel calls.
*
* \return cudaError_t object which indicates the success of all CUDA calls.
*/
cudaError_t Init(ContextPtr *context, Problem *problem,
std::string traversal_mode = "LB", int max_grid_size = 512) {
if (this->min_sm_version >= 300) {
if (traversal_mode == "LB")
return MODE_SWITCH<SizeT, gunrock::oprtr::advance::LB>::Init(
*this, context, problem, max_grid_size);
else if (traversal_mode == "LB_LIGHT")
return MODE_SWITCH<SizeT, gunrock::oprtr::advance::LB_LIGHT>::Init(
*this, context, problem, max_grid_size);
else if (traversal_mode == "TWC")
return MODE_SWITCH<SizeT, gunrock::oprtr::advance::TWC_FORWARD>::Init(
*this, context, problem, max_grid_size);
else if (traversal_mode == "ALL_EDGES")
return MODE_SWITCH<SizeT, gunrock::oprtr::advance::ALL_EDGES>::Init(
*this, context, problem, max_grid_size);
}
// to reduce compile time, get rid of other architectures for now
// TODO: add all the kernel policy settings for all archs
printf("Not yet tuned for this architecture\n");
return cudaErrorInvalidDeviceFunction;
}
/** @} */
};
} // namespace cc
} // namespace app
} // namespace gunrock
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
namespace megdnn {
namespace cuda {
namespace relayout_format {
namespace internal {
using namespace integer_subbyte;
template <typename dt>
struct qtype_signedness;
template <>
struct qtype_signedness<dtype::QuantizedS4> {
static constexpr bool value = true;
};
template <>
struct qtype_signedness<dtype::Quantized4Asymm> {
static constexpr bool value = false;
};
template <typename dt_src, typename dt_dst>
struct enable_qtype_b4 {
static constexpr bool val_src = std::is_same<dt_src, dtype::QuantizedS4>::value ||
std::is_same<dt_src, dtype::Quantized4Asymm>::value;
static constexpr bool val_dst = std::is_same<dt_dst, dtype::QuantizedS4>::value ||
std::is_same<dt_dst, dtype::Quantized4Asymm>::value;
static constexpr bool value =
std::is_same<dt_src, dt_dst>::value && val_src && val_dst;
using type = typename std::enable_if<value>::type;
};
// The input fragment is stored in RowMajor order. The translayout operator
// performs a transpose operation on the input fragment, and produces a
// reordered fragment, i.e. a fragment stored in ColumnMajor order.
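// Illustratively, in the <4, 4, ...> specialization further below, component i
// of dst_width[j] receives post_process(component j of read_channel[i]), i.e.
// a plain 4x4 transpose with per-element post-processing (requantization).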
template <
int col, int row, typename SrcType, typename DnnSrcType, typename DnnDstType,
bool same_scale, typename enable = void>
struct Translayout;
// partial specialization for translayout operator for qint8 and quint8
template <typename SrcType, typename DnnSrcType, typename DnnDstType, bool same_scale>
struct Translayout<1, 4, SrcType, DnnSrcType, DnnDstType, same_scale> {
using InnerDtype = typename DTypeRWHelper<
typename DTypeTrait<DnnSrcType>::ctype, 1>::InnerDtype;
using DstDtype =
typename DTypeRWHelper<typename DTypeTrait<DnnSrcType>::ctype, 1>::DstDtype;
static inline __device__ void trans(
DstDtype (&dst_width)[1], InnerDtype (&read_channel)[4],
CudaPostProcess<DnnSrcType, DnnDstType, same_scale>& post_process,
const char zero_point) {
dst_width[0].x = post_process(read_channel[0]);
dst_width[0].y = post_process(read_channel[1]);
dst_width[0].z = post_process(read_channel[2]);
dst_width[0].w = post_process(read_channel[3]);
}
};
template <typename SrcType, typename DnnSrcType, typename DnnDstType, bool same_scale>
struct Translayout<4, 4, SrcType, DnnSrcType, DnnDstType, same_scale> {
using InnerDtype = typename DTypeRWHelper<
typename DTypeTrait<DnnSrcType>::ctype, 4>::InnerDtype;
using DstDtype =
typename DTypeRWHelper<typename DTypeTrait<DnnSrcType>::ctype, 4>::DstDtype;
static inline __device__ void trans(
DstDtype (&dst_width)[4], InnerDtype (&read_channel)[4],
CudaPostProcess<DnnSrcType, DnnDstType, same_scale>& post_process,
const char zero_point) {
dst_width[0].x = post_process(read_channel[0].x);
dst_width[0].y = post_process(read_channel[1].x);
dst_width[0].z = post_process(read_channel[2].x);
dst_width[0].w = post_process(read_channel[3].x);
dst_width[1].x = post_process(read_channel[0].y);
dst_width[1].y = post_process(read_channel[1].y);
dst_width[1].z = post_process(read_channel[2].y);
dst_width[1].w = post_process(read_channel[3].y);
dst_width[2].x = post_process(read_channel[0].z);
dst_width[2].y = post_process(read_channel[1].z);
dst_width[2].z = post_process(read_channel[2].z);
dst_width[2].w = post_process(read_channel[3].z);
dst_width[3].x = post_process(read_channel[0].w);
dst_width[3].y = post_process(read_channel[1].w);
dst_width[3].z = post_process(read_channel[2].w);
dst_width[3].w = post_process(read_channel[3].w);
}
};
// =========================================================
// partial specialization for translayout operator for qint4
// NCHW <-> NCHW64
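// The 4-bit specializations share one pattern: packed nibbles are expanded to
// 8-bit intermediates (transform_b4x2_to_int8 / transform_b4x8_to_int8), every
// intermediate is passed through post_process, and groups of 8 results are
// packed back into a 32-bit word with transform_int8_to_b4x8, laid out for the
// destination format.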
template <typename SrcType, typename DnnSrcType_, typename DnnDstType_, bool same_scale>
struct Translayout<
2, 64, SrcType, DnnSrcType_, DnnDstType_, same_scale,
typename enable_qtype_b4<DnnSrcType_, DnnDstType_>::type> {
using DnnSrcType = DnnSrcType_;
using DnnDstType = DnnDstType_;
using InnerDtype = typename DTypeRWHelper<
typename DTypeTrait<DnnSrcType>::ctype, 2>::InnerDtype;
using DstDtype =
typename DTypeRWHelper<typename DTypeTrait<DnnSrcType>::ctype, 2>::DstDtype;
static constexpr bool signedness = qtype_signedness<DnnSrcType>::value;
static inline __device__ void trans(
DstDtype (&dst_width)[2], InnerDtype (&read_channel)[64],
CudaPostProcess<DnnSrcType, DnnDstType, same_scale>& post_process,
const char zero_point) {
int intermediate[8][2];
int* dst_frag = reinterpret_cast<int*>(dst_width);
auto pack_channel = [&](int idx) -> int {
return transform_int8_to_b4x8<signedness>(
post_process(intermediate[0][idx]),
post_process(intermediate[1][idx]),
post_process(intermediate[2][idx]),
post_process(intermediate[3][idx]),
post_process(intermediate[4][idx]),
post_process(intermediate[5][idx]),
post_process(intermediate[6][idx]),
post_process(intermediate[7][idx]));
};
#pragma unroll
for (int i = 0; i < 64; i += 8) {
transform_b4x2_to_int8<signedness>(
intermediate[0], reinterpret_cast<uint8_t&>(read_channel[i + 0]));
transform_b4x2_to_int8<signedness>(
intermediate[1], reinterpret_cast<uint8_t&>(read_channel[i + 1]));
transform_b4x2_to_int8<signedness>(
intermediate[2], reinterpret_cast<uint8_t&>(read_channel[i + 2]));
transform_b4x2_to_int8<signedness>(
intermediate[3], reinterpret_cast<uint8_t&>(read_channel[i + 3]));
transform_b4x2_to_int8<signedness>(
intermediate[4], reinterpret_cast<uint8_t&>(read_channel[i + 4]));
transform_b4x2_to_int8<signedness>(
intermediate[5], reinterpret_cast<uint8_t&>(read_channel[i + 5]));
transform_b4x2_to_int8<signedness>(
intermediate[6], reinterpret_cast<uint8_t&>(read_channel[i + 6]));
transform_b4x2_to_int8<signedness>(
intermediate[7], reinterpret_cast<uint8_t&>(read_channel[i + 7]));
int frag_idx = i / 8;
dst_frag[0 * 8 + frag_idx] = pack_channel(0);
dst_frag[1 * 8 + frag_idx] = pack_channel(1);
}
}
using Fragment = array_wrapper<SrcType, 64>;
static inline __device__ void trans(
Fragment& dst, Fragment& src,
CudaPostProcess<DnnSrcType, DnnDstType, same_scale>& post_process) {
trans(reinterpret_cast<DstDtype(&)[2]>(dst),
reinterpret_cast<InnerDtype(&)[64]>(src), post_process, 0);
}
};
template <typename SrcType, typename DnnSrcType_, typename DnnDstType_, bool same_scale>
struct Translayout<
8, 64, SrcType, DnnSrcType_, DnnDstType_, same_scale,
typename enable_qtype_b4<DnnSrcType_, DnnDstType_>::type> {
using DnnSrcType = DnnSrcType_;
using DnnDstType = DnnDstType_;
using InnerDtype = typename DTypeRWHelper<
typename DTypeTrait<DnnSrcType>::ctype, 8>::InnerDtype;
using DstDtype =
typename DTypeRWHelper<typename DTypeTrait<DnnSrcType>::ctype, 8>::DstDtype;
static constexpr bool signedness = qtype_signedness<DnnSrcType>::value;
static inline __device__ void trans(
DstDtype (&dst_width)[8], InnerDtype (&read_channel)[64],
CudaPostProcess<DnnSrcType, DnnDstType, same_scale>& post_process,
const char zero_point) {
int intermediate[8][8];
int* dst_frag = reinterpret_cast<int*>(dst_width);
auto pack_channel = [&](int idx) -> int {
return transform_int8_to_b4x8<signedness>(
post_process(intermediate[0][idx]),
post_process(intermediate[1][idx]),
post_process(intermediate[2][idx]),
post_process(intermediate[3][idx]),
post_process(intermediate[4][idx]),
post_process(intermediate[5][idx]),
post_process(intermediate[6][idx]),
post_process(intermediate[7][idx]));
};
#pragma unroll
for (int i = 0; i < 64; i += 8) {
transform_b4x8_to_int8<signedness>(intermediate[0], read_channel[i + 0]);
transform_b4x8_to_int8<signedness>(intermediate[1], read_channel[i + 1]);
transform_b4x8_to_int8<signedness>(intermediate[2], read_channel[i + 2]);
transform_b4x8_to_int8<signedness>(intermediate[3], read_channel[i + 3]);
transform_b4x8_to_int8<signedness>(intermediate[4], read_channel[i + 4]);
transform_b4x8_to_int8<signedness>(intermediate[5], read_channel[i + 5]);
transform_b4x8_to_int8<signedness>(intermediate[6], read_channel[i + 6]);
transform_b4x8_to_int8<signedness>(intermediate[7], read_channel[i + 7]);
int frag_idx = i / 8;
dst_frag[0 * 8 + frag_idx] = pack_channel(0);
dst_frag[1 * 8 + frag_idx] = pack_channel(1);
dst_frag[2 * 8 + frag_idx] = pack_channel(2);
dst_frag[3 * 8 + frag_idx] = pack_channel(3);
dst_frag[4 * 8 + frag_idx] = pack_channel(4);
dst_frag[5 * 8 + frag_idx] = pack_channel(5);
dst_frag[6 * 8 + frag_idx] = pack_channel(6);
dst_frag[7 * 8 + frag_idx] = pack_channel(7);
}
}
using Fragment = array_wrapper<unsigned, 64>;
static inline __device__ void trans(
Fragment& dst, Fragment& src,
CudaPostProcess<DnnSrcType, DnnDstType, same_scale>& post_process) {
trans(reinterpret_cast<DstDtype(&)[8]>(dst),
reinterpret_cast<InnerDtype(&)[64]>(src), post_process, 0);
}
};
template <typename SrcType, typename DnnSrcType_, typename DnnDstType_, bool same_scale>
struct Translayout<
64, 8, SrcType, DnnSrcType_, DnnDstType_, same_scale,
typename enable_qtype_b4<DnnSrcType_, DnnDstType_>::type> {
using DnnSrcType = DnnSrcType_;
using DnnDstType = DnnDstType_;
static constexpr int row = 8;
static constexpr int col = 64;
static constexpr int size_nbits = 4;
static constexpr int col_in_type = col * size_nbits / (8 * sizeof(SrcType));
static constexpr int elements_in_type = row * col_in_type;
static constexpr int inc_col = 8;
static constexpr int inc_col_in_type = inc_col * size_nbits / (8 * sizeof(SrcType));
static constexpr bool signedness = qtype_signedness<DnnSrcType>::value;
using Fragment = array_wrapper<SrcType, elements_in_type>;
static MEGDNN_DEVICE __forceinline__ void trans(
Fragment& dst, const Fragment& src,
CudaPostProcess<DnnSrcType, DnnDstType, same_scale>& post_process) {
int intermediate[8][8];
int* dst_frag = reinterpret_cast<int*>(&dst);
auto pack = [&](int idx) -> int {
return transform_int8_to_b4x8<signedness>(
post_process(intermediate[0][idx]),
post_process(intermediate[1][idx]),
post_process(intermediate[2][idx]),
post_process(intermediate[3][idx]),
post_process(intermediate[4][idx]),
post_process(intermediate[5][idx]),
post_process(intermediate[6][idx]),
post_process(intermediate[7][idx]));
};
#pragma unroll
for (int j = 0; j < col_in_type; j += inc_col_in_type) {
transform_b4x8_to_int8<signedness>(
intermediate[0],
reinterpret_cast<const int&>(src[0 * col_in_type + j]));
transform_b4x8_to_int8<signedness>(
intermediate[1],
reinterpret_cast<const int&>(src[1 * col_in_type + j]));
transform_b4x8_to_int8<signedness>(
intermediate[2],
reinterpret_cast<const int&>(src[2 * col_in_type + j]));
transform_b4x8_to_int8<signedness>(
intermediate[3],
reinterpret_cast<const int&>(src[3 * col_in_type + j]));
transform_b4x8_to_int8<signedness>(
intermediate[4],
reinterpret_cast<const int&>(src[4 * col_in_type + j]));
transform_b4x8_to_int8<signedness>(
intermediate[5],
reinterpret_cast<const int&>(src[5 * col_in_type + j]));
transform_b4x8_to_int8<signedness>(
intermediate[6],
reinterpret_cast<const int&>(src[6 * col_in_type + j]));
transform_b4x8_to_int8<signedness>(
intermediate[7],
reinterpret_cast<const int&>(src[7 * col_in_type + j]));
dst_frag[(j / inc_col_in_type) * 8 + 0] = pack(0);
dst_frag[(j / inc_col_in_type) * 8 + 1] = pack(1);
dst_frag[(j / inc_col_in_type) * 8 + 2] = pack(2);
dst_frag[(j / inc_col_in_type) * 8 + 3] = pack(3);
dst_frag[(j / inc_col_in_type) * 8 + 4] = pack(4);
dst_frag[(j / inc_col_in_type) * 8 + 5] = pack(5);
dst_frag[(j / inc_col_in_type) * 8 + 6] = pack(6);
dst_frag[(j / inc_col_in_type) * 8 + 7] = pack(7);
}
}
};
template <typename SrcType, typename DnnSrcType_, typename DnnDstType_, bool same_scale>
struct Translayout<
64, 2, SrcType, DnnSrcType_, DnnDstType_, same_scale,
typename enable_qtype_b4<DnnSrcType_, DnnDstType_>::type> {
using DnnSrcType = DnnSrcType_;
using DnnDstType = DnnDstType_;
static constexpr int row = 2;
static constexpr int col = 64;
static constexpr int size_nbits = 4;
static constexpr int col_in_type = col * size_nbits / (8 * sizeof(SrcType));
static constexpr int elements_in_type = row * col_in_type;
static constexpr int inc_col = 8;
static constexpr int inc_col_in_type = inc_col * size_nbits / (8 * sizeof(SrcType));
static constexpr bool signedness = qtype_signedness<DnnSrcType>::value;
using Fragment = array_wrapper<SrcType, elements_in_type>;
static MEGDNN_DEVICE __forceinline__ void trans(
Fragment& dst, const Fragment& src,
CudaPostProcess<DnnSrcType, DnnDstType, same_scale>& post_process) {
int intermediate[2][8];
int* dst_frag = reinterpret_cast<int*>(&dst);
#pragma unroll
for (int j = 0; j < col_in_type; j += inc_col_in_type) {
transform_b4x8_to_int8<signedness>(
intermediate[0],
reinterpret_cast<const int&>(src[0 * col_in_type + j]));
transform_b4x8_to_int8<signedness>(
intermediate[1],
reinterpret_cast<const int&>(src[1 * col_in_type + j]));
dst_frag[(j / inc_col_in_type) * 2 + 0] =
transform_int8_to_b4x8<signedness>(
post_process(intermediate[0][0]),
post_process(intermediate[1][0]),
post_process(intermediate[0][1]),
post_process(intermediate[1][1]),
post_process(intermediate[0][2]),
post_process(intermediate[1][2]),
post_process(intermediate[0][3]),
post_process(intermediate[1][3]));
dst_frag[(j / inc_col_in_type) * 2 + 1] =
transform_int8_to_b4x8<signedness>(
post_process(intermediate[0][4]),
post_process(intermediate[1][4]),
post_process(intermediate[0][5]),
post_process(intermediate[1][5]),
post_process(intermediate[0][6]),
post_process(intermediate[1][6]),
post_process(intermediate[0][7]),
post_process(intermediate[1][7]));
}
}
};
// =========================================================
// partial specialization for translayout operator for qint4
// NCHW <-> NHWC
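// Same unpack -> post_process -> repack mechanics as the NCHW <-> NCHW64 group
// above; only the fragment shapes differ. row/col give the logical tile shape,
// and col_in_type rescales the 4-bit column count into SrcType units
// (col * 4 bits / (8 * sizeof(SrcType))).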
template <typename SrcType, typename DnnSrcType_, typename DnnDstType_, bool same_scale>
struct Translayout<
2, 8, SrcType, DnnSrcType_, DnnDstType_, same_scale,
typename enable_qtype_b4<DnnSrcType_, DnnDstType_>::type> {
using DnnSrcType = DnnSrcType_;
using DnnDstType = DnnDstType_;
static constexpr int row = 8;
static constexpr int col = 2;
static constexpr int size_nbits = 4;
static constexpr int col_in_type = col * size_nbits / (8 * sizeof(SrcType));
static constexpr int elements_in_type = row * col_in_type;
static constexpr bool signedness = qtype_signedness<DnnSrcType>::value;
using Fragment = array_wrapper<SrcType, elements_in_type>;
static inline __device__ void trans(
Fragment& dst, const Fragment& src,
CudaPostProcess<DnnSrcType, DnnDstType, same_scale>& post_process) {
int intermediate[8][2];
transform_b4x2_to_int8<signedness>(
intermediate[0],
reinterpret_cast<const uint8_t&>(src[0 * col_in_type]));
transform_b4x2_to_int8<signedness>(
intermediate[1],
reinterpret_cast<const uint8_t&>(src[1 * col_in_type]));
transform_b4x2_to_int8<signedness>(
intermediate[2],
reinterpret_cast<const uint8_t&>(src[2 * col_in_type]));
transform_b4x2_to_int8<signedness>(
intermediate[3],
reinterpret_cast<const uint8_t&>(src[3 * col_in_type]));
transform_b4x2_to_int8<signedness>(
intermediate[4],
reinterpret_cast<const uint8_t&>(src[4 * col_in_type]));
transform_b4x2_to_int8<signedness>(
intermediate[5],
reinterpret_cast<const uint8_t&>(src[5 * col_in_type]));
transform_b4x2_to_int8<signedness>(
intermediate[6],
reinterpret_cast<const uint8_t&>(src[6 * col_in_type]));
transform_b4x2_to_int8<signedness>(
intermediate[7],
reinterpret_cast<const uint8_t&>(src[7 * col_in_type]));
int* dst_frag = reinterpret_cast<int*>(&dst);
auto pack = [&](int idx) -> int {
return transform_int8_to_b4x8<signedness>(
post_process(intermediate[0][idx]),
post_process(intermediate[1][idx]),
post_process(intermediate[2][idx]),
post_process(intermediate[3][idx]),
post_process(intermediate[4][idx]),
post_process(intermediate[5][idx]),
post_process(intermediate[6][idx]),
post_process(intermediate[7][idx]));
};
dst_frag[0] = pack(0);
dst_frag[1] = pack(1);
}
};
template <typename SrcType, typename DnnSrcType_, typename DnnDstType_, bool same_scale>
struct Translayout<
8, 8, SrcType, DnnSrcType_, DnnDstType_, same_scale,
typename enable_qtype_b4<DnnSrcType_, DnnDstType_>::type> {
using DnnSrcType = DnnSrcType_;
using DnnDstType = DnnDstType_;
static constexpr int row = 8;
static constexpr int col = 8;
static constexpr int size_nbits = 4;
static constexpr int col_in_type = col * size_nbits / (8 * sizeof(SrcType));
static constexpr int elements_in_type = row * col_in_type;
static constexpr bool signedness = qtype_signedness<DnnSrcType>::value;
using Fragment = array_wrapper<SrcType, elements_in_type>;
static inline __device__ void trans(
Fragment& dst, const Fragment& src,
CudaPostProcess<DnnSrcType, DnnDstType, same_scale>& post_process) {
int intermediate[8][8];
transform_b4x8_to_int8<signedness>(
intermediate[0], reinterpret_cast<const int&>(src[0 * col_in_type]));
transform_b4x8_to_int8<signedness>(
intermediate[1], reinterpret_cast<const int&>(src[1 * col_in_type]));
transform_b4x8_to_int8<signedness>(
intermediate[2], reinterpret_cast<const int&>(src[2 * col_in_type]));
transform_b4x8_to_int8<signedness>(
intermediate[3], reinterpret_cast<const int&>(src[3 * col_in_type]));
transform_b4x8_to_int8<signedness>(
intermediate[4], reinterpret_cast<const int&>(src[4 * col_in_type]));
transform_b4x8_to_int8<signedness>(
intermediate[5], reinterpret_cast<const int&>(src[5 * col_in_type]));
transform_b4x8_to_int8<signedness>(
intermediate[6], reinterpret_cast<const int&>(src[6 * col_in_type]));
transform_b4x8_to_int8<signedness>(
intermediate[7], reinterpret_cast<const int&>(src[7 * col_in_type]));
int* dst_frag = reinterpret_cast<int*>(&dst);
auto pack = [&](int idx) {
return transform_int8_to_b4x8<signedness>(
post_process(intermediate[0][idx]),
post_process(intermediate[1][idx]),
post_process(intermediate[2][idx]),
post_process(intermediate[3][idx]),
post_process(intermediate[4][idx]),
post_process(intermediate[5][idx]),
post_process(intermediate[6][idx]),
post_process(intermediate[7][idx]));
};
dst_frag[0] = pack(0);
dst_frag[1] = pack(1);
dst_frag[2] = pack(2);
dst_frag[3] = pack(3);
dst_frag[4] = pack(4);
dst_frag[5] = pack(5);
dst_frag[6] = pack(6);
dst_frag[7] = pack(7);
}
};
template <typename SrcType, typename DnnSrcType_, typename DnnDstType_, bool same_scale>
struct Translayout<
8, 2, SrcType, DnnSrcType_, DnnDstType_, same_scale,
typename enable_qtype_b4<DnnSrcType_, DnnDstType_>::type> {
using DnnSrcType = DnnSrcType_;
using DnnDstType = DnnDstType_;
static constexpr int row = 2;
static constexpr int col = 8;
static constexpr int size_nbits = 4;
static constexpr int col_in_type = col * size_nbits / (8 * sizeof(SrcType));
static constexpr int elements_in_type = row * col_in_type;
static constexpr bool signedness = qtype_signedness<DnnSrcType>::value;
using Fragment = array_wrapper<SrcType, elements_in_type>;
static inline __device__ void trans(
Fragment& dst, const Fragment& src,
CudaPostProcess<DnnSrcType, DnnDstType, same_scale>& post_process) {
int intermediate[2][8];
transform_b4x8_to_int8<signedness>(
intermediate[0], reinterpret_cast<const int&>(src[0 * col_in_type]));
transform_b4x8_to_int8<signedness>(
intermediate[1], reinterpret_cast<const int&>(src[1 * col_in_type]));
int* dst_frag = reinterpret_cast<int*>(&dst);
dst_frag[0] = transform_int8_to_b4x8<signedness>(
post_process(intermediate[0][0]), post_process(intermediate[1][0]),
post_process(intermediate[0][1]), post_process(intermediate[1][1]),
post_process(intermediate[0][2]), post_process(intermediate[1][2]),
post_process(intermediate[0][3]), post_process(intermediate[1][3]));
dst_frag[1] = transform_int8_to_b4x8<signedness>(
post_process(intermediate[0][4]), post_process(intermediate[1][4]),
post_process(intermediate[0][5]), post_process(intermediate[1][5]),
post_process(intermediate[0][6]), post_process(intermediate[1][6]),
post_process(intermediate[0][7]), post_process(intermediate[1][7]));
}
};
} // namespace internal
} // namespace relayout_format
} // namespace cuda
} // namespace megdnn
extern "C"
{
#include "sph/sph_sha2.h"
#include "sph/sph_keccak.h"
#include "sph/sph_ripemd.h"
#include "sph/sph_haval.h"
#include "sph/sph_tiger.h"
#include "sph/sph_whirlpool.h"
#include "sph/sph_blake.h"
}
#include "miner.h"
#include "cuda_helper.h"
//#include "mpir.h"
//extern int device_map[MAX_GPUS];
static uint64_t *d_hash[MAX_GPUS];
static uint64_t *KeccakH[MAX_GPUS];
static uint64_t *Sha512H[MAX_GPUS];
static uint64_t *d_prod0[MAX_GPUS];
static uint64_t *d_prod1[MAX_GPUS];
//extern cudaError_t MyStreamSynchronize(cudaStream_t stream, int situation, int thr_id);
/*
static void mpz_set_uint256(mpz_t r, uint8_t *u)
{
mpz_import(r, 32 / sizeof(unsigned long), -1, sizeof(unsigned long), -1, 0, u);
}
static void mpz_get_uint256(mpz_t r, uint8_t *u)
{
u=0;
mpz_export(u, 0, -1, sizeof(unsigned long), -1, 0, r);
}
static void mpz_set_uint512(mpz_t r, uint8_t *u)
{
mpz_import(r, 64 / sizeof(unsigned long), -1, sizeof(unsigned long), -1, 0, u);
}
static void set_one_if_zero(uint8_t *hash512) {
for (int i = 0; i < 32; i++) {
if (hash512[i] != 0) {
return;
}
}
hash512[0] = 1;
}
*/
//extern uint32_t m7_sha256_cpu_hash_300(int thr_id, int threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, int order);
extern uint32_t m7_sha256_cpu_hash_300(int thr_id, int threads, uint32_t startNounce, uint64_t *d_nonceVector, uint64_t *d_hash, int order);
extern void m7_sha256_setBlock_120(void *data,const void *ptarget);
extern void m7_sha256_cpu_hash_120(int thr_id, int threads, uint32_t startNounce, uint64_t *d_outputHash, int order);
extern void m7_sha256_cpu_init(int thr_id, int threads);
extern void m7_sha512_cpu_init(int thr_id, int threads);
extern void m7_sha512_setBlock_120(void *pdata);
extern void m7_sha512_cpu_hash_120(int thr_id, int threads, uint32_t startNounce, uint64_t *d_hash, int order);
extern void m7_ripemd160_cpu_init(int thr_id, int threads);
extern void m7_ripemd160_setBlock_120(void *pdata);
extern void m7_ripemd160_cpu_hash_120(int thr_id, int threads, uint32_t startNounce, uint64_t *d_hash, int order);
extern void tiger192_cpu_init(int thr_id, int threads);
extern void tiger192_setBlock_120(void *pdata);
extern void m7_tiger192_cpu_hash_120(int thr_id, int threads, uint32_t startNounce, uint64_t *d_hash, int order);
extern void m7_bigmul_init(int thr_id, int threads);
extern void m7_bigmul_unroll1_cpu(int thr_id, int threads,uint64_t* Hash1, uint64_t* Hash2,uint64_t *finalHash,int order);
extern void m7_bigmul_unroll2_cpu(int thr_id, int threads,uint64_t* Hash1, uint64_t* Hash2,uint64_t *finalHash,int order);
extern void cpu_mul(int thr_id, int threads, uint32_t alegs, uint32_t blegs, uint64_t *g_a, uint64_t *g_b, uint64_t *g_p, int order);
extern void cpu_mulT4(int thr_id, int threads, uint32_t alegs, uint32_t blegs, uint64_t *g_a, uint64_t *g_b, uint64_t *g_p, int order);
extern void mul_init();
extern void m7_keccak512_setBlock_120(void *pdata);
extern void m7_keccak512_cpu_hash(int thr_id, int threads, uint32_t startNounce, uint64_t *d_hash, int order);
extern void m7_keccak512_cpu_init(int thr_id, int threads);
extern void m7_whirlpool512_cpu_init(int thr_id, int threads, int flag);
extern void m7_whirlpool512_setBlock_120(void *pdata);
extern void m7_whirlpool512_cpu_hash_120(int thr_id, int threads, uint32_t startNounce, uint64_t *d_outputHash, int order);
extern void haval256_setBlock_120(void *data);
extern void m7_haval256_cpu_hash_120(int thr_id, int threads, uint32_t startNounce, uint64_t *d_outputHash, int order);
// m7 hash function
/*
inline void m7_hash(void *state, const void *input,uint32_t TheNonce, int debug)
{
// sha256(sha256*sha512*keccak512*ripemd160*haval*tiger1*whirlpool) good luck with that...
char data_str[245], hash_str[65], target_str[65];
uint8_t *bdata = 0;
mpz_t bns[7];
mpz_t product;
int rc = 0;
for(int i=0; i < 7; i++){
mpz_init(bns[i]);
}
mpz_init(product);
uint32_t data[32] ;
uint32_t *data_p64 = data + (116 / sizeof(data[0]));
uint8_t bhash[7][64];
uint32_t hash[8];
memcpy(data,input,122);
int M7_MIDSTATE_LEN = 116;
for(int i=0; i < 7; i++){
mpz_init(bns[i]);
}
sph_sha256_context ctx_final_sha256;
sph_sha256_context ctx_sha256;
sph_sha512_context ctx_sha512;
sph_keccak512_context ctx_keccak;
sph_whirlpool_context ctx_whirlpool;
sph_haval256_5_context ctx_haval;
sph_tiger_context ctx_tiger;
sph_ripemd160_context ctx_ripemd;
sph_sha256_init(&ctx_sha256);
sph_sha256 (&ctx_sha256, data, M7_MIDSTATE_LEN);
sph_sha512_init(&ctx_sha512);
sph_sha512 (&ctx_sha512, data, M7_MIDSTATE_LEN);
sph_keccak512_init(&ctx_keccak);
sph_keccak512 (&ctx_keccak, data, M7_MIDSTATE_LEN);
sph_whirlpool_init(&ctx_whirlpool);
sph_whirlpool (&ctx_whirlpool, data, M7_MIDSTATE_LEN);
sph_haval256_5_init(&ctx_haval);
sph_haval256_5 (&ctx_haval, data, M7_MIDSTATE_LEN);
sph_tiger_init(&ctx_tiger);
sph_tiger (&ctx_tiger, data, M7_MIDSTATE_LEN);
sph_ripemd160_init(&ctx_ripemd);
sph_ripemd160 (&ctx_ripemd, data, M7_MIDSTATE_LEN);
sph_sha256_context ctx2_sha256;
sph_sha512_context ctx2_sha512;
sph_keccak512_context ctx2_keccak;
sph_whirlpool_context ctx2_whirlpool;
sph_haval256_5_context ctx2_haval;
sph_tiger_context ctx2_tiger;
sph_ripemd160_context ctx2_ripemd;
data[29] = TheNonce;
memset(bhash, 0, 7 * 64);
ctx2_sha256 = ctx_sha256;
sph_sha256 (&ctx2_sha256, data_p64, 122 - M7_MIDSTATE_LEN);
sph_sha256_close(&ctx2_sha256, (void*)(bhash[0]));
ctx2_sha512 = ctx_sha512;
sph_sha512 (&ctx2_sha512, data_p64, 122 - M7_MIDSTATE_LEN);
sph_sha512_close(&ctx2_sha512, (void*)(bhash[1]));
ctx2_keccak = ctx_keccak;
sph_keccak512 (&ctx2_keccak, data_p64, 122 - M7_MIDSTATE_LEN);
sph_keccak512_close(&ctx2_keccak, (void*)(bhash[2]));
ctx2_whirlpool = ctx_whirlpool;
sph_whirlpool (&ctx2_whirlpool, data_p64, 122 - M7_MIDSTATE_LEN);
sph_whirlpool_close(&ctx2_whirlpool, (void*)(bhash[3]));
ctx2_haval = ctx_haval;
sph_haval256_5 (&ctx2_haval, data_p64, 122 - M7_MIDSTATE_LEN);
sph_haval256_5_close(&ctx2_haval, (void*)(bhash[4]));
ctx2_tiger = ctx_tiger;
sph_tiger (&ctx2_tiger, data_p64, 122 - M7_MIDSTATE_LEN);
sph_tiger_close(&ctx2_tiger, (void*)(bhash[5]));
ctx2_ripemd = ctx_ripemd;
sph_ripemd160 (&ctx2_ripemd, data_p64, 122 - M7_MIDSTATE_LEN);
sph_ripemd160_close(&ctx2_ripemd, (void*)(bhash[6]));
if (debug == 1) {
for (int i=0;i<16;i++) {applog(LOG_INFO,"sha256[%d]=%02x %02x %02x %02x sha512[%d]=%02x %02x %02x %02x keccak[%d]=%02x %02x %02x %02x whirlpool[2][%d]=%02x %02x %02x %02x haval[%d]=%02x %02x %02x %02x tiger[%d]=%02x %02x %02x %02x ripemd[%d]=%02x %02x %02x %02x\n",
i,bhash[0][4*i+3],bhash[0][4*i+2],bhash[0][4*i+1],bhash[0][4*i+0],
i,bhash[1][4*i+3],bhash[1][4*i+2],bhash[1][4*i+1],bhash[1][4*i+0],
i,bhash[2][4*i+3],bhash[2][4*i+2],bhash[2][4*i+1],bhash[2][4*i+0],
i,bhash[3][4*i+3],bhash[3][4*i+2],bhash[3][4*i+1],bhash[3][4*i+0],
i,bhash[4][4*i+3],bhash[4][4*i+2],bhash[4][4*i+1],bhash[4][4*i+0],
i,bhash[5][4*i+3],bhash[5][4*i+2],bhash[5][4*i+1],bhash[5][4*i+0],
i,bhash[6][4*i+3],bhash[6][4*i+2],bhash[6][4*i+1],bhash[6][4*i+0]
);}
}
for(int i=0; i < 7; i++){
set_one_if_zero(bhash[i]);
mpz_set_uint512(bns[i],bhash[i]);
}
for(int i=6; i > 0; i--){
mpz_mul(bns[i-1], bns[i-1], bns[i]);
}
int bytes = mpz_sizeinbase(bns[0], 256);
bdata = (uint8_t *)realloc(bdata, bytes);
mpz_export((void *)bdata, NULL, -1, 1, 0, 0, bns[0]);
sph_sha256_init(&ctx_final_sha256);
sph_sha256 (&ctx_final_sha256, bdata, bytes);
sph_sha256_close(&ctx_final_sha256, (void*)(hash));
memcpy(state, hash, 32);
}
*/
extern bool opt_benchmark;
extern "C" int scanhash_m7(int thr_id, uint32_t *pdata,
const uint32_t *ptarget, uint32_t max_nonce,
unsigned long *hashes_done)
{
if (opt_benchmark)
((uint32_t*)ptarget)[7] = 0x0000ff;
// const int throughput = 256*256*16;
const int throughput = 2560*512*1;
const uint32_t FirstNonce = pdata[29];
static bool init[8] = {0,0,0,0,0,0,0,0};
if (!init[thr_id])
{
cudaSetDevice(device_map[thr_id]);
cudaMalloc(&d_prod0[thr_id], 35 *sizeof(uint64_t) * throughput);
cudaMalloc(&d_prod1[thr_id], 38 *sizeof(uint64_t) * throughput);
cudaMalloc(&KeccakH[thr_id], 8 *sizeof(uint64_t) * throughput);
cudaMalloc(&Sha512H[thr_id], 8 *sizeof(uint64_t) * throughput);
m7_sha256_cpu_init(thr_id, throughput);
m7_sha512_cpu_init(thr_id, throughput);
m7_keccak512_cpu_init(thr_id, throughput);
tiger192_cpu_init(thr_id, throughput);
m7_whirlpool512_cpu_init(thr_id, throughput,0);
m7_ripemd160_cpu_init(thr_id, throughput);
m7_bigmul_init(thr_id, throughput);
mul_init();
init[thr_id] = true;
}
const uint32_t Htarg = ptarget[7];
m7_whirlpool512_setBlock_120((void*)pdata);
m7_sha256_setBlock_120((void*)pdata,ptarget);
m7_sha512_setBlock_120((void*)pdata);
haval256_setBlock_120((void*)pdata);
m7_keccak512_setBlock_120((void*)pdata);
m7_ripemd160_setBlock_120((void*)pdata);
tiger192_setBlock_120((void*)pdata);
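// Main search loop. Each iteration hashes the block header with the seven M7
// hash functions on the GPU (keccak512, sha512, whirlpool512, sha256,
// haval256, tiger192, ripemd160) and folds the digests together with
// big-integer multiplications (cpu_mulT4 / m7_bigmul_unroll*), alternating
// hash and multiply kernels. The final m7_sha256_cpu_hash_300 hashes the
// product and returns a candidate nonce, or 0xffffffff if none met the target
// in this batch. CPU re-validation of the candidate (m7_hash) is currently
// commented out below.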
do {
int order = 0;
m7_keccak512_cpu_hash(thr_id, throughput, pdata[29], KeccakH[thr_id], order++);
m7_sha512_cpu_hash_120(thr_id, throughput, pdata[29], Sha512H[thr_id], order++);
cpu_mulT4(0, throughput, 8, 8, Sha512H[thr_id], KeccakH[thr_id], d_prod0[thr_id],order); //64
MyStreamSynchronize(0,order++,thr_id);
m7_whirlpool512_cpu_hash_120(thr_id, throughput, pdata[29], KeccakH[thr_id], order++);
cpu_mulT4(0, throughput,8, 16, KeccakH[thr_id], d_prod0[thr_id], d_prod1[thr_id],order); //128
MyStreamSynchronize(0,order++,thr_id);
m7_sha256_cpu_hash_120(thr_id, throughput, pdata[29], KeccakH[thr_id], order++);
cpu_mulT4(0, throughput, 4, 24, KeccakH[thr_id], d_prod1[thr_id], d_prod0[thr_id],order); //96
MyStreamSynchronize(0,order++,thr_id);
m7_haval256_cpu_hash_120(thr_id, throughput, pdata[29], KeccakH[thr_id], order++);
cpu_mulT4(0, throughput, 4, 28, KeccakH[thr_id], d_prod0[thr_id], d_prod1[thr_id],order); //112
MyStreamSynchronize(0,order++,thr_id);
m7_tiger192_cpu_hash_120(thr_id, throughput, pdata[29], KeccakH[thr_id], order++);
m7_bigmul_unroll1_cpu(thr_id, throughput, KeccakH[thr_id], d_prod1[thr_id], d_prod0[thr_id],order);
MyStreamSynchronize(0,order++,thr_id);
m7_ripemd160_cpu_hash_120(thr_id, throughput, pdata[29], KeccakH[thr_id], order++);
m7_bigmul_unroll2_cpu(thr_id, throughput, KeccakH[thr_id], d_prod0[thr_id], d_prod1[thr_id],order);
MyStreamSynchronize(0,order++,thr_id);
uint32_t foundNonce = m7_sha256_cpu_hash_300(thr_id, throughput, pdata[29], NULL, d_prod1[thr_id], order);
if (foundNonce != 0xffffffff) {
uint32_t vhash64[8];
// m7_hash(vhash64, pdata,foundNonce,0);
// if( (vhash64[7]<=Htarg ) ) {
pdata[29] = foundNonce;
*hashes_done = foundNonce - FirstNonce + 1;
return 1;
// } else {
// applog(LOG_INFO, "GPU #%d: result for nonce $%08X does not validate on CPU! vhash64 %08x and htarg %08x", thr_id, foundNonce,vhash64[7],Htarg);
// m7_hash(vhash64, pdata,foundNonce,1);
// }
} // foundNonce
pdata[29] += throughput;
*hashes_done +=throughput;
} while (pdata[29] < max_nonce && !work_restart[thr_id].restart);
//*hashes_done = pdata[29] - FirstNonce + 1;
return 0;
}
namespace h2o4gpu {
#define BLOCK_SIZE 32
inline cusolverStatus_t cusolverDnTgeqrf_bufferSize(cusolverDnHandle_t handle,
int m, int n, float *A,
int lda, int *lwork) {
return cusolverDnSgeqrf_bufferSize(handle, m, n, A, lda, lwork);
}
inline cusolverStatus_t cusolverDnTgeqrf_bufferSize(cusolverDnHandle_t handle,
int m, int n, double *A,
int lda, int *lwork) {
return cusolverDnDgeqrf_bufferSize(handle, m, n, A, lda, lwork);
}
inline cusolverStatus_t cusolverDnTgeqrf(cusolverDnHandle_t handle, int m,
int n, float *A, int lda, float *TAU,
float *Workspace, int Lwork,
int *devInfo) {
return cusolverDnSgeqrf(handle, m, n, A, lda, TAU, Workspace, Lwork, devInfo);
}
inline cusolverStatus_t cusolverDnTgeqrf(cusolverDnHandle_t handle, int m,
int n, double *A, int lda, double *TAU,
double *Workspace, int Lwork,
int *devInfo) {
return cusolverDnDgeqrf(handle, m, n, A, lda, TAU, Workspace, Lwork, devInfo);
}
inline cusolverStatus_t cusolverDnTormqr(cusolverDnHandle_t handle,
cublasSideMode_t side,
cublasOperation_t trans, int m, int n,
int k, const float *A, int lda,
const float *tau, float *C, int ldc,
float *work, int lwork, int *devInfo) {
return cusolverDnSormqr(handle, side, trans, m, n, k, A, lda, tau, C, ldc,
work, lwork, devInfo);
}
inline cusolverStatus_t cusolverDnTormqr(
cusolverDnHandle_t handle, cublasSideMode_t side, cublasOperation_t trans,
int m, int n, int k, const double *A, int lda, const double *tau, double *C,
int ldc, double *work, int lwork, int *devInfo) {
return cusolverDnDormqr(handle, side, trans, m, n, k, A, lda, tau, C, ldc,
work, lwork, devInfo);
}
inline cublasStatus_t cublasTtrsm(cublasHandle_t handle, cublasSideMode_t side,
cublasFillMode_t uplo,
cublasOperation_t trans,
cublasDiagType_t diag, int m, int n,
const float *alpha, const float *A, int lda,
float *B, int ldb) {
return cublasStrsm(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B,
ldb);
}
inline cublasStatus_t cublasTtrsm(cublasHandle_t handle, cublasSideMode_t side,
cublasFillMode_t uplo,
cublasOperation_t trans,
cublasDiagType_t diag, int m, int n,
const double *alpha, const double *A, int lda,
double *B, int ldb) {
return cublasDtrsm(handle, side, uplo, trans, diag, m, n, alpha, A, lda, B,
ldb);
}
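// copy_kernel extracts the leading N x N block of a column-major matrix with
// leading dimension M into a densely packed N x N matrix (d_out[j*N + i] =
// d_in[j*M + i] for i, j < N).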
template <class T>
__global__ void copy_kernel(const T *__restrict d_in, T *__restrict d_out,
const int M, const int N) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < N) && (j < N)) d_out[j * N + i] = d_in[j * M + i];
}
template <class T>
__global__ void ts_data_to_matrix_kernel(const T *__restrict data, T *X,
const int ldx, const int n) {
// row
const int i = blockIdx.x * blockDim.x + threadIdx.x;
// col, time axis
const int j = blockIdx.y * blockDim.y + threadIdx.y;
// TODO: optimize with shared memory(read data only once)
if (i < n && i < ldx) {
X[j * ldx + i] = data[j + i];
}
}
template <class T>
__global__ void update_residual_kernel(T *residual, const T *data, const T *phi,
const int p, const T *last_residual,
const T *theta, const int q,
const int n) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
// TODO: optimize with shared memory(read data only once)
// at least cache phi and theta
if (i < n) {
T AR_prediction = 0;
for (int j = 0; j < p; ++j) {
AR_prediction += data[i + j + 1] * phi[j];
}
T MA_prediction = 0;
for (int j = 0; j < q; ++j) {
MA_prediction += last_residual[i + j + 1] * theta[j];
}
residual[i] = data[i] - AR_prediction - MA_prediction;
}
}
LeastSquaresSolver::LeastSquaresSolver(int rows, int cols)
: rows(rows), cols(cols) {
safe_cusolver(cusolverDnCreate(&this->solver_handle));
safe_cublas(cublasCreate(&this->cublas_handle));
}
LeastSquaresSolver::~LeastSquaresSolver() {
safe_cusolver(cusolverDnDestroy(this->solver_handle));
safe_cublas(cublasDestroy(this->cublas_handle));
}
template <typename T>
void LeastSquaresSolver::Solve(T *A, T *B) {
int work_size = 0;
int *devInfo;
OK(cudaMalloc(&devInfo, sizeof(int)));
/**********************************/
/* COMPUTING THE QR DECOMPOSITION */
/**********************************/
// --- CUDA QR GEQRF preliminary operations
T *d_TAU;
OK(cudaMalloc(&d_TAU, min(rows, cols) * sizeof(T)));
safe_cusolver(cusolverDnTgeqrf_bufferSize(solver_handle, rows, cols, A, rows,
&work_size));
T *work;
OK(cudaMalloc(&work, work_size * sizeof(T)));
// CUDA GEQRF execution: The matrix R is overwritten in upper triangular
// part of A, including diagonal
// elements. The matrix Q is not formed explicitly, instead, a sequence of
// householder vectors are stored in lower triangular part of A.
safe_cusolver(cusolverDnTgeqrf(solver_handle, rows, cols, A, rows, d_TAU,
work, work_size, devInfo));
int devInfo_h = 0;
OK(cudaMemcpy(&devInfo_h, devInfo, sizeof(int), cudaMemcpyDeviceToHost));
assert(devInfo_h == 0);
/*****************************/
/* SOLVING THE LINEAR SYSTEM */
/*****************************/
// --- CUDA ORMQR execution: Computes the multiplication Q^T * B and stores it
// in B
safe_cusolver(cusolverDnTormqr(solver_handle, CUBLAS_SIDE_LEFT, CUBLAS_OP_T,
rows, 1, min(rows, cols), A, rows, d_TAU, B,
rows, work, work_size, devInfo));
OK(cudaMemcpy(&devInfo_h, devInfo, sizeof(int), cudaMemcpyDeviceToHost));
OK(cudaFree(d_TAU));
OK(cudaFree(devInfo));
OK(cudaFree(work));
assert(devInfo_h == 0);
// --- Solving an upper triangular linear system R * x = Q^T * B
const T alpha = 1.;
safe_cublas(cublasTtrsm(
cublas_handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N,
CUBLAS_DIAG_NON_UNIT, cols, 1, &alpha, A, rows, B, cols));
OK(cudaDeviceSynchronize());
}
template <class T>
__global__ void differencing(T *out, const T *in, const int n) {
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < n;
i += gridDim.x * blockDim.x) {
if (i < n - 1)
out[i] = in[i] - in[i + 1];
else
out[n - 1] = NAN;
}
}
template <class T>
__global__ void undifferencing(T *out, const T *in, const int n) {
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < n;
i += gridDim.x * blockDim.x) {
if (i > 0)
out[i] = in[i] - in[i - 1];
else
out[0] = NAN;
}
}
template <class T>
ARIMAModel<T>::ARIMAModel(int p, int d, int q, int length)
: p(p), d(d), q(q), length(length) {
assert(q >= 0);
assert(d >= 0);
assert(p >= 0);
assert(length > 0);
assert(p > 0 || q > 0);
if (q > 0) {
OK(cudaMallocHost(&this->theta, sizeof(T) * q));
memset(this->theta, 0, sizeof(T) * q);
OK(cudaMalloc(&this->d_theta, sizeof(T) * q));
OK(cudaMemset(this->d_theta, 0, sizeof(T) * q));
}
if (p > 0) {
OK(cudaMallocHost(&this->phi, sizeof(T) * p));
memset(this->phi, 0, sizeof(T) * p);
OK(cudaMalloc(&this->d_phi, sizeof(T) * p));
OK(cudaMemset(this->d_phi, 0, sizeof(T) * p));
}
OK(cudaMalloc(&this->d_buffer, sizeof(T) * this->DifferencedLength()));
}
template <class T>
ARIMAModel<T>::~ARIMAModel() {
if (q > 0) {
OK(cudaFreeHost(this->theta));
OK(cudaFree(this->d_theta));
}
if (p > 0) {
OK(cudaFreeHost(this->phi));
OK(cudaFree(this->d_phi));
}
OK(cudaFree(this->d_buffer));
}
template <class T>
void ARIMAModel<T>::Difference(T *out, const T *in, int length) {
int block_size, grid_size;
compute1DInvokeConfig(length, &grid_size, &block_size, differencing<T>);
differencing<T><<<grid_size, block_size>>>(out, in, length);
}
template <class T>
void ARIMAModel<T>::AsMatrix(const T *ts_data, T *A, int depth, int lda,
int length) {
if (depth > 0) {
int n = length - depth + 1;
dim3 grid_size(DIVUP(n, BLOCK_SIZE), depth);
dim3 block_size(BLOCK_SIZE, 1);
ts_data_to_matrix_kernel<T><<<grid_size, block_size>>>(ts_data, A, lda, n);
}
}
template <class T>
void ARIMAModel<T>::AsMatrix(const T *ts_a, const T *ts_b, T *A, int a_depth,
int b_depth, int lda, int length) {
ARIMAModel<T>::AsMatrix(ts_a, A, a_depth, lda, length);
ARIMAModel<T>::AsMatrix(ts_b, A + a_depth * lda, b_depth, lda, length);
}
template <class T>
void ARIMAModel<T>::Apply(T *residual, const T *ts_data, const T *phi,
const int p, const T *last_residual, const T *theta,
const int q, int length) {
int block_size, grid_size;
compute1DInvokeConfig(length - max(p, q), &grid_size, &block_size,
update_residual_kernel<T>);
update_residual_kernel<T><<<grid_size, block_size>>>(
residual, ts_data, phi, p, last_residual, theta, q, length - max(p, q));
}
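// Fit() estimates the ARIMA coefficients in two stages: the series is
// differenced d times, the AR(p) part is solved directly via QR least squares
// on lagged observations, and (when q > 0) the MA(q) part is refined
// iteratively by regressing on lagged observations and lagged residuals,
// recomputing the residuals after every pass. This mirrors a
// Hannan-Rissanen-style procedure; convergence depends on maxiter.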
template <class T>
void ARIMAModel<T>::Fit(const T *data, const int maxiter) {
OK(cudaMalloc(&this->d_data_src, sizeof(T) * this->length));
OK(cudaMalloc(&this->d_data_differenced, sizeof(T) * this->length));
OK(cudaMalloc(&this->d_last_residual, sizeof(T) * this->length));
OK(cudaMemset(this->d_last_residual, 0, sizeof(T) * this->length));
OK(cudaMemcpy(this->d_data_src, data, sizeof(T) * this->length,
cudaMemcpyHostToDevice));
for (auto i = 0; i < this->d; ++i) {
this->Difference(this->d_data_differenced, this->d_data_src, this->length);
OK(cudaGetLastError());
OK(cudaDeviceSynchronize());
std::swap(this->d_data_src, this->d_data_differenced);
}
OK(cudaMemcpy(this->d_data_differenced, this->d_data_src,
this->length * sizeof(T), cudaMemcpyDeviceToDevice));
OK(cudaMemcpy(this->d_buffer, this->d_data_src,
sizeof(T) * this->DifferencedLength(),
cudaMemcpyDeviceToDevice));
if (this->p > 0) {
T *X;
OK(cudaMalloc(&X, sizeof(T) * this->ARLength() * this->p));
this->AsMatrix(this->d_data_src + 1, X, this->p, this->ARLength(),
this->DifferencedLength());
OK(cudaGetLastError());
OK(cudaDeviceSynchronize());
LeastSquaresSolver solver(this->ARLength(), this->p);
solver.Solve(X, this->d_data_differenced);
OK(cudaMemcpy(this->d_phi, this->d_data_differenced, sizeof(T) * this->p,
cudaMemcpyDeviceToDevice));
OK(cudaMemcpy(this->phi, this->d_data_differenced, sizeof(T) * this->p,
cudaMemcpyDeviceToHost));
OK(cudaFree(X));
}
if (this->q > 0) {
T *X;
int rows = min(this->MALength() == 0 ? this->ARLength() : this->MALength(),
this->ARLength() == 0 ? this->MALength() : this->ARLength());
OK(cudaMalloc(&X, sizeof(T) * rows * (this->q + this->p)));
for (int i = 0; i < maxiter; ++i) {
OK(cudaMemcpy(this->d_data_src, this->d_buffer,
sizeof(T) * this->DifferencedLength(),
cudaMemcpyDeviceToDevice));
this->Apply(this->d_data_differenced, this->d_data_src, this->d_phi,
this->p, this->d_last_residual, this->d_theta, this->q,
this->DifferencedLength());
OK(cudaGetLastError());
OK(cudaDeviceSynchronize());
this->AsMatrix(this->d_data_src + 1, this->d_data_differenced + 1, X,
this->p, this->q, rows, this->DifferencedLength());
OK(cudaGetLastError());
OK(cudaDeviceSynchronize());
LeastSquaresSolver solver(rows, this->q + this->p);
solver.Solve(X, this->d_data_src);
if (this->p > 0) {
OK(cudaMemcpy(this->d_phi, this->d_data_src, sizeof(T) * this->p,
cudaMemcpyDeviceToDevice));
}
OK(cudaDeviceSynchronize());
OK(cudaGetLastError());
OK(cudaMemcpy(this->d_theta, this->d_data_src + this->p,
sizeof(T) * this->q, cudaMemcpyDeviceToDevice));
std::swap(this->d_last_residual, this->d_data_differenced);
}
OK(cudaMemcpy(this->phi, this->d_data_src, sizeof(T) * this->p,
cudaMemcpyDeviceToHost));
OK(cudaMemcpy(this->theta, this->d_data_src + this->p, sizeof(T) * this->q,
cudaMemcpyDeviceToHost));
OK(cudaFree(X));
}
OK(cudaFree(this->d_data_src));
OK(cudaFree(this->d_data_differenced));
OK(cudaFree(this->d_last_residual));
}
template class ARIMAModel<float>;
template class ARIMAModel<double>;
} // namespace h2o4gpu
template <typename T>
void arima_fit(const int p, const int d, const int q, const T *ts_data,
const int length, T *theta, T *phi, const int maxiter) {
h2o4gpu::ARIMAModel<T> model(p, d, q, length);
model.Fit(ts_data, maxiter);
if (p > 0) std::memcpy(phi, model.Phi(), sizeof(T) * p);
if (q > 0) std::memcpy(theta, model.Theta(), sizeof(T) * q);
}
void arima_fit_float(const int p, const int d, const int q,
const float *ts_data, const int length, float *theta,
float *phi, const int maxiter) {
arima_fit<float>(p, d, q, ts_data, length, theta, phi, maxiter);
}
void arima_fit_double(const int p, const int d, const int q,
const double *ts_data, const int length, double *theta,
double *phi, const int maxiter) {
arima_fit<double>(p, d, q, ts_data, length, theta, phi, maxiter);
}
namespace nvbio {
namespace bowtie2 {
namespace cuda {
namespace {
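// Builds the per-mate output name by inserting ".1"/".2" before the first dot
// of file_name (or appending it when there is no extension), e.g.
// mate_file_name("hits.txt", 1) -> "hits.2.txt".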
std::string mate_file_name(const std::string& file_name, const uint32 anchor)
{
const size_t dot = file_name.find('.');
if (dot == std::string::npos)
return file_name + (anchor ? ".2" : ".1");
else
{
std::string output = file_name;
output.insert( dot, (anchor ? ".2" : ".1") );
return output;
}
}
} // anonymous namespace
// clear the persisting files
//
void persist_clear(const std::string& file_name)
{
// clear mate 0
{
const std::string mate_file = mate_file_name( file_name, 0u );
FILE* file = fopen( mate_file.c_str(), "w" ); fclose( file );
}
// clear mate 1
{
const std::string mate_file = mate_file_name( file_name, 1u );
FILE* file = fopen( mate_file.c_str(), "w" ); fclose( file );
}
}
// compute a set of hits
//
void persist_hits(
const std::string& file_name,
const char* name,
const uint32 anchor,
const uint32 count,
const SeedHitDequeArray& hit_deques)
{
// check whether we need to persist anything
if (file_name == "")
return;
const std::string mate_file = mate_file_name( file_name, anchor );
FILE* file = fopen( mate_file.c_str(), "a+" );
SeedHitDequeArrayHostStorage hit_deques_h = static_cast<const SeedHitDequeArrayDeviceStorage&>( hit_deques );
fprintf( file, "\n---------------------------------------------------------------------------------\n" );
fprintf( file, "%s\n", name );
fprintf( file, "---------------------------------------------------------------------------------\n" );
for (uint32 i = 0; i < count; ++i)
fprintf( file, "cnt[%u] = %u\n", i, uint32(hit_deques_h.m_counts[i]) );
fprintf( file, "\n" );
for (uint32 i = 0; i < count; ++i)
{
const uint32 n_ranges = hit_deques_h.m_counts[i];
const SeedHit* hits = &hit_deques_h.m_hits[0] + hit_deques_h.m_index[i];
fprintf( file, "hits[%u] = [%u]{\n", i, n_ranges );
for (uint32 j = 0; j < n_ranges; ++j)
{
const SeedHit hit = hits[j];
fprintf( file, " range[%u] = { (%u, %u - %u), dir[%u], pos[%u], rc[%u] }\n",
j, hit.get_range().x, hit.get_range().y, hit.get_range().y - hit.get_range().x,
hit.get_indexdir(),
hit.get_posinread(),
hit.get_readtype() );
}
fprintf( file, "}\n" );
}
fprintf( file, "\n" );
fclose( file );
}
// persist a set of reads
//
void persist_reads(
const std::string& file_name,
const char* name,
const uint32 anchor,
const uint32 count,
const thrust::device_vector<uint32>::iterator iterator)
{
// check whether we need to persist anything
if (file_name == "")
return;
const std::string mate_file = mate_file_name( file_name, anchor );
FILE* file = fopen( mate_file.c_str(), "a+" );
thrust::host_vector<int32> hvec( count );
thrust::copy(
iterator,
iterator + count,
hvec.begin() );
fprintf( file, "\n---------------------------------------------------------------------------------\n" );
fprintf( file, "%s\n", name );
fprintf( file, "---------------------------------------------------------------------------------\n" );
for (uint32 i = 0; i < count; ++i)
fprintf( file, "read[%u] = %u\n", i, hvec[i] );
fprintf( file, "\n" );
fclose( file );
}
// persist a set of selected hits
//
void persist_selection(
const std::string& file_name,
const char* name,
const uint32 anchor,
const uint32 read_count,
const packed_read* read_infos_dptr,
const uint32 n_multi,
const uint32 hits_queue_size,
const ReadHitsIndex& hits_index,
const HitQueues& hits_queue)
{
// check whether we need to persist anything
if (file_name == "")
return;
const std::string mate_file = mate_file_name( file_name, anchor );
FILE* file = fopen( mate_file.c_str(), "a+" );
fprintf( file, "\n---------------------------------------------------------------------------------\n" );
fprintf( file, "%s\n", name );
fprintf( file, "---------------------------------------------------------------------------------\n" );
thrust::host_vector<uint32> loc_vec( hits_queue.loc );
thrust::host_vector<packed_seed> seed_vec( hits_queue.seed );
if (n_multi > 1)
{
const uint32 link_stride = hits_index.m_stride;
thrust::host_vector<uint32> link_hvec( hits_index.m_links );
thrust::host_vector<uint32> read_infos_hvec( read_count );
thrust::host_vector<uint32> idx_hvec( read_count );
uint32* link_hptr = thrust::raw_pointer_cast( &link_hvec.front() );
uint32* read_infos_hptr = (uint32*)thrust::raw_pointer_cast( &read_infos_hvec.front() );
cudaDeviceSynchronize();
cudaMemcpy( read_infos_hptr, read_infos_dptr, read_count * sizeof(uint32), cudaMemcpyDeviceToHost );
cudaDeviceSynchronize();
// sort the reads so as to show everything in the same order all the times
thrust::copy(
thrust::make_counting_iterator(0u),
thrust::make_counting_iterator(0u) + read_count,
idx_hvec.begin() );
thrust::sort_by_key(
read_infos_hvec.begin(),
read_infos_hvec.end(),
idx_hvec.begin() );
fprintf( file, "selection = [reads: %u - batch-size: %u - link-stride: %u]\n", read_count, n_multi, link_stride );
for (uint32 i = 0; i < read_count; ++i)
{
const packed_read *read_info = (packed_read *)&read_infos_hvec[i];
const uint32 idx = idx_hvec[i];
const uint32 n = link_hvec[idx];
fprintf( file, "read[%06u:%06u:%u] = [%02u]{", i, read_info->read_id, read_info->top_flag, n );
if (n)
{
strided_iterator<const uint32*> link_vec( link_hptr + idx + link_stride, link_stride );
for (uint32 j = 0; j < n; ++j)
{
const packed_seed seed = seed_vec[ link_vec[j] ];
const uint32 loc = loc_vec[ link_vec[j] ];
fprintf( file, " seed[pos:%u,dir:%u,rc:%u,top:%u,loc:%u]\n", (uint32)seed.pos_in_read, (uint32)seed.index_dir, (uint32)seed.rc, (uint32)seed.top_flag, loc );
}
}
fprintf( file, "}\n" );
}
}
else
{
thrust::host_vector<uint32> read_infos_hvec( read_count );
thrust::host_vector<uint32> idx_hvec( read_count );
uint32* read_infos_hptr = (uint32*)thrust::raw_pointer_cast( &read_infos_hvec.front() );
cudaDeviceSynchronize();
cudaMemcpy( read_infos_hptr, read_infos_dptr, read_count * sizeof(uint32), cudaMemcpyDeviceToHost );
cudaDeviceSynchronize();
// sort the reads so as to show everything in the same order all the times
thrust::copy(
thrust::make_counting_iterator(0u),
thrust::make_counting_iterator(0u) + read_count,
idx_hvec.begin() );
thrust::sort_by_key(
read_infos_hvec.begin(),
read_infos_hvec.end(),
idx_hvec.begin() );
fprintf( file, "selection = [reads: %u - batch-size: 1]\n", read_count );
for (uint32 i = 0; i < read_count; ++i)
{
const packed_read *read_info = (packed_read *)&read_infos_hvec[i];
const uint32 idx = idx_hvec[i];
fprintf( file, "read[%06u:%06u:%u] = [%02u]{", i, read_info->read_id, read_info->top_flag, 1u );
const packed_seed seed = seed_vec[ idx ];
const uint32 loc = loc_vec[ idx ];
fprintf( file, " seed[pos:%u,dir:%u,rc:%u,top:%u,loc:%u]\n", (uint32)seed.pos_in_read, (uint32)seed.index_dir, (uint32)seed.rc, (uint32)seed.top_flag, loc );
fprintf( file, "}\n" );
}
}
fclose( file );
}
// persist a set of scores
//
void persist_scores(
const std::string& file_name,
const char* name,
const uint32 anchor,
const uint32 read_count,
const uint32 n_multi,
const uint32 hits_queue_size,
const ScoringQueues& scoring_queues)
{
// check whether we need to persist anything
if (file_name == "")
return;
const std::string mate_file = mate_file_name( file_name, anchor );
FILE* file = fopen( mate_file.c_str(), "a+" );
fprintf( file, "\n---------------------------------------------------------------------------------\n" );
fprintf( file, "%s\n", name );
fprintf( file, "---------------------------------------------------------------------------------\n" );
thrust::host_vector<packed_read> read_infos_hvec( scoring_queues.active_reads.in_queue );
thrust::host_vector<int32> score_hvec( scoring_queues.hits.score );
uint32* read_infos_hptr = (uint32*)thrust::raw_pointer_cast( &read_infos_hvec.front() );
int32* score_hptr = thrust::raw_pointer_cast( &score_hvec.front() );
if (n_multi > 1)
{
const uint32 link_stride = scoring_queues.hits_index.m_stride;
thrust::host_vector<uint32> link_hvec( scoring_queues.hits_index.m_links );
thrust::host_vector<uint32> idx_hvec( read_count );
uint32* link_hptr = thrust::raw_pointer_cast( &link_hvec.front() );
// sort the reads so as to show everything in the same order all the times
thrust::copy(
thrust::make_counting_iterator(0u),
thrust::make_counting_iterator(0u) + read_count,
idx_hvec.begin() );
thrust::sort_by_key(
read_infos_hptr,
read_infos_hptr + read_count,
idx_hvec.begin() );
fprintf( file, "scores = [reads: %u - batch-size: %u - link-stride: %u]\n", read_count, n_multi, link_stride );
for (uint32 i = 0; i < read_count; ++i)
{
const packed_read *read_info = (packed_read *)&read_infos_hvec[i];
const uint32 idx = idx_hvec[i];
const uint32 n = link_hvec[idx];
if (n)
{
strided_iterator<const uint32*> link_vec( link_hptr + idx + link_stride, link_stride );
fprintf( file, "read[%06u:%06u:%u] = {\n", i, read_info->read_id, read_info->top_flag );
for (uint32 j = 0; j < n; ++j)
{
const uint32 link = link_vec[j];
fprintf( file, " score[%04u] = %d\n", j, score_hvec[link] );
}
fprintf( file, "}\n" );
}
}
fprintf( file, "\n" );
// sort
thrust::sort(
score_hvec.begin(),
score_hvec.end() );
// write sorted score list
fprintf( file, "\n" );
for (uint32 i = 0; i < hits_queue_size; ++i)
fprintf( file, "score[%u] = %d\n", i, score_hvec[i] );
fprintf( file, "\n" );
int64 sum = 0;
for (uint32 i = 0; i < hits_queue_size; ++i)
sum += score_hvec[i];
fprintf( file, "sum = %lld\n", sum );
// compute a crc
const char* ptr = (const char*)score_hptr;
const uint64 crc = crcCalc( ptr, sizeof(int32)*hits_queue_size );
fprintf( file, "crc = %llu\n", crc );
}
else
{
thrust::host_vector<uint32> idx_hvec( read_count );
// sort the reads so that everything is shown in the same order every time
thrust::copy(
thrust::make_counting_iterator(0u),
thrust::make_counting_iterator(0u) + read_count,
idx_hvec.begin() );
thrust::sort_by_key(
read_infos_hptr,
read_infos_hptr + read_count,
idx_hvec.begin() );
for (uint32 i = 0; i < read_count; ++i)
{
const packed_read *read_info = (packed_read *)&read_infos_hvec[i];
const uint32 idx = idx_hvec[i];
const uint32 score = score_hvec[idx];
fprintf( file, "read[%06u:%06u:%u] : score[%d]\n", i, read_info->read_id, read_info->top_flag, score );
}
fprintf( file, "\n" );
}
fclose( file );
}
} // namespace cuda
} // namespace bowtie2
} // namespace nvbio
__constant__ static __align__(16) uint32_t c_E8_bslice32[42][8] = {
// Round 0 (Function0)
{ 0xa2ded572, 0x90d6ab81, 0x67f815df, 0xf6875a4d, 0x0a15847b, 0xc54f9f4e, 0x571523b7, 0x402bd1c3 },
{ 0xe03a98ea, 0xb4960266, 0x9cfa455c, 0x8a53bbf2, 0x99d2c503, 0x1a1456b5, 0x9a99b266, 0x31a2db88 }, // 1
{ 0x5c5aa303, 0x8019051c, 0xdb0e199a, 0x1d959e84, 0x0ab23f40, 0xadeb336f, 0x1044c187, 0xdccde75e }, // 2
{ 0x9213ba10, 0x39812c0a, 0x416bbf02, 0x5078aa37, 0x156578dc, 0xd2bf1a3f, 0xd027bbf7, 0xd3910041 }, // 3
{ 0x0d5a2d42, 0x0ba75c18, 0x907eccf6, 0xac442bc7, 0x9c9f62dd, 0xd665dfd1, 0xce97c092, 0x23fcc663 }, // 4
{ 0x036c6e97, 0xbb03f1ee, 0x1ab8e09e, 0xfa618e5d, 0x7e450521, 0xb29796fd, 0xa8ec6c44, 0x97818394 }, // 5
{ 0x37858e4a, 0x8173fe8a, 0x2f3003db, 0x6c69b8f8, 0x2d8d672a, 0x4672c78a, 0x956a9ffb, 0x14427fc0 }, // 6
// Round 7 (Function0)
{ 0x8f15f4c5, 0xb775de52, 0xc45ec7bd, 0xbc88e4ae, 0xa76f4475, 0x1e00b882, 0x80bb118f, 0xf4a3a698 },
{ 0x338ff48e, 0x20edf1b6, 0x1563a3a9, 0xfde05a7c, 0x24565faa, 0x5ae9ca36, 0x89f9b7d5, 0x362c4206 },
{ 0x433529ce, 0x591ff5d0, 0x3d98fe4e, 0x86814e6f, 0x74f93a53, 0x81ad9d0e, 0xa74b9a73, 0x9f5ad8af },
{ 0x670605a7, 0x26077447, 0x6a6234ee, 0x3f1080c6, 0xbe280b8b, 0x6f7ea0e0, 0x2717b96e, 0x7b487ec6 },
{ 0xa50a550d, 0x81727686, 0xc0a4f84a, 0xd48d6050, 0x9fe7e391, 0x415a9e7e, 0x9ef18e97, 0x62b0e5f3 },
{ 0xec1f9ffc, 0xf594d74f, 0x7a205440, 0xd895fa9d, 0x001ae4e3, 0x117e2e55, 0x84c9f4ce, 0xa554c324 },
{ 0x2872df5b, 0xef7c8905, 0x286efebd, 0x2ed349ee, 0xe27ff578, 0x85937e44, 0xb2c4a50f, 0x7f5928eb },
// Round 14 (Function0)
{ 0x37695f70, 0x04771bc7, 0x4a3124b3, 0xe720b951, 0xf128865e, 0xe843fe74, 0x65e4d61d, 0x8a87d423 },
{ 0xa3e8297d, 0xfb301b1d, 0xf2947692, 0xe01bdc5b, 0x097acbdd, 0x4f4924da, 0xc1d9309b, 0xbf829cf2 },
{ 0x31bae7a4, 0x32fcae3b, 0xffbf70b4, 0x39d3bb53, 0x0544320d, 0xc1c39f45, 0x48bcf8de, 0xa08b29e0 },
{ 0xfd05c9e5, 0x01b771a2, 0x0f09aef7, 0x95ed44e3, 0x12347094, 0x368e3be9, 0x34f19042, 0x4a982f4f },
{ 0x631d4088, 0xf14abb7e, 0x15f66ca0, 0x30c60ae2, 0x4b44c147, 0xc5b67046, 0xffaf5287, 0xe68c6ecc },
{ 0x56a4d5a4, 0x45ce5773, 0x00ca4fbd, 0xadd16430, 0x4b849dda, 0x68cea6e8, 0xae183ec8, 0x67255c14 },
{ 0xf28cdaa3, 0x20b2601f, 0x16e10ecb, 0x7b846fc2, 0x5806e933, 0x7facced1, 0x9a99949a, 0x1885d1a0 },
// Round 21 (Function0)
{ 0xa15b5932, 0x67633d9f, 0xd319dd8d, 0xba6b04e4, 0xc01c9a50, 0xab19caf6, 0x46b4a5aa, 0x7eee560b },
{ 0xea79b11f, 0x5aac571d, 0x742128a9, 0x76d35075, 0x35f7bde9, 0xfec2463a, 0xee51363b, 0x01707da3 },
{ 0xafc135f7, 0x15638341, 0x42d8a498, 0xa8db3aea, 0x20eced78, 0x4d3bc3fa, 0x79676b9e, 0x832c8332 },
{ 0x1f3b40a7, 0x6c4e3ee7, 0xf347271c, 0xfd4f21d2, 0x34f04059, 0x398dfdb8, 0x9a762db7, 0xef5957dc },
{ 0x490c9b8d, 0xd0ae3b7d, 0xdaeb492b, 0x84558d7a, 0x49d7a25b, 0xf0e9a5f5, 0x0d70f368, 0x658ef8e4 },
{ 0xf4a2b8a0, 0x92946891, 0x533b1036, 0x4f88e856, 0x9e07a80c, 0x555cb05b, 0x5aec3e75, 0x4cbcbaf8 },
{ 0x993bbbe3, 0x28acae64, 0x7b9487f3, 0x6db334dc, 0xd6f4da75, 0x50a5346c, 0x5d1c6b72, 0x71db28b8 },
// Round 28 (Function0)
{ 0xf2e261f8, 0xf1bcac1c, 0x2a518d10, 0xa23fce43, 0x3364dbe3, 0x3cd1bb67, 0xfc75dd59, 0xb043e802 },
{ 0xca5b0a33, 0xc3943b92, 0x75a12988, 0x1e4d790e, 0x4d19347f, 0xd7757479, 0x5c5316b4, 0x3fafeeb6 },
{ 0xf7d4a8ea, 0x5324a326, 0x21391abe, 0xd23c32ba, 0x097ef45c, 0x4a17a344, 0x5127234c, 0xadd5a66d },
{ 0xa63e1db5, 0xa17cf84c, 0x08c9f2af, 0x4d608672, 0x983d5983, 0xcc3ee246, 0x563c6b91, 0xf6c76e08 },
{ 0xb333982f, 0xe8b6f406, 0x5e76bcb1, 0x36d4c1be, 0xa566d62b, 0x1582ee74, 0x2ae6c4ef, 0x6321efbc },
{ 0x0d4ec1fd, 0x1614c17e, 0x69c953f4, 0x16fae006, 0xc45a7da7, 0x3daf907e, 0x26585806, 0x3f9d6328 },
{ 0xe3f2c9d2, 0x16512a74, 0x0cd29b00, 0x9832e0f2, 0x30ceaa5f, 0xd830eb0d, 0x300cd4b7, 0x9af8cee3 },
// Round 35 (Function0)
{ 0x7b9ec54b, 0x574d239b, 0x9279f1b5, 0x316796e6, 0x6ee651ff, 0xf3a6e6cc, 0xd3688604, 0x05750a17 },
{ 0xd98176b1, 0xb3cb2bf4, 0xce6c3213, 0x47154778, 0x8452173c, 0x825446ff, 0x62a205f8, 0x486a9323 },
{ 0x0758df38, 0x442e7031, 0x65655e4e, 0x86ca0bd0, 0x897cfcf2, 0xa20940f0, 0x8e5086fc, 0x4e477830 },
{ 0x39eea065, 0x26b29721, 0x8338f7d1, 0x6ff81301, 0x37e95ef7, 0xd1ed44a3, 0xbd3a2ce4, 0xe7de9fef },
{ 0x15dfa08b, 0x7ceca7d8, 0xd9922576, 0x7eb027ab, 0xf6f7853c, 0xda7d8d53, 0xbe42dc12, 0xdea83eaa },
{ 0x93ce25aa, 0xdaef5fc0, 0xd86902bd, 0xa5194a17, 0xfd43f65a, 0x33664d97, 0xf908731a, 0x6a21fd4c },
{ 0x3198b435, 0xa163d09a, 0x701541db, 0x72409751, 0xbb0f1eea, 0xbf9d75f6, 0x9b54cded, 0xe26f4791 }
// 42 rounds...
};
static uint32_t *d_found[MAX_GPUS];
/*swapping bits 16i||16i+1||......||16i+7 with bits 16i+8||16i+9||......||16i+15 of 32-bit x*/
//#define SWAP8(x) (x) = ((((x) & 0x00ff00ffUL) << 8) | (((x) & 0xff00ff00UL) >> 8));
#define SWAP8(x) (x) = __byte_perm(x, x, 0x2301);
/*swapping bits 32i||32i+1||......||32i+15 with bits 32i+16||32i+17||......||32i+31 of 32-bit x*/
//#define SWAP16(x) (x) = ((((x) & 0x0000ffffUL) << 16) | (((x) & 0xffff0000UL) >> 16));
#define SWAP16(x) (x) = __byte_perm(x, x, 0x1032);
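/* Worked example (illustrative value): with x = 0xAABBCCDD, SWAP8(x) gives
   0xBBAADDCC (the two bytes of each 16-bit half exchanged) and SWAP16(x)
   gives 0xCCDDAABB (the two 16-bit halves exchanged); the __byte_perm
   selectors 0x2301 and 0x1032 reproduce exactly what the commented-out
   shift/mask variants compute. */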
/*The MDS transform*/
#define L(m0,m1,m2,m3,m4,m5,m6,m7) \
(m4) ^= (m1); \
(m5) ^= (m2); \
(m6) ^= (m0) ^ (m3); \
(m7) ^= (m0); \
(m0) ^= (m5); \
(m1) ^= (m6); \
(m2) ^= (m4) ^ (m7); \
(m3) ^= (m4);
/*The Sbox*/
#define Sbox(m0,m1,m2,m3,cc) \
m3 = ~(m3); \
m0 ^= ((~(m2)) & (cc)); \
temp0 = (cc) ^ ((m0) & (m1));\
m0 ^= ((m2) & (m3)); \
m3 ^= ((~(m1)) & (m2)); \
m1 ^= ((m0) & (m2)); \
m2 ^= ((m0) & (~(m3))); \
m0 ^= ((m1) | (m3)); \
m3 ^= ((m1) & (m2)); \
m1 ^= (temp0 & (m0)); \
m2 ^= temp0;
__device__ __forceinline__
static void Sbox_and_MDS_layer(uint32_t x[8][4], const int rnd)
{
uint2* cc = (uint2*)&c_E8_bslice32[rnd];
// Sbox and MDS layer
#pragma unroll
for (int i = 0; i < 4; i++, ++cc) {
uint32_t temp0;
Sbox(x[0][i], x[2][i], x[4][i], x[6][i], cc->x);
Sbox(x[1][i], x[3][i], x[5][i], x[7][i], cc->y);
L(x[0][i], x[2][i], x[4][i], x[6][i], x[1][i], x[3][i], x[5][i], x[7][i]);
}
}
static __device__ __forceinline__ void RoundFunction0(uint32_t x[8][4], uint32_t roundnumber)
{
Sbox_and_MDS_layer(x, roundnumber);
#pragma unroll 4
for (int j = 1; j < 8; j = j + 2)
{
x[j][0] = ((x[j][0] & 0x55555555u) << 1) | (x[j][0] & 0xAAAAAAAAu) >> 1;
x[j][1] = ((x[j][1] & 0x55555555u) << 1) | (x[j][1] & 0xAAAAAAAAu) >> 1;
x[j][2] = ((x[j][2] & 0x55555555u) << 1) | (x[j][2] & 0xAAAAAAAAu) >> 1;
x[j][3] = ((x[j][3] & 0x55555555u) << 1) | (x[j][3] & 0xAAAAAAAAu) >> 1;
}
}
static __device__ __forceinline__ void RoundFunction1(uint32_t x[8][4], uint32_t roundnumber)
{
Sbox_and_MDS_layer(x, roundnumber);
#pragma unroll 4
for (int j = 1; j < 8; j = j + 2)
{
x[j][0] = ((x[j][0] & 0x33333333u) << 2) | (x[j][0] & 0xCCCCCCCCu) >> 2;
x[j][1] = ((x[j][1] & 0x33333333u) << 2) | (x[j][1] & 0xCCCCCCCCu) >> 2;
x[j][2] = ((x[j][2] & 0x33333333u) << 2) | (x[j][2] & 0xCCCCCCCCu) >> 2;
x[j][3] = ((x[j][3] & 0x33333333u) << 2) | (x[j][3] & 0xCCCCCCCCu) >> 2;
}
}
static __device__ __forceinline__ void RoundFunction2(uint32_t x[8][4], uint32_t roundnumber)
{
Sbox_and_MDS_layer(x, roundnumber);
#pragma unroll 4
for (int j = 1; j < 8; j = j + 2)
{
x[j][0] = ((x[j][0] & 0x0f0f0f0fu) << 4) | (x[j][0] & 0xF0F0F0F0u) >> 4;
x[j][1] = ((x[j][1] & 0x0f0f0f0fu) << 4) | (x[j][1] & 0xF0F0F0F0u) >> 4;
x[j][2] = ((x[j][2] & 0x0f0f0f0fu) << 4) | (x[j][2] & 0xF0F0F0F0u) >> 4;
x[j][3] = ((x[j][3] & 0x0f0f0f0fu) << 4) | (x[j][3] & 0xF0F0F0F0u) >> 4;
}
}
static __device__ __forceinline__ void RoundFunction3(uint32_t x[8][4], uint32_t roundnumber)
{
Sbox_and_MDS_layer(x, roundnumber);
#pragma unroll 4
for (int j = 1; j < 8; j = j + 2)
{
#pragma unroll 4
for (int i = 0; i < 4; i++) SWAP8(x[j][i]);
}
}
static __device__ __forceinline__ void RoundFunction4(uint32_t x[8][4], uint32_t roundnumber)
{
Sbox_and_MDS_layer(x, roundnumber);
#pragma unroll 4
for (int j = 1; j < 8; j = j + 2)
{
#pragma unroll 4
for (int i = 0; i < 4; i++) SWAP16(x[j][i]);
}
}
static __device__ __forceinline__ void RoundFunction5(uint32_t x[8][4], uint32_t roundnumber)
{
uint32_t temp0;
Sbox_and_MDS_layer(x, roundnumber);
#pragma unroll 4
for (int j = 1; j < 8; j = j + 2)
{
#pragma unroll 2
for (int i = 0; i < 4; i = i + 2) {
temp0 = x[j][i]; x[j][i] = x[j][i + 1]; x[j][i + 1] = temp0;
}
}
}
static __device__ __forceinline__ void RoundFunction6(uint32_t x[8][4], uint32_t roundnumber)
{
uint32_t temp0;
Sbox_and_MDS_layer(x, roundnumber);
#pragma unroll 4
for (int j = 1; j < 8; j = j + 2)
{
#pragma unroll 2
for (int i = 0; i < 2; i++) {
temp0 = x[j][i]; x[j][i] = x[j][i + 2]; x[j][i + 2] = temp0;
}
}
}
/*The bijective function E8, in bitslice form */
static __device__ __forceinline__ void E8(uint32_t x[8][4])
{
/*perform all 42 rounds, seven round functions per loop iteration*/
//#pragma unroll 6
for (int i = 0; i < 42; i += 7)
{
RoundFunction0(x, i);
RoundFunction1(x, i + 1);
RoundFunction2(x, i + 2);
RoundFunction3(x, i + 3);
RoundFunction4(x, i + 4);
RoundFunction5(x, i + 5);
RoundFunction6(x, i + 6);
}
}
static __device__ __forceinline__ void F8(uint32_t x[8][4], const uint32_t buffer[16])
{
/*xor the 512-bit message with the first half of the 1024-bit hash state*/
#pragma unroll 16
for (int i = 0; i < 16; i++) x[i >> 2][i & 3] ^= ((uint32_t*)buffer)[i];
/*the bijective function E8 */
E8(x);
/*xor the 512-bit message with the second half of the 1024-bit hash state*/
#pragma unroll 16
for (int i = 0; i < 16; i++) x[(16 + i) >> 2][(16 + i) & 3] ^= ((uint32_t*)buffer)[i];
}
// The hash function
__global__ __launch_bounds__(256, 4)
void quark_jh512_gpu_hash_64(uint32_t threads, uint32_t startNounce, uint32_t *const __restrict__ g_hash, const uint32_t *const __restrict__ g_nonceVector)
{
const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
if (thread < threads)
{
const uint32_t nounce = (g_nonceVector != NULL) ? g_nonceVector[thread] : (startNounce + thread);
const uint32_t hashPosition = nounce - startNounce;
uint32_t *const Hash = &g_hash[16 * hashPosition];
uint32_t x[8][4] = {
{ 0x964bd16f, 0x17aa003e, 0x052e6a63, 0x43d5157a },
{ 0x8d5e228a, 0x0bef970c, 0x591234e9, 0x61c3b3f2 },
{ 0xc1a01d89, 0x1e806f53, 0x6b05a92a, 0x806d2bea },
{ 0xdbcc8e58, 0xa6ba7520, 0x763a0fa9, 0xf73bf8ba },
{ 0x05e66901, 0x694ae341, 0x8e8ab546, 0x5ae66f2e },
{ 0xd0a74710, 0x243c84c1, 0xb1716e3b, 0x99c15a2d },
{ 0xecf657cf, 0x56f8b19d, 0x7c8806a7, 0x56b11657 },
{ 0xdffcc2e3, 0xfb1785e6, 0x78465a54, 0x4bdd8ccc } };
uint32_t msg[16];
uint28 *phash = (uint28*)Hash;
uint28 *outpt = (uint28*)msg;
outpt[0] = phash[0];
outpt[1] = phash[1];
#pragma unroll 16
for (int i = 0; i < 16; i++) x[i >> 2][i & 3] ^= msg[i];
E8(x);
#pragma unroll 16
for (int i = 0; i < 16; i++) x[(16 + i) >> 2][(16 + i) & 3] ^= msg[i];
x[0 >> 2][0 & 3] ^= 0x80;
x[15 >> 2][15 & 3] ^= 0x00020000;
E8(x);
x[(16 + 0) >> 2][(16 + 0) & 3] ^= 0x80;
x[(16 + 15) >> 2][(16 + 15) & 3] ^= 0x00020000;
Hash[0] = x[4][0];
Hash[1] = x[4][1];
Hash[2] = x[4][2];
Hash[3] = x[4][3];
Hash[4] = x[5][0];
Hash[5] = x[5][1];
Hash[6] = x[5][2];
Hash[7] = x[5][3];
Hash[8] = x[6][0];
Hash[9] = x[6][1];
Hash[10] = x[6][2];
Hash[11] = x[6][3];
Hash[12] = x[7][0];
Hash[13] = x[7][1];
Hash[14] = x[7][2];
Hash[15] = x[7][3];
}
}
// The hash function
#define TPB2 256
__global__ __launch_bounds__(TPB2, 2)
void quark_jh512_gpu_hash_64_final(uint32_t threads, uint32_t startNounce, uint64_t *const __restrict__ g_hash, const uint32_t *const __restrict__ g_nonceVector, uint32_t *const __restrict__ d_found, uint32_t target)
{
const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x);
if (thread < threads)
{
const uint32_t nounce = (g_nonceVector != NULL) ? g_nonceVector[thread] : (startNounce + thread);
const uint32_t hashPosition = nounce - startNounce;
uint32_t *const Hash = (uint32_t*)&g_hash[8 * hashPosition];
uint32_t msg[16];
uint28 *phash = (uint28*)Hash;
uint28 *outpt = (uint28*)msg;
outpt[0] = phash[0];
outpt[1] = phash[1];
uint32_t x[8][4] = {
{ 0x964bd16f, 0x17aa003e, 0x052e6a63, 0x43d5157a },
{ 0x8d5e228a, 0x0bef970c, 0x591234e9, 0x61c3b3f2 },
{ 0xc1a01d89, 0x1e806f53, 0x6b05a92a, 0x806d2bea },
{ 0xdbcc8e58, 0xa6ba7520, 0x763a0fa9, 0xf73bf8ba },
{ 0x05e66901, 0x694ae341, 0x8e8ab546, 0x5ae66f2e },
{ 0xd0a74710, 0x243c84c1, 0xb1716e3b, 0x99c15a2d },
{ 0xecf657cf, 0x56f8b19d, 0x7c8806a7, 0x56b11657 },
{ 0xdffcc2e3, 0xfb1785e6, 0x78465a54, 0x4bdd8ccc } };
F8(x, msg);
x[0][0] ^= 0x80U;
x[3][3] ^= 0x00020000U;
for (int i = 0; i < 35; i += 7)
{
RoundFunction0(x, i);
RoundFunction1(x, i + 1);
RoundFunction2(x, i + 2);
RoundFunction3(x, i + 3);
RoundFunction4(x, i + 4);
RoundFunction5(x, i + 5);
RoundFunction6(x, i + 6);
}
RoundFunction0(x, 35);
RoundFunction1(x, 35 + 1);
RoundFunction2(x, 35 + 2);
RoundFunction3(x, 35 + 3);
RoundFunction4(x, 35 + 4);
RoundFunction5(x, 35 + 5);
RoundFunction6(x, 35 + 6);
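// Record candidate nonces: d_found is pre-filled with 0xff by the host
// launcher below, atomicExch stores the newest matching nonce in slot 0 and,
// if another nonce was already recorded, moves that one to slot 1.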
if(x[5][3] <= target)
{
uint32_t tmp = atomicExch(&(d_found[0]), nounce);
if(tmp != 0xffffffff)
d_found[1] = tmp;
}
}
}
__host__ void quark_jh512_cpu_hash_64(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash)
{
const uint32_t threadsperblock = 32;
// compute how many thread blocks we need
dim3 grid((threads + threadsperblock-1)/threadsperblock);
dim3 block(threadsperblock);
quark_jh512_gpu_hash_64<<<grid, block, 0, gpustream[thr_id]>>>(threads, startNounce, d_hash, d_nonceVector);
}
// Setup functions
__host__ void quark_jh512_cpu_init(int thr_id)
{
cudaMalloc(&(d_found[thr_id]), 2 * sizeof(uint32_t));
}
__host__ void quark_jh512_cpu_hash_64_final(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_nonceVector, uint32_t *d_hash, uint32_t target, uint32_t *h_found)
{
dim3 grid((threads + TPB2 - 1) / TPB2);
dim3 block(TPB2);
cudaMemsetAsync(d_found[thr_id], 0xff, 2 * sizeof(uint32_t), gpustream[thr_id]);
quark_jh512_gpu_hash_64_final << <grid, block, 0, gpustream[thr_id] >> >(threads, startNounce, (uint64_t*)d_hash, d_nonceVector, d_found[thr_id], target);
cudaMemcpyAsync(h_found, d_found[thr_id], 2 * sizeof(uint32_t), cudaMemcpyDeviceToHost, gpustream[thr_id]);
}
template <typename T>
void random_data(int verbose, thrust::device_vector<T> &array, int m, int n) {
thrust::host_vector<T> host_array(m * n);
for (int i = 0; i < m * n; i++) {
host_array[i] = (T)rand() / (T)RAND_MAX;
}
array = host_array;
}
/**
* Like copy_data but shuffles the data according to mapping from v
* @tparam T
* @param verbose
* @param v
* @param ord
* @param array
* @param srcdata
* @param q
* @param n
* @param npergpu
* @param d
*/
template <typename T>
void copy_data_shuffled(int verbose, std::vector<int> v, const char ord,
thrust::device_vector<T> &array, const T *srcdata,
int q, int n, int npergpu, int d) {
thrust::host_vector<T> host_array(npergpu * d);
if (ord == 'c') {
log_debug(verbose, "Copy data shuffle COL ORDER -> ROW ORDER");
for (int i = 0; i < npergpu; i++) {
for (size_t j = 0; j < d; j++) {
host_array[i * d + j] =
srcdata[v[q * npergpu + i] + j * n]; // shift by which gpu
}
}
} else {
log_debug(verbose, "Copy data shuffle ROW ORDER not changed");
for (int i = 0; i < npergpu; i++) {
for (size_t j = 0; j < d; j++) {
host_array[i * d + j] =
srcdata[v[q * npergpu + i] * d + j]; // shift by which gpu
}
}
}
array = host_array;
}
template <typename T>
void copy_centroids_shuffled(int verbose, std::vector<int> v, const char ord,
thrust::device_vector<T> &array, const T *srcdata,
int n, int k, int d) {
copy_data_shuffled(verbose, v, ord, array, srcdata, 0, n, k, d);
}
/**
* Copies centroids from initial training set randomly.
* @tparam T
* @param verbose
* @param seed
* @param ord
* @param array
* @param srcdata
* @param q
* @param n
* @param npergpu
* @param d
* @param k
*/
template <typename T>
void random_centroids(int verbose, int seed, const char ord,
thrust::device_vector<T> &array, const T *srcdata, int q,
int n, int npergpu, int d, int k) {
thrust::host_vector<T> host_array(k * d);
if (seed < 0) {
std::random_device
rd; // Will be used to obtain a seed for the random number engine
seed = rd();
}
std::mt19937 gen(seed);
std::uniform_int_distribution<> dis(
0,
n - 1); // random i in range from 0..n-1 (i.e. only 1 gpu gets
// centroids)
if (ord == 'c') {
log_debug(verbose, "Random centroids COL ORDER -> ROW ORDER");
for (int i = 0; i < k; i++) { // clusters
size_t reali =
dis(gen); // + q*npergpu; // row sampled (called indexj above)
for (size_t j = 0; j < d; j++) { // cols
host_array[i * d + j] = srcdata[reali + j * n];
}
}
} else {
log_debug(verbose, "Random centroids ROW ORDER not changed");
for (int i = 0; i < k; i++) { // rows
size_t reali = dis(gen); // + q*npergpu ; // row sampled
for (size_t j = 0; j < d; j++) { // cols
host_array[i * d + j] = srcdata[reali * d + j];
}
}
}
array = host_array;
}
/**
* KMEANS METHODS FIT, PREDICT, TRANSFORM
*/
#define __HBAR__ \
"------------------------------------------------------------------------" \
"--" \
"--\n"
namespace h2o4gpukmeans {
template <typename T>
int kmeans_fit(int verbose, int seed, int gpu_idtry, int n_gputry, size_t rows,
size_t cols, int k, int max_iterations, int init_from_data,
T threshold, const T *srcdata, T **pred_centroids,
int **pred_labels);
template <typename T>
int pick_point_idx_weighted(int seed, std::vector<T> *data,
thrust::host_vector<T> weights) {
T weighted_sum = 0;
for (int i = 0; i < weights.size(); i++) {
if (data) {
weighted_sum += (data->data()[i] * weights.data()[i]);
} else {
weighted_sum += weights.data()[i];
}
}
T best_prob = 0.0;
int best_prob_idx = 0;
std::mt19937 mt(seed);
std::uniform_real_distribution<> dist(0.0, 1.0);
int i = 0;
for (i = 0; i < weights.size(); i++) {
T prob_threshold = (T)dist(mt);
T data_val = weights.data()[i];
if (data) {
data_val *= data->data()[i];
}
T prob_x = (data_val / weighted_sum);
if (prob_x > prob_threshold) {
break;
}
if (prob_x >= best_prob) {
best_prob = prob_x;
best_prob_idx = i;
}
}
return weights.size() == i ? best_prob_idx : i;
}
/**
* Appends the record at position idx (the cols values starting at idx*cols in
* data) to centroids and zeroes its weight so it cannot be picked again.
* @tparam T
* @param idx
* @param cols
* @param data
* @param weights
* @param centroids
*/
template <typename T>
void add_centroid(int idx, int cols, thrust::host_vector<T> &data,
thrust::host_vector<T> &weights, std::vector<T> ¢roids) {
for (int i = 0; i < cols; i++) {
centroids.push_back(data[idx * cols + i]);
}
weights[idx] = 0;
}
/**
* K-Means++ algorithm
* @tparam T
* @param seed
* @param data
* @param weights
* @param k
* @param cols
* @param centroids
*/
template <typename T>
void kmeans_plus_plus(int verbose, int seed, thrust::host_vector<T> data,
thrust::host_vector<T> weights, int k, int cols,
thrust::host_vector<T> ¢roids) {
std::vector<T> std_centroids(0);
std_centroids.reserve(k * cols);
int centroid_idx =
pick_point_idx_weighted(seed, (std::vector<T> *)NULL, weights);
add_centroid(centroid_idx, cols, data, weights, std_centroids);
std::vector<T> best_pairwise_distances(data.size() /
cols); // one for each row in data
std::vector<T> std_data(data.begin(), data.end());
compute_distances(std_data, std_centroids, best_pairwise_distances,
data.size() / cols, cols, 1);
std::vector<T> curr_pairwise_distances(std_data.size() / cols);
for (int iter = 0; iter < k - 1; iter++) {
log_verbose(verbose, "KMeans++ - Iteraton %d/%d.", iter, k - 1);
centroid_idx =
pick_point_idx_weighted(seed, &best_pairwise_distances, weights);
add_centroid(centroid_idx, cols, data, weights, std_centroids);
std::vector<T> most_recent_centroids;
most_recent_centroids.reserve(cols);
add_centroid(centroid_idx, cols, data, weights, most_recent_centroids);
best_pairwise_distances[centroid_idx] = 0;
compute_distances(std_data, most_recent_centroids, curr_pairwise_distances,
std_data.size() / cols, cols, 1);
for (int i = 0; i < curr_pairwise_distances.size(); i++) {
best_pairwise_distances[i] =
std::min(curr_pairwise_distances[i], best_pairwise_distances[i]);
}
std::fill(curr_pairwise_distances.begin(), curr_pairwise_distances.end(),
(T)0.0);
}
centroids.assign(std_centroids.begin(), std_centroids.end());
}
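// Usage sketch for kmeans_plus_plus above (a hypothetical helper with
// illustrative values, deliberately kept out of the build): pick k = 2
// centroids from 4 two-dimensional points with uniform initial weights.
#if 0
void kmeans_plus_plus_example() {
  const float raw[8] = {0.f, 0.f, 0.f, 1.f, 10.f, 10.f, 10.f, 11.f};
  thrust::host_vector<float> data(raw, raw + 8);  // 4 rows x 2 cols
  thrust::host_vector<float> weights(4, 1.f);     // every row equally likely at first
  thrust::host_vector<float> centroids;
  kmeans_plus_plus(/*verbose=*/0, /*seed=*/42, data, weights,
                   /*k=*/2, /*cols=*/2, centroids);
  // centroids now holds 2 * 2 values, one row per chosen centroid.
}
#endif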
template <typename T>
struct min_calc_functor {
T *all_costs_ptr;
T *min_costs_ptr;
T max = std::numeric_limits<T>::max();
int potential_k_rows;
int rows_per_run;
min_calc_functor(T *_all_costs_ptr, T *_min_costs_ptr, int _potential_k_rows,
int _rows_per_run) {
all_costs_ptr = _all_costs_ptr;
min_costs_ptr = _min_costs_ptr;
potential_k_rows = _potential_k_rows;
rows_per_run = _rows_per_run;
}
__host__ __device__ void operator()(int idx) const {
T best = max;
for (int j = 0; j < potential_k_rows; j++) {
best = min(best, std::abs(all_costs_ptr[j * rows_per_run + idx]));
}
min_costs_ptr[idx] = min(min_costs_ptr[idx], best);
}
};
/**
* K-Means|| initialization method implementation as described in "Scalable
* K-Means++".
*
* This is a probabilistic method which tries to choose centroids that are
* spread out as much as possible.
*
* If it finds more than k potential centroids, a K-Means++ pass is run on
* them to pick the k best suited ones.
*
* http://theory.stanford.edu/~sergei/papers/vldb12-kmpar.pdf
*
* @tparam T
* @param verbose
* @param seed
* @param ord
* @param data
* @param data_dots
* @param centroids
* @param rows
* @param cols
* @param k
* @param num_gpu
* @param threshold
*/
template <typename T>
thrust::host_vector<T> kmeans_parallel(int verbose, int seed,
thrust::device_vector<T> **data,
thrust::device_vector<T> **data_dots,
size_t rows, int cols, int k,
int num_gpu, T threshold) {
if (seed < 0) {
std::random_device rd;
seed = rd();
}
size_t rows_per_gpu = rows / num_gpu;
std::mt19937 gen(seed);
std::uniform_int_distribution<> dis(0, rows - 1);
// Find the position (GPU idx and idx on that GPU) of the initial centroid
int first_center = dis(gen);
int first_center_idx = first_center % rows_per_gpu;
int first_center_gpu = first_center / rows_per_gpu;
log_verbose(verbose, "KMeans|| - Initial centroid %d on GPU %d.",
first_center_idx, first_center_gpu);
// Copies the initial centroid to potential centroids vector. That vector
// will store all potential centroids found in the previous iteration.
thrust::host_vector<T> h_potential_centroids(cols);
std::vector<thrust::host_vector<T>> h_potential_centroids_per_gpu(num_gpu);
CUDACHECK(cudaSetDevice(first_center_gpu));
thrust::copy(
(*data[first_center_gpu]).begin() + first_center_idx * cols,
(*data[first_center_gpu]).begin() + (first_center_idx + 1) * cols,
h_potential_centroids.begin());
thrust::host_vector<T> h_all_potential_centroids = h_potential_centroids;
// Initialize the per-row cost to the closest potential centroid.
// The initial cost is +infinity
std::vector<thrust::device_vector<T>> d_min_costs(num_gpu);
for (int q = 0; q < num_gpu; q++) {
CUDACHECK(cudaSetDevice(q));
d_min_costs[q].resize(rows_per_gpu);
thrust::fill(d_min_costs[q].begin(), d_min_costs[q].end(),
std::numeric_limits<T>::max());
}
double t0 = timer<double>();
// The original white paper claims 8 should be enough
int max_iter = std::min(8, (int)(2 + log(k)));
for (int counter = 0; counter < max_iter; counter++) {
log_verbose(verbose, "KMeans|| - Iteration %d.", counter);
T total_min_cost = 0.0;
int new_potential_centroids = 0;
#pragma omp parallel for
for (int i = 0; i < num_gpu; i++) {
CUDACHECK(cudaSetDevice(i));
thrust::device_vector<T> d_potential_centroids = h_potential_centroids;
int potential_k_rows = d_potential_centroids.size() / cols;
// Compute all the costs to each potential centroid from previous
// iteration
thrust::device_vector<T> centroid_dots(potential_k_rows);
kmeans::detail::batch_calculate_distances(
verbose, 0, rows_per_gpu, cols, potential_k_rows, *data[i],
d_potential_centroids, *data_dots[i], centroid_dots,
[&](int rows_per_run, size_t offset,
thrust::device_vector<T> &pairwise_distances) {
// Find the closest potential center cost for each row
auto min_cost_counter = thrust::make_counting_iterator(0);
auto all_costs_ptr =
thrust::raw_pointer_cast(pairwise_distances.data());
auto min_costs_ptr =
thrust::raw_pointer_cast(d_min_costs[i].data() + offset);
thrust::for_each(
min_cost_counter, min_cost_counter + rows_per_run,
// Functor instead of a lambda b/c nvcc is complaining
// about nesting a __device__ lambda inside a regular
// lambda
min_calc_functor<T>(all_costs_ptr, min_costs_ptr,
potential_k_rows, rows_per_run));
});
}
for (int i = 0; i < num_gpu; i++) {
CUDACHECK(cudaSetDevice(i));
total_min_cost +=
thrust::reduce(d_min_costs[i].begin(), d_min_costs[i].end());
}
log_verbose(verbose, "KMeans|| - Total min cost from centers %g.",
total_min_cost);
if (total_min_cost == (T)0.0) {
continue;
}
std::set<int> copy_from_gpus;
#pragma omp parallel for
for (int i = 0; i < num_gpu; i++) {
CUDACHECK(cudaSetDevice(i));
// Count how many potential centroids there are using probabilities
// The further the row is from the closest cluster center the higher
// the probability
auto pot_cent_filter_counter = thrust::make_counting_iterator(0);
auto min_costs_ptr = thrust::raw_pointer_cast(d_min_costs[i].data());
int pot_cent_num = thrust::count_if(
pot_cent_filter_counter, pot_cent_filter_counter + rows_per_gpu,
[=] __device__(int idx) {
thrust::default_random_engine rng(seed);
thrust::uniform_real_distribution<> dist(0.0, 1.0);
int device;
cudaGetDevice(&device);
rng.discard(idx + device * rows_per_gpu);
T prob_threshold = (T)dist(rng);
T prob_x = ((2.0 * k * min_costs_ptr[idx]) / total_min_cost);
return prob_x > prob_threshold;
});
log_debug(verbose, "KMeans|| - Potential centroids on GPU %d = %d.", i,
pot_cent_num);
if (pot_cent_num > 0) {
copy_from_gpus.insert(i);
// Copy all potential cluster centers
thrust::device_vector<T> d_new_potential_centroids(pot_cent_num * cols);
auto range = thrust::make_counting_iterator(0);
thrust::copy_if(
(*data[i]).begin(), (*data[i]).end(), range,
d_new_potential_centroids.begin(), [=] __device__(int idx) {
int row = idx / cols;
thrust::default_random_engine rng(seed);
thrust::uniform_real_distribution<> dist(0.0, 1.0);
int device;
cudaGetDevice(&device);
rng.discard(row + device * rows_per_gpu);
T prob_threshold = (T)dist(rng);
T prob_x = ((2.0 * k * min_costs_ptr[row]) / total_min_cost);
return prob_x > prob_threshold;
});
h_potential_centroids_per_gpu[i].clear();
h_potential_centroids_per_gpu[i].resize(
d_new_potential_centroids.size());
new_potential_centroids += d_new_potential_centroids.size();
thrust::copy(d_new_potential_centroids.begin(),
d_new_potential_centroids.end(),
h_potential_centroids_per_gpu[i].begin());
}
}
log_verbose(verbose, "KMeans|| - New potential centroids %d.",
new_potential_centroids);
// Gather potential cluster centers from all GPUs
if (new_potential_centroids > 0) {
h_potential_centroids.clear();
h_potential_centroids.resize(new_potential_centroids);
int old_pot_centroids_size = h_all_potential_centroids.size();
h_all_potential_centroids.resize(old_pot_centroids_size +
new_potential_centroids);
int offset = 0;
for (int i = 0; i < num_gpu; i++) {
if (copy_from_gpus.find(i) != copy_from_gpus.end()) {
thrust::copy(h_potential_centroids_per_gpu[i].begin(),
h_potential_centroids_per_gpu[i].end(),
h_potential_centroids.begin() + offset);
offset += h_potential_centroids_per_gpu[i].size();
}
}
thrust::copy(h_potential_centroids.begin(), h_potential_centroids.end(),
h_all_potential_centroids.begin() + old_pot_centroids_size);
}
}
double timeloop = static_cast<double>(timer<double>() - t0);
thrust::host_vector<T> final_centroids(0);
int potential_centroids_num = h_all_potential_centroids.size() / cols;
if (potential_centroids_num <= k) {
final_centroids.resize(k * cols);
thrust::copy(h_all_potential_centroids.begin(),
h_all_potential_centroids.end(), final_centroids.begin());
// TODO what if potential_centroids_num < k ?? we don't want 0s
} else {
// If we found more than k potential cluster centers we need to take only a
// subset. This is done using a weighted k-means++ method; since the set
// should be very small it converges very fast and is all done on the CPU.
thrust::host_vector<T> weights(potential_centroids_num);
double tc0 = timer<double>();
// Weights correspond to the number of data points assigned to each
// potential cluster center
count_pts_per_centroid(verbose, num_gpu, rows_per_gpu, cols, data,
data_dots, h_all_potential_centroids, weights);
double timecount = static_cast<double>(timer<double>() - tc0);
double tkpp = timer<double>();
kmeans_plus_plus(verbose, seed, h_all_potential_centroids, weights, k, cols,
final_centroids);
double timekpp = static_cast<double>(timer<double>() - tkpp);
log_verbose(verbose,
"KMeans|| - Time loop: %g Time count: %g Time kpp: %g.",
timeloop, timecount, timekpp);
}
return final_centroids;
}
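// Worked example of the oversampling rule used in kmeans_parallel above
// (illustrative numbers): a row survives the count_if/copy_if filters when
// prob_x = 2 * k * min_cost / total_min_cost exceeds its uniform draw in
// [0, 1). With k = 4, total_min_cost = 100 and min_cost = 5 for some row,
// prob_x = 2 * 4 * 5 / 100 = 0.4, so that row is kept as a potential centroid
// about 40% of the time; a row with prob_x >= 1 is always kept.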
volatile std::atomic_int flaggpu(0);
inline void my_function_gpu(int sig) { // can be called asynchronously
fprintf(stderr, "Caught signal %d. Terminating shortly.\n", sig);
flaggpu = 1;
}
std::vector<int> kmeans_init(int verbose, int *final_n_gpu, int n_gputry,
int gpu_idtry, int rows) {
if (rows > std::numeric_limits<int>::max()) {
fprintf(stderr, "rows > %d not implemented\n",
std::numeric_limits<int>::max());
fflush(stderr);
exit(0);
}
std::signal(SIGINT, my_function_gpu);
std::signal(SIGTERM, my_function_gpu);
// no more gpus than visible gpus
int n_gpuvis;
cudaGetDeviceCount(&n_gpuvis);
int n_gpu = std::min(n_gpuvis, n_gputry);
// no more than rows
n_gpu = std::min(n_gpu, rows);
if (verbose) {
std::cout << n_gpu << " gpus." << std::endl;
}
int gpu_id = gpu_idtry % n_gpuvis;
// setup GPU list to use
std::vector<int> dList(n_gpu);
for (int idx = 0; idx < n_gpu; idx++) {
int device_idx = (gpu_id + idx) % n_gpuvis;
dList[idx] = device_idx;
}
*final_n_gpu = n_gpu;
return dList;
}
template <typename T>
H2O4GPUKMeans<T>::H2O4GPUKMeans(const T *A, int k, int n, int d) {
_A = A;
_k = k;
_n = n;
_d = d;
}
template <typename T>
int kmeans_fit(int verbose, int seed, int gpu_idtry, int n_gputry, size_t rows,
size_t cols, int k, int max_iterations, int init_from_data,
T threshold, const T *srcdata, T **pred_centroids,
int **pred_labels) {
// init the random seed in case the C function rand() is used
if (seed >= 0) {
srand(seed);
} else {
srand(unsigned(time(NULL)));
}
// no more clusters than rows
if (k > rows) {
k = static_cast<int>(rows);
fprintf(stderr,
"Number of clusters adjusted to be equal to number of rows.\n");
fflush(stderr);
}
int n_gpu;
std::vector<int> dList =
kmeans_init(verbose, &n_gpu, n_gputry, gpu_idtry, rows);
double t0t = timer<double>();
thrust::device_vector<T> *data[n_gpu];
thrust::device_vector<int> *labels[n_gpu];
thrust::device_vector<T> *d_centroids[n_gpu];
thrust::device_vector<T> *data_dots[n_gpu];
#pragma omp parallel for
for (int q = 0; q < n_gpu; q++) {
CUDACHECK(cudaSetDevice(dList[q]));
data[q] = new thrust::device_vector<T>(rows / n_gpu * cols);
d_centroids[q] = new thrust::device_vector<T>(k * cols);
data_dots[q] = new thrust::device_vector<T>(rows / n_gpu);
kmeans::detail::labels_init();
}
log_debug(verbose, "Number of points: %d", rows);
log_debug(verbose, "Number of dimensions: %d", cols);
log_debug(verbose, "Number of clusters: %d", k);
log_debug(verbose, "Max. number of iterations: %d", max_iterations);
log_debug(verbose, "Stopping threshold: %d", threshold);
std::vector<int> v(rows);
std::iota(std::begin(v), std::end(v), 0); // Fill with 0, 1, ..., rows.
if (seed >= 0) {
std::shuffle(v.begin(), v.end(), std::default_random_engine(seed));
} else {
std::random_shuffle(v.begin(), v.end());
}
// Copy the data to devices
#pragma omp parallel for
for (int q = 0; q < n_gpu; q++) {
CUDACHECK(cudaSetDevice(dList[q]));
if (verbose) {
std::cout << "Copying data to device: " << dList[q] << std::endl;
}
copy_data(verbose, 'r', *data[q], &srcdata[0], q, rows, rows / n_gpu, cols);
// Pre-compute the data matrix norms
kmeans::detail::make_self_dots(rows / n_gpu, cols, *data[q], *data_dots[q]);
}
// Get random points as centroids
int bytecount = cols * k * sizeof(T); // all centroids
if (0 == init_from_data) {
log_debug(verbose, "KMeans - Using random initialization.");
int masterq = 0;
CUDACHECK(cudaSetDevice(dList[masterq]));
copy_centroids_shuffled(verbose, v, 'r', *d_centroids[masterq], &srcdata[0],
rows, k, cols);
// Copy centroids to all devices
std::vector<cudaStream_t *> streams;
streams.resize(n_gpu);
#pragma omp parallel for
for (int q = 0; q < n_gpu; q++) {
if (q == masterq) continue;
CUDACHECK(cudaSetDevice(dList[q]));
if (verbose > 0) {
std::cout << "Copying centroid data to device: " << dList[q]
<< std::endl;
}
streams[q] =
reinterpret_cast<cudaStream_t *>(malloc(sizeof(cudaStream_t)));
cudaStreamCreate(streams[q]);
cudaMemcpyPeerAsync(thrust::raw_pointer_cast(&(*d_centroids[q])[0]),
dList[q],
thrust::raw_pointer_cast(&(*d_centroids[masterq])[0]),
dList[masterq], bytecount, *(streams[q]));
}
//#pragma omp parallel for
for (int q = 0; q < n_gpu; q++) {
if (q == masterq) continue;
cudaSetDevice(dList[q]);
cudaStreamDestroy(*(streams[q]));
#if (DEBUGKMEANS)
thrust::host_vector<T> h_centroidq = *d_centroids[q];
for (int ii = 0; ii < k * cols; ii++) {
fprintf(stderr, "q=%d initcent[%d]=%g\n", q, ii, h_centroidq[ii]);
fflush(stderr);
}
#endif
}
} else if (1 == init_from_data) { // kmeans||
log_debug(verbose, "KMeans - Using K-Means|| initialization.");
thrust::host_vector<T> final_centroids = kmeans_parallel(
verbose, seed, data, data_dots, rows, cols, k, n_gpu, threshold);
#pragma omp parallel for
for (int q = 0; q < n_gpu; q++) {
CUDACHECK(cudaSetDevice(dList[q]));
cudaMemcpy(thrust::raw_pointer_cast(&(*d_centroids[q])[0]),
thrust::raw_pointer_cast(&final_centroids[0]), bytecount,
cudaMemcpyHostToDevice);
}
}
#pragma omp parallel for
for (int q = 0; q < n_gpu; q++) {
CUDACHECK(cudaSetDevice(dList[q]));
labels[q] = new thrust::device_vector<int>(rows / n_gpu);
}
double timetransfer = static_cast<double>(timer<double>() - t0t);
double t0 = timer<double>();
int iter = kmeans::kmeans<T>(verbose, &flaggpu, rows, cols, k, data, labels,
d_centroids, data_dots, dList, n_gpu,
max_iterations, threshold, true);
if (iter < 0) {
log_error(verbose, "KMeans algorithm failed.");
return iter;
}
double timefit = static_cast<double>(timer<double>() - t0);
double t1 = timer<double>();
// copy the resulting centroids (replicated in full on each device) back to the host
// TODO FIXME: when do we delete the ctr and h_labels memory???
thrust::host_vector<T> *ctr = new thrust::host_vector<T>(*d_centroids[0]);
*pred_centroids = ctr->data();
// copy assigned labels
thrust::host_vector<int> *h_labels = new thrust::host_vector<int>(rows);
//#pragma omp parallel for
for (int q = 0; q < n_gpu; q++) {
int offset = labels[q]->size() * q;
h_labels->insert(h_labels->begin() + offset, labels[q]->begin(),
labels[q]->end());
}
*pred_labels = h_labels->data();
// debug
if (verbose >= H2O4GPU_LOG_VERBOSE) {
for (unsigned int ii = 0; ii < k; ii++) {
fprintf(stderr, "ii=%d of k=%d ", ii, k);
for (unsigned int jj = 0; jj < cols; jj++) {
fprintf(stderr, "%g ", (*pred_centroids)[cols * ii + jj]);
}
fprintf(stderr, "\n");
fflush(stderr);
}
}
#pragma omp parallel for
for (int q = 0; q < n_gpu; q++) {
CUDACHECK(cudaSetDevice(dList[q]));
delete (data[q]);
delete (labels[q]);
delete (d_centroids[q]);
delete (data_dots[q]);
kmeans::detail::labels_close();
}
double timecleanup = static_cast<double>(timer<double>() - t1);
if (verbose) {
std::cout << " Time fit: " << timefit << " s" << std::endl;
fprintf(stderr, "Timetransfer: %g Timefit: %g Timecleanup: %g\n",
timetransfer, timefit, timecleanup);
fflush(stderr);
}
return 0;
}
template <typename T>
int kmeans_predict(int verbose, int gpu_idtry, int n_gputry, size_t rows,
size_t cols, int k, const T *srcdata, const T *centroids,
int **pred_labels) {
// Print centroids
if (verbose >= H2O4GPU_LOG_VERBOSE) {
std::cout << std::endl;
for (int i = 0; i < cols * k; i++) {
std::cout << centroids[i] << " ";
if (i % cols == 1) {
std::cout << std::endl;
}
}
}
int n_gpu;
std::vector<int> dList =
kmeans_init(verbose, &n_gpu, n_gputry, gpu_idtry, rows);
thrust::device_vector<T> *d_data[n_gpu];
thrust::device_vector<T> *d_centroids[n_gpu];
thrust::device_vector<T> *data_dots[n_gpu];
thrust::device_vector<T> *centroid_dots[n_gpu];
thrust::host_vector<int> *h_labels = new thrust::host_vector<int>(rows);
#pragma omp parallel for
for (int q = 0; q < n_gpu; q++) {
// TODO: that may ignore up to n_gpu - 1 rows
const size_t chunk_size = rows / n_gpu;
CUDACHECK(cudaSetDevice(dList[q]));
kmeans::detail::labels_init();
data_dots[q] = new thrust::device_vector<T>(chunk_size);
centroid_dots[q] = new thrust::device_vector<T>(k);
d_centroids[q] = new thrust::device_vector<T>(k * cols);
d_data[q] = new thrust::device_vector<T>(chunk_size * cols);
copy_data(verbose, 'r', *d_centroids[q], ¢roids[0], 0, k, k, cols);
copy_data(verbose, 'r', *d_data[q], &srcdata[0], q, rows, chunk_size, cols);
kmeans::detail::make_self_dots(chunk_size, cols, *d_data[q], *data_dots[q]);
thrust::device_vector<int> d_labels(chunk_size);
kmeans::detail::batch_calculate_distances(
verbose, q, chunk_size, cols, k, *d_data[q], *d_centroids[q],
*data_dots[q], *centroid_dots[q],
[&](int n, size_t offset,
thrust::device_vector<T> &pairwise_distances) {
kmeans::detail::relabel(n, k, pairwise_distances, d_labels, offset);
});
#pragma omp critical
thrust::copy(d_labels.begin(), d_labels.end(),
h_labels->begin() + q * chunk_size);
}
// TODO: check memory freeing
*pred_labels = h_labels->data();
#pragma omp parallel for
for (int q = 0; q < n_gpu; q++) {
safe_cuda(cudaSetDevice(dList[q]));
kmeans::detail::labels_close();
delete (data_dots[q]);
delete (centroid_dots[q]);
delete (d_centroids[q]);
delete (d_data[q]);
}
return 0;
}
template <typename T>
int kmeans_transform(int verbose, int gpu_idtry, int n_gputry, size_t rows,
size_t cols, int k, const T *srcdata, const T *centroids,
T **preds) {
// Print centroids
if (verbose >= H2O4GPU_LOG_VERBOSE) {
std::cout << std::endl;
for (int i = 0; i < cols * k; i++) {
std::cout << centroids[i] << " ";
if (i % cols == 1) {
std::cout << std::endl;
}
}
}
int n_gpu;
std::vector<int> dList =
kmeans_init(verbose, &n_gpu, n_gputry, gpu_idtry, rows);
thrust::device_vector<T> *d_data[n_gpu];
thrust::device_vector<T> *d_centroids[n_gpu];
thrust::device_vector<T> *d_pairwise_distances[n_gpu];
thrust::device_vector<T> *data_dots[n_gpu];
thrust::device_vector<T> *centroid_dots[n_gpu];
#pragma omp parallel for
for (int q = 0; q < n_gpu; q++) {
CUDACHECK(cudaSetDevice(dList[q]));
kmeans::detail::labels_init();
data_dots[q] = new thrust::device_vector<T>(rows / n_gpu);
centroid_dots[q] = new thrust::device_vector<T>(k);
d_pairwise_distances[q] = new thrust::device_vector<T>(rows / n_gpu * k);
d_centroids[q] = new thrust::device_vector<T>(k * cols);
d_data[q] = new thrust::device_vector<T>(rows / n_gpu * cols);
copy_data(verbose, 'r', *d_centroids[q], ¢roids[0], 0, k, k, cols);
copy_data(verbose, 'r', *d_data[q], &srcdata[0], q, rows, rows / n_gpu,
cols);
kmeans::detail::make_self_dots(rows / n_gpu, cols, *d_data[q],
*data_dots[q]);
// TODO batch this
kmeans::detail::calculate_distances(
verbose, q, rows / n_gpu, cols, k, *d_data[q], 0, *d_centroids[q],
*data_dots[q], *centroid_dots[q], *d_pairwise_distances[q]);
}
// Move the resulting pairwise distances into host memory from all devices
thrust::host_vector<T> *h_pairwise_distances = new thrust::host_vector<T>(0);
#pragma omp parallel for
for (int q = 0; q < n_gpu; q++) {
h_pairwise_distances->insert(h_pairwise_distances->end(),
d_pairwise_distances[q]->begin(),
d_pairwise_distances[q]->end());
}
*preds = h_pairwise_distances->data();
// Print the resulting pairwise distances (rows x k values)
if (verbose >= H2O4GPU_LOG_VERBOSE) {
std::cout << std::endl;
for (int i = 0; i < rows * k; i++) {
std::cout << h_pairwise_distances->data()[i] << " ";
if (i % k == 1) {
std::cout << std::endl;
}
}
}
#pragma omp parallel for
for (int q = 0; q < n_gpu; q++) {
safe_cuda(cudaSetDevice(dList[q]));
kmeans::detail::labels_close();
delete (d_pairwise_distances[q]);
delete (data_dots[q]);
delete (centroid_dots[q]);
delete (d_centroids[q]);
delete (d_data[q]);
}
return 0;
}
template <typename T>
int makePtr_dense(int dopredict, int verbose, int seed, int gpu_idtry,
int n_gputry, size_t rows, size_t cols, int k,
int max_iterations, int init_from_data, T threshold,
const T *srcdata, const T *centroids, T **pred_centroids,
int **pred_labels) {
if (dopredict == 0) {
return kmeans_fit(verbose, seed, gpu_idtry, n_gputry, rows, cols, k,
max_iterations, init_from_data, threshold, srcdata,
pred_centroids, pred_labels);
} else {
return kmeans_predict(verbose, gpu_idtry, n_gputry, rows, cols, k, srcdata,
centroids, pred_labels);
}
}
template int makePtr_dense<float>(int dopredict, int verbose, int seed,
int gpu_id, int n_gpu, size_t rows,
size_t cols, int k, int max_iterations,
int init_from_data, float threshold,
const float *srcdata, const float *centroids,
float **pred_centroids, int **pred_labels);
template int makePtr_dense<double>(int dopredict, int verbose, int seed,
int gpu_id, int n_gpu, size_t rows,
size_t cols, int k, int max_iterations,
int init_from_data, double threshold,
const double *srcdata,
const double *centroids,
double **pred_centroids, int **pred_labels);
template int kmeans_fit<float>(int verbose, int seed, int gpu_idtry,
int n_gputry, size_t rows, size_t cols, int k,
int max_iterations, int init_from_data,
float threshold, const float *srcdata,
float **pred_centroids, int **pred_labels);
template int kmeans_fit<double>(int verbose, int seed, int gpu_idtry,
int n_gputry, size_t rows, size_t cols, int k,
int max_iterations, int init_from_data,
double threshold, const double *srcdata,
double **pred_centroids, int **pred_labels);
template int kmeans_predict<float>(int verbose, int gpu_idtry, int n_gputry,
size_t rows, size_t cols, int k,
const float *srcdata, const float *centroids,
int **pred_labels);
template int kmeans_predict<double>(int verbose, int gpu_idtry, int n_gputry,
size_t rows, size_t cols, int k,
const double *srcdata,
const double *centroids, int **pred_labels);
template int kmeans_transform<float>(int verbose, int gpu_id, int n_gpu,
size_t m, size_t n, int k,
const float *src_data,
const float *centroids, float **preds);
template int kmeans_transform<double>(int verbose, int gpu_id, int n_gpu,
size_t m, size_t n, int k,
const double *src_data,
const double *centroids, double **preds);
// Explicit template instantiation.
#if !defined(H2O4GPU_DOUBLE) || H2O4GPU_DOUBLE == 1
template class H2O4GPUKMeans<double>;
#endif
#if !defined(H2O4GPU_SINGLE) || H2O4GPU_SINGLE == 1
template class H2O4GPUKMeans<float>;
#endif
} // namespace h2o4gpukmeans
/*
* Interface for other languages
*/
// Fit and Predict
int make_ptr_float_kmeans(int dopredict, int verbose, int seed, int gpu_id,
int n_gpu, size_t mTrain, size_t n, int k,
int max_iterations, int init_from_data,
float threshold, const float *srcdata,
const float *centroids, float **pred_centroids,
int **pred_labels) {
return h2o4gpukmeans::makePtr_dense<float>(
dopredict, verbose, seed, gpu_id, n_gpu, mTrain, n, k, max_iterations,
init_from_data, threshold, srcdata, centroids, pred_centroids,
pred_labels);
}
int make_ptr_double_kmeans(int dopredict, int verbose, int seed, int gpu_id,
int n_gpu, size_t mTrain, size_t n, int k,
int max_iterations, int init_from_data,
double threshold, const double *srcdata,
const double *centroids, double **pred_centroids,
int **pred_labels) {
return h2o4gpukmeans::makePtr_dense<double>(
dopredict, verbose, seed, gpu_id, n_gpu, mTrain, n, k, max_iterations,
init_from_data, threshold, srcdata, centroids, pred_centroids,
pred_labels);
}
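// Usage sketch for the fit path of the C-style interface above (a
// hypothetical helper with illustrative sizes, single GPU, error handling
// elided, kept out of the build; pred_centroids and pred_labels are
// allocated inside the library and handed back through the out-pointers).
#if 0
void make_ptr_float_kmeans_example(const float *host_data /* 10000 * 16 values */) {
  float *pred_centroids = NULL;
  int   *pred_labels    = NULL;
  make_ptr_float_kmeans(/*dopredict=*/0, /*verbose=*/0, /*seed=*/123,
                        /*gpu_id=*/0, /*n_gpu=*/1,
                        /*mTrain=*/10000, /*n=*/16, /*k=*/8,
                        /*max_iterations=*/300, /*init_from_data=*/1,
                        /*threshold=*/1e-4f, host_data, /*centroids=*/NULL,
                        &pred_centroids, &pred_labels);
  // pred_centroids: k * n floats, pred_labels: mTrain ints.
}
#endif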
// Transform
int kmeans_transform_float(int verbose, int gpu_id, int n_gpu, size_t m,
size_t n, int k, const float *src_data,
const float *centroids, float **preds) {
return h2o4gpukmeans::kmeans_transform<float>(verbose, gpu_id, n_gpu, m, n, k,
src_data, centroids, preds);
}
int kmeans_transform_double(int verbose, int gpu_id, int n_gpu, size_t m,
size_t n, int k, const double *src_data,
const double *centroids, double **preds) {
return h2o4gpukmeans::kmeans_transform<double>(verbose, gpu_id, n_gpu, m, n,
k, src_data, centroids, preds);
}
#include <iostream>
#include <algorithm>
#include <chrono>
#include <nppdefs.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "bbcu/bbcu.h"
#include "bbcu/bbcu_util.h"
#include "Common.cuh"
//////////////////////////////
// forward
//////////////////////////////
__global__ void kernal_fp32_MaxPooling_Forward(
float const *x_buf,
float *y_buf,
int *argmax_buf,
int filter_h_size,
int filter_w_size,
int input_w_size,
int input_h_size,
int output_w_size,
int output_h_size,
int c_size,
int frame_size,
int frame_stride
)
{
int frame_base = threadIdx.x;
int frame_step = blockDim.x;
int x = blockIdx.y * blockDim.y + threadIdx.y;
int y = blockIdx.x;
int c = blockIdx.z * blockDim.z + threadIdx.z;
if (y >= output_h_size || x >= output_w_size) {
return;
}
for ( int frame = frame_base; frame < frame_size; frame += frame_step ) {
// search for the maximum value
float max_val = -NPP_MAXABS_32F; // -1.0e7f;
int arg = 0;
int argmax = 0;
for (int fy = 0; fy < filter_h_size; ++fy) {
int iy = y * filter_h_size + fy;
if ( iy < input_h_size ) {
for (int fx = 0; fx < filter_w_size; ++fx) {
int ix = x * filter_w_size + fx;
if ( ix < input_w_size ) {
float sig = x_buf[((c * input_h_size + iy) * input_w_size + ix) * frame_stride + frame];
if ( sig > max_val ) {
max_val = sig;
argmax = arg;
}
arg++;
}
}
}
}
// write the output
y_buf[((c * output_h_size + y) * output_w_size + x) * frame_stride + frame] = max_val;
argmax_buf[((c * output_h_size + y) * output_w_size + x) * frame_stride + frame] = argmax;
}
}
BBCU_DLL_EXPORT int bbcu_fp32_MaxPooling_Forward
(
float const * dev_x_buf,
float* dev_y_buf,
int* dev_argmax_buf,
int filter_h_size,
int filter_w_size,
int input_w_size,
int input_h_size,
int output_w_size,
int output_h_size,
int c_size,
int frame_size,
int frame_stride,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
dim3 block(32, 32, 1);
dim3 grid;
grid.x = output_h_size;
grid.y = (output_w_size + (block.y-1)) / block.y;
grid.z = c_size;
block.x = min(block.x, frame_size);
block.y = min(block.y, output_w_size);
kernal_fp32_MaxPooling_Forward<<<grid, block, 0, streamId>>>(
dev_x_buf,
dev_y_buf,
dev_argmax_buf,
filter_h_size,
filter_w_size,
input_w_size,
input_h_size,
output_w_size,
output_h_size,
c_size,
frame_size,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
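// Usage sketch for the forward launcher above (a hypothetical helper with
// illustrative sizes: one 4x4 single-channel input pooled with a 2x2 window,
// a single frame, default stream; error handling elided, kept out of the
// build).
#if 0
void bbcu_fp32_MaxPooling_Forward_example()
{
    float *dev_x, *dev_y;
    int   *dev_argmax;
    cudaMalloc(&dev_x,      4 * 4 * sizeof(float));   // input  (c=1, h=4, w=4)
    cudaMalloc(&dev_y,      2 * 2 * sizeof(float));   // output (c=1, h=2, w=2)
    cudaMalloc(&dev_argmax, 2 * 2 * sizeof(int));     // argmax index inside each 2x2 window
    bbcu_fp32_MaxPooling_Forward(dev_x, dev_y, dev_argmax,
            /*filter_h_size=*/2, /*filter_w_size=*/2,
            /*input_w_size=*/4,  /*input_h_size=*/4,
            /*output_w_size=*/2, /*output_h_size=*/2,
            /*c_size=*/1, /*frame_size=*/1, /*frame_stride=*/1,
            /*streamId=*/0);
    cudaFree(dev_x); cudaFree(dev_y); cudaFree(dev_argmax);
}
#endif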
//////////////////
__global__ void kernal_bit_MaxPooling_Forward(
int const *x_buf,
int *y_buf,
int filter_h_size,
int filter_w_size,
int input_w_size,
int input_h_size,
int output_w_size,
int output_h_size,
int c_size,
int frame_size,
int frame_stride
)
{
int id = threadIdx.x;
int id_step = blockDim.x;
int x = blockIdx.y * blockDim.y + threadIdx.y;
int y = blockIdx.x;
int c = blockIdx.z * blockDim.z + threadIdx.z;
if (y < output_h_size && x < output_w_size) {
int loop_size = ((frame_size + 0x1f) & ~0x1f);
for ( int frame = id; frame < loop_size; frame += id_step ) {
int unit = (frame >> 5);
int bit = (frame & 0x1f);
int bit_mask = (1 << bit);
// search for the maximum value
int y_val = 0;
if ( frame < frame_size ) {
for (int fy = 0; fy < filter_h_size; ++fy) {
int iy = y * filter_h_size + fy;
if ( iy < input_h_size ) {
for (int fx = 0; fx < filter_w_size; ++fx) {
int ix = x * filter_w_size + fx;
if ( ix < input_w_size ) {
int x_val = x_buf[((c * input_h_size + iy) * input_w_size + ix) * frame_stride + unit];
y_val |= x_val;
}
}
}
}
}
y_val = device_int_ShuffleOr(y_val & bit_mask);
// write the output
if ( bit == 0 ) {
y_buf[((c * output_h_size + y) * output_w_size + x) * frame_stride + unit] = y_val;
}
}
}
}
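// Worked example of the bit packing used above (illustrative frame index):
// frame 37 lives in 32-bit word unit = 37 >> 5 = 1 at bit position
// bit = 37 & 0x1f = 5, so bit_mask = 0x20. loop_size rounds frame_size up to
// a multiple of 32, which keeps all threads of the warp iterating in
// lockstep; the warp-wide OR performed by device_int_ShuffleOr appears to
// rely on that.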
BBCU_DLL_EXPORT int bbcu_bit_MaxPooling_Forward
(
int const *dev_x_buf,
int *dev_y_buf,
int filter_h_size,
int filter_w_size,
int input_w_size,
int input_h_size,
int output_w_size,
int output_h_size,
int c_size,
int frame_size,
int frame_stride,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
dim3 block(32, 32, 1);
dim3 grid;
grid.x = output_h_size;
grid.y = (output_w_size + (block.y-1)) / block.y;
grid.z = c_size;
// block.x = min(block.x, frame_size);
block.y = min(block.y, output_w_size);
kernal_bit_MaxPooling_Forward<<<grid, block, 0, streamId>>>(
dev_x_buf,
dev_y_buf,
filter_h_size,
filter_w_size,
input_w_size,
input_h_size,
output_w_size,
output_h_size,
c_size,
frame_size,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
//////////////////////////////
// backward
//////////////////////////////
__global__ void kernal_fp32_MaxPooling_Backward(
int const *argmax_buf,
float const *dy_buf,
float *dx_buf,
int filter_h_size,
int filter_w_size,
int input_w_size,
int input_h_size,
int output_w_size,
int output_h_size,
int c_size,
int frame_size,
int frame_stride
)
{
int frame_base = threadIdx.x;
int frame_step = blockDim.x;
int x = blockIdx.y * blockDim.y + threadIdx.y;
int y = blockIdx.x;
int c = blockIdx.z * blockDim.z + threadIdx.z;
if (y >= output_h_size || x >= output_w_size) {
return;
}
// propagate the gradient only at the argmax position
for ( int frame = frame_base; frame < frame_size; frame += frame_step ) {
int arg = 0;
int argmax = argmax_buf[((c * output_h_size + y) * output_w_size + x) * frame_stride + frame];
float grad = dy_buf[((c * output_h_size + y) * output_w_size + x) * frame_stride + frame];
for (int fy = 0; fy < filter_h_size; ++fy) {
int iy = y * filter_h_size + fy;
if ( iy < input_h_size ) {
for (int fx = 0; fx < filter_w_size; ++fx) {
int ix = x * filter_w_size + fx;
if ( ix < input_w_size ) {
dx_buf[((c * input_h_size + iy) * input_w_size + ix) * frame_stride + frame] = (arg == argmax) ? grad : 0;
arg++;
}
}
}
}
}
}
BBCU_DLL_EXPORT int bbcu_fp32_MaxPooling_Backward
(
int const *dev_argmax_buf,
float const *dev_dy_buf,
float *dev_dx_buf,
int filter_h_size,
int filter_w_size,
int input_w_size,
int input_h_size,
int output_w_size,
int output_h_size,
int c_size,
int frame_size,
int frame_stride,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
dim3 block(32, 32, 1);
dim3 grid;
grid.x = output_h_size;
grid.y = (output_w_size + (block.y-1)) / block.y;
grid.z = c_size;
block.x = min(block.x, frame_size);
block.y = min(block.y, output_w_size);
kernal_fp32_MaxPooling_Backward<<<grid, block, 0, streamId>>>(
dev_argmax_buf,
dev_dy_buf,
dev_dx_buf,
filter_h_size,
filter_w_size,
input_w_size,
input_h_size,
output_w_size,
output_h_size,
c_size,
frame_size,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
///////////
__global__ void kernal_bit_fp32_MaxPooling_Backward(
int const *x_buf,
float const *dy_buf,
float *dx_buf,
int filter_h_size,
int filter_w_size,
int input_w_size,
int input_h_size,
int output_w_size,
int output_h_size,
int c_size,
int frame_size,
int forward_frame_stride,
int backward_frame_stride
)
{
int id = threadIdx.x;
int id_step = blockDim.x;
int x = blockIdx.y * blockDim.y + threadIdx.y;
int y = blockIdx.x;
int c = blockIdx.z * blockDim.z + threadIdx.z;
if (y < output_h_size && x < output_w_size) {
// propagate the gradient only at the position of the maximum
for ( int frame = id; frame < frame_size; frame += id_step ) {
int bit = (1 << (frame & 0x1f));
int unit = (frame >> 5);
float grad = dy_buf[((c * output_h_size + y) * output_w_size + x) * backward_frame_stride + frame];
for (int fy = 0; fy < filter_h_size; ++fy) {
int iy = y * filter_h_size + fy;
if ( iy < input_h_size ) {
for (int fx = 0; fx < filter_w_size; ++fx) {
int ix = x * filter_w_size + fx;
if ( ix < input_w_size ) {
float in_sig = (x_buf[((c * input_h_size + iy) * input_w_size + ix) * forward_frame_stride + unit] & bit);
dx_buf[((c * input_h_size + iy) * input_w_size + ix) * backward_frame_stride + frame] = in_sig ? grad : 0;
if ( in_sig ) {
grad = 0;
}
}
}
}
}
}
}
}
BBCU_DLL_EXPORT int bbcu_bit_fp32_MaxPooling_Backward
(
int const *dev_x_buf,
float const *dev_dy_buf,
float *dev_dx_buf,
int filter_h_size,
int filter_w_size,
int input_w_size,
int input_h_size,
int output_w_size,
int output_h_size,
int c_size,
int frame_size,
int forward_frame_stride,
int backward_frame_stride,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
dim3 block(32, 32, 1);
dim3 grid;
grid.x = output_h_size;
grid.y = (output_w_size + (block.y-1)) / block.y;
grid.z = c_size;
block.x = std::min((int)block.x, frame_size);
block.y = std::min((int)block.y, output_w_size);
kernal_bit_fp32_MaxPooling_Backward<<<grid, block, 0, streamId>>>(
dev_x_buf,
dev_dy_buf,
dev_dx_buf,
filter_h_size,
filter_w_size,
input_w_size,
input_h_size,
output_w_size,
output_h_size,
c_size,
frame_size,
forward_frame_stride,
backward_frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
// end of file
///////////////////////////////////////////////////////////////////// Headers //
// Project
#include "Config.h"
#include "Pba.h"
#include "Geometry.h"
#include "PerfTimer.h"
#include "GDelData.h"
#include "GDelKernels.h"
#include "GDelCommon.h"
///////////////////////////////////////////////////////////////////// Globals //
////
// Note: do NOT set these values by hand!
// They are set by the init function.
////
const float DataExpansionFactor = 1.3f;
int* DGrid = NULL;
int GridWidth = 0;
int ThreadsPerBlock = -1;
int BlocksPerGrid = -1;
int ThreadNum = -1;
int PredThreadsPerBlock = -1;
int PredBlocksPerGrid = -1;
int PredThreadNum = -1;
int InsertNum = -1;
int WorksetSizeMax = -1;
int InsertPointMax = -1;
int LoopNum = -1;
int FacetMax = -1;
bool DoSorting = false;
bool DoCheck = false;
bool LogVerbose = false;
bool LogStats = false;
bool LogTiming = false;
bool LogMemory = false;
// Containers
ActiveData DActiveData;
BeneathData DBeneathData;
HistoryData DHistoryData;
InsertionData DInsertData;
MissingData DMissingData;
PointData DPointData;
PredicateInfo DPredicateInfo;
StarData DStarData;
TetraData DTetraData;
// Device vectors
IntDVec* DTriBufVec; // Used as scratch vector by many functions
// Host vectors
TetraHVec HTetraVec; // Used to store final tetra
////////////////////////////////////////////////////////////////// Probe data //
HostTimer loopTimer;
HostTimer insertTimer;
HostTimer expandTimer;
HostTimer sortTimer;
HostTimer initTimer;
HostTimer drownTimer;
HostTimer getInsertTimer;
double expandTime = 0;
double insertTime = 0;
double sortTime = 0;
double drownTime = 0;
double getInsertTime = 0;
int LogStarInsNum = 0;
int LogSplayInsNum = 0;
int LogLoopNum = 0;
////////////////////////////////////////////////////////////////////////////////
// Stars Init
////////////////////////////////////////////////////////////////////////////////
void initPredicate()
{
DPredicateInfo.init();
// Predicate constants
DPredicateInfo._consts = cuNew< RealType >( DPredicateBoundNum );
// Predicate arrays
DPredicateInfo._data = cuNew< RealType >( PredicateTotalSize * PredThreadNum );
// Set predicate constants
kerInitPredicate<<< 1, 1 >>>( DPredicateInfo._consts );
CudaCheckError();
return;
}
void starsInit
(
Point3HVec& pointHVec,
Point3HVec& scaledHVec,
const Config& config,
Point3DVec** outPointDVec
)
{
GridWidth = config._gridSize;
FacetMax = config._facetMax;
LogVerbose = config._logVerbose;
LogStats = config._logStats;
LogTiming = config._logTiming;
DoSorting = config._doSorting;
DoCheck = config._doCheck;
ThreadsPerBlock = MAX_THREADS_PER_BLOCK;
BlocksPerGrid = 512;
ThreadNum = ThreadsPerBlock * BlocksPerGrid;
PredThreadsPerBlock = ( config._predThreadNum < 0 ) ? 32 : config._predThreadNum;
PredBlocksPerGrid = ( config._predBlockNum < 0 ) ? 32 : config._predBlockNum;
PredThreadNum = PredThreadsPerBlock * PredBlocksPerGrid;
// Sanity check
assert( ThreadsPerBlock <= MAX_THREADS_PER_BLOCK );
assert( PredThreadsPerBlock <= MAX_PRED_THREADS_PER_BLOCK );
assert( PredBlocksPerGrid >= MIN_BLOCKS_PER_MP );
initPredicate();
////
// Set kernels to prefer L1 cache
////
CudaSafeCall( cudaFuncSetCacheConfig( kerAppendValueToKey, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerCheckStarConsistency, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerComputeTriangleCount, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerConvertMapToCount, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerCopyInsertionToNewHistory, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerCopyOldToNewHistory, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerCopyPumpedUsingAtomic, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerCopyWorksets, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerCountPerStarInsertions, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerCountPointsOfStar, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerGetActiveTriPos, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerGetActiveTriInsCount, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerGetCloneTriInfo, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerGetProofExact, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerGetProofFast, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerGatherPumpedInsertions, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerGetPerTriangleInsertions, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerGetPerTriangleCount, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerGrabTetrasFromStars, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerMakeInitialConeExact, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerMakeInitialConeFast, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerMakeCloneFacets, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerMakeMissingData, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerMakeOldToNewTriMap, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerMarkBeneathTrianglesExact, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerMarkBeneathTrianglesFast, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerMarkDuplicateDrownedPairs, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerMarkDuplicates, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerMarkIfInsertionInHistory, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerMarkLowerHullTetra, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerMarkReversePairs, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerMarkSubmergedInsertions, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerNoteOwnerTriangles, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerOrderDrownedPairs, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerReadPairsFromGrid, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerRemoveValueFromKey, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerRestoreDrownedPairs, cudaFuncCachePreferL1 ) );
CudaSafeCall( cudaFuncSetCacheConfig( kerStitchPointToHole, cudaFuncCachePreferL1 ) );
// Move points to device and sort them
DPointData.init( pointHVec, scaledHVec );
////
// Other stuff
////
const int pointNum = ( int ) DPointData._pointVec->size();
getPool().init( pointNum * sizeof( int ) );
DActiveData.init();
DBeneathData.init( pointNum );
DHistoryData.init();
DInsertData.init();
DMissingData.init();
DStarData.init( pointNum );
DTetraData.init();
DTriBufVec = new IntDVec();
// Return device pointer to points
*outPointDVec = scaledHVec.empty() ? DPointData._pointVec : DPointData._scaledVec;
return;
}
void starsDeinit()
{
DActiveData.deInit();
DBeneathData.deInit();
DHistoryData.deInit();
DInsertData.deInit();
DMissingData.deInit();
DPointData.deinit();
DPredicateInfo.deInit();
DStarData.deInit();
DTetraData.deInit();
safeDeleteDevConPtr( &DTriBufVec );
getPool().deInit();
return;
}
////////////////////////////////////////////////////////////////////////////////
// makeStarsFromGrid()
////////////////////////////////////////////////////////////////////////////////
void _collectMissingData()
{
const int pointNum = ( int ) DPointData._pointVec->size();
// Prepare for missing data
DMissingData._memberVec->resize( pointNum );
DMissingData._leaderVec->resize( pointNum );
// Create member list (list of all points)
thrust::sequence( DMissingData._memberVec->begin(), DMissingData._memberVec->end() );
// Read grid to find leader of each point
kerMakeMissingData<<< BlocksPerGrid, ThreadsPerBlock >>>(
DGrid,
GridWidth,
DPointData.toKernel(),
DMissingData.toKernel() );
CudaCheckError();
// Remove point-pairs which are winners (voted for themselves)
compactBothIfEqual( *DMissingData._memberVec, *DMissingData._leaderVec );
if ( LogVerbose )
{
cout << "Missing points: " << DMissingData._memberVec->size() << endl;
}
return;
}
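// Read (star, vertex) workset pairs from the grid in two passes: first count the
// pairs per thread, then grab them, and finally sort and remove duplicates.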
void _readGridPairs()
{
////
// Count pairs
////
const int BlocksPerGrid = GridWidth + 2;
const int ThreadsPerBlock = GridWidth;
const int ThreadNum = BlocksPerGrid * ThreadsPerBlock;
// Use this array to gather count of pairs-per-thread
DInsertData._vertVec->resize( ThreadNum );
// Get per-thread pair count
kerReadPairsFromGrid<<< BlocksPerGrid, ThreadsPerBlock >>>(
DGrid,
GridWidth,
DInsertData.toKernel(),
CountPerThreadPairs );
CudaCheckError();
// Convert count to per-thread map
const int worksetNum = makeMapAndSum( *DInsertData._vertVec, *DInsertData._starVertMap );
////
// Grab pairs
////
// Prepare workset array
DInsertData._vertVec->resize( worksetNum );
DInsertData._vertStarVec->resize( worksetNum );
if ( LogVerbose )
{
cout << "Workset pairs: " << DInsertData._vertVec->size() << " (before sort and unique)" << endl;
}
// Read pairs from grid
kerReadPairsFromGrid<<< BlocksPerGrid, ThreadsPerBlock >>>(
DGrid,
GridWidth,
DInsertData.toKernel(),
GrabPerThreadPairs );
CudaCheckError();
// Output Voronoi diagram is no longer useful, so free it
CudaSafeCall( cudaFree( DGrid ) );
////
// Sort workset pairs and remove duplicates
////
// 80ms
sortAndUniqueUsingKeyValAppend( *DInsertData._vertStarVec, *DInsertData._vertVec, DPointData._bitsPerIndex );
if ( LogVerbose )
{
cout << "Workset pairs: " << DInsertData._vertVec->size() << " (after sort and unique)" << endl;
}
return;
}
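// Augment ("pump") the worksets: append the missing-point pairs, reciprocate every
// pair, and add extra insertions for stars whose worksets are too small (see
// GetPumpedWorksetSize), producing the final per-star workset map.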
void _pumpWorksets()
{
////
// Create array with capacity to hold:
// <-- curWorkset --><-- curWorkset (flipped) --><-- missing --><-- missing (flipped) -->
////
const int compactWorksetNum = DInsertData._vertVec->size();
const int missingNum = DMissingData._memberVec->size();
const int pairNum = compactWorksetNum + missingNum;
const int tmpWorksetNum = 2 * pairNum;
IntDVec wsetVertVec( tmpWorksetNum );
IntDVec wsetStarVec( tmpWorksetNum );
////
// Make flipped copies of worksets and missing data
////
// Copy workset pairs
thrust::copy( DInsertData._vertStarVec->begin(), DInsertData._vertStarVec->end(), wsetStarVec.begin() );
thrust::copy( DInsertData._vertVec->begin(), DInsertData._vertVec->end(), wsetVertVec.begin() );
// Copy missing pairs
thrust::copy( DMissingData._memberVec->begin(), DMissingData._memberVec->end(), wsetStarVec.begin() + compactWorksetNum );
thrust::copy( DMissingData._leaderVec->begin(), DMissingData._leaderVec->end(), wsetVertVec.begin() + compactWorksetNum );
// Reciprocate the pairs
thrust::copy( wsetVertVec.begin(), wsetVertVec.begin() + pairNum, wsetStarVec.begin() + pairNum );
thrust::copy( wsetStarVec.begin(), wsetStarVec.begin() + pairNum, wsetVertVec.begin() + pairNum );
// Missing data is not needed anymore
DMissingData.deInit();
if ( LogVerbose )
{
cout << "Workset pairs: " << tmpWorksetNum << " (after missing and reciprocated)" << endl;
}
////
// Sort and make map of worksets
////
// 20 ms
thrust::sort_by_key( wsetStarVec.begin(), wsetStarVec.end(), wsetVertVec.begin() );
const int pointNum = DPointData._pointVec->size();
IntDVec wsetCountVec;
IntDVec wsetMap; // Used later below
makeAllStarMap( wsetStarVec, wsetMap, pointNum );
convertMapToCountVec( wsetMap, wsetCountVec, wsetStarVec.size() );
////
// Make pump map *only* for stars that need pumping
////
IntDVec actStarVec( pointNum );
thrust::sequence( actStarVec.begin(), actStarVec.end() );
IntDVec pumpMap( pointNum );
thrust::transform( wsetCountVec.begin(), wsetCountVec.end(), pumpMap.begin(), GetPumpedWorksetSize() );
compactBothIfZero( pumpMap, actStarVec );
const int pumpNum = makeInPlaceMapAndSum( pumpMap );
////
// Get unique reciprocated pumped insertions and make map
////
// Create with space for pumped + reciprocated pumped
IntDVec pumpStarVec( 2 * pumpNum );
IntDVec pumpVertVec( 2 * pumpNum );
// Resize it back to required size
pumpStarVec.resize( pumpNum );
pumpVertVec.resize( pumpNum );
kerGatherPumpedInsertions<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( wsetMap ),
toKernelArray( wsetStarVec ),
toKernelPtr( wsetVertVec ),
toKernelArray( actStarVec ),
toKernelPtr( pumpMap ),
toKernelArray( pumpStarVec ),
toKernelPtr( pumpVertVec ) );
CudaCheckError();
// Write reciprocated pump insertions
pumpStarVec.resize( 2 * pumpNum );
pumpVertVec.resize( 2 * pumpNum );
thrust::copy( pumpStarVec.begin(), pumpStarVec.begin() + pumpNum, pumpVertVec.begin() + pumpNum );
thrust::copy( pumpVertVec.begin(), pumpVertVec.begin() + pumpNum, pumpStarVec.begin() + pumpNum );
// 3 ms
sortAndUniqueUsingKeyValAppend( pumpStarVec, pumpVertVec, DPointData._bitsPerIndex );
const int compRecPumpNum = pumpStarVec.size();
// Create pump count per star
IntDVec pumpCountVec( pointNum );
makeAllStarCountVec( pumpStarVec, pumpCountVec, pointNum );
////
// Get insertions per star
////
DStarData._insCountVec->resize( pointNum );
thrust::transform(
wsetCountVec.begin(), wsetCountVec.end(), // From
pumpCountVec.begin(), // From
DStarData._insCountVec->begin(), // To
thrust::plus< int >() );
WorksetSizeMax = *( thrust::max_element( DStarData._insCountVec->begin(), DStarData._insCountVec->end() ) );
makeMapAndSum( pumpCountVec, pumpMap );
////
// Make final workset map
// finalMap = wsetMap + pumpMap
////
DInsertData._starVertMap->resize( pointNum );
thrust::transform(
wsetMap.begin(), wsetMap.end(), // From-1
pumpMap.begin(), // From-2
DInsertData._starVertMap->begin(), // To = From-1 + From-2
thrust::plus< int >() );
const int finalWorksetNum = tmpWorksetNum + compRecPumpNum;
////
// Copy worksets and pumps to final array
////
DInsertData._vertStarVec->resize( finalWorksetNum );
DInsertData._vertVec->resize( finalWorksetNum );
kerCopyWorksets<<< BlocksPerGrid, ThreadsPerBlock >>>(
DInsertData.toKernel(),
toKernelArray( wsetStarVec ),
toKernelPtr( wsetVertVec ),
toKernelPtr( wsetMap ) );
CudaCheckError();
// Destination index for pumped insertions
thrust::transform(
wsetCountVec.begin(), wsetCountVec.end(), // From
DInsertData._starVertMap->begin(), // From
wsetCountVec.begin(), // To
thrust::plus< int >() );
kerCopyPumpedUsingAtomic<<< BlocksPerGrid, ThreadsPerBlock >>>(
DInsertData.toKernel(),
toKernelArray( pumpStarVec ),
toKernelPtr( pumpVertVec ),
toKernelPtr( wsetCountVec ) );
CudaCheckError();
if ( LogVerbose )
{
cout << "Largest working set: " << WorksetSizeMax << endl;
cout << "Workset pairs: " << DInsertData._vertVec->size() << endl;
cout << "Average workset per star: " << DInsertData._vertVec->size() / DInsertData._starVertMap->size() << endl;
}
if ( LogStats )
LogStarInsNum = DInsertData._vertVec->size();
return;
}
// Initialize history using initial insertions
void _initHistory()
{
DHistoryData._vertVec[ 0 ]->copyFrom( *DInsertData._vertVec );
DHistoryData._vertStarVec[ 0 ]->copyFrom( *DInsertData._vertStarVec );
DHistoryData._starVertMap[ 0 ]->copyFrom( *DInsertData._starVertMap );
DHistoryData._starVertMap[ 1 ]->resize( DInsertData._starVertMap->size(), 0 );
return;
}
// Create 4-simplex for every star
void _makeInitialCones()
{
if ( LogVerbose )
{
cout << endl << __FUNCTION__ << endl;
}
////
// Prepare star arrays
////
DStarData._starNum = DInsertData._starVertMap->size();
const int triNum = get2SphereTriangleNum( DStarData._starNum, DInsertData._vertVec->size() );
DStarData._triData.resize( triNum, 0, Free ); // Allocate only array[0] in the beginning
// Buffer to reuse for triangle related arrays
const int expTriNum = ( int ) ( triNum * DataExpansionFactor );
DTriBufVec->resize( expTriNum );
if ( LogStats )
{
cout << "Triangles allocated: " << triNum << endl;
}
////
// Create initial 4-simplex for each star
////
kerMakeInitialConeFast<<< BlocksPerGrid, ThreadsPerBlock >>>(
DPredicateInfo,
DPointData.toKernel(),
DStarData.toKernel(),
DBeneathData.toKernel(),
DInsertData.toKernel() );
CudaCheckError();
kerMakeInitialConeExact<<< PredBlocksPerGrid, PredThreadsPerBlock >>>(
DPredicateInfo,
DPointData.toKernel(),
DStarData.toKernel(),
DBeneathData.toKernel(),
DInsertData.toKernel() );
CudaCheckError();
return;
}
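// Insert the insIdx-th workset point into every active star: mark beneath triangles
// (fast check, then exact check for flagged triangles) and stitch the point into the hole.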
void __addOnePointToStar
(
TriPositionDVec& activeTriPosVec,
ShortDVec& activeTriInsNumVec,
int insIdx
)
{
kerMarkBeneathTrianglesFast<<< BlocksPerGrid, ThreadsPerBlock >>>(
DPredicateInfo,
DPointData.toKernel(),
DStarData.toKernel(),
DBeneathData.toKernel(),
DInsertData.toKernel(),
toKernelArray( activeTriPosVec ),
toKernelArray( activeTriInsNumVec ),
insIdx );
CudaCheckError();
if ( LogStats )
{
if ( (*DBeneathData._flagVec)[ ExactTriCount ] > 0 )
{
cout << "Exact check triangles : " << (*DBeneathData._flagVec)[ ExactTriCount ] << endl;
}
}
kerMarkBeneathTrianglesExact<<< PredBlocksPerGrid, PredThreadsPerBlock >>>(
DPredicateInfo,
DPointData.toKernel(),
DStarData.toKernel(),
DBeneathData.toKernel(),
DInsertData.toKernel(),
toKernelArray( activeTriPosVec ),
toKernelArray( activeTriInsNumVec ),
insIdx );
CudaCheckError();
kerStitchPointToHole<<< BlocksPerGrid, ThreadsPerBlock >>>(
DStarData.toKernel(),
DBeneathData.toKernel(),
DInsertData.toKernel(),
toKernelArray( DActiveData._starVec ),
insIdx );
CudaCheckError();
return;
}
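// Gather the triangle positions of the active stars and the insertion count
// associated with each active triangle.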
void updateActiveTriData
(
TriPositionDVec& activeTriPosVec,
ShortDVec& activeTriInsNumVec
)
{
kerGetActiveTriPos<<< BlocksPerGrid, ThreadsPerBlock >>>(
DStarData.toKernel(),
toKernelArray( DActiveData._starVec ),
toKernelArray( DActiveData._starTriMap ),
toKernelArray( activeTriPosVec ),
toKernelArray( activeTriInsNumVec ) );
CudaCheckError();
kerGetActiveTriInsCount<<< BlocksPerGrid, ThreadsPerBlock >>>(
DStarData.toKernel(),
toKernelArray( activeTriPosVec ),
toKernelArray( activeTriInsNumVec ) );
CudaCheckError();
return;
}
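// Insert the remaining workset points (indices 4 and up) into all stars, periodically
// shrinking the set of active stars and triangles while the active bound is kept tight.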
void _makeStars()
{
if ( LogVerbose )
{
cout << endl << __FUNCTION__ << endl;
}
////
// Prepare to work only on active triangles
////
DActiveData._starVec->resize( DStarData._starNum ); // Assume all stars as active in beginning
DActiveData._starTriMap->resize( DStarData._starNum );
thrust::sequence( DActiveData._starVec->begin(), DActiveData._starVec->end() );
int activeTriNum = DStarData._triData.totalSize();
////
// Insert workset points into stars
////
bool isActiveBoundTight = true; // Must start as true; the loop below does not work if it begins false
TriPositionDVec& activeTriPosVec = *DTriBufVec;
ShortDVec activeTriInsNumVec( activeTriNum );
DBeneathData._flagVec->fill( 0 );
DBeneathData._beneathTriPosVec->fill( -1 );
for ( int insIdx = 4; insIdx < WorksetSizeMax; ++insIdx )
{
if ( isActiveBoundTight )
{
kerGetActiveTriCount<<< BlocksPerGrid, ThreadsPerBlock >>>(
DStarData.toKernel(),
toKernelArray( DActiveData._starVec ),
toKernelArray( DActiveData._starTriMap ),
insIdx,
isActiveBoundTight );
CudaCheckError();
compactBothIfZero( *DActiveData._starTriMap, *DActiveData._starVec );
activeTriNum = makeInPlaceMapAndSum( *DActiveData._starTriMap );
// Turn OFF careful mode when too few active triangles
if ( activeTriNum < ThreadNum )
{
isActiveBoundTight = false;
// Careful mode has been turned OFF
// Get loose bound of active triangles one last time
kerGetActiveTriCount<<< BlocksPerGrid, ThreadsPerBlock >>>(
DStarData.toKernel(),
toKernelArray( DActiveData._starVec ),
toKernelArray( DActiveData._starTriMap ),
insIdx,
isActiveBoundTight );
CudaCheckError();
activeTriNum = makeInPlaceMapAndSum( *DActiveData._starTriMap );
}
if ( LogStats )
{
cout << "activeStar = " << DActiveData._starVec->size() << ", activeTri = " << activeTriNum << endl;
}
activeTriPosVec.resize( activeTriNum );
activeTriInsNumVec.resize( activeTriNum );
updateActiveTriData( activeTriPosVec, activeTriInsNumVec );
}
__addOnePointToStar( activeTriPosVec, activeTriInsNumVec, insIdx );
}
kerCountPointsOfStar<<< BlocksPerGrid, ThreadsPerBlock >>>( DStarData.toKernel() );
CudaCheckError();
return;
}
// Build the stars (the link vertices of each point) from the grid output.
void makeStarsFromGrid( int* grid )
{
DGrid = grid;
if ( LogTiming )
{
initTimer.start();
}
_collectMissingData();
if ( LogTiming )
{
initTimer.stop();
initTimer.print( "_collectMissingData" );
initTimer.start();
}
_readGridPairs();
if ( LogTiming )
{
initTimer.stop();
initTimer.print( "_readGridPairs" );
initTimer.start();
}
_pumpWorksets();
if ( LogTiming )
{
initTimer.stop();
initTimer.print( "_pumpWorksets" );
initTimer.start();
}
_initHistory();
if ( LogTiming )
{
initTimer.stop();
initTimer.print( "_initHistory" );
initTimer.start();
}
_makeInitialCones();
if ( LogTiming )
{
initTimer.stop();
initTimer.print( "_makeInitialCones" );
initTimer.start();
}
_makeStars();
if ( LogTiming )
{
initTimer.stop();
initTimer.print( "_makeStars" );
cout << endl;
}
return;
}
////////////////////////////////////////////////////////////////////////////////
// processFacets()
////////////////////////////////////////////////////////////////////////////////
// Drowned insertions are those not present in the link of their star
void __getDrownedInsertions()
{
// Insertions found inside cone are already marked
// Now mark insertions which are not in link of star
kerMarkSubmergedInsertions<<< BlocksPerGrid, ThreadsPerBlock >>>(
DStarData.toKernel(),
DInsertData.toKernel() );
CudaCheckError();
// Remove all insertions except drowned
compactBothIfNegative( *DInsertData._vertStarVec, *DInsertData._vertVec );
return;
}
void __removeMutualDrownedInsertions()
{
// Order each pair so that (star < vert) and pack the value into the key (key-val append)
kerOrderDrownedPairs<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( DInsertData._vertStarVec ),
toKernelPtr( DInsertData._vertVec ),
DPointData._bitsPerIndex );
CudaCheckError();
// Sort by key that is actually key-val
thrust::sort_by_key(
DInsertData._vertStarVec->begin(), DInsertData._vertStarVec->end(), // Key
DInsertData._vertVec->begin() ); // Val
// If dup found, mark both original *and* duplicate
kerMarkDuplicateDrownedPairs<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( DInsertData._vertStarVec ),
toKernelPtr( DInsertData._vertVec ) );
CudaCheckError();
// Remove duplicates (both original and its match)
compactBothIfNegative( *DInsertData._vertVec, *DInsertData._vertStarVec );
// Restore back pairs to original order
kerRestoreDrownedPairs<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( DInsertData._vertStarVec ),
toKernelPtr( DInsertData._vertVec ),
DPointData._bitsPerIndex );
CudaCheckError();
const int drownedNum = DInsertData._vertVec->size();
if ( drownedNum > ThreadNum )
{
thrust::sort_by_key( DInsertData._vertStarVec->begin(), DInsertData._vertStarVec->end(), DInsertData._vertVec->begin() );
}
if ( LogStats )
{
cout << "Drowned: " << drownedNum << endl;
}
return;
}
void _getDrownedInsertions()
{
if ( LogTiming )
{
drownTimer.start();
}
__getDrownedInsertions();
__removeMutualDrownedInsertions();
if ( LogTiming )
{
drownTimer.stop();
drownTime += drownTimer.value();
}
return;
}
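// Gather the next round of insertions: proof points for the drowned insertions plus
// per-triangle (facet) insertions bounded by FacetMax, deduplicated, filtered against
// the history, and finally reciprocated. Returns the number of insertions.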
int __gatherInsertions()
{
IntDVec proofStarVec;
IntDVec proofVertVec;
const int drownedNum = DInsertData._vertVec->size();
if ( drownedNum > 0 )
{
////
// Gather proof insertions
////
const int proofNum = ProofPointsPerStar * drownedNum;
proofStarVec.resize( proofNum );
proofVertVec.resize( proofNum );
kerGetProofFast<<< BlocksPerGrid, ThreadsPerBlock >>>(
DPredicateInfo,
DPointData.toKernel(),
DStarData.toKernel(),
toKernelArray( DInsertData._vertStarVec ),
toKernelPtr( DInsertData._vertVec ),
toKernelPtr( proofStarVec ),
toKernelPtr( proofVertVec ) );
CudaCheckError();
kerGetProofExact<<< PredBlocksPerGrid, PredThreadsPerBlock >>>(
DPredicateInfo,
DPointData.toKernel(),
DStarData.toKernel(),
toKernelArray( DInsertData._vertStarVec ),
toKernelPtr( DInsertData._vertVec ),
toKernelPtr( proofStarVec ),
toKernelPtr( proofVertVec ) );
CudaCheckError();
if ( LogStats )
cout << "Proof insertions: " << proofNum << endl;
}
////
// Get insertion count from triangles
////
IntDVec& triInsertMap = *DTriBufVec;
triInsertMap.resize( DStarData._triData.totalSize() );
kerGetPerTriangleCount<<< BlocksPerGrid, ThreadsPerBlock >>>(
DStarData.toKernel(),
toKernelPtr( triInsertMap ) );
CudaCheckError();
const int triInsertNum = makeInPlaceMapAndSum( triInsertMap );
const int boundedTriInsertNum = std::min( triInsertNum, FacetMax );
const int proofNum = proofStarVec.size();
int insertNum = proofNum + boundedTriInsertNum;
if ( LogStats )
{
cout << "Triangle insertions: " << triInsertNum << endl;
cout << "Bounded triangle insertions: " << boundedTriInsertNum << endl;
}
if ( 0 == insertNum )
{
return insertNum; // No insertions to do
}
////
// Gather triangle insertions
////
if ( 0 == proofNum )
{
++insertNum; // +1 space to hold insertion of triangle on FacetMax boundary
}
else
{
// The proof insertions already account for the +1 space; it is overwritten later anyway
}
if ( DInsertData._vertVec->capacity() < insertNum )
{
DInsertData._vertStarVec->resize( insertNum );
DInsertData._vertVec->resize( insertNum );
}
DInsertData._vertStarVec->expand( boundedTriInsertNum );
DInsertData._vertVec->expand( boundedTriInsertNum );
kerGetPerTriangleInsertions<<< BlocksPerGrid, ThreadsPerBlock >>>(
DStarData.toKernel(),
toKernelArray( triInsertMap ),
toKernelArray( DInsertData._vertStarVec ),
toKernelPtr( DInsertData._vertVec ),
triInsertNum );
CudaCheckError();
////
// Collect all insertions together
////
DInsertData._vertStarVec->expand( insertNum ); // insertNum = boundedTriInsertNum + proofNum
DInsertData._vertVec->expand( insertNum );
// Copy proof insertions
thrust::copy( proofStarVec.begin(), proofStarVec.end(), DInsertData._vertStarVec->begin() + boundedTriInsertNum );
thrust::copy( proofVertVec.begin(), proofVertVec.end(), DInsertData._vertVec->begin() + boundedTriInsertNum );
// Sort and remove duplicates of ordered pairs
sortAndUniqueUsingKeyValAppend( *DInsertData._vertStarVec, *DInsertData._vertVec, DPointData._bitsPerIndex );
////
// Remove insertions if already in history
////
kerMarkIfInsertionInHistory<<< BlocksPerGrid, ThreadsPerBlock >>>(
DHistoryData.toKernel(),
toKernelArray( DInsertData._vertStarVec ),
toKernelPtr( DInsertData._vertVec ),
DStarData._starNum );
CudaCheckError();
compactBothIfNegative( *DInsertData._vertVec, *DInsertData._vertStarVec );
const int compInsertNum = DInsertData._vertVec->size();
////
// Reciprocate insertions
////
const int recInsertNum = 2 * compInsertNum;
// Expand if needed
if ( DInsertData._vertVec->capacity() < recInsertNum )
{
cout << "This should almost never happen! Remove this cout when it happens." << endl;
IntDVec tmpStarVec( recInsertNum );
IntDVec tmpVertVec( recInsertNum );
thrust::copy( DInsertData._vertStarVec->begin(), DInsertData._vertStarVec->end(), tmpStarVec.begin() );
thrust::copy( DInsertData._vertVec->begin(), DInsertData._vertVec->end(), tmpVertVec.begin() );
DInsertData._vertStarVec->swapAndFree( tmpStarVec );
DInsertData._vertVec->swapAndFree( tmpVertVec );
}
else
{
DInsertData._vertStarVec->expand( recInsertNum );
DInsertData._vertVec->expand( recInsertNum );
}
thrust::copy( DInsertData._vertStarVec->begin(), DInsertData._vertStarVec->begin() + compInsertNum, DInsertData._vertVec->begin() + compInsertNum );
thrust::copy( DInsertData._vertVec->begin(), DInsertData._vertVec->begin() + compInsertNum, DInsertData._vertStarVec->begin() + compInsertNum );
if ( LogStats )
{
cout << "Reciprocated insertions: " << recInsertNum << endl;
}
return recInsertNum;
}
int _gatherInsertions()
{
if ( LogTiming )
{
getInsertTimer.start();
}
const int insNum = __gatherInsertions();
if ( LogTiming )
{
getInsertTimer.stop();
getInsertTime += getInsertTimer.value();
}
return insNum;
}
void __sortInsertions()
{
// There cannot be any duplicates since we used ordered pairs
// A sort alone is therefore enough
thrust::sort_by_key(
DInsertData._vertStarVec->begin(), DInsertData._vertStarVec->end(), // Key
DInsertData._vertVec->begin() ); // Val
if ( LogVerbose )
{
cout << "Unique insertions: " << DInsertData._vertVec->size() << endl;
}
makeAllStarMap( *DInsertData._vertStarVec, *DInsertData._starVertMap, DStarData._starNum );
convertAllStarMapToSimpleMap( *DInsertData._starVertMap, DInsertData._vertStarVec->size() );
////
// Find insertion point count for each star
////
// Go back and update the point count of each star (current count + intended insertion count)
kerCountPerStarInsertions<<< BlocksPerGrid, ThreadsPerBlock >>>(
DStarData.toKernel(),
DInsertData.toKernel() );
CudaCheckError();
////
// Find largest insertion point count
////
InsertPointMax = *( thrust::max_element( DStarData._insCountVec->begin(), DStarData._insCountVec->end() ) );
if ( LogVerbose )
{
cout << "Largest insertion set: " << InsertPointMax << endl;
}
return;
}
void _sortInsertions()
{
if ( LogTiming )
{
sortTimer.start();
}
__sortInsertions();
if ( LogTiming )
{
sortTimer.stop();
sortTime += sortTimer.value();
}
return;
}
void __expandStarsForInsertion()
{
if ( LogStats )
{
cout << endl << __FUNCTION__ << endl;
}
////
// Calculate triangle/segment count for insertion
////
// Prepare triangle count array
IntDVec dTriNumVec( DStarData._starNum );
// Estimate triangles needed for insertion
kerComputeTriangleCount<<< BlocksPerGrid, ThreadsPerBlock >>>(
DStarData.toKernel(),
toKernelArray( dTriNumVec ) );
CudaCheckError();
////
// Expand triangles *only* if needed
////
// Compute new triangles map and sum
IntDVec newTriMap;
const int newTriNum = makeMapAndSum( dTriNumVec, newTriMap );
dTriNumVec.free();
// Check if triangle array 2 needs to be expanded
const int curTriNum = ( int ) DStarData._triData.size( 1 );
if ( curTriNum < newTriNum )
{
if ( LogStats )
{
cout << "Expanding triangles From: " << curTriNum << " To: " << newTriNum << endl;
}
DStarData.expandTriangles( newTriNum, newTriMap );
}
return;
}
void _expandStarsForInsertion()
{
if ( LogTiming )
{
expandTimer.start();
}
__expandStarsForInsertion();
if ( LogTiming )
{
expandTimer.stop();
expandTime += expandTimer.value();
}
return;
}
// History[0] arrays remain as they are
// Only history[1] arrays are updated here
void __copyInsertionsToHistory()
{
if ( LogStats )
{
cout << __FUNCTION__ << endl;
}
const int insHistNum = DInsertData._vertVec->size();
const int oldHistNum = DHistoryData._vertVec[1]->size();
const int newHistNum = oldHistNum + insHistNum;
assert( ( 0 == ( insHistNum % 2 ) ) && "Should be even number since insertions are reciprocated!" );
// History[1] does not exist
if ( 0 == oldHistNum )
{
DHistoryData._vertVec[1]->copyFrom( *DInsertData._vertVec );
DHistoryData._vertStarVec[1]->copyFrom( *DInsertData._vertStarVec );
DHistoryData._starVertMap[1]->copyFrom( *DInsertData._starVertMap );
}
else // History[1] already exists
{
IntDVec newVertVec( newHistNum );
IntDVec newStarVec( newHistNum );
IntDVec newMap( DStarData._starNum );
// Make destination map
thrust::transform(
DInsertData._starVertMap->begin(), DInsertData._starVertMap->end(), // From-1
DHistoryData._starVertMap[1]->begin(), // From-2
newMap.begin(), // Res = From-1 + From-2
thrust::plus< int >() );
// Copy old history to destination
kerCopyOldToNewHistory<<< BlocksPerGrid, ThreadsPerBlock >>>(
DHistoryData.toKernel(),
toKernelPtr( newVertVec ),
toKernelPtr( newStarVec ),
toKernelPtr( newMap ) );
CudaCheckError();
// Copy insertions to destination
kerCopyInsertionToNewHistory<<< BlocksPerGrid, ThreadsPerBlock >>>(
toKernelArray( DInsertData._vertVec ),
toKernelPtr( DInsertData._vertStarVec ),
toKernelArray( DInsertData._starVertMap ),
toKernelPtr( DHistoryData._starVertMap[1] ),
oldHistNum,
toKernelPtr( newVertVec ),
toKernelPtr( newStarVec ),
toKernelPtr( newMap ) );
CudaCheckError();
DHistoryData._vertVec[1]->swapAndFree( newVertVec );
DHistoryData._vertStarVec[1]->swapAndFree( newStarVec );
DHistoryData._starVertMap[1]->swapAndFree( newMap );
}
return;
}
void __insertPointsToStars()
{
if ( LogStats )
cout << endl << __FUNCTION__ << endl;
////
// Prepare to work only on active triangles
// "Active" triangles/stars are those that have some insertion
////
// Find active stars
DActiveData._starVec->resize( DStarData._starNum );
thrust::sequence( DActiveData._starVec->begin(), DActiveData._starVec->end() );
compactIfZero( *DActiveData._starVec, *DStarData._insCountVec );
DActiveData._starTriMap->resize( DActiveData._starVec->size() );
// Find triangle count for each active star
kerGetActiveTriCount<<< BlocksPerGrid, ThreadsPerBlock >>>(
DStarData.toKernel(),
toKernelArray( DActiveData._starVec ),
toKernelArray( DActiveData._starTriMap ) );
CudaCheckError();
// Get active triangle number and triangle map
const int activeTriNum = makeInPlaceMapAndSum( *DActiveData._starTriMap );
if ( LogStats )
{
const int activeStarNum = DActiveData._starVec->size();
cout << "Stars with insertion: " << activeStarNum << endl;
LogSplayInsNum += ( int ) DInsertData._vertVec->size();
const int insSum = thrust::reduce( DStarData._insCountVec->begin(), DStarData._insCountVec->end() );
const int insAvg = insSum / activeStarNum;
cout << "Average insertions per star: " << insAvg << endl;
}
__copyInsertionsToHistory();
// Store triangle index of active triangles for reuse
TriPositionDVec& activeTriPosVec = *DTriBufVec;
activeTriPosVec.resize( activeTriNum );
ShortDVec activeTriInsNumVec( activeTriNum );
// Find insertion count per triangle of active stars
updateActiveTriData( activeTriPosVec, activeTriInsNumVec );
////
// Insert points to stars
////
DBeneathData._flagVec->fill( 0 );
DBeneathData._beneathTriPosVec->fill( -1 );
if ( LogStats )
{
cout << "activeStar = " << DActiveData._starVec->size() << ", activeTri = " << activeTriNum << endl;
}
for ( int insIdx = 0; insIdx < InsertPointMax; ++insIdx )
{
__addOnePointToStar( activeTriPosVec, activeTriInsNumVec, insIdx );
}
kerCountPointsOfStar<<< BlocksPerGrid, ThreadsPerBlock >>>( DStarData.toKernel() );
CudaCheckError();
return;
}
void _insertPointsToStars()
{
if ( LogTiming )
{
insertTimer.start();
}
__insertPointsToStars();
if ( LogTiming )
{
insertTimer.stop();
insertTime += insertTimer.value();
}
return;
}
void areStarsConsistent()
{
if ( !DoCheck )
{
return;
}
// Clear flags
DBeneathData._flagVec->fill( 0 );
kerCheckStarConsistency<<< BlocksPerGrid, ThreadsPerBlock >>>(
DStarData.toKernel(),
DBeneathData.toKernel() );
CudaCheckError();
if ( 0 == (*DBeneathData._flagVec)[ ExactTriCount ] )
{
cout << "Stars are consistent!" << endl;
}
else
{
cout << "Star are NOT consistent!!!" << endl;
}
return;
}
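// Splaying loop: repeatedly collect drowned insertions, gather proof and facet
// insertions, and insert them into the stars until no insertions remain; then
// optionally check star consistency.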
void processFacets()
{
LoopNum = 0;
do
{
bool isSplayingDone = false;
if ( LogVerbose )
{
cout << endl << "Loop: " << LoopNum << endl;
}
if ( LogTiming )
{
loopTimer.start();
}
////
// Splay
////
_getDrownedInsertions();
const int insertNum = _gatherInsertions();
if ( 0 == insertNum )
{
isSplayingDone = true;
}
else
{
_sortInsertions();
_expandStarsForInsertion();
_insertPointsToStars();
}
////
// Finish splaying
////
if ( isSplayingDone )
{
areStarsConsistent();
break;
}
if ( LogTiming )
{
loopTimer.stop();
char loopStr[32];
snprintf( loopStr, sizeof( loopStr ), "Loop %d", LoopNum );
loopTimer.print( loopStr );
}
++LoopNum;
} while ( true );
if ( LogStats )
{
cout << endl;
cout << "Total splay insertions: " << LogSplayInsNum << endl;
LogLoopNum = LoopNum;
}
if ( LogTiming )
{
cout << endl;
cout << "Drowned time: " << drownTime << endl;
cout << "Get insert time: " << getInsertTime << endl;
cout << "Expand time: " << expandTime << endl;
cout << "Sort time: " << sortTime << endl;
cout << "Insert time: " << insertTime << endl;
}
// No longer needed
DInsertData.deInit();
return;
}
////////////////////////////////////////////////////////////////////////////////
// makeTetraFromStars()
////////////////////////////////////////////////////////////////////////////////
void _doHistoryStats()
{
// History statistics
if ( LogStats )
{
IntHVec histMap[2];
IntHVec vertVec[2];
for ( int i = 0; i < 2; ++i )
{
DHistoryData._starVertMap[i]->copyToHost( histMap[i] );
DHistoryData._vertVec[i]->copyToHost( vertVec[i] );
}
const int starNum = histMap[0].size();
int maxVert = 0;
// Iterate history of each star
for ( int star = 0; star < starNum; ++star )
{
int totVertNum = 0;
for ( int hi = 0; hi < 2; ++hi )
{
const int vertBeg = histMap[ hi ][ star ];
const int vertEnd = ( ( star + 1 ) < starNum ) ? histMap[ hi ][ star + 1 ] : vertVec[hi].size();
const int vertNum = vertEnd - vertBeg;
totVertNum += vertNum;
}
if ( totVertNum > maxVert )
{
maxVert = totVertNum;
}
}
cout << "Total history: " << vertVec[0].size() + vertVec[1].size() << endl;
cout << "Max: " << maxVert << endl << endl;
}
return;
}
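// Collect the lower-hull tetrahedra owned by the stars, build their adjacency via
// clone triangles, and write the final tetrahedra into DTetraData.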
void _gatherTetraFromStars()
{
_doHistoryStats();
////
// Make tetra-tri map by looking at owner triangles
////
const int triNum = DStarData._triData.totalSize();
IntDVec& ownTriVec = *DTriBufVec;
ownTriVec.resize( triNum, -1 );
kerNoteOwnerTriangles<<< BlocksPerGrid, ThreadsPerBlock >>>(
DStarData.toKernel(),
toKernelArray( ownTriVec ) );
CudaCheckError();
// Both lower- and upper-hull tetra are here
const int lowUpTetraNum = compactIfNegative( ownTriVec );
// lowUpTetraNum is much smaller than triNum, so copy to a new array
IntDVec tetraTriMap;
tetraTriMap.copyFrom( ownTriVec );
////
// Mark and keep lower-hull tetra info
////
kerMarkLowerHullTetra<<< BlocksPerGrid, ThreadsPerBlock >>>(
DPredicateInfo,
DPointData.toKernel(),
DStarData.toKernel(),
toKernelArray( tetraTriMap ) );
CudaCheckError();
const int tetraNum = compactIfNegative( tetraTriMap );
////
// Create facets for tetra
////
IntDVec& triTetraMap = *DTriBufVec; // Reuse above array
triTetraMap.resize( triNum, -1 );
IntDVec facetStarVec( tetraNum );
IntDVec facetTriVec( tetraNum );
kerMakeCloneFacets<<< BlocksPerGrid, ThreadsPerBlock >>>(
DStarData.toKernel(),
toKernelArray( tetraTriMap ),
toKernelPtr( triTetraMap ),
toKernelPtr( facetStarVec ),
toKernelPtr( facetTriVec ) );
CudaCheckError();
////
// Get clone triangle info
////
thrust::sort_by_key( facetStarVec.begin(), facetStarVec.end(), facetTriVec.begin() );
LocTriIndexDVec tetraCloneTriVec( tetraNum );
kerGetCloneTriInfo<<< BlocksPerGrid, ThreadsPerBlock >>>(
DStarData.toKernel(),
toKernelArray( facetStarVec ),
toKernelPtr( facetTriVec ),
toKernelPtr( triTetraMap ),
toKernelPtr( tetraCloneTriVec ) );
CudaCheckError();
////
// Create final tetra and its adjacencies
////
DTetraData._vec->resize( tetraNum );
kerGrabTetrasFromStars<<< BlocksPerGrid, ThreadsPerBlock >>>(
DStarData.toKernel(),
DTetraData.toKernel(),
toKernelArray( tetraTriMap ),
toKernelPtr( triTetraMap ),
toKernelPtr( tetraCloneTriVec ) );
CudaCheckError();
return;
}
void _copyTetraToHost()
{
HTetraVec.clear();
DTetraData._vec->copyToHost( HTetraVec );
return;
}
void makeTetraFromStars()
{
_gatherTetraFromStars();
_copyTetraToHost();
return;
}
const TetraHVec& getHostTetra()
{
return HTetraVec;
}
////////////////////////////////////////////////////////////////////////////////
#define WARP_SIZE 32
#define TREE_NODE_SIZE WARP_SIZE
#define TREE_FANOUT (TREE_NODE_SIZE + 1)
#define N_MULTI_P 16
#define BLCK_PER_MP_create 256 // blocks per multiprocessor during tree creation
#define BLCK_PER_MP_search 512 // blocks per multiprocessor during tree searching
#define WAPRS_PER_BLCK_join 8//16 // warps per block during join
#define BLCK_PER_MP_join 512//256 // blocks per multiprocessor during join
#define THRD_PER_BLCK_create TREE_NODE_SIZE
#define BLCK_PER_GRID_create (N_MULTI_P * BLCK_PER_MP_create)
#define THRD_PER_BLCK_search TREE_NODE_SIZE
#define BLCK_PER_GRID_search (N_MULTI_P * BLCK_PER_MP_search)
#define THRD_PER_GRID_search (THRD_PER_BLCK_search * BLCK_PER_GRID_search)
#define THRD_PER_BLCK_join (WARP_SIZE * WAPRS_PER_BLCK_join)
#define BLCK_PER_GRID_join (N_MULTI_P * BLCK_PER_MP_join)
#define THRD_PER_GRID_join (THRD_PER_BLCK_join * BLCK_PER_GRID_join)
#define TEST_MAX 100
typedef int IKeyType;
typedef int Record;
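// CSS-tree nodes: a directory node stores TREE_NODE_SIZE separator keys,
// a data node stores TREE_NODE_SIZE sorted records.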
typedef struct {
int keys[TREE_NODE_SIZE];
} IDirectoryNode;
typedef struct {
Record records[TREE_NODE_SIZE];
} IDataNode;
typedef struct {
IDataNode* data;
unsigned int nDataNodes;
IDirectoryNode* dir;
unsigned int nDirNodes;
} CUDA_CSSTree;
struct to_neg
{
__host__ __device__
bool operator()(const int &r1)
{
return r1 < 0;
}
};
__host__ __device__ unsigned int uintCeilingLog(unsigned int base, unsigned int num)
{
unsigned int result = 0;
for(unsigned int temp = 1; temp < num; temp *= base)
result++;
return result;
}
__host__ __device__ unsigned int uintCeilingDiv(unsigned int dividend, unsigned int divisor)
{
return (dividend + divisor - 1) / divisor;
}
__host__ __device__ unsigned int uintPower(unsigned int base, unsigned int pow)
{
unsigned int result = 1;
for(; pow; pow--)
result *= base;
return result;
}
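// Index of the right-most descendant of node nodeIdx in the implicit, level-order
// TREE_FANOUT-ary CSS-tree of tree_size nodes.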
__device__ int getRightMostDescIdx(int tree_size, int nodeIdx)
{
int tmp = nodeIdx * TREE_NODE_SIZE + TREE_FANOUT;
int n = uintCeilingLog(TREE_FANOUT, uintCeilingDiv(TREE_NODE_SIZE * tree_size + TREE_FANOUT, tmp)) - 1;
int result = (tmp * uintPower(TREE_FANOUT, n) - TREE_FANOUT) / TREE_NODE_SIZE;
return result;
}
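// Map a level-order tree index to its position in the data-node array, accounting
// for the split bottom level of an incomplete tree.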
__device__ int getDataArrayIdx(int dirSize, int tree_size, int bottom_start, int treeIdx)
{
int idx;
if(treeIdx < dirSize) {
idx = tree_size - bottom_start - 1;
}
else if( treeIdx < bottom_start ) {
idx = tree_size - bottom_start + treeIdx - dirSize;
}
else {
idx = treeIdx - bottom_start;
}
return idx;
}
// Binary Search
__device__ int firstMatchingKeyInDirNode1(int keys[], int key)
{
int min = 0;
int max = TREE_NODE_SIZE;
int mid;
int cut;
while(max - min > 1) {
mid = (min + max) / 2;
cut = keys[mid];
if(key > cut)
min = mid;
else
max = mid;
}
if(keys[min] >= key)
return min;
return max;
}
// Binary Search
__device__ int firstMatchingKeyInDataNode2(Record records[], IKeyType key)
{
int min = 0;
int max = TREE_NODE_SIZE;
int mid;
int cut;
while(max - min > 1) {
mid = (min + max) / 2;
cut = records[mid];
if(key > cut)
min = mid;
else
max = mid;
}
if(records[min] == key)
return min;
if(max < TREE_NODE_SIZE && records[max] == key)
return max;
return -1;
}
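// Build the directory bottom-up: each key of a directory node is the last (largest)
// record of the right-most data node in the corresponding child subtree.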
__global__ void gCreateIndex(IDataNode data[], IDirectoryNode dir[], int dirSize, int tree_size, int bottom_start, int nNodesPerBlock)
{
int startIdx = blockIdx.x * nNodesPerBlock;
int endIdx = startIdx + nNodesPerBlock;
if(endIdx > dirSize)
endIdx = dirSize;
int keyIdx = threadIdx.x;
// Proceed only when in internal nodes
for(int nodeIdx = startIdx; nodeIdx < endIdx; nodeIdx++)
{
int childIdx = nodeIdx * TREE_FANOUT + keyIdx + 1; // One step down to the left
// Then look for the right most descendent
int rightMostDesIdx;
// Common cases
if(childIdx < tree_size) {
rightMostDesIdx = getRightMostDescIdx(tree_size, childIdx);
}
// versus the unusual case when the tree is incomplete and the node does not have the full set of children
else {
// pick the last node in the tree (largest element of the array)
rightMostDesIdx = tree_size - 1;
}
int dataArrayIdx = getDataArrayIdx(dirSize, tree_size, bottom_start, rightMostDesIdx);
dir[nodeIdx].keys[keyIdx] = data[dataArrayIdx].records[TREE_NODE_SIZE - 1];
}
}
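// For each search key, descend from the cached root through the directory nodes to a
// data node and store the matching record's global position (or -1 if absent) in locations[].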
__global__ void gSearchTree(IDataNode* data, int nDataNodes, IDirectoryNode* dir, int nDirNodes, int lvlDir, Record* arr, int locations[], int nSearchKeys, int nKeysPerThread, int tree_size, int bottom_start)
{
// Bringing the root node (visited by every tuple) to the faster shared memory
__shared__ IKeyType RootNodeKeys[TREE_NODE_SIZE];
RootNodeKeys[threadIdx.x] = dir->keys[threadIdx.x];
__syncthreads();
int OverallThreadIdx = blockIdx.x * THRD_PER_BLCK_search + threadIdx.x;
for(int keyIdx = OverallThreadIdx; keyIdx < nSearchKeys; keyIdx += THRD_PER_GRID_search)
{
IKeyType val = arr[keyIdx];
int loc = firstMatchingKeyInDirNode1(RootNodeKeys, val) + 1;
for(int i = 1; i < lvlDir && loc < nDirNodes; i++) {
int kid = firstMatchingKeyInDirNode1(dir[loc].keys, val);
loc = loc * TREE_FANOUT + kid + 1;
}
if(loc >= tree_size)
loc = nDataNodes - 1;
else
loc = getDataArrayIdx(nDirNodes, tree_size, bottom_start, loc);
int offset = firstMatchingKeyInDataNode2(data[loc].records, val);
locations[keyIdx] = (offset <0)?-1:(loc * TREE_NODE_SIZE + offset);
}
}
/*Counts the number of times a row in 'S' is to be joined to a row in 'R'.*/
__global__ void gIndexJoin(int *R, int *S, int g_locations[], int sLen, int g_ResNums[])
{
int s_cur = blockIdx.x * blockDim.x + threadIdx.x;
if(s_cur < sLen)
{
int count = 1;
int r_cur = g_locations[s_cur];
int s_key;
if(r_cur >= 0)
{
s_key = S[s_cur];
r_cur++;
while(s_key == R[r_cur])
{
count++;
r_cur++;
}
g_ResNums[s_cur] = count;
}
}
}
/*Corrects 'gSearchTree' results when dealing with a negative multijoin. Uses the values found in 'g_locations', which indicate, for each row in 'R', whether it is going
to be joined (positive number) or not (-1). Works by checking the additional columns to be joined (i.e. all except the two used by 'gSearchTree') and setting to -1
in 'g_locations' those rows that have equal values in the checked columns.*/
__global__ void gIndexMultiJoinNegative(int *R, int *S, int g_locations[], int rLen, int *p1, int *p2, int of1, int of2, int *mloc, int *sloc, int *muljoin, int wj)
{
extern __shared__ int shared[];
int r_cur = blockIdx.x * blockDim.x + threadIdx.x;
int posr, poss, x;
if(threadIdx.x < wj)
shared[threadIdx.x] = muljoin[threadIdx.x];
__syncthreads();
if(r_cur < rLen)
{
int s_cur = g_locations[r_cur];
int r_key;
if(s_cur >= 0)
{
r_key = R[r_cur];
if(mloc == NULL)
posr = r_cur * of1;
else
posr = mloc[r_cur] * of1;
while(r_key == S[s_cur])
{
poss = sloc[s_cur] * of2;
for(x = 0; x < wj; x += 2)
{
if(p1[posr + shared[x]] != p2[poss + shared[x+1]])
break;
}
if(x >= wj)
return;
s_cur++;
}
g_locations[r_cur] = -1;
}
}
}
/*Corrects 'gSearchTree' results when dealing with a multijoin. Uses the values found in 'g_locations', which indicate, for each row in 'S', whether it is going
to be joined (positive number) or not (-1). Works by checking the additional columns to be joined (i.e. all except the two used by 'gSearchTree') and counting the number of
times a row in 'S' is to be joined to its corresponding rows in 'R', storing the count in 'g_ResNums'.*/
__global__ void gIndexMultiJoin(int *R, int *S, int g_locations[], int sLen, int g_ResNums[], int *p1, int *p2, int of1, int of2, int *mloc, int *sloc, int *muljoin, int wj)
{
extern __shared__ int shared[];
int s_cur = blockIdx.x * blockDim.x + threadIdx.x;
int posr, poss, x;
if(threadIdx.x < wj)
shared[threadIdx.x] = muljoin[threadIdx.x];
__syncthreads();
if(s_cur < sLen)
{
int count = 0;
int r_cur = g_locations[s_cur];
int s_key;
if(r_cur >= 0)
{
s_key = S[s_cur];
if(sloc == NULL)
poss = s_cur * of2;
else
poss = sloc[s_cur] * of2;
while(s_key == R[r_cur])
{
posr = mloc[r_cur] * of1;
for(x = 0; x < wj; x += 2)
{
if(p1[posr + shared[x]] != p2[poss + shared[x+1]])
break;
}
if(x >= wj)
count++;
r_cur++;
}
if(count > 0)
g_ResNums[s_cur] = count;
}
}
}
/*Writes the result of the join and projects the necessary columns as defined by 'rule'. The difference between this function and 'gJoinWithWrite' is the comparison of the additional join
columns.*/
__global__ void multiJoinWithWrite(int g_locations[], int sLen, int g_PrefixSums[], int g_joinResultBuffers[], int *p1, int *p2, int of1, int of2, int *rule, int halfrul, int lenrul, int *mloc, int *sloc, int wj)
{
extern __shared__ int shared[];
int *extjoins = &shared[lenrul];
int s_cur = blockIdx.x * blockDim.x + threadIdx.x;
if(threadIdx.x < (lenrul + wj))
shared[threadIdx.x] = rule[threadIdx.x];
__syncthreads();
if(s_cur < sLen)
{
int r_cur = g_locations[s_cur];
if(r_cur >= 0)
{
int x, y, pos, posr, poss;
int num1 = g_PrefixSums[s_cur];
int num2 = g_PrefixSums[s_cur+1];
int tmp1, tmp2;
if(sloc == NULL)
poss = s_cur * of2;
else
poss = sloc[s_cur] * of2;
for(x = num1; x < num2; x++, r_cur++)
{
pos = mloc[r_cur] * of1;
for(y = 0; y < wj; y += 2) /*Additional comparison*/
{
tmp1 = p1[pos + extjoins[y]];
tmp2 = p2[poss + extjoins[y+1]];
if(tmp1 != tmp2)
break;
}
if(y < wj)
{
x--;
continue;
}
posr = x * lenrul;
for(y = 0; y < halfrul; y++)
g_joinResultBuffers[posr + y] = p1[pos + shared[y]];
for(; y < lenrul; y++)
g_joinResultBuffers[posr + y] = p2[poss + shared[y]];
}
}
}
}
/*Writes the result of the join and projects the necessary columns as defined by 'rule'. The difference between this function and 'gJoinWithWrite2' is the comparison of the additional join
columns.*/
__global__ void multiJoinWithWrite2(int g_locations[], int sLen, int g_PrefixSums[], int g_joinResultBuffers[], int *p1, int *p2, int of1, int of2, int *rule, int cols, int *mloc, int *sloc, int wj)
{
extern __shared__ int shared[];
int *extjoins = &shared[cols];
int s_cur = blockIdx.x * blockDim.x + threadIdx.x;
if(threadIdx.x < (cols + wj))
shared[threadIdx.x] = rule[threadIdx.x];
__syncthreads();
if(s_cur < sLen)
{
int r_cur = g_locations[s_cur];
if(r_cur >= 0)
{
int x, y, pos, pos2, posr, cond;
int num1 = g_PrefixSums[s_cur];
int num2 = g_PrefixSums[s_cur+1];
if(sloc == NULL)
pos2 = s_cur * of2 - 1;
else
pos2 = sloc[s_cur] * of2 - 1;
for(x = num1; x < num2; x++, r_cur++)
{
pos = mloc[r_cur] * of1 - 1;
for(y = 0; y < wj; y += 2) /*Additional comparison*/
{
if(p1[pos + extjoins[y] + 1] != p2[pos2 + extjoins[y+1] + 1])
break;
}
if(y < wj)
{
x--;
continue;
}
posr = x * cols;
for(y = 0; y < cols; y++)
{
cond = shared[y];
if(cond > 0)
g_joinResultBuffers[posr + y] = p1[pos + cond];
else
g_joinResultBuffers[posr + y] = p2[pos2 - cond];
}
}
}
}
}
/*Writes the result of the join and projects the necessary columns as defined by 'rule'. The difference between this function and 'gJoinWithWrite2' is that only the columns in the positive
predicate are projected.*/
__global__ void gJoinWithWriteNegative(int g_locations[], int rLen, int g_joinResultBuffers[], int *p1, int of1, int *rule, int halfrul, int *mloc)
{
extern __shared__ int shared[];
int r_cur = blockIdx.x * blockDim.x + threadIdx.x;
int posr;
if(threadIdx.x < halfrul)
shared[threadIdx.x] = rule[threadIdx.x];
__syncthreads();
if(r_cur < rLen)
{
posr = g_locations[r_cur];
if(g_locations[r_cur+1] != posr)
{
int y, pos;
if(mloc == NULL)
pos = r_cur * of1;
else
pos = mloc[r_cur] * of1;
posr *= halfrul;
for(y = 0; y < halfrul; y++)
g_joinResultBuffers[posr + y] = p1[pos + shared[y]];
}
}
}
/*Writes the result of the join and projects the necessary columns as defined by 'rule'. The difference between this function and 'gJoinWithWrite' is that only the columns in the positive
predicate are projected.*/
__global__ void gJoinWithWriteNegative2(int g_locations[], int rLen, int g_joinResultBuffers[], int *p1, int of1, int *rule, int cols, int *mloc)
{
extern __shared__ int shared[];
int r_cur = blockIdx.x * blockDim.x + threadIdx.x;
int posr;
if(threadIdx.x < cols)
shared[threadIdx.x] = rule[threadIdx.x];
__syncthreads();
if(r_cur < rLen)
{
posr = g_locations[r_cur];
if(g_locations[r_cur+1] != posr)
{
int y, pos;
if(mloc == NULL)
pos = r_cur * of1 - 1;
else
pos = mloc[r_cur] * of1 - 1;
posr *= cols;
for(y = 0; y < cols; y++)
g_joinResultBuffers[posr + y] = p1[pos + shared[y]];
}
}
}
/*Writes the result of the join and projects the necessary columns as defined by 'rule'.*/
__global__ void gJoinWithWrite(int g_locations[], int sLen, int g_PrefixSums[], int g_joinResultBuffers[], int *p1, int *p2, int of1, int of2, int *rule, int halfrul, int lenrul, int *mloc, int *sloc)
{
extern __shared__ int shared[];
int s_cur = blockIdx.x * blockDim.x + threadIdx.x;
if(threadIdx.x < lenrul)
shared[threadIdx.x] = rule[threadIdx.x];
__syncthreads();
if(s_cur < sLen)
{
int r_cur = g_locations[s_cur];
if(r_cur >= 0)
{
int x, y, pos, posr, poss;
int num1 = g_PrefixSums[s_cur];
int num2 = g_PrefixSums[s_cur+1];
if(sloc == NULL)
poss = s_cur * of2;
else
poss = sloc[s_cur] * of2;
for(x = num1; x < num2; x++, r_cur++)
{
pos = mloc[r_cur] * of1;
posr = x * lenrul;
for(y = 0; y < halfrul; y++)
g_joinResultBuffers[posr + y] = p1[pos + shared[y]];
for(; y < lenrul; y++)
g_joinResultBuffers[posr + y] = p2[poss + shared[y]];
}
}
}
}
/*Writes the result of the join and projects the necessary columns as defined by 'rule'. This version is used when performing the final join of the rule and its only difference is the
projection, which is performed based on the variables in the head of the rule.*/
__global__ void gJoinWithWrite2(int g_locations[], int sLen, int g_PrefixSums[], int g_joinResultBuffers[], int *p1, int *p2, int of1, int of2, int *rule, int cols, int *mloc, int *sloc)
{
extern __shared__ int shared[];
int s_cur = blockIdx.x * blockDim.x + threadIdx.x;
if(threadIdx.x < cols)
shared[threadIdx.x] = rule[threadIdx.x];
__syncthreads();
if(s_cur < sLen)
{
int r_cur = g_locations[s_cur];
if(r_cur >= 0)
{
int x, y, pos, pos2, posr, cond;
int num1 = g_PrefixSums[s_cur];
int num2 = g_PrefixSums[s_cur+1];
if(sloc == NULL)
pos2 = s_cur * of2 - 1;
else
pos2 = sloc[s_cur] * of2 - 1;
for(x = num1; x < num2; x++, r_cur++)
{
pos = mloc[r_cur] * of1 - 1;
posr = x * cols;
for(y = 0; y < cols; y++)
{
cond = shared[y];
if(cond > 0)
g_joinResultBuffers[posr + y] = p1[pos + cond];
else
g_joinResultBuffers[posr + y] = p2[pos2 - cond];
}
}
}
}
}
/*Load part of column 'wj' of 'p' into 'R'. Which values are loaded is determined by the prefix sum results in 'pos'.*/
__global__ void llenar(int *p, int *R, int len, int of, int wj, int *pos, int *ids)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
int cond;
if(id < len)
{
cond = pos[id];
if(pos[id+1] != cond)
{
R[cond] = p[id * of + wj];
ids[cond] = id;
}
}
}
/*Load an entire column from 'p' into 'R'.*/
__global__ void llenarnosel(int *p, int *R, int len, int of, int wj)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < len)
R[id] = p[id * of + wj];
}
__global__ void projectfinal(int *res, int rows, int cols, int *rule, int *out)
{
extern __shared__ int shared[];
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(threadIdx.x < cols)
shared[threadIdx.x] = rule[threadIdx.x];
__syncthreads();
if(id < rows)
{
id *= cols;
for(int y = 0; y < cols; y++)
out[id + y] = res[id + shared[y]];
}
}
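// Project the columns listed in 'proj' out of 'res' on the device; when 'type' is
// non-zero the indices are 1-based and are shifted to 0-based before the kernel launch.
// The result buffer replaces *ret.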
void project(int *res, int resrows, int numcols1, int numcols2, int *proj, int **ret, int type)
{
int z, *dcons, *d_Rout;
int numthreads = 1024;
//numthreads = 32;
int blockllen = resrows / numthreads + 1;
int sizepro = numcols2 * sizeof(int);
reservar(&dcons, sizepro);
if(type)
{
int *pt = (int *)malloc(sizepro);
for(z = 0; z < numcols2; z++)
pt[z] = proj[z] - 1;
cudaMemcpy(dcons, pt, sizepro, cudaMemcpyHostToDevice);
//cudaDeviceSynchronize(); //Small cudaMemcpys are asynchronous; uncomment this line if the pointer is freed before the copy completes.
free(pt);
}
else
cudaMemcpy(dcons, proj, sizepro, cudaMemcpyHostToDevice);
reservar(&d_Rout, resrows * sizepro);
projectfinal<<<blockllen, numthreads, sizepro>>>(res, resrows, numcols1, dcons, d_Rout);
cudaFree(dcons);
cudaFree(*ret);
*ret = d_Rout;
}
__global__ void projectadd(int *dop1, int *dop2, int rows1, int rows2, int cols1, int cols2, int *dhead, int hsize, int *res)
{
extern __shared__ int shared[];
int id = blockIdx.x * blockDim.x + threadIdx.x;
int pos2, posr, x, y, cond;
if(threadIdx.x < hsize)
shared[threadIdx.x] = dhead[threadIdx.x];
__syncthreads();
if(id < rows2)
{
posr = id * hsize * rows1;
pos2 = id * cols2 - 1;
for(x = 0; x < rows1; x++)
{
for(y = 0; y < hsize; y++)
{
cond = shared[y];
if(cond > 0)
res[posr + y] = dop1[cond-1];
else
res[posr + y] = dop2[pos2 - cond];
}
posr += hsize;
}
}
}
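// Combine dop1 and dop2 into a result with rows1 * rows2 rows of pcols columns each,
// using 'proj' to pick values (positive entries come from dop1, negative entries
// index into the current dop2 row).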
void juntar(int *dop1, int *dop2, int rows1, int rows2, int cols1, int cols2, int *proj, int pcols, int **ret)
{
int sizepro, *dcons, *d_Rout;
int numthreads = 1024;
//numthreads = 32;
int blockllen = rows2 / numthreads + 1;
sizepro = pcols * sizeof(int);
reservar(&dcons, sizepro);
cudaMemcpy(dcons, proj, sizepro, cudaMemcpyHostToDevice);
reservar(&d_Rout, rows1 * rows2 * sizepro);
projectadd<<<blockllen, numthreads, sizepro>>>(dop1, dop2, rows1, rows2, cols1, cols2, dcons, pcols, d_Rout);
cudaFree(dcons);
*ret = d_Rout;
}
/*Joins two predicates. Starts by performing all preliminary operations (selections, selfjoins, comparisons) on both predicates. Then a column pair is used to construct
a CSS-Tree and that tree is searched for join positions. The positions are used in a prefix sum and its result allows us to write the result. Multijoins and negative
predicates follow roughly the same process, but use different kernels.*/
int join(int *p1, int *p2, int rLen, int sLen, int of1, int of2, list<rulenode>::iterator rule, int pos, int bothops, int **ret, int ANDlogic)
{
int pos2 = pos + 1;
int *sel1 = NULL, nsel1 = 0;
int *sel2 = rule->select[pos2];
int nsel2 = rule->numsel[pos2];
int *proj = rule->project[pos];
int2 projp = rule->projpos[pos];
int *sjoin1 = NULL, nsj1 = 0;
int *sjoin2 = rule->selfjoin[pos2];
int nsj2 = rule->numselfj[pos2];
int *pred1 = NULL;
int2 npred1 = make_int2(0,0);
int *pred2 = rule->preds[pos2];
int2 npred2 = rule->numpreds[pos2];
int npred2tot = npred2.x + npred2.y;
int *wherej = rule->wherejoin[pos];
int numj = rule->numjoin[pos];
int negative = rule->negatives[pos2+1];
int flag;
#ifdef ROCKIT
ANDlogic = 0;
#endif
if(negative)
ANDlogic = 1;
#if TIMER
cuda_stats.joins++;
#endif
int size, sizet, sizet2;
if(bothops)
{
sel1 = rule->select[pos];
nsel1 = rule->numsel[pos];
sjoin1 = rule->selfjoin[pos];
nsj1 = rule->numselfj[pos];
pred1 = rule->preds[pos];
npred1 = rule->numpreds[pos];
sizet = maximo(10, of1, of2, nsel1, nsel2, projp.y + numj - 2, nsj1, nsj2, numj, npred1.x, npred2tot) * sizeof(int);
}
else
sizet = maximo(7, of1, of2, nsel2, projp.y + numj - 2, nsj2, numj, npred2tot) * sizeof(int);
int *dcons, *temp, *temp2 = NULL;
int *d_R, *d_S;
int blockllen, numthreads;
int extraspace = TREE_NODE_SIZE - rLen % TREE_NODE_SIZE;
int m32rLen = rLen + extraspace;
int extraspaceS = TREE_NODE_SIZE - sLen % TREE_NODE_SIZE;
int m32sLen = sLen + extraspaceS;
if(m32rLen > m32sLen)
sizet2 = (m32rLen + 1) * sizeof(int);
else
sizet2 = (m32sLen + 1) * sizeof(int);
reservar(&dcons, sizet);
reservar(&temp, sizet2);
thrust::device_ptr<int> res = thrust::device_pointer_cast(temp);
numthreads = 1024;
//numthreads = 32;
blockllen = sLen / numthreads + 1;
int memSizeS, newLen = 0;
int *posR = NULL, *posS = NULL;
int sizem32S = 0, sizextra;
#ifdef TIMER
//cout << "START" << endl;
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
#endif
if(npred2.x > 0 || npred2.y > 0 || nsel2 > 0 || nsj2 > 0)
{
newLen = sLen + 1;
cudaMemsetAsync(temp, 0, newLen * sizeof(int));
}
if(npred2.x > 0 || npred2.y > 0)
{
size = npred2tot * sizeof(int);
cudaMemcpy(dcons, pred2, size, cudaMemcpyHostToDevice);
if(npred2.y > 0) /*Fix case when a(X,Y),b(Y,Z),Z > Y*/
{
reservar(&temp2, sizet2);
cudaMemsetAsync(temp2, 0, newLen * sizeof(int));
//res = thrust::device_pointer_cast(temp2);
bpreds<<<blockllen, numthreads, size>>>(p1, p2, sLen, of1, of2, dcons, npred2tot, npred2.x, temp + 1, temp2 + 1);
}
else
{
if(negative)
bpreds<<<blockllen, numthreads, size>>>(p1, p2, sLen, of1, of2, dcons, npred2tot, npred2.x, temp + 1, NULL);
else
bpredsOR<<<blockllen, numthreads, size>>>(p1, p2, sLen, of1, of2, dcons, npred2tot, npred2.x, temp + 1, NULL);
}
if(nsel2 > 0)
{
size = nsel2 * sizeof(int);
cudaMemcpy(dcons, sel2, size, cudaMemcpyHostToDevice);
marcar<<<blockllen, numthreads, size>>>(p2, sLen, of2, dcons, nsel2, temp + 1);
}
if(nsj2 > 0)
{
size = nsj2 * sizeof(int);
cudaMemcpy(dcons, sjoin2, size, cudaMemcpyHostToDevice);
samejoin<<<blockllen, numthreads, size>>>(p2, sLen, of2, dcons, nsj2, temp + 1);
}
}
else
{
if(nsel2 > 0)
{
size = nsel2 * sizeof(int);
cudaMemcpy(dcons, sel2, size, cudaMemcpyHostToDevice);
marcar2<<<blockllen, numthreads, size>>>(p2, sLen, of2, dcons, nsel2, temp + 1);
if(nsj2 > 0)
{
size = nsj2 * sizeof(int);
cudaMemcpy(dcons, sjoin2, size, cudaMemcpyHostToDevice);
samejoin<<<blockllen, numthreads, size>>>(p2, sLen, of2, dcons, nsj2, temp + 1);
}
}
else
{
if(nsj2 > 0)
{
size = nsj2 * sizeof(int);
cudaMemcpy(dcons, sjoin2, size, cudaMemcpyHostToDevice);
samejoin2<<<blockllen, numthreads, size>>>(p2, sLen, of2, dcons, nsj2, temp + 1);
}
else
{
sizem32S = m32sLen * sizeof(int);
reservar(&d_S, sizem32S);
cudaMemsetAsync(d_S + sLen, 0x7f, extraspaceS * sizeof(int));
llenarnosel<<<blockllen, numthreads>>>(p2, d_S, sLen, of2, wherej[1]);
}
}
}
if(npred2.x > 0 || npred2.y > 0 || nsel2 > 0 || nsj2 > 0)
{
flag = 0;
while(flag != 1)
{
try
{
thrust::inclusive_scan(res + 1, res + newLen, res + 1);
flag = 1;
}
catch(std::bad_alloc &e)
{
limpiar("inclusive scan in join", 0);
}
}
newLen = res[sLen];
if(newLen == 0) // && !negative) FIXME
{
cudaFree(temp);
cudaFree(dcons);
return 0;
}
extraspaceS = TREE_NODE_SIZE - newLen % TREE_NODE_SIZE;
sizextra = extraspaceS * sizeof(int);
m32sLen = newLen + extraspaceS;
sizem32S = m32sLen * sizeof(int);
reservar(&d_S, sizem32S);
reservar(&posS, sizem32S);
cudaMemsetAsync(d_S + newLen, 0x7f, sizextra);
cudaMemsetAsync(posS + newLen, 0x7f, sizextra);
llenar<<<blockllen, numthreads>>>(p2, d_S, sLen, of2, wherej[1], temp, posS);
sLen = newLen;
}
#ifdef TIMER
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
//cout << "Select1 = " << time << endl;
cuda_stats.select1_time += time;
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
#endif
blockllen = rLen / numthreads + 1;
int sizem32;
if(bothops)
{
if(temp2 != NULL)
{
cudaFree(temp);
temp = temp2;
res = thrust::device_pointer_cast(temp);
newLen = rLen + 1;
if(nsel1 > 0)
{
size = nsel1 * sizeof(int);
cudaMemcpy(dcons, sel1, size, cudaMemcpyHostToDevice);
marcar<<<blockllen, numthreads, size>>>(p1, rLen, of1, dcons, nsel1, temp + 1);
}
if(nsj1 > 0)
{
size = nsj1 * sizeof(int);
cudaMemcpy(dcons, sjoin1, size, cudaMemcpyHostToDevice);
samejoin<<<blockllen, numthreads, size>>>(p1, rLen, of1, dcons, nsj1, temp + 1);
}
if(npred1.x > 0)
{
size = npred1.x * sizeof(int);
cudaMemcpy(dcons, pred1, size, cudaMemcpyHostToDevice);
if(ANDlogic)
bpredsnormal<<<blockllen, numthreads, size>>>(p1, rLen, of1, dcons, npred1.x, temp + 1);
else
bpredsorlogic<<<blockllen, numthreads, size>>>(p1, rLen, of1, dcons, npred1.x, temp + 1);
}
}
else
{
if(npred1.x > 0 || nsel1 > 0 || nsj1 > 0)
{
newLen = rLen + 1;
cudaMemsetAsync(temp, 0, newLen * sizeof(int));
}
if(nsel1 > 0)
{
size = nsel1 * sizeof(int);
cudaMemcpy(dcons, sel1, size, cudaMemcpyHostToDevice);
marcar2<<<blockllen, numthreads, size>>>(p1, rLen, of1, dcons, nsel1, temp + 1);
if(nsj1 > 0)
{
size = nsj1 * sizeof(int);
cudaMemcpy(dcons, sjoin1, size, cudaMemcpyHostToDevice);
samejoin<<<blockllen, numthreads, size>>>(p1, rLen, of1, dcons, nsj1, temp + 1);
}
if(npred1.x > 0)
{
size = npred1.x * sizeof(int);
cudaMemcpy(dcons, pred1, size, cudaMemcpyHostToDevice);
if(ANDlogic)
bpredsnormal<<<blockllen, numthreads, size>>>(p1, rLen, of1, dcons, npred1.x, temp + 1);
else
bpredsorlogic<<<blockllen, numthreads, size>>>(p1, rLen, of1, dcons, npred1.x, temp + 1);
}
}
else
{
if(nsj1 > 0)
{
size = nsj1 * sizeof(int);
cudaMemcpy(dcons, sjoin1, size, cudaMemcpyHostToDevice);
samejoin2<<<blockllen, numthreads, size>>>(p1, rLen, of1, dcons, nsj1, temp + 1);
if(npred1.x > 0)
{
size = npred1.x * sizeof(int);
cudaMemcpy(dcons, pred1, size, cudaMemcpyHostToDevice);
if(ANDlogic)
bpredsnormal<<<blockllen, numthreads, size>>>(p1, rLen, of1, dcons, npred1.x, temp + 1);
else
bpredsorlogic<<<blockllen, numthreads, size>>>(p1, rLen, of1, dcons, npred1.x, temp + 1);
}
}
else
{
if(npred1.x > 0)
{
size = npred1.x * sizeof(int);
cudaMemcpy(dcons, pred1, size, cudaMemcpyHostToDevice);
if(ANDlogic)
bpredsnormal2<<<blockllen, numthreads, size>>>(p1, rLen, of1, dcons, npred1.x, temp + 1);
else
bpredsorlogic2<<<blockllen, numthreads, size>>>(p1, rLen, of1, dcons, npred1.x, temp + 1);
}
}
}
}
if(temp2 != NULL || npred1.x > 0 || nsel1 > 0 || nsj1 > 0)
{
thrust::inclusive_scan(res + 1, res + newLen, res + 1);
newLen = res[rLen];
if(newLen == 0)
{
cudaFree(temp);
cudaFree(dcons);
cudaFree(d_S);
if(posS != NULL)
cudaFree(posS);
return 0;
}
extraspace = TREE_NODE_SIZE - newLen % TREE_NODE_SIZE;
sizextra = extraspace * sizeof(int);
m32rLen = newLen + extraspace;
sizem32 = m32rLen * sizeof(int);
reservar(&d_R, sizem32);
reservar(&posR, sizem32);
cudaMemsetAsync(d_R + newLen, 0x7f, sizextra);
cudaMemsetAsync(posR + newLen, 0x7f, sizextra);
llenar<<<blockllen, numthreads>>>(p1, d_R, rLen, of1, wherej[0], temp, posR);
rLen = newLen;
}
else
{
sizem32 = m32rLen * sizeof(int);
reservar(&d_R, sizem32);
cudaMemsetAsync(d_R + rLen, 0x7f, extraspace * sizeof(int));
llenarnosel<<<blockllen, numthreads>>>(p1, d_R, rLen, of1, wherej[0]);
}
}
else
{
sizem32 = m32rLen * sizeof(int);
reservar(&d_R, sizem32);
cudaMemsetAsync(d_R + rLen, 0x7f, extraspace * sizeof(int));
llenarnosel<<<blockllen, numthreads>>>(p1, d_R, rLen, of1, wherej[0]);
}
#ifdef TIMER
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
//cout << "Select2 = " << time << endl;
cuda_stats.select2_time += time;
#endif
#ifdef TIMER
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
#endif
thrust::device_ptr<Record> dvp1;
thrust::device_ptr<Record> permutation;
if(negative)
{
dvp1 = thrust::device_pointer_cast(d_S);
if(posS == NULL)
{
reservar(&posS, sizem32S);
permutation = thrust::device_pointer_cast(posS);
thrust::sequence(permutation, permutation + m32sLen);
}
else
permutation = thrust::device_pointer_cast(posS);
flag = 0;
while(flag != 1)
{
try
{
thrust::stable_sort_by_key(dvp1, dvp1 + m32sLen, permutation);
flag = 1;
}
catch(std::bad_alloc &e)
{
limpiar("stable sort in join", 0);
}
}
}
else
{
dvp1 = thrust::device_pointer_cast(d_R);
if(posR == NULL)
{
reservar(&posR, sizem32);
permutation = thrust::device_pointer_cast(posR);
thrust::sequence(permutation, permutation + m32rLen);
}
else
permutation = thrust::device_pointer_cast(posR);
flag = 0;
while(flag != 1)
{
try
{
thrust::stable_sort_by_key(dvp1, dvp1 + m32rLen, permutation);
flag = 1;
}
catch(std::bad_alloc &e)
{
limpiar("stable sort in join", 0);
}
}
}
#ifdef TIMER
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
//cout << "Sort = " << time << endl;
cuda_stats.sort_time += time;
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
#endif
IDataNode* d_data;
IDirectoryNode* d_dir;
unsigned int nDataNodes;
if(negative)
{
nDataNodes = uintCeilingDiv(sLen, TREE_NODE_SIZE);
d_data=(IDataNode *)d_S;
}
else
{
nDataNodes = uintCeilingDiv(rLen, TREE_NODE_SIZE);
d_data=(IDataNode *)d_R;
}
unsigned int lvlDir = uintCeilingLog(TREE_FANOUT, nDataNodes);
unsigned int nDirNodes = uintCeilingDiv(nDataNodes - 1, TREE_NODE_SIZE);
unsigned int tree_size = nDirNodes + nDataNodes;
unsigned int bottom_start = (uintPower(TREE_FANOUT, lvlDir) - 1) / TREE_NODE_SIZE;
d_dir = (IDirectoryNode *)temp;
unsigned int nNodesPerBlock = uintCeilingDiv(nDirNodes, BLCK_PER_GRID_create);
dim3 Dbc(THRD_PER_BLCK_create, 1, 1);
dim3 Dgc(BLCK_PER_GRID_create, 1, 1);
gCreateIndex <<<Dgc, Dbc>>> (d_data, d_dir, nDirNodes, tree_size, bottom_start, nNodesPerBlock);
int *d_locations;
int memSizeR;
unsigned int nSearchKeys;
if(negative)
{
memSizeR = (rLen + 1) * sizeof(int);
reservar(&d_locations, memSizeR);
cudaMemsetAsync(d_locations, 0, sizeof(int));
nSearchKeys = rLen;
}
else
{
memSizeS = sLen * sizeof(int);
reservar(&d_locations, memSizeS);
nSearchKeys = sLen;
}
dim3 Dbs(THRD_PER_BLCK_search, 1, 1);
dim3 Dgs(BLCK_PER_GRID_search, 1, 1);
unsigned int nKeysPerThread = uintCeilingDiv(nSearchKeys, THRD_PER_GRID_search);
if(negative)
{
gSearchTree <<<Dgs, Dbs>>> (d_data, nDataNodes, d_dir, nDirNodes, lvlDir, d_R, d_locations + 1, nSearchKeys, nKeysPerThread, tree_size, bottom_start);
cudaMemsetAsync(temp, 0, memSizeR);
}
else
{
gSearchTree <<<Dgs, Dbs>>> (d_data, nDataNodes, d_dir, nDirNodes, lvlDir, d_S, d_locations, nSearchKeys, nKeysPerThread, tree_size, bottom_start);
cudaMemsetAsync(temp, 0, memSizeS);
}
int muljoin = 0, muljoinsize = 0, sum;
int *d_Rout;
int resSize, sizepro;
if(negative)
{
blockllen = rLen / numthreads + 1;
if(numj > 2)
{
muljoin = numj - 2;
muljoinsize = muljoin * sizeof(int);
cudaMemcpy(dcons, wherej + 2, muljoinsize, cudaMemcpyHostToDevice);
gIndexMultiJoinNegative<<<blockllen, numthreads, muljoinsize>>> (d_R, d_S, d_locations + 1, rLen, p1, p2, of1, of2, posR, posS, dcons, muljoin);
}
res = thrust::device_pointer_cast(d_locations);
thrust::transform(res + 1, res + rLen + 1, res + 1, to_neg());
thrust::inclusive_scan(res + 1, res + rLen + 1, res + 1);
sum = res[rLen];
if(pos == (rule->num_rows - 3))
{
sizepro = rule->num_columns * sizeof(int);
cudaMemcpy(dcons, proj, sizepro, cudaMemcpyHostToDevice);
resSize = sum * sizepro;
reservar(&d_Rout, resSize);
gJoinWithWriteNegative2<<<blockllen, numthreads, sizepro>>> (d_locations, rLen, d_Rout, p1, of1, dcons, rule->num_columns, posR);
}
else
{
sizepro = projp.x * sizeof(int);
cudaMemcpy(dcons, proj, sizepro, cudaMemcpyHostToDevice);
resSize = sum * sizepro;
reservar(&d_Rout, resSize);
gJoinWithWriteNegative<<<blockllen, numthreads, sizepro>>> (d_locations, rLen, d_Rout, p1, of1, dcons, projp.x, posR);
}
cudaFree(d_R);
cudaFree(d_S);
}
else
{
blockllen = sLen / numthreads + 1;
if(numj > 2)
{
muljoin = numj - 2;
muljoinsize = muljoin * sizeof(int);
cudaMemcpy(dcons, wherej + 2, muljoinsize, cudaMemcpyHostToDevice);
gIndexMultiJoin<<<blockllen, numthreads, muljoinsize>>> (d_R, d_S, d_locations, sLen, temp, p1, p2, of1, of2, posR, posS, dcons, muljoin);
}
else
gIndexJoin<<<blockllen, numthreads>>> (d_R, d_S, d_locations, sLen, temp);
cudaFree(d_R);
cudaFree(d_S);
sum = res[sLen-1];
thrust::exclusive_scan(res, res + sLen, res);
sum += res[sLen-1];
if(sum == 0)
{
cudaFree(dcons);
cudaFree(d_locations);
cudaFree(temp);
if(posS != NULL)
cudaFree(posS);
if(posR != NULL)
cudaFree(posR);
return 0;
}
res[sLen] = sum;
if(pos == (rule->num_rows - 3))
{
sizepro = rule->num_columns * sizeof(int);
cudaMemcpy(dcons, proj, sizepro, cudaMemcpyHostToDevice);
resSize = sum * sizepro;
reservar(&d_Rout, resSize);
if(numj > 2)
{
cudaMemcpy(dcons + rule->num_columns, wherej + 2, muljoinsize, cudaMemcpyHostToDevice);
multiJoinWithWrite2<<<blockllen, numthreads, sizepro + muljoinsize>>> (d_locations, sLen, temp, d_Rout, p1, p2, of1, of2, dcons, rule->num_columns, posR, posS, muljoin);
}
else
gJoinWithWrite2<<<blockllen, numthreads, sizepro>>> (d_locations, sLen, temp, d_Rout, p1, p2, of1, of2, dcons, rule->num_columns, posR, posS);
}
else
{
sizepro = projp.y * sizeof(int);
cudaMemcpy(dcons, proj, sizepro, cudaMemcpyHostToDevice);
resSize = sum * sizepro;
reservar(&d_Rout, resSize);
if(numj > 2)
{
cudaMemcpy(dcons + projp.y, wherej + 2, muljoinsize, cudaMemcpyHostToDevice);
multiJoinWithWrite<<<blockllen, numthreads, sizepro + muljoinsize>>> (d_locations, sLen, temp, d_Rout, p1, p2, of1, of2, dcons, projp.x, projp.y, posR, posS, muljoin);
}
else
gJoinWithWrite<<<blockllen, numthreads, sizepro>>> (d_locations, sLen, temp, d_Rout, p1, p2, of1, of2, dcons, projp.x, projp.y, posR, posS);
}
}
cudaFree(dcons);
cudaFree(d_locations);
cudaFree(temp);
if(posS != NULL)
cudaFree(posS);
if(posR != NULL)
cudaFree(posR);
if(*ret != NULL)
cudaFree(*ret);
*ret = d_Rout;
#ifdef TIMER
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
//cout << "Join = " << time << endl;
//cout << "FIN" << endl;
cuda_stats.join_time += time;
#endif
return sum;
}
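// Summary of the join routine above (descriptive note):
//  1. Selection (marcar*), self-join (samejoin*) and comparison-predicate
//     (bpreds*) filters are applied to S and, when both operands carry
//     constraints, to R; an inclusive scan over the marker array compacts the
//     surviving rows, and thrust calls that throw std::bad_alloc are retried
//     after limpiar() frees GPU memory.
//  2. The build side (R for a positive join, S for a negated one) is sorted by
//     its join column together with a permutation array (posR/posS).
//  3. A CSS-tree index is built over the sorted keys (gCreateIndex) and probed
//     with the other relation (gSearchTree).
//  4. Per-probe match counts are prefix-scanned into output offsets and the
//     result is materialized by the gJoinWithWrite / multiJoinWithWrite
//     kernels; in the negated case the counts are first inverted with
//     to_neg(), which appears to implement an anti-join over R. The result
//     buffer is returned through *ret and the number of result rows is the
//     return value.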
#include <glm/glm.hpp>
#include <glm/ext.hpp>
#include "../../src/SharedStructs.h"
#include "getShadingData.h"
#include "random.h"
#include <Settings.h>
#include "tools.h"
#include "lights.h"
#include "bsdf.h"
#define NEXTMULTIPLEOF(a, b) (((a) + ((b)-1)) & (0x7fffffff - ((b)-1)))
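// NEXTMULTIPLEOF(a, b) rounds `a` up to the next multiple of `b`; the mask
// trick is only valid when `b` is a power of two, and the 0x7fffffff mask
// additionally clears the sign bit, e.g. NEXTMULTIPLEOF(100, 16) == 112.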
using namespace glm;
#ifndef __launch_bounds__ // Fix errors in IDE
#define __launch_bounds__(x, y)
int __float_as_int(float x) { return int(x); }
uint __float_as_uint(float x) { return uint(x); }
template <typename T, typename B> T atomicAdd(T *, B);
template <typename T, int x> struct surface
{
};
template <typename T> void surf2Dwrite(T value, surface<void, cudaSurfaceType2D> output, size_t stride, size_t y, cudaSurfaceBoundaryMode mode) {}
#endif
surface<void, cudaSurfaceType2D> output;
__constant__ __device__ float geometryEpsilon;
__constant__ __device__ CameraView *view;
__constant__ __device__ Counters *counters;
__constant__ __device__ glm::vec4 *accumulator;
__constant__ __device__ uint stride;
__constant__ __device__ glm::vec4 *pathStates;
__constant__ __device__ glm::vec4 *pathOrigins;
__constant__ __device__ glm::vec4 *pathDirections;
__constant__ __device__ glm::vec4 *pathThroughputs;
__constant__ __device__ glm::vec3 *skybox;
__constant__ __device__ uint skyboxWidth;
__constant__ __device__ uint skyboxHeight;
__constant__ __device__ uint scrWidth;
__constant__ __device__ uint scrHeight;
__constant__ __device__ uint *blueNoise;
__constant__ __device__ float clampValue;
__constant__ __device__ glm::vec4 *normals;
__constant__ __device__ glm::vec4 *albedos;
__constant__ __device__ glm::vec4 *inputNormals;
__constant__ __device__ glm::vec4 *inputAlbedos;
__constant__ __device__ glm::vec4 *inputPixels;
__constant__ __device__ glm::vec4 *outputPixels;
__constant__ __device__ PotentialContribution *connectData;
__constant__ __device__ DeviceInstanceDescriptor *instances;
__global__ void initCountersExtent(unsigned int pathCount)
{
if (threadIdx.x != 0)
return; // Only run a single thread
counters->activePaths = pathCount;
counters->shaded = 0; // Thread atomic for shade kernel
counters->extensionRays = 0; // Compaction counter for extension rays
counters->shadowRays = 0; // Compaction counter for connections
counters->totalExtensionRays = pathCount;
counters->totalShadowRays = 0;
}
__global__ void initCountersSubsequent()
{
if (threadIdx.x != 0)
return;
counters->totalExtensionRays += counters->extensionRays;
counters->activePaths = counters->extensionRays; // Remaining active paths
counters->shaded = 0; // Thread atomic for shade kernel
counters->extensionRays = 0; // Compaction counter for extension rays
counters->shadowRays = 0;
}
#define IS_SPECULAR 1
__global__ __launch_bounds__(128 /* Max block size */, 8 /* Min blocks per sm */) void shade(const uint pathLength, const glm::mat3 toEyeSpace)
{
const int jobIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (jobIndex >= counters->activePaths)
return;
const uint bufferIndex = pathLength % 2;
const uint nextBufferIndex = 1 - bufferIndex;
const vec4 hitData = pathStates[jobIndex + bufferIndex * stride];
const vec4 O4 = pathOrigins[jobIndex + bufferIndex * stride];
const vec4 D4 = pathDirections[jobIndex + bufferIndex * stride];
vec4 T4 = pathLength == 0 ? vec4(1.0f) : pathThroughputs[jobIndex + bufferIndex * stride];
uint flags = __float_as_uint(O4.w) & 0xFF;
vec3 throughput = vec3(T4);
const float bsdfPdf = T4.w;
const vec3 D = glm::vec3(D4);
const uint pathIndex = (__float_as_uint(O4.w) >> 8u);
const int primIdx = __float_as_int(hitData.z);
if (primIdx < 0)
{
// formulas by Paul Debevec, http://www.pauldebevec.com/Probes
const uint u = static_cast<uint>(static_cast<float>(skyboxWidth) * 0.5f * (1.0f + atan2(D.x, -D.z) * glm::one_over_pi<float>()));
const uint v = static_cast<uint>(static_cast<float>(skyboxHeight) * acos(D.y) * glm::one_over_pi<float>());
const uint idx = u + v * skyboxWidth;
const vec3 skySample = idx < skyboxHeight * skyboxWidth ? skybox[idx] : vec3(0);
vec3 contribution = throughput * (1.0f / bsdfPdf) * vec3(skySample);
if (any(isnan(contribution)))
return;
clampIntensity(contribution, clampValue);
accumulator[pathIndex] += vec4(contribution, 0.0f);
#if ALLOW_DENOISER
if (pathLength == 0)
{
if (counters->samplesTaken == 0)
{
albedos[pathIndex] = vec4(contribution, 0.0f);
normals[pathIndex] = vec4(0.0f);
}
else
albedos[pathIndex] += vec4(contribution, 0.0f);
}
#endif
return;
}
const vec3 O = glm::vec3(O4);
const vec3 I = O + D * hitData.w;
const uint uintBaryCentrics = __float_as_uint(hitData.x);
const vec2 barycentrics = vec2(static_cast<float>(uintBaryCentrics & 65535), static_cast<float>(uintBaryCentrics >> 16)) * (1.0f / 65536.0f);
const int instanceIdx = __float_as_uint(hitData.y);
const DeviceInstanceDescriptor &instance = instances[instanceIdx];
const DeviceTriangle &triangle = instance.triangles[primIdx];
glm::vec3 N, iN, T, B;
const ShadingData shadingData =
getShadingData(D, barycentrics.x, barycentrics.y, view->spreadAngle * hitData.w, triangle, instanceIdx, N, iN, T, B, instance.invTransform);
if (counters->samplesTaken == 0 && pathLength == 0 && pathIndex == counters->probeIdx)
{
counters->probedInstanceId = instanceIdx;
counters->probedPrimId = primIdx;
counters->probedDistance = hitData.w;
}
// Detect alpha in the shading code.
if (shadingData.flags & 1)
{
if (pathLength < MAX_PATH_LENGTH)
{
if (any(isnan(throughput)))
return;
const uint extensionRayIdx = atomicAdd(&counters->extensionRays, 1);
pathOrigins[extensionRayIdx + nextBufferIndex * stride] = vec4(I + D * geometryEpsilon, O4.w);
pathDirections[extensionRayIdx + nextBufferIndex * stride] = D4;
pathStates[extensionRayIdx + nextBufferIndex * stride] = T4;
// TODO: this never gets hit, fix this
}
return;
}
// Terminate path on light
if (shadingData.isEmissive()) /* r, g or b exceeds 1 */
{
const float DdotNL = -dot(D, N);
vec3 contribution = vec3(0);
if (DdotNL > 0)
{
if (pathLength == 0)
{
// Only camera rays are treated specially
contribution = shadingData.color;
}
else if (flags & IS_SPECULAR)
{
contribution = throughput * shadingData.color * (1.0f / bsdfPdf);
}
else
{
// Last vertex was not specular: apply MIS
const vec3 lastN = UnpackNormal(floatBitsToUint(D4.w));
const float lightPdf = CalculateLightPDF(D, hitData.w, triangle.getArea(), N);
const int triangleIdx = int(triangle.getLightTriangleIndex());
const float pickProb = LightPickProb(triangleIdx, O, lastN, I);
if ((bsdfPdf + lightPdf * pickProb) <= 0)
return;
contribution = throughput * shadingData.color * (1.0f / (bsdfPdf + lightPdf * pickProb));
}
}
if (any(isnan(contribution)))
contribution = vec3(0);
#if ALLOW_DENOISER
if (pathLength == 0)
{
const vec3 albedo = min(contribution, vec3(1.0f));
if (counters->samplesTaken == 0)
{
albedos[pathIndex] = vec4(albedo, 0.0f);
normals[pathIndex] = vec4(toEyeSpace * iN, 0.0f);
}
else
{
albedos[pathIndex] += vec4(albedo, 0.0f);
normals[pathIndex] += vec4(toEyeSpace * iN, 0.0f);
}
}
#endif
clampIntensity(contribution, clampValue);
accumulator[pathIndex] += vec4(contribution, 0.0f);
return;
}
if (shadingData.getRoughness() < MIN_ROUGHNESS)
flags |= IS_SPECULAR; // Object was specular
else
flags &= ~IS_SPECULAR; // Object was not specular
uint seed = WangHash(pathIndex * 16789 + counters->samplesTaken * 1791 + pathLength * 720898027);
const float flip = (dot(D, N) > 0) ? -1.0f : 1.0f;
N *= flip; // Fix geometric normal
iN *= flip; // Fix interpolated normal (consistent normal interpolation)
throughput *= 1.0f / bsdfPdf; // Apply postponed bsdf pdf
if ((flags & IS_SPECULAR) == 0 && (lightCounts.areaLightCount + lightCounts.pointLightCount + lightCounts.directionalLightCount +
lightCounts.spotLightCount) > 0) // Only cast shadow rays for non-specular objects
{
vec3 lightColor;
float r0, r1, pickProb, lightPdf = 0;
#if BLUENOISE
if (counters->samplesTaken < 256)
{
const int x = int(pathIndex % scrWidth);
const int y = int(pathIndex / scrWidth);
r0 = blueNoiseSampler(blueNoise, x, y, int(counters->samplesTaken), 4);
r1 = blueNoiseSampler(blueNoise, x, y, int(counters->samplesTaken), 5);
}
else
{
r0 = RandomFloat(seed);
r1 = RandomFloat(seed);
}
#else
r0 = RandomFloat(seed);
r1 = RandomFloat(seed);
#endif
vec3 L = RandomPointOnLight(r0, r1, I, iN, pickProb, lightPdf, lightColor) - I;
const float dist = length(L);
L *= 1.0f / dist;
const float NdotL = dot(L, iN);
if (NdotL > 0 && lightPdf > 0)
{
float shadowPdf;
const vec3 sampledBSDF = EvaluateBSDF(shadingData, iN, T, B, D * -1.0f, L, shadowPdf, seed);
if (shadowPdf > 0)
{
// calculate potential contribution
vec3 contribution = throughput * sampledBSDF * lightColor * (NdotL / (shadowPdf + lightPdf * pickProb));
clampIntensity(contribution, clampValue);
if (!any(isnan(contribution)))
{
// Add fire-and-forget shadow ray to the connections buffer
const uint shadowRayIdx = atomicAdd(&counters->shadowRays, 1); // compaction
connectData[shadowRayIdx].Origin = vec4(SafeOrigin(I, L, N, geometryEpsilon), 0);
connectData[shadowRayIdx].Direction = vec4(L, dist);
connectData[shadowRayIdx].Emission = vec4(contribution, uintBitsToFloat(pathIndex));
}
}
}
}
if (pathLength >= MAX_PATH_LENGTH) // Early out in case we reached maximum path length
return;
vec3 R, bsdf;
float newBsdfPdf = 0.0f;
// float r3, r4;
//#if BLUENOISE // TODO
// if (counters->samplesTaken < 256) // Blue noise
// {
// const int x = int(pathIndex % scrWidth) & 127;
// const int y = int(pathIndex / scrWidth) & 127;
// r3 = blueNoiseSampler(blueNoise, x, y, int(counters->samplesTaken), 4);
// r4 = blueNoiseSampler(blueNoise, x, y, int(counters->samplesTaken), 5);
// }
// else
// {
// r3 = RandomFloat(seed);
// r4 = RandomFloat(seed);
// }
//#else
// r3 = RandomFloat(seed);
// r4 = RandomFloat(seed);
//#endif
bsdf = SampleBSDF(shadingData, iN, N, T, B, D * -1.0f, hitData.w, flip < 0, R, newBsdfPdf, seed);
throughput = throughput * 1.0f / SurvivalProbability(throughput) * bsdf * abs(dot(iN, R));
#if ALLOW_DENOISER
if (pathLength == 0)
{
if (counters->samplesTaken == 0)
{
albedos[pathIndex] = vec4(shadingData.color * abs(dot(iN, R)), 0.0f);
normals[pathIndex] = vec4(toEyeSpace * iN, 0.0f);
}
else
{
albedos[pathIndex] += vec4(shadingData.color * abs(dot(iN, R)), 0.0f);
normals[pathIndex] += vec4(toEyeSpace * iN, 0.0f);
}
}
#endif
if (newBsdfPdf < 1e-6f || isnan(newBsdfPdf) || any(lessThan(throughput, vec3(0.0f))))
return; // Early out in case we have an invalid bsdf
const uint extensionRayIdx = atomicAdd(&counters->extensionRays, 1u); // Get compacted index for extension ray
pathOrigins[extensionRayIdx + nextBufferIndex * stride] = vec4(SafeOrigin(I, R, N, geometryEpsilon), uintBitsToFloat((pathIndex << 8u) | flags));
pathDirections[extensionRayIdx + nextBufferIndex * stride] = vec4(R, uintBitsToFloat(PackNormal(iN)));
pathThroughputs[extensionRayIdx + nextBufferIndex * stride] = vec4(throughput, newBsdfPdf);
}
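// Note on buffering (descriptive comment): path state is double buffered with
// bufferIndex = pathLength % 2, so a ray is read from slot
// jobIndex + bufferIndex * stride, and its extension ray is compacted into
// slot extensionRayIdx + nextBufferIndex * stride via the atomic
// counters->extensionRays counter; shadow rays are compacted into connectData
// the same way through counters->shadowRays.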
__global__ void finalize(const uint scrwidth, const uint scrheight, const float pixelValueScale)
{
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= scrwidth || y >= scrheight)
return;
const auto index = x + y * scrwidth;
const glm::vec3 normal = vec3(normals[index]) * pixelValueScale;
const glm::vec3 albedo = vec3(albedos[index]) * pixelValueScale;
inputNormals[index] = vec4(normal, 1.0f);
inputAlbedos[index] = vec4(albedo, 1.0f);
const glm::vec4 value = accumulator[index] * pixelValueScale;
inputPixels[index] = value;
}
__global__ void finalizeBlit(const uint scrwidth, const uint scrheight, const float pixelValueScale)
{
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= scrwidth || y >= scrheight)
return;
const auto index = x + y * scrwidth;
const glm::vec4 value = accumulator[index] * pixelValueScale;
surf2Dwrite<glm::vec4>(value, output, x * sizeof(float4), y, cudaBoundaryModeClamp);
}
__global__ void tonemap(const uint scrwidth, const uint scrheight, const float pixelValueScale, const float brightness, const float contrastFactor)
{
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= scrwidth || y >= scrheight)
return;
const auto index = x + y * scrwidth;
const glm::vec4 value = outputPixels[index];
const float r = sqrt(max(0.0f, (value.x - 0.5f) * contrastFactor + 0.5f + brightness));
const float g = sqrt(max(0.0f, (value.y - 0.5f) * contrastFactor + 0.5f + brightness));
const float b = sqrt(max(0.0f, (value.z - 0.5f) * contrastFactor + 0.5f + brightness));
surf2Dwrite<glm::vec4>(glm::vec4(r, g, b, value.w), output, x * sizeof(float4), y, cudaBoundaryModeClamp);
}
__global__ void blitDenoised(const uint scrwidth, const uint scrheight)
{
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= scrwidth || y >= scrheight)
return;
const auto index = x + y * scrwidth;
const glm::vec4 value = outputPixels[index];
surf2Dwrite<glm::vec4>(value, output, x * sizeof(float4), y, cudaBoundaryModeClamp);
}
__host__ void setCameraView(rfw::CameraView *ptr) { cudaMemcpyToSymbol(view, &ptr, sizeof(void *)); }
__host__ void setCounters(Counters *ptr) { cudaMemcpyToSymbol(counters, &ptr, sizeof(void *)); }
__host__ void setAccumulator(glm::vec4 *ptr) { cudaMemcpyToSymbol(accumulator, &ptr, sizeof(void *)); }
__host__ void setStride(uint s) { cudaMemcpyToSymbol(stride, &s, sizeof(uint)); }
__host__ void setPathStates(glm::vec4 *ptr) { cudaMemcpyToSymbol(pathStates, &ptr, sizeof(void *)); }
__host__ void setPathOrigins(glm::vec4 *ptr) { cudaMemcpyToSymbol(pathOrigins, &ptr, sizeof(void *)); }
__host__ void setPathDirections(glm::vec4 *ptr) { cudaMemcpyToSymbol(pathDirections, &ptr, sizeof(void *)); }
__host__ void setPathThroughputs(glm::vec4 *ptr) { cudaMemcpyToSymbol(pathThroughputs, &ptr, sizeof(void *)); }
__host__ void setPotentialContributions(PotentialContribution *ptr) { cudaMemcpyToSymbol(connectData, &ptr, sizeof(void *)); }
__host__ void setMaterials(DeviceMaterial *ptr) { cudaMemcpyToSymbol(materials, &ptr, sizeof(void *)); }
__host__ void setFloatTextures(glm::vec4 *ptr) { cudaMemcpyToSymbol(floatTextures, &ptr, sizeof(void *)); }
__host__ void setUintTextures(uint *ptr) { cudaMemcpyToSymbol(uintTextures, &ptr, sizeof(void *)); }
__host__ void setSkybox(glm::vec3 *ptr) { cudaMemcpyToSymbol(skybox, &ptr, sizeof(void *)); }
__host__ void setSkyDimensions(uint width, uint height)
{
cudaMemcpyToSymbol(skyboxWidth, &width, sizeof(uint));
cudaMemcpyToSymbol(skyboxHeight, &height, sizeof(uint));
}
__host__ void setInstanceDescriptors(DeviceInstanceDescriptor *ptr) { cudaMemcpyToSymbol(instances, &ptr, sizeof(void *)); }
__host__ void setGeometryEpsilon(float value) { cudaMemcpyToSymbol(geometryEpsilon, &value, sizeof(float)); }
__host__ void setBlueNoiseBuffer(uint *ptr) { cudaMemcpyToSymbol(blueNoise, &ptr, sizeof(void *)); }
__host__ void setScreenDimensions(uint width, uint height)
{
cudaMemcpyToSymbol(scrWidth, &width, sizeof(uint));
cudaMemcpyToSymbol(scrHeight, &height, sizeof(uint));
}
__host__ void setLightCount(rfw::LightCount lightCount) { cudaMemcpyToSymbol(lightCounts, &lightCount, sizeof(rfw::LightCount)); }
__host__ void setAreaLights(rfw::DeviceAreaLight *als) { cudaMemcpyToSymbol(areaLights, &als, sizeof(void *)); }
__host__ void setPointLights(rfw::DevicePointLight *pls) { cudaMemcpyToSymbol(pointLights, &pls, sizeof(void *)); }
__host__ void setSpotLights(rfw::DeviceSpotLight *sls) { cudaMemcpyToSymbol(spotLights, &sls, sizeof(void *)); }
__host__ void setDirectionalLights(rfw::DeviceDirectionalLight *dls) { cudaMemcpyToSymbol(directionalLights, &dls, sizeof(void *)); }
__host__ void setClampValue(float value) { cudaMemcpyToSymbol(clampValue, &value, sizeof(float)); }
__host__ void setNormalBuffer(glm::vec4 *ptr) { cudaMemcpyToSymbol(normals, &ptr, sizeof(void *)); }
__host__ void setAlbedoBuffer(glm::vec4 *ptr) { cudaMemcpyToSymbol(albedos, &ptr, sizeof(void *)); }
__host__ void setInputNormalBuffer(glm::vec4 *ptr) { cudaMemcpyToSymbol(inputNormals, &ptr, sizeof(void *)); }
__host__ void setInputAlbedoBuffer(glm::vec4 *ptr) { cudaMemcpyToSymbol(inputAlbedos, &ptr, sizeof(void *)); }
__host__ void setInputPixelBuffer(glm::vec4 *ptr) { cudaMemcpyToSymbol(inputPixels, &ptr, sizeof(void *)); }
__host__ void setOutputPixelBuffer(glm::vec4 *ptr) { cudaMemcpyToSymbol(outputPixels, &ptr, sizeof(void *)); }
__host__ const surfaceReference *getOutputSurfaceReference()
{
const surfaceReference *ref;
cudaGetSurfaceReference(&ref, &output);
return ref;
}
__host__ void InitCountersForExtend(unsigned int pathCount) { initCountersExtent<<<1, 32>>>(pathCount); }
__host__ void InitCountersSubsequent() { initCountersSubsequent<<<1, 32>>>(); }
__host__ cudaError launchShade(const uint pathCount, const uint pathLength, const glm::mat3 &toEyeSpace)
{
const dim3 gridDim = dim3(NEXTMULTIPLEOF(pathCount, 128) / 128);
const dim3 blockDim = dim3(128);
shade<<<gridDim, blockDim>>>(pathLength, toEyeSpace);
return cudaGetLastError();
}
__host__ cudaError launchFinalize(bool blit, const unsigned int scrwidth, const unsigned int scrheight, const unsigned int samples, const float brightness,
const float contrast)
{
const unsigned int alignedWidth = NEXTMULTIPLEOF(scrwidth, 16) / 16;
const unsigned int alignedHeight = NEXTMULTIPLEOF(scrheight, 16) / 16;
const dim3 gridDim = dim3(alignedWidth, alignedHeight, 1);
const dim3 blockDim = dim3(16, 16, 1);
if (blit)
finalizeBlit<<<gridDim, blockDim>>>(scrwidth, scrheight, 1.0f / float(samples));
else
finalize<<<gridDim, blockDim>>>(scrwidth, scrheight, 1.0f / float(samples));
return cudaGetLastError();
}
__host__ cudaError blitBuffer(const unsigned int scrwidth, const unsigned int scrheight)
{
const unsigned int alignedWidth = NEXTMULTIPLEOF(scrwidth, 16) / 16;
const unsigned int alignedHeight = NEXTMULTIPLEOF(scrheight, 16) / 16;
const dim3 gridDim = dim3(alignedWidth, alignedHeight, 1);
const dim3 blockDim = dim3(16, 16, 1);
blitDenoised<<<gridDim, blockDim>>>(scrwidth, scrheight);
return cudaGetLastError();
}
cudaError launchTonemap(unsigned int scrwidth, unsigned int scrheight, unsigned int samples, float brightness, float contrast)
{
const unsigned int alignedWidth = NEXTMULTIPLEOF(scrwidth, 16) / 16;
const unsigned int alignedHeight = NEXTMULTIPLEOF(scrheight, 16) / 16;
const dim3 gridDim = dim3(alignedWidth, alignedHeight, 1);
const dim3 blockDim = dim3(16, 16, 1);
const float contrastFactor = (259.0f * (contrast * 256.0f + 255.0f)) / (255.0f * (259.0f - 256.0f * contrast));
tonemap<<<gridDim, blockDim>>>(scrwidth, scrheight, 1.0f / float(samples), brightness, contrastFactor);
return cudaGetLastError();
}
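// A minimal host-side frame sketch, assuming an external ray tracer provides
// the extend/connect stages (those kernels are not part of this file) and that
// all set*() pointers above have already been uploaded; traceExtensionRays and
// traceShadowRays are hypothetical names for that external tracer:
//
//   setCounters(d_counters); setAccumulator(d_accumulator); setStride(pathCount);
//   InitCountersForExtend(pathCount);
//   for (uint pathLength = 0; pathLength < MAX_PATH_LENGTH; ++pathLength)
//   {
//       traceExtensionRays(pathLength);   // hypothetical external stage
//       launchShade(pathCount, pathLength, toEyeSpace);
//       traceShadowRays();                // hypothetical external stage
//       InitCountersSubsequent();
//   }
//   launchFinalize(true, scrwidth, scrheight, samples, brightness, contrast);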
typedef unsigned int uint32_t;
typedef unsigned long long uint64_t;
#if __CUDA_ARCH__ < 350
#define ROTL32(x,n) (((x) << (n % 32)) | ((x) >> (32 - (n % 32))))
#define ROTR32(x,n) (((x) >> (n % 32)) | ((x) << (32 - (n % 32))))
#else
#define ROTL32(x,n) __funnelshift_l((x), (x), (n))
#define ROTR32(x,n) __funnelshift_r((x), (x), (n))
#endif
#define min(a,b) ((a<b) ? a : b)
#define mul_hi(a, b) __umulhi(a, b)
#define clz(a) __clz(a)
#define popcount(a) __popc(a)
#define DEV_INLINE __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ > 8)
#define SHFL(x, y, z) __shfl_sync(0xFFFFFFFF, (x), (y), (z))
#else
#define SHFL(x, y, z) __shfl((x), (y), (z))
#endif
#define PROGPOW_LANES 16
#define PROGPOW_REGS 32
#define PROGPOW_DAG_LOADS 4
#define PROGPOW_CACHE_WORDS 4096
#define PROGPOW_CNT_DAG 64
#define PROGPOW_CNT_MATH 20
typedef struct __align__(16) {uint32_t s[PROGPOW_DAG_LOADS];} dag_t;
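// dag_t is 16-byte aligned so that a single g_dag[offset] access can be
// compiled into one 128-bit vector load of four consecutive DAG words.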
// Inner loop for prog_seed 4222
__device__ __forceinline__ void progPowLoop(const uint32_t loop,
uint32_t mix[PROGPOW_REGS],
const dag_t *g_dag,
const uint32_t c_dag[PROGPOW_CACHE_WORDS],
const bool hack_false)
{
dag_t data_dag;
uint32_t offset, data;
const uint32_t lane_id = threadIdx.x & (PROGPOW_LANES-1);
// global load
offset = SHFL(mix[0], loop%PROGPOW_LANES, PROGPOW_LANES);
offset %= PROGPOW_DAG_ELEMENTS;
offset = offset * PROGPOW_LANES + (lane_id ^ loop) % PROGPOW_LANES;
data_dag = g_dag[offset];
// hack to prevent compiler from reordering LD and usage
if (hack_false) __threadfence_block();
// cache load 0
offset = mix[5] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[13] = (mix[13] * 33) + data;
// random math 0
data = clz(mix[27]) + clz(mix[8]);
mix[6] = (mix[6] ^ data) * 33;
// cache load 1
offset = mix[16] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[7] = (mix[7] ^ data) * 33;
// random math 1
data = mix[25] ^ mix[16];
mix[4] = ROTL32(mix[4], 7) ^ data;
// cache load 2
offset = mix[8] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[1] = ROTR32(mix[1], 12) ^ data;
// random math 2
data = mix[12] & mix[23];
mix[27] = ROTL32(mix[27], 26) ^ data;
// cache load 3
offset = mix[11] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[8] = (mix[8] ^ data) * 33;
// random math 3
data = clz(mix[29]) + clz(mix[11]);
mix[19] = ROTL32(mix[19], 17) ^ data;
// cache load 4
offset = mix[28] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[12] = ROTR32(mix[12], 12) ^ data;
// random math 4
data = mul_hi(mix[22], mix[13]);
mix[28] = (mix[28] * 33) + data;
// cache load 5
offset = mix[9] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[30] = ROTR32(mix[30], 22) ^ data;
// random math 5
data = mix[18] * mix[0];
mix[24] = ROTR32(mix[24], 23) ^ data;
// cache load 6
offset = mix[27] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[17] = ROTL32(mix[17], 11) ^ data;
// random math 6
data = clz(mix[5]) + clz(mix[6]);
mix[10] = ROTL32(mix[10], 16) ^ data;
// cache load 7
offset = mix[18] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[0] = (mix[0] * 33) + data;
// random math 7
data = mix[6] & mix[14];
mix[22] = ROTL32(mix[22], 7) ^ data;
// cache load 8
offset = mix[23] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[9] = (mix[9] * 33) + data;
// random math 8
data = mix[25] * mix[16];
mix[3] = ROTL32(mix[3], 20) ^ data;
// cache load 9
offset = mix[24] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[26] = (mix[26] * 33) + data;
// random math 9
data = mix[23] + mix[20];
mix[15] = (mix[15] ^ data) * 33;
// cache load 10
offset = mix[12] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[16] = (mix[16] * 33) + data;
// random math 10
data = mix[13] * mix[11];
mix[29] = (mix[29] ^ data) * 33;
// cache load 11
offset = mix[13] % PROGPOW_CACHE_WORDS;
data = c_dag[offset];
mix[11] = ROTL32(mix[11], 17) ^ data;
// random math 11
data = mix[14] | mix[11];
mix[14] = (mix[14] * 33) + data;
// random math 12
data = popcount(mix[9]) + popcount(mix[28]);
mix[31] = ROTR32(mix[31], 4) ^ data;
// random math 13
data = mix[12] + mix[6];
mix[23] = (mix[23] ^ data) * 33;
// random math 14
data = mix[7] & mix[0];
mix[2] = (mix[2] * 33) + data;
// random math 15
data = popcount(mix[15]) + popcount(mix[0]);
mix[18] = ROTR32(mix[18], 5) ^ data;
// random math 16
data = popcount(mix[0]) + popcount(mix[21]);
mix[21] = ROTR32(mix[21], 18) ^ data;
// random math 17
data = popcount(mix[7]) + popcount(mix[6]);
mix[25] = (mix[25] * 33) + data;
// random math 18
data = popcount(mix[14]) + popcount(mix[7]);
mix[20] = (mix[20] ^ data) * 33;
// random math 19
data = mul_hi(mix[27], mix[17]);
mix[5] = (mix[5] ^ data) * 33;
// consume global load data
// hack to prevent compiler from reordering LD and usage
if (hack_false) __threadfence_block();
mix[0] = ROTR32(mix[0], 12) ^ data_dag.s[0];
mix[13] = (mix[13] * 33) + data_dag.s[1];
mix[6] = (mix[6] ^ data_dag.s[2]) * 33;
mix[7] = ROTR32(mix[7], 26) ^ data_dag.s[3];
}
#ifndef MAX_SEARCH_RESULTS
#define MAX_SEARCH_RESULTS 4U
#endif
typedef struct {
uint32_t count;
struct {
// One word for gid and 8 for mix hash
uint32_t gid;
uint32_t mix[8];
} result[MAX_SEARCH_RESULTS];
} Search_results;
typedef struct
{
uint32_t uint32s[32 / sizeof(uint32_t)];
} hash32_t;
// Implementation based on:
// https://github.com/mjosaarinen/tiny_sha3/blob/master/sha3.c
__device__ __constant__ const uint32_t keccakf_rndc[24] = {
0x00000001, 0x00008082, 0x0000808a, 0x80008000, 0x0000808b, 0x80000001,
0x80008081, 0x00008009, 0x0000008a, 0x00000088, 0x80008009, 0x8000000a,
0x8000808b, 0x0000008b, 0x00008089, 0x00008003, 0x00008002, 0x00000080,
0x0000800a, 0x8000000a, 0x80008081, 0x00008080, 0x80000001, 0x80008008
};
// Implementation of the permutation Keccakf with width 800.
__device__ __forceinline__ void keccak_f800_round(uint32_t st[25], const int r)
{
const uint32_t keccakf_rotc[24] = {
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44
};
const uint32_t keccakf_piln[24] = {
10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1
};
uint32_t t, bc[5];
// Theta
for (int i = 0; i < 5; i++)
bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15] ^ st[i + 20];
for (int i = 0; i < 5; i++) {
t = bc[(i + 4) % 5] ^ ROTL32(bc[(i + 1) % 5], 1);
for (uint32_t j = 0; j < 25; j += 5)
st[j + i] ^= t;
}
// Rho Pi
t = st[1];
for (int i = 0; i < 24; i++) {
uint32_t j = keccakf_piln[i];
bc[0] = st[j];
st[j] = ROTL32(t, keccakf_rotc[i]);
t = bc[0];
}
// Chi
for (uint32_t j = 0; j < 25; j += 5) {
for (int i = 0; i < 5; i++)
bc[i] = st[j + i];
for (int i = 0; i < 5; i++)
st[j + i] ^= (~bc[(i + 1) % 5]) & bc[(i + 2) % 5];
}
// Iota
st[0] ^= keccakf_rndc[r];
}
__device__ __forceinline__ uint32_t cuda_swab32(const uint32_t x)
{
return __byte_perm(x, x, 0x0123);
}
// Keccak - implemented as a variant of SHAKE
// The width is 800, with a bitrate of 576, a capacity of 224, and no padding
// Only need 64 bits of output for mining
__device__ __noinline__ uint64_t keccak_f800(hash32_t header, uint64_t seed, hash32_t digest)
{
uint32_t st[25];
for (int i = 0; i < 25; i++)
st[i] = 0;
for (int i = 0; i < 8; i++)
st[i] = header.uint32s[i];
st[8] = seed;
st[9] = seed >> 32;
for (int i = 0; i < 8; i++)
st[10+i] = digest.uint32s[i];
for (int r = 0; r < 21; r++) {
keccak_f800_round(st, r);
}
// last round can be simplified due to partial output
keccak_f800_round(st, 21);
// Byte swap so byte 0 of hash is MSB of result
return (uint64_t)cuda_swab32(st[0]) << 32 | cuda_swab32(st[1]);
}
#define fnv1a(h, d) (h = (uint32_t(h) ^ uint32_t(d)) * uint32_t(0x1000193))
typedef struct {
uint32_t z, w, jsr, jcong;
} kiss99_t;
// KISS99 is simple, fast, and passes the TestU01 suite
// https://en.wikipedia.org/wiki/KISS_(algorithm)
// http://www.cse.yorku.ca/~oz/marsaglia-rng.html
__device__ __forceinline__ uint32_t kiss99(kiss99_t &st)
{
st.z = 36969 * (st.z & 65535) + (st.z >> 16);
st.w = 18000 * (st.w & 65535) + (st.w >> 16);
uint32_t MWC = ((st.z << 16) + st.w);
st.jsr ^= (st.jsr << 17);
st.jsr ^= (st.jsr >> 13);
st.jsr ^= (st.jsr << 5);
st.jcong = 69069 * st.jcong + 1234567;
return ((MWC^st.jcong) + st.jsr);
}
__device__ __forceinline__ void fill_mix(uint64_t seed, uint32_t lane_id, uint32_t mix[PROGPOW_REGS])
{
// Use FNV to expand the per-warp seed to per-lane
// Use KISS to expand the per-lane seed to fill mix
uint32_t fnv_hash = 0x811c9dc5;
kiss99_t st;
st.z = fnv1a(fnv_hash, seed);
st.w = fnv1a(fnv_hash, seed >> 32);
st.jsr = fnv1a(fnv_hash, lane_id);
st.jcong = fnv1a(fnv_hash, lane_id);
#pragma unroll
for (int i = 0; i < PROGPOW_REGS; i++)
mix[i] = kiss99(st);
}
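// fill_mix expands the 64-bit per-hash seed into PROGPOW_REGS per-lane
// registers: FNV-1a folds the seed halves and the lane id into the four
// KISS99 state words, and KISS99 then generates the 32 mix words.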
__global__ void
progpow_search(
uint64_t start_nonce,
const hash32_t header,
const uint64_t target,
const dag_t *g_dag,
volatile Search_results* g_output,
bool hack_false
)
{
__shared__ uint32_t c_dag[PROGPOW_CACHE_WORDS];
uint32_t const gid = blockIdx.x * blockDim.x + threadIdx.x;
uint64_t const nonce = start_nonce + gid;
const uint32_t lane_id = threadIdx.x & (PROGPOW_LANES - 1);
// Load the first portion of the DAG into the cache
for (uint32_t word = threadIdx.x*PROGPOW_DAG_LOADS; word < PROGPOW_CACHE_WORDS; word += blockDim.x*PROGPOW_DAG_LOADS)
{
dag_t load = g_dag[word/PROGPOW_DAG_LOADS];
for(int i=0; i<PROGPOW_DAG_LOADS; i++)
c_dag[word + i] = load.s[i];
}
hash32_t digest;
for (int i = 0; i < 8; i++)
digest.uint32s[i] = 0;
// keccak(header..nonce)
uint64_t seed = keccak_f800(header, nonce, digest);
__syncthreads();
#pragma unroll 1
for (uint32_t h = 0; h < PROGPOW_LANES; h++)
{
uint32_t mix[PROGPOW_REGS];
// share the hash's seed across all lanes
uint64_t hash_seed = SHFL(seed, h, PROGPOW_LANES);
// initialize mix for all lanes
fill_mix(hash_seed, lane_id, mix);
#pragma unroll 1
for (uint32_t l = 0; l < PROGPOW_CNT_DAG; l++)
progPowLoop(l, mix, g_dag, c_dag, hack_false);
// Reduce mix data to a per-lane 32-bit digest
uint32_t digest_lane = 0x811c9dc5;
#pragma unroll
for (int i = 0; i < PROGPOW_REGS; i++)
fnv1a(digest_lane, mix[i]);
// Reduce all lanes to a single 256-bit digest
hash32_t digest_temp;
#pragma unroll
for (int i = 0; i < 8; i++)
digest_temp.uint32s[i] = 0x811c9dc5;
for (int i = 0; i < PROGPOW_LANES; i += 8)
#pragma unroll
for (int j = 0; j < 8; j++)
fnv1a(digest_temp.uint32s[j], SHFL(digest_lane, i + j, PROGPOW_LANES));
if (h == lane_id)
digest = digest_temp;
}
// keccak(header .. keccak(header..nonce) .. digest);
if (keccak_f800(header, seed, digest) >= target)
return;
uint32_t index = atomicInc((uint32_t *)&g_output->count, 0xffffffff);
if (index >= MAX_SEARCH_RESULTS)
return;
g_output->result[index].gid = gid;
#pragma unroll
for (int i = 0; i < 8; i++)
g_output->result[index].mix[i] = digest.uint32s[i];
}
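// Minimal host-side launch sketch (illustrative only): PROGPOW_DAG_ELEMENTS is
// expected to be defined at compile time, d_dag/d_results are device buffers
// prepared by the surrounding miner, and the block size should be a multiple
// of PROGPOW_LANES so the lane shuffles stay inside one warp:
//
//   Search_results results;
//   Search_results *d_results;   // cudaMalloc'ed and zeroed before each batch
//   const dim3 block(256), grid((batch_size + block.x - 1) / block.x);
//   progpow_search<<<grid, block>>>(start_nonce, header, target, d_dag, d_results, false);
//   cudaMemcpy(&results, (const void *)d_results, sizeof(Search_results), cudaMemcpyDeviceToHost);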
#include <thrust/partition.h>
#include <logger.h>
using namespace std;
namespace amgx
{
namespace cf_jacobi_solver
{
struct is_coarse
{
__host__ __device__
int operator()(const int &x)
{
return (int) (x == COARSE);
}
};
struct is_fine
{
__host__ __device__
int operator()(const int &x)
{
return (int) (x == FINE);
}
};
struct is_eq_minus_one
{
__host__ __device__
int operator()(const int &x)
{
return (int) (x == -1);
}
};
template <typename ValueTypeA, typename ValueTypeB>
struct jacobi_presmooth_functor
{
ValueTypeB omega;
jacobi_presmooth_functor( ValueTypeB omega ) : omega( omega ) {}
__host__ __device__ ValueTypeB operator()( const ValueTypeB &b, const ValueTypeA &d ) const { return isNotCloseToZero(d) ? omega * b / d : omega * b / epsilon(d); }
};
template <typename ValueTypeA, typename ValueTypeB>
struct jacobi_postsmooth_functor
{
ValueTypeB omega;
jacobi_postsmooth_functor( ValueTypeB omega ) : omega( omega ) {}
template<typename Tuple> __host__ __device__ ValueTypeB operator( )( const Tuple &t ) const
{
ValueTypeB x = thrust::get<0>(t);
ValueTypeA d = thrust::get<1>(t);
ValueTypeB b = thrust::get<2>(t);
ValueTypeB y = thrust::get<3>(t);
// return x + omega * (b - y) / d.
d = isNotCloseToZero(d) ? d : epsilon(d);
d = ValueTypeA( 1 ) / d;
b -= y;
b *= omega;
return b * d + x;
}
};
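// Sketch of how jacobi_postsmooth_functor could be applied with a zip iterator
// (illustrative only; the solver below uses masked kernels instead so that only
// the C or F subset of rows is updated per sweep):
//
//   thrust::transform(
//       thrust::make_zip_iterator(thrust::make_tuple(x.begin(), Dinv.begin(), b.begin(), y.begin())),
//       thrust::make_zip_iterator(thrust::make_tuple(x.end(),   Dinv.end(),   b.end(),   y.end())),
//       x.begin(),
//       jacobi_postsmooth_functor<ValueTypeA, ValueTypeB>(weight));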
template <typename ValueTypeB>
struct add_functor
{
__host__ __device__ ValueTypeB operator()( const ValueTypeB &x, const ValueTypeB &y )const { return x + y; }
};
template<typename T>
__device__ __forceinline__ T fmnaOp (T a, T b, T c)
{
return -(a * b) + c;
}
template<typename T>
__device__ __forceinline__ T mulOp (T a, T b)
{
return a * b;
}
template<typename T>
__device__ __forceinline__ T rcpOp (T a)
{
return 1.0 / (isNotCloseToZero(a) ? a : epsilon(a));
}
template<typename T>
__device__ __forceinline__ T absOp (T a)
{
return fabs(a);
}
// -----------------------------------
// KERNELS
// -----------------------------------
template<typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__
void jacobi_zero_ini_masked_step(const int *row_ids, const int nrows_to_process, const ValueTypeA *Dinv, const ValueTypeB *b, const ValueTypeB relaxation_factor, ValueTypeB *x)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
while (tid < nrows_to_process)
{
int i = row_ids[tid];
ValueTypeB d = Dinv[i];
d = isNotCloseToZero(d) ? d : epsilon(d);
x[i] = relaxation_factor * b[i] / d;
tid += (blockDim.x * gridDim.x);
}
}
template<typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__
void jacobi_masked_step(const int *row_ids, const int nrows_to_process, const ValueTypeA *Dinv, const ValueTypeB *b, const ValueTypeB *y, const ValueTypeB relaxation_factor, ValueTypeB *x)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
while (tid < nrows_to_process)
{
int i = row_ids[tid];
ValueTypeB d = Dinv[i];
d = isNotCloseToZero(d) ? d : epsilon(d);
x[i] += relaxation_factor * (b[i] - y[i]) / d;
tid += (blockDim.x * gridDim.x);
}
}
__global__
void agg_write_agg(const int *agg_map, const int nrows, int *dst)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
while (tid < nrows)
{
dst[agg_map[tid]] = tid;
tid += (blockDim.x * gridDim.x);
}
}
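// agg_write_agg scatters each row index into the slot of its aggregate
// (dst[agg_map[tid]] = tid), so after the launch dst holds one representative
// row per aggregate (the last writer wins); the second launch below inverts
// that map so rows left at -1 can be identified as fine rows.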
//--------------------------------
// Methods
//--------------------------------
// Constructor
template<class T_Config>
CFJacobiSolver_Base<T_Config>::CFJacobiSolver_Base( AMG_Config &cfg, const std::string &cfg_scope) : Solver<T_Config>( cfg, cfg_scope)
{
weight = cfg.AMG_Config::getParameter<double>("relaxation_factor", cfg_scope);
int param_mode = cfg.AMG_Config::getParameter<int>("cf_smoothing_mode", cfg_scope);
switch (param_mode)
{
case 0:
this->mode = CF_CF;
break;
case 1:
this->mode = CF_FC;
break;
case 2:
this->mode = CF_FCF;
break;
case 3:
this->mode = CF_CFC;
break;
default:
this->mode = CF_CF;
}
if (weight == 0)
{
weight = 1.;
amgx_printf("Warning: relaxation_factor is 0, setting weight to 1 instead of estimating the largest eigenvalue in the CF Jacobi smoother\n");
}
}
// Destructor
template<class T_Config>
CFJacobiSolver_Base<T_Config>::~CFJacobiSolver_Base()
{
this->Dinv.resize(0);
}
template<class T_Config>
void
CFJacobiSolver_Base<T_Config>::printSolverParameters() const
{
std::cout << "relaxation_factor= " << this->weight << std::endl;
}
// Solver setup
template<class T_Config>
void
CFJacobiSolver_Base<T_Config>::solver_setup(bool reuse_matrix_structure)
{
Matrix<T_Config> *A_as_matrix = dynamic_cast<Matrix<T_Config>*>(this->m_A);
if (!A_as_matrix)
{
FatalError("CFJacobiSolver only works with explicit matrices", AMGX_ERR_INTERNAL);
}
computeDinv( *A_as_matrix );
if ( A_as_matrix->getBlockFormat() != ROW_MAJOR )
{
FatalError(" CFJacobiSolver only supports row major format", AMGX_ERR_CONFIGURATION);
}
if (A_as_matrix->hasParameter("cf_map")) // classical case
{
IVector *cf_map = A_as_matrix->template getParameterPtr< IVector >("cf_map");
int nrows = A_as_matrix->get_num_rows();
this->num_coarse = thrust::count(cf_map->begin(), cf_map->end(), (int)COARSE);
this->c_rows.resize(this->num_coarse);
this->f_rows.resize(nrows - this->num_coarse);
thrust::counting_iterator<int> zero(0);
thrust::counting_iterator<int> zero_plus_nrows = zero + nrows;
thrust::copy_if(zero, zero_plus_nrows, cf_map->begin(), this->c_rows.begin(), is_coarse());
thrust::copy_if(zero, zero_plus_nrows, cf_map->begin(), this->f_rows.begin(), is_fine());
cudaCheckError();
// partitioning check
/*
{
typedef typename TConfig::template setMemSpace<AMGX_host >::Type TConfig_h;
typedef typename TConfig_h::template setVecPrec<AMGX_vecInt>::Type ivec_value_type_h;
typedef Vector<ivec_value_type_h> IVector_h;
IVector_h cf_map_h = *cf_map;
IVector_h cf_rows = this->cf_rows;
for (int i = 0; i < cf_map_h.size(); i++)
{
if ((cf_map_h[cf_rows[i]] == FINE && i < this->num_coarse) || (cf_map_h[cf_rows[i]] == COARSE && i >= this->num_coarse))
printf("CFJ FAIL at i==%d: cf_rows[i]==%d, cf_map[row]==%d, num_coarse==%d\n", i, cf_rows[i], cf_map_h[cf_rows[i]], this->num_coarse);
}
}
*/
}
else if (A_as_matrix->hasParameter("aggregates_map")) // aggregation case
{
IVector *agg_map = A_as_matrix->template getParameterPtr< IVector >("aggregates_map");
int agg_num = A_as_matrix->template getParameter< int >("aggregates_num");
this->num_coarse = agg_num;
const int threads_per_block = 256;
const int num_blocks = min(AMGX_GRID_MAX_SIZE, (int) (A_as_matrix->get_num_rows() + threads_per_block - 1) / threads_per_block);
int nrows = A_as_matrix->get_num_rows();
this->c_rows.resize(this->num_coarse);
this->f_rows.resize(nrows - this->num_coarse);
IVector tmap(nrows, -1);
// can use thrust permutation operator here.
agg_write_agg <<< num_blocks, threads_per_block>>>(agg_map->raw(), agg_map->size(), this->c_rows.raw());
cudaCheckError();
agg_write_agg <<< num_blocks, threads_per_block>>>(this->c_rows.raw(), this->num_coarse, tmap.raw());
cudaCheckError();
thrust::counting_iterator<int> zero(0);
thrust::counting_iterator<int> zero_plus_nrows = zero + nrows;
thrust::copy_if(zero, zero_plus_nrows, tmap.begin(), this->f_rows.begin(), is_eq_minus_one());
cudaCheckError();
}
else
{
FatalError("No info from AMG level was found for C-F separation, use different smoother or drink 1 beer", AMGX_ERR_BAD_PARAMETERS);
}
}
//
template<class T_Config>
void
CFJacobiSolver_Base<T_Config>::solve_init( VVector &b, VVector &x, bool xIsZero )
{
}
// Solve one iteration
template<class T_Config>
bool
CFJacobiSolver_Base<T_Config>::solve_iteration( VVector &b, VVector &x, bool xIsZero )
{
//bool done = false;
Matrix<T_Config> *A_as_matrix = (Matrix<T_Config> *) this->m_A;
int *smoothing_direction = A_as_matrix->template getParameterPtr<int> ("smoothing_direction");
// smoothing direction == 0 : presmoothing
// smoothing direction == 1 : postsmoothing
SmoothingOrder current_order;
switch (this->mode)
{
case CF_CF:
current_order = (*smoothing_direction == 0) ? CF_CF : CF_FC;
break;
case CF_FC:
current_order = (*smoothing_direction == 1) ? CF_CF : CF_FC;
break;
case CF_FCF:
current_order = (*smoothing_direction == 0) ? CF_FCF : CF_CFC;
break;
case CF_CFC:
current_order = (*smoothing_direction == 1) ? CF_FCF : CF_CFC;
break;
default:
current_order = (*smoothing_direction == 0) ? CF_CF : CF_FC; // defensive default; mode is always one of the four cases above
}
// no multi-gpu for now
ViewType flags = OWNED;
if (xIsZero) { x.dirtybit = 0; }
if (A_as_matrix->get_block_dimx() == 1 && A_as_matrix->get_block_dimy() == 1)
{
if (xIsZero)
{
smooth_with_0_initial_guess_1x1(*A_as_matrix, b, x, current_order, flags);
}
else
{
smooth_1x1(*A_as_matrix, b, x, current_order, flags);
}
}
else
{
FatalError("Unsupported block size for BlockJacobi_Solver", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
return this->converged( b, x );
}
template<class T_Config>
void
CFJacobiSolver_Base<T_Config>::solve_finalize( VVector &b, VVector &x )
{}
template<class T_Config>
void CFJacobiSolver_Base<T_Config>::computeDinv( Matrix<T_Config> &A)
{
Matrix<T_Config> *A_as_matrix = (Matrix<T_Config> *) this->m_A;
ViewType oldView = A.currentView();
A.setView(A_as_matrix->getViewExterior());
if (A.get_block_dimx() == 1 && A.get_block_dimy() == 1)
{
this->computeDinv_1x1(A);
}
A.setView(oldView);
}
// Method to compute the inverse of the diagonal blocks
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void CFJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_1x1(const Matrix_d &A)
{
Matrix_d *A_as_matrix = (Matrix_d *) this->m_A;
// supports both diag
this->Dinv.resize(A.get_num_rows()*A.get_block_dimx()*A.get_block_dimy(), 0.0);
if ( A_as_matrix->hasProps(DIAG) )
{
const int num_values = A_as_matrix->diagOffset() * A_as_matrix->get_block_size();
thrust::copy( A_as_matrix->values.begin() + num_values, A_as_matrix->values.begin() + num_values + A_as_matrix->get_num_rows()*A_as_matrix->get_block_size(), this->Dinv.begin() );
cudaCheckError();
}
else
{
find_diag( *A_as_matrix );
}
}
// Method to compute the inverse of the diagonal blocks
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void CFJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeDinv_1x1(const Matrix_h &A)
{
// Do nothing
}
// Finding diag on device, CSR format
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void CFJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::find_diag( const Matrix_h &A )
{
//for each row
for (int i = 0; i < A.get_num_rows(); i++)
{
//for each column
for (int j = A.row_offsets[i]; j < A.row_offsets[i + 1]; j++)
{
if (A.col_indices[j] == i)
{
this->Dinv[i] = A.values[j];
break;
}
if (j == A.row_offsets[i + 1] - 1)
{
FatalError("Could not find a diagonal value", AMGX_ERR_BAD_PARAMETERS);
}
}
}
}
// Finding diag on device, CSR format
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void CFJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::find_diag( const Matrix_d &A )
{
AMGX_CPU_PROFILER( "JacobiSolver::find_diag " );
const size_t THREADS_PER_BLOCK = 128;
const size_t NUM_BLOCKS = min(AMGX_GRID_MAX_SIZE, (int)ceil((ValueTypeB)A.get_num_rows() / (ValueTypeB)THREADS_PER_BLOCK));
find_diag_kernel_indexed_dia <<< (unsigned int)NUM_BLOCKS, (unsigned int)THREADS_PER_BLOCK>>>(
A.get_num_rows(),
A.diag.raw(),
A.values.raw(),
this->Dinv.raw());
cudaCheckError();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void CFJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1(Matrix_h &A, VVector &b, VVector &x, SmoothingOrder order, ViewType separation_flags)
{
VVector newx(x.size());
//for each row
for (int i = 0; i < A.get_num_rows(); i++)
{
ValueTypeB Axi = 0.0;
ValueTypeB d = A.values[A.diag[i]];
ValueTypeB mydiaginv = this->weight / (isNotCloseToZero(d) ? d : epsilon(d) );
//for each column
for (int j = A.row_offsets[i]; j < A.row_offsets[i + 1]; j++)
{
Axi += A.values[j] * x[A.col_indices[j]];
}
newx[i] = x[i] + (b[i] - Axi) * mydiaginv ;
}
x.swap(newx);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void CFJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1(Matrix_d &A, VVector &b, VVector &x, SmoothingOrder order, ViewType separation_flags)
{
AMGX_CPU_PROFILER( "JacobiSolver::smooth_1x1 " );
if (this->y.size() != b.size())
{
this->y.resize(b.size());
this->y.tag = this->tag * 100 + 3;
this->y.set_block_dimx(b.get_block_dimx());
this->y.set_block_dimy(b.get_block_dimy());
}
int num_rows = A.get_num_rows();
int offset = 0;
A.getOffsetAndSizeForView(separation_flags, &offset, &num_rows);
const int threads_per_block = 256;
const int num_blocks = min(AMGX_GRID_MAX_SIZE, (int) (A.get_num_rows() + threads_per_block - 1) / threads_per_block);
if (order == CF_CF || order == CF_FC)
{
IVector &rows_first = (order == CF_CF) ? this->c_rows : this->f_rows;
IVector &rows_second = (order == CF_CF) ? this->f_rows : this->c_rows;
const int threads_per_block = 256;
const int num_blocks = min(AMGX_GRID_MAX_SIZE, (int) (A.get_num_rows() + threads_per_block - 1) / threads_per_block);
// Using thrust::transform with permutation iterators would issue two separate permuted reads (one for src, one for dst), so a simple masked kernel is used here instead
multiply_masked( A, x, this->y, rows_first, separation_flags );
jacobi_masked_step<IndexType, ValueTypeA, ValueTypeB> <<< num_blocks, threads_per_block>>>(
rows_first.raw(),
rows_first.size(),
this->Dinv.raw(),
b.raw(),
this->y.raw(),
this->weight,
x.raw());
cudaCheckError();
multiply_masked( A, x, this->y, rows_second, separation_flags );
jacobi_masked_step<IndexType, ValueTypeA, ValueTypeB> <<< num_blocks, threads_per_block>>>(
rows_second.raw(),
rows_second.size(),
this->Dinv.raw(),
b.raw(),
this->y.raw(),
this->weight,
x.raw());
cudaCheckError();
}
else
{
FatalError("CF_FCF and CF_CFC is not yet done", AMGX_ERR_NOT_IMPLEMENTED);
}
}
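// Descriptive note: for CF_CF the coarse rows are relaxed first using the
// current x, then the fine rows are relaxed against the freshly updated
// coarse values (and vice versa for CF_FC), i.e. Jacobi within each class but
// a Gauss-Seidel-like ordering between the two classes.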
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void CFJacobiSolver<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::smooth_with_0_initial_guess_1x1( Matrix_h &A, VVector &b, VVector &x, SmoothingOrder order, ViewType separation_flags)
{
//for each row
for (int i = 0; i < A.get_num_rows(); i++)
{
ValueTypeB d = A.values[A.diag[i]];
ValueTypeB mydiag = this->weight / (isNotCloseToZero(d) ? d : epsilon(d));
x[i] = b[i] * mydiag;
}
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void CFJacobiSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_with_0_initial_guess_1x1(Matrix_d &A, VVector &b, VVector &x, SmoothingOrder order, ViewType separation_flags)
{
AMGX_CPU_PROFILER( "JacobiSolver::smooth_with_0_initial_guess_1x1 " );
int num_rows = A.get_num_rows();
int offset = 0;
A.getOffsetAndSizeForView(separation_flags, &offset, &num_rows);
const int threads_per_block = 256;
const int num_blocks = min(AMGX_GRID_MAX_SIZE, (int) (A.get_num_rows() + threads_per_block - 1) / threads_per_block);
if (this->y.size() != b.size())
{
this->y.resize(b.size());
this->y.tag = this->tag * 100 + 3;
this->y.set_block_dimx(b.get_block_dimx());
this->y.set_block_dimy(b.get_block_dimy());
}
if (order == CF_CF || order == CF_FC)
{
// IVector& rows_first = (order == CF_CF) ? this->c_rows : this->f_rows;
IVector &rows_second = (order == CF_CF) ? this->f_rows : this->c_rows;
const int threads_per_block = 256;
const int num_blocks = min(AMGX_GRID_MAX_SIZE, (int) (A.get_num_rows() + threads_per_block - 1) / threads_per_block);
/*jacobi_zero_ini_masked_step<IndexType, ValueTypeA, ValueTypeB><<<num_blocks,threads_per_block>>>(
rows_first.raw(),
rows_first.size(),
this->Dinv.raw(),
b.raw(),
this->weight,
x.raw());*/
// Initializing the whole vector is barely more expensive than initializing only the C or F points
thrust::transform(b.begin( ),
b.begin( ) + A.get_num_rows(),
this->Dinv.begin( ),
x.begin( ),
jacobi_presmooth_functor<ValueTypeA, ValueTypeB>( this->weight ));
cudaCheckError();
multiply_masked( A, x, this->y, rows_second, separation_flags );
cudaCheckError();
jacobi_masked_step<IndexType, ValueTypeA, ValueTypeB> <<< num_blocks, threads_per_block>>>(
rows_second.raw(),
rows_second.size(),
this->Dinv.raw(),
b.raw(),
this->y.raw(),
this->weight,
x.raw());
cudaCheckError();
}
else
{
FatalError("CF_FCF and CF_CFC is not yet done", AMGX_ERR_NOT_IMPLEMENTED);
}
cudaCheckError();
}
/****************************************
 * Explicit instantiations
***************************************/
#define AMGX_CASE_LINE(CASE) template class CFJacobiSolver_Base<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class CFJacobiSolver<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // namespace block_jacobi
} // namespace amgx
#ifdef USE_CUDA_EXP

#include <LightGBM/cuda/cuda_algorithms.hpp>
#include "cuda_single_gpu_tree_learner.hpp"
#include <algorithm>
namespace LightGBM {
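// Accumulates per-leaf gradient and hessian sums. Each block first zeroes a shared-memory
// copy of the per-leaf sums, accumulates its data points into it with block-local atomics,
// and finally adds its partial sums into the global buffers with system-wide atomics.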
__global__ void ReduceLeafStatKernel_SharedMemory(
const score_t* gradients,
const score_t* hessians,
const int num_leaves,
const data_size_t num_data,
const int* data_index_to_leaf_index,
double* leaf_grad_stat_buffer,
double* leaf_hess_stat_buffer) {
extern __shared__ double shared_mem[];
double* shared_grad_sum = shared_mem;
double* shared_hess_sum = shared_mem + num_leaves;
const data_size_t data_index = static_cast<data_size_t>(threadIdx.x + blockIdx.x * blockDim.x);
for (int leaf_index = static_cast<int>(threadIdx.x); leaf_index < num_leaves; leaf_index += static_cast<int>(blockDim.x)) {
shared_grad_sum[leaf_index] = 0.0f;
shared_hess_sum[leaf_index] = 0.0f;
}
__syncthreads();
if (data_index < num_data) {
const int leaf_index = data_index_to_leaf_index[data_index];
atomicAdd_block(shared_grad_sum + leaf_index, gradients[data_index]);
atomicAdd_block(shared_hess_sum + leaf_index, hessians[data_index]);
}
__syncthreads();
for (int leaf_index = static_cast<int>(threadIdx.x); leaf_index < num_leaves; leaf_index += static_cast<int>(blockDim.x)) {
atomicAdd_system(leaf_grad_stat_buffer + leaf_index, shared_grad_sum[leaf_index]);
atomicAdd_system(leaf_hess_stat_buffer + leaf_index, shared_hess_sum[leaf_index]);
}
}
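// Global-memory variant used when num_leaves is too large for shared memory: each block
// accumulates into its own slice of the stat buffers (offset by (blockIdx.x + 1) * num_leaves)
// and then adds that slice into the first num_leaves entries, which hold the final sums.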
__global__ void ReduceLeafStatKernel_GlobalMemory(
const score_t* gradients,
const score_t* hessians,
const int num_leaves,
const data_size_t num_data,
const int* data_index_to_leaf_index,
double* leaf_grad_stat_buffer,
double* leaf_hess_stat_buffer) {
const size_t offset = static_cast<size_t>(num_leaves) * (blockIdx.x + 1);
double* grad_sum = leaf_grad_stat_buffer + offset;
double* hess_sum = leaf_hess_stat_buffer + offset;
const data_size_t data_index = static_cast<data_size_t>(threadIdx.x + blockIdx.x * blockDim.x);
for (int leaf_index = static_cast<int>(threadIdx.x); leaf_index < num_leaves; leaf_index += static_cast<int>(blockDim.x)) {
grad_sum[leaf_index] = 0.0f;
hess_sum[leaf_index] = 0.0f;
}
__syncthreads();
if (data_index < num_data) {
const int leaf_index = data_index_to_leaf_index[data_index];
atomicAdd_block(grad_sum + leaf_index, gradients[data_index]);
atomicAdd_block(hess_sum + leaf_index, hessians[data_index]);
}
__syncthreads();
for (int leaf_index = static_cast<int>(threadIdx.x); leaf_index < num_leaves; leaf_index += static_cast<int>(blockDim.x)) {
atomicAdd_system(leaf_grad_stat_buffer + leaf_index, grad_sum[leaf_index]);
atomicAdd_system(leaf_hess_stat_buffer + leaf_index, hess_sum[leaf_index]);
}
}
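// Recomputes each leaf's output from the aggregated gradient/hessian sums (one thread per leaf),
// optionally applying path smoothing via the parent's statistics, then applies shrinkage and
// blends the result with the old leaf value using refit_decay_rate.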
template <bool USE_L1, bool USE_SMOOTHING>
__global__ void CalcRefitLeafOutputKernel(
const int num_leaves,
const double* leaf_grad_stat_buffer,
const double* leaf_hess_stat_buffer,
const data_size_t* num_data_in_leaf,
const int* leaf_parent,
const int* left_child,
const int* right_child,
const double lambda_l1,
const double lambda_l2,
const double path_smooth,
const double shrinkage_rate,
const double refit_decay_rate,
double* leaf_value) {
const int leaf_index = static_cast<int>(threadIdx.x + blockIdx.x * blockDim.x);
if (leaf_index < num_leaves) {
const double sum_gradients = leaf_grad_stat_buffer[leaf_index];
const double sum_hessians = leaf_hess_stat_buffer[leaf_index];
const data_size_t num_data = num_data_in_leaf[leaf_index];
const double old_leaf_value = leaf_value[leaf_index];
double new_leaf_value = 0.0f;
if (!USE_SMOOTHING) {
      new_leaf_value = CUDALeafSplits::CalculateSplittedLeafOutput<USE_L1, false>(sum_gradients, sum_hessians, lambda_l1, lambda_l2, 0.0f, 0, 0.0f);
} else {
const int parent = leaf_parent[leaf_index];
if (parent >= 0) {
        const int sibling = left_child[parent] == leaf_index ? right_child[parent] : left_child[parent];
        // Reconstruct the parent's statistics as the sum of this leaf and its sibling.
        const double sum_gradients_of_parent = sum_gradients + leaf_grad_stat_buffer[sibling];
        const double sum_hessians_of_parent = sum_hessians + leaf_hess_stat_buffer[sibling];
        const data_size_t num_data_in_parent = num_data + num_data_in_leaf[sibling];
const double parent_output =
          CUDALeafSplits::CalculateSplittedLeafOutput<USE_L1, true>(
sum_gradients_of_parent, sum_hessians_of_parent, lambda_l1, lambda_l2, 0.0f, 0, 0.0f);
        new_leaf_value = CUDALeafSplits::CalculateSplittedLeafOutput<USE_L1, true>(
sum_gradients, sum_hessians, lambda_l1, lambda_l2, path_smooth, num_data_in_parent, parent_output);
} else {
        new_leaf_value = CUDALeafSplits::CalculateSplittedLeafOutput<USE_L1, false>(sum_gradients, sum_hessians, lambda_l1, lambda_l2, 0.0f, 0, 0.0f);
}
}
if (isnan(new_leaf_value)) {
new_leaf_value = 0.0f;
} else {
new_leaf_value *= shrinkage_rate;
}
leaf_value[leaf_index] = refit_decay_rate * old_leaf_value + (1.0f - refit_decay_rate) * new_leaf_value;
}
}
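// Launches the per-leaf stat reduction (shared-memory kernel for <= 2048 leaves, global-memory
// kernel otherwise), followed by the refit-output kernel dispatched on the L1 and path-smoothing
// template parameters.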
void CUDASingleGPUTreeLearner::LaunchReduceLeafStatKernel(
const score_t* gradients, const score_t* hessians, const data_size_t* num_data_in_leaf,
const int* leaf_parent, const int* left_child, const int* right_child, const int num_leaves,
const data_size_t num_data, double* cuda_leaf_value, const double shrinkage_rate) const {
int num_block = (num_data + CUDA_SINGLE_GPU_TREE_LEARNER_BLOCK_SIZE - 1) / CUDA_SINGLE_GPU_TREE_LEARNER_BLOCK_SIZE;
if (num_leaves <= 2048) {
ReduceLeafStatKernel_SharedMemory<<<num_block, CUDA_SINGLE_GPU_TREE_LEARNER_BLOCK_SIZE, 2 * num_leaves * sizeof(double)>>>(
gradients, hessians, num_leaves, num_data, cuda_data_partition_->cuda_data_index_to_leaf_index(),
cuda_leaf_gradient_stat_buffer_, cuda_leaf_hessian_stat_buffer_);
} else {
ReduceLeafStatKernel_GlobalMemory<<<num_block, CUDA_SINGLE_GPU_TREE_LEARNER_BLOCK_SIZE>>>(
gradients, hessians, num_leaves, num_data, cuda_data_partition_->cuda_data_index_to_leaf_index(),
cuda_leaf_gradient_stat_buffer_, cuda_leaf_hessian_stat_buffer_);
}
const bool use_l1 = config_->lambda_l1 > 0.0f;
const bool use_smoothing = config_->path_smooth > 0.0f;
num_block = (num_leaves + CUDA_SINGLE_GPU_TREE_LEARNER_BLOCK_SIZE - 1) / CUDA_SINGLE_GPU_TREE_LEARNER_BLOCK_SIZE;
#define CalcRefitLeafOutputKernel_ARGS \
num_leaves, cuda_leaf_gradient_stat_buffer_, cuda_leaf_hessian_stat_buffer_, num_data_in_leaf, \
leaf_parent, left_child, right_child, \
config_->lambda_l1, config_->lambda_l2, config_->path_smooth, \
shrinkage_rate, config_->refit_decay_rate, cuda_leaf_value
if (!use_l1) {
if (!use_smoothing) {
CalcRefitLeafOutputKernel<false, false>
<<<num_block, CUDA_SINGLE_GPU_TREE_LEARNER_BLOCK_SIZE>>>(CalcRefitLeafOutputKernel_ARGS);
} else {
CalcRefitLeafOutputKernel<false, true>
<<<num_block, CUDA_SINGLE_GPU_TREE_LEARNER_BLOCK_SIZE>>>(CalcRefitLeafOutputKernel_ARGS);
}
} else {
if (!use_smoothing) {
CalcRefitLeafOutputKernel<true, false>
<<<num_block, CUDA_SINGLE_GPU_TREE_LEARNER_BLOCK_SIZE>>>(CalcRefitLeafOutputKernel_ARGS);
} else {
CalcRefitLeafOutputKernel<true, true>
<<<num_block, CUDA_SINGLE_GPU_TREE_LEARNER_BLOCK_SIZE>>>(CalcRefitLeafOutputKernel_ARGS);
}
}
}
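// Computes, per block, the number of 32-bit words needed to hold the largest categorical
// threshold seen by that block (val / 32 + 1), reduced to a block maximum via shuffle reduction.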
template <typename T, bool IS_INNER>
__global__ void CalcBitsetLenKernel(const CUDASplitInfo* best_split_info, size_t* out_len_buffer) {
__shared__ size_t shared_mem_buffer[32];
const T* vals = nullptr;
if (IS_INNER) {
vals = reinterpret_cast<const T*>(best_split_info->cat_threshold);
} else {
vals = reinterpret_cast<const T*>(best_split_info->cat_threshold_real);
}
const int i = static_cast<int>(threadIdx.x + blockIdx.x * blockDim.x);
size_t len = 0;
if (i < best_split_info->num_cat_threshold) {
const T val = vals[i];
len = (val / 32) + 1;
}
const size_t block_max_len = ShuffleReduceMax<size_t>(len, shared_mem_buffer, blockDim.x);
if (threadIdx.x == 0) {
out_len_buffer[blockIdx.x] = block_max_len;
}
}
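// Reduces the per-block word counts from CalcBitsetLenKernel to a single maximum in
// out_len_buffer[0], which becomes the bitset length.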
__global__ void ReduceBlockMaxLen(size_t* out_len_buffer, const int num_blocks) {
__shared__ size_t shared_mem_buffer[32];
size_t max_len = 0;
for (int i = static_cast<int>(threadIdx.x); i < num_blocks; i += static_cast<int>(blockDim.x)) {
max_len = max(out_len_buffer[i], max_len);
}
const size_t all_max_len = ShuffleReduceMax<size_t>(max_len, shared_mem_buffer, blockDim.x);
if (threadIdx.x == 0) {
    out_len_buffer[0] = all_max_len;
}
}
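// Sets one bit per categorical threshold value in the output bitset (word index val / 32,
// bit index val % 32). A minimal host-side sketch of how such a bitset could be queried,
// assuming the same layout (illustrative only, not part of this file's API):
//   bool Contains(const uint32_t* bitset, uint32_t val) {
//     return (bitset[val / 32] >> (val % 32)) & 1u;
//   }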
template <typename T, bool IS_INNER>
__global__ void CUDAConstructBitsetKernel(const CUDASplitInfo* best_split_info, uint32_t* out, size_t cuda_bitset_len) {
const T* vals = nullptr;
if (IS_INNER) {
vals = reinterpret_cast<const T*>(best_split_info->cat_threshold);
} else {
vals = reinterpret_cast<const T*>(best_split_info->cat_threshold_real);
}
const int i = static_cast<int>(threadIdx.x + blockIdx.x * blockDim.x);
if (i < best_split_info->num_cat_threshold) {
const T val = vals[i];
    // atomicAdd can be used instead of atomicOr here because each bit is set at most once.
atomicAdd_system(out + (val / 32), (0x1 << (val % 32)));
}
}
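// Translates the split's inner categorical bin thresholds into real feature values using the
// per-feature bin-to-value lookup table (offset by the feature's entry in categorical_bin_offsets).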
__global__ void SetRealThresholdKernel(
const CUDASplitInfo* best_split_info,
const int* categorical_bin_to_value,
const int* categorical_bin_offsets) {
const int num_cat_threshold = best_split_info->num_cat_threshold;
const int* categorical_bin_to_value_ptr = categorical_bin_to_value + categorical_bin_offsets[best_split_info->inner_feature_index];
int* cat_threshold_real = best_split_info->cat_threshold_real;
const uint32_t* cat_threshold = best_split_info->cat_threshold;
const int index = static_cast<int>(threadIdx.x + blockIdx.x * blockDim.x);
if (index < num_cat_threshold) {
cat_threshold_real[index] = categorical_bin_to_value_ptr[cat_threshold[index]];
}
}
template <typename T, bool IS_INNER>
void CUDAConstructBitset(const CUDASplitInfo* best_split_info, const int num_cat_threshold, uint32_t* out, size_t bitset_len) {
const int num_blocks = (num_cat_threshold + CUDA_SINGLE_GPU_TREE_LEARNER_BLOCK_SIZE - 1) / CUDA_SINGLE_GPU_TREE_LEARNER_BLOCK_SIZE;
// clear the bitset vector first
SetCUDAMemory<uint32_t>(out, 0, bitset_len, __FILE__, __LINE__);
CUDAConstructBitsetKernel<T, IS_INNER><<<num_blocks, CUDA_SINGLE_GPU_TREE_LEARNER_BLOCK_SIZE>>>(best_split_info, out, bitset_len);
}
template <typename T, bool IS_INNER>
size_t CUDABitsetLen(const CUDASplitInfo* best_split_info, const int num_cat_threshold, size_t* out_len_buffer) {
const int num_blocks = (num_cat_threshold + CUDA_SINGLE_GPU_TREE_LEARNER_BLOCK_SIZE - 1) / CUDA_SINGLE_GPU_TREE_LEARNER_BLOCK_SIZE;
CalcBitsetLenKernel<T, IS_INNER><<<num_blocks, CUDA_SINGLE_GPU_TREE_LEARNER_BLOCK_SIZE>>>(best_split_info, out_len_buffer);
ReduceBlockMaxLen<<<1, CUDA_SINGLE_GPU_TREE_LEARNER_BLOCK_SIZE>>>(out_len_buffer, num_blocks);
size_t host_max_len = 0;
CopyFromCUDADeviceToHost<size_t>(&host_max_len, out_len_buffer, 1, __FILE__, __LINE__);
return host_max_len;
}
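// Builds both bitsets for a categorical split: first converts inner bin thresholds to real values,
// then computes the required bitset lengths and constructs the inner-bin and real-value bitsets.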
void CUDASingleGPUTreeLearner::LaunchConstructBitsetForCategoricalSplitKernel(
const CUDASplitInfo* best_split_info) {
const int num_blocks = (num_cat_threshold_ + CUDA_SINGLE_GPU_TREE_LEARNER_BLOCK_SIZE - 1) / CUDA_SINGLE_GPU_TREE_LEARNER_BLOCK_SIZE;
SetRealThresholdKernel<<<num_blocks, CUDA_SINGLE_GPU_TREE_LEARNER_BLOCK_SIZE>>>
(best_split_info, cuda_categorical_bin_to_value_, cuda_categorical_bin_offsets_);
cuda_bitset_inner_len_ = CUDABitsetLen<uint32_t, true>(best_split_info, num_cat_threshold_, cuda_block_bitset_len_buffer_);
CUDAConstructBitset<uint32_t, true>(best_split_info, num_cat_threshold_, cuda_bitset_inner_, cuda_bitset_inner_len_);
cuda_bitset_len_ = CUDABitsetLen<int, false>(best_split_info, num_cat_threshold_, cuda_block_bitset_len_buffer_);
CUDAConstructBitset<int, false>(best_split_info, num_cat_threshold_, cuda_bitset_, cuda_bitset_len_);
}
} // namespace LightGBM
#endif // USE_CUDA_EXP